Example #1
def test_flow_run_view_from_returns_instance(patch_post, from_method):
    patch_post({"data": {"flow_run": [FLOW_RUN_DATA_1]}})

    if from_method == "flow_run_id":
        flow_run = FlowRunView.from_flow_run_id("id-1",
                                                load_static_tasks=False)
    elif from_method == "flow_run_data":
        # Note the post patch will not be used since there is no query here
        flow_run = FlowRunView._from_flow_run_data(FLOW_RUN_DATA_1)

    assert flow_run.flow_run_id == "id-1"
    assert flow_run.name == "name-1"
    assert flow_run.flow_id == "flow_id-1"
    assert flow_run.parameters == {"param": "value"}
    assert flow_run.context == {"foo": "bar"}
    assert flow_run.labels == ["label"]
    assert isinstance(flow_run.run_config, UniversalRun)
    assert isinstance(flow_run.updated_at, pendulum.DateTime)
    # This state is deserialized at initialization
    assert flow_run.state == Success(message="state-1")
    # Assert that past states are in timestamp sorted order and deserialized
    assert flow_run.states[0].is_submitted()
    assert flow_run.states[1].is_running()
    for state in flow_run.states:
        assert isinstance(state.timestamp, pendulum.DateTime)
        assert state.message == "past-state"
    # There are no cached tasks
    assert flow_run._cached_task_runs == {}
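
The patch_post fixture used above is defined elsewhere in the suite. A minimal sketch of what it could look like, assuming it simply patches Client.post with a canned payload the same way later examples do by hand:

import pytest
from unittest.mock import MagicMock


@pytest.fixture
def patch_post(monkeypatch):
    # Return a helper that patches Client.post to answer every GraphQL call
    # with the given payload and hands back the mock for later inspection.
    def patch(response):
        post = MagicMock(return_value=response)
        monkeypatch.setattr("prefect.client.client.Client.post", post)
        return post

    return patch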
Example #2
def test_watch_flow_run_timeout(monkeypatch):
    flow_run = FlowRunView._from_flow_run_data(FLOW_RUN_DATA_1)
    flow_run.state = Running()  # Not finished
    flow_run.get_latest = MagicMock(return_value=flow_run)
    flow_run.get_logs = MagicMock()

    MockView = MagicMock()
    MockView.from_flow_run_id.return_value = flow_run

    monkeypatch.setattr("prefect.backend.flow_run.FlowRunView", MockView)

    # Mock sleep so that we do not have a slow test
    monkeypatch.setattr("prefect.backend.flow_run.time.sleep", MagicMock())

    with pytest.raises(RuntimeError,
                       match="timed out after 12 hours of waiting"):
        for log in watch_flow_run("id"):
            pass
Example #3
def test_flow_run_view_get_logs(monkeypatch):
    post = MagicMock(return_value={"data": {"flow_run": [FLOW_RUN_DATA_1]}})
    monkeypatch.setattr("prefect.client.client.Client.post", post)

    flow_run_view = FlowRunView._from_flow_run_data(FLOW_RUN_DATA_1)

    flow_run_view.get_logs()

    query = post.call_args[1]["params"]["query"]

    assert ('flow_run(where: { id: { _eq: "id-1" } })'
            in query), "Queries for the correct flow run"

    assert ("logs(order_by: { timestamp: asc }"
            in query), "Retrieves logs, orders ascending"
    assert ('where: { _and: [{ timestamp: { _lte: "%s" } }, {}] }' %
            flow_run_view.updated_at.isoformat() in query), (
                "Where is less than the last time the flow run was updated\n" +
                query)
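
Pieced together from the fragments asserted above, the GraphQL query issued by get_logs presumably has roughly the shape sketched below; only the flow_run filter, the ascending ordering, and the upper timestamp bound are actually checked by the test, and the selected log fields are an assumption.

# Hypothetical assembled shape of the logs query; "<updated_at>" stands in
# for flow_run_view.updated_at.isoformat().
EXPECTED_LOGS_QUERY_SHAPE = """
query {
    flow_run(where: { id: { _eq: "id-1" } }) {
        logs(
            order_by: { timestamp: asc },
            where: { _and: [{ timestamp: { _lte: "<updated_at>" } }, {}] }
        ) {
            timestamp
            message
            level
        }
    }
}
"""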
Example #4
def test_watch_flow_run_default_timeout(monkeypatch):
    # Test the default behavior, which sets the timeout to 12 hours
    # when the `max_duration` kwarg is not provided
    flow_run = FlowRunView._from_flow_run_data(FLOW_RUN_DATA_1)
    flow_run.state = Running()  # Not finished
    flow_run.get_latest = MagicMock(return_value=flow_run)
    flow_run.get_logs = MagicMock()

    MockView = MagicMock()
    MockView.from_flow_run_id.return_value = flow_run

    monkeypatch.setattr("prefect.backend.flow_run.FlowRunView", MockView)

    # Mock sleep so that we do not have a slow test
    monkeypatch.setattr("prefect.backend.flow_run.time.sleep", MagicMock())

    with pytest.raises(RuntimeError,
                       match="timed out after 12.0 hours of waiting"):
        for log in watch_flow_run("id"):
            pass
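
Outside the test harness the same ceiling is under the caller's control. A hedged usage sketch, assuming max_duration accepts a timedelta as the 12-hour default described above suggests (the flow run id is a placeholder):

from datetime import timedelta

from prefect.backend.flow_run import watch_flow_run

# Poll the run for at most one hour instead of the default 12 hours;
# each yielded entry carries a message and a stdlib logging level.
for log in watch_flow_run("my-flow-run-id", max_duration=timedelta(hours=1)):
    print(log.message)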
Example #5
def test_flow_run_view_get_logs_start_and_end_times(monkeypatch):
    post = MagicMock(return_value={"data": {"flow_run": [FLOW_RUN_DATA_1]}})
    monkeypatch.setattr("prefect.client.client.Client.post", post)

    flow_run_view = FlowRunView._from_flow_run_data(FLOW_RUN_DATA_1)

    start = pendulum.now()
    end = pendulum.now()

    flow_run_view.get_logs(start_time=start, end_time=end)

    query = post.call_args[1]["params"]["query"]

    assert 'flow_run(where: { id: { _eq: "id-1" } })' in query
    assert "logs(order_by: { timestamp: asc }" in query

    # The where clause should include both the start and end time bounds
    assert (
        'where: { _and: [{ timestamp: { _lte: "%s" } }, { timestamp: { _gt: "%s" } }]'
        % (end.isoformat(), start.isoformat())
        in query), ("Where includes start and end time bounds\n" + query)
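
As a usage sketch of the same API outside the mocked client (the flow run id is a placeholder), bounding the log query to the last hour:

import pendulum

from prefect.backend.flow_run import FlowRunView

# Fetch only the logs written in the last hour, mirroring the
# start_time/end_time bounds asserted above.
view = FlowRunView.from_flow_run_id("my-flow-run-id")
recent_logs = view.get_logs(
    start_time=pendulum.now().subtract(hours=1),
    end_time=pendulum.now(),
)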
Example #6
def test_flow_run_view_handles_null_run_config():
    flow_run_data = FLOW_RUN_DATA_1.copy()
    flow_run_data["run_config"] = None
    flow_run_view = FlowRunView._from_flow_run_data(flow_run_data)
    assert flow_run_view.run_config is None
Example #7
def test_watch_flow_run(monkeypatch):
    flow_run = FlowRunView._from_flow_run_data(FLOW_RUN_DATA_1)
    flow_run.state = Scheduled()  # Not running
    flow_run.states = []
    flow_run.get_latest = MagicMock(return_value=flow_run)
    flow_run.get_logs = MagicMock()

    MockView = MagicMock()
    MockView.from_flow_run_id.return_value = flow_run

    monkeypatch.setattr("prefect.backend.flow_run.FlowRunView", MockView)
    monkeypatch.setattr(
        "prefect.backend.flow_run.check_for_compatible_agents",
        MagicMock(return_value="Helpful agent message."),
    )

    # Mock sleep so that we do not have a slow test
    monkeypatch.setattr("prefect.backend.flow_run.time.sleep", MagicMock())

    for i, log in enumerate(watch_flow_run("id")):
        # Assert that we get the agent warning a couple of times, then update the state
        if i == 0:
            assert log.message == (
                "It has been 15 seconds and your flow run has not been submitted by an agent. "
                "Helpful agent message.")
            assert log.level == logging.WARNING

        elif i == 1:
            assert log.message == (
                "It has been 50 seconds and your flow run has not been submitted by an agent. "
                "Helpful agent message.")

            # Mark the flow run as finished and give it a few past states to log
            # If this test times out, we did not reach this log
            flow_run.state = Success()
            scheduled = Scheduled("My message")
            scheduled.timestamp = pendulum.now()
            running = Running("Another message")
            running.timestamp = pendulum.now().add(seconds=10)

            # Provide intentionally out-of-order states to prove they get sorted
            flow_run.states = [running, scheduled]

            # Add a log between the states and a log at the end
            flow_run.get_logs = MagicMock(return_value=[
                FlowRunLog(
                    timestamp=pendulum.now().add(seconds=5),
                    message="Foo",
                    level=logging.DEBUG,
                ),
                FlowRunLog(
                    timestamp=pendulum.now().add(seconds=15),
                    message="Bar",
                    level=logging.ERROR,
                ),
            ])

        elif i == 2:
            assert log.message == "Entered state <Scheduled>: My message"
            assert log.level == logging.INFO
        elif i == 3:
            assert log.message == "Foo"
            assert log.level == logging.DEBUG
        elif i == 4:
            assert log.message == "Entered state <Running>: Another message"
            assert log.level == logging.INFO
        elif i == 5:
            assert log.message == "Bar"
            assert log.level == logging.ERROR

    assert i == 5  # Assert we saw all of the expected logs
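
The FlowRunLog entries yielded above carry a timestamp, a message, and a stdlib logging level, so a consumer can forward them straight into a regular logger. A short sketch (the flow run id is a placeholder):

import logging

from prefect.backend.flow_run import watch_flow_run

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger("flow-run-watcher")

# Re-emit each yielded FlowRunLog through the standard logging machinery.
for log in watch_flow_run("my-flow-run-id"):
    logger.log(log.level, log.message)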
Example #8
def test_flow_run_view_from_flow_run_data_fills_empty_state_with_pending():
    flow_run = FlowRunView._from_flow_run_data(FLOW_RUN_DATA_NULL_STATE)
    assert flow_run.state.is_pending()
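
A plausible sketch of the FLOW_RUN_DATA_NULL_STATE fixture, assuming it is simply the first payload with its serialized state nulled out (the serialized_state key name is an assumption; it does not appear in the excerpt):

# Hypothetical fixture: the same payload as FLOW_RUN_DATA_1 but with no
# state attached, which FlowRunView is expected to fall back to Pending for.
FLOW_RUN_DATA_NULL_STATE = {**FLOW_RUN_DATA_1, "serialized_state": None}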