Example 1
    def update_states(self, flow_runs: list) -> None:
        """
        After a flow run is grabbed, this function sets the state to Submitted so it
        won't be picked up by any other processes.

        Args:
            - flow_runs (list): A list of GraphQLResult flow run objects
        """
        for flow_run in flow_runs:

            # Set flow run state to `Submitted` if it is currently `Scheduled`
            if state.StateSchema().load(flow_run.serialized_state).is_scheduled():
                self.client.set_flow_run_state(
                    flow_run_id=flow_run.id,
                    version=flow_run.version,
                    state=Submitted(
                        message="Submitted for execution",
                        state=state.StateSchema().load(flow_run.serialized_state),
                    ),
                )

            # Set task run states to `Submitted` if they are currently `Scheduled`
            for task_run in flow_run.task_runs:
                if state.StateSchema().load(task_run.serialized_state).is_scheduled():
                    self.client.set_task_run_state(
                        task_run_id=task_run.id,
                        version=task_run.version,
                        state=Submitted(
                            message="Submitted for execution",
                            state=state.StateSchema().load(task_run.serialized_state),
                        ),
                    )
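
The agent code above treats Submitted as a meta state that wraps the flow run's prior Scheduled state. Below is a minimal standalone sketch of that wrapping, assuming Prefect 1.x import paths (it is not part of the agent code itself):

# Hypothetical illustration of the Submitted meta state used by the agent above
from prefect.engine.state import Scheduled, Submitted
from prefect.serialization.state import StateSchema

scheduled = Scheduled(message="Flow run scheduled")
submitted = Submitted(message="Submitted for execution", state=scheduled)

assert submitted.is_meta_state()       # Submitted only marks the run as claimed
assert submitted.state.is_scheduled()  # the wrapped Scheduled state is preserved

# Serializing and reloading keeps the wrapped (non-meta) state, as Example 11 shows
restored = StateSchema().load(submitted.serialize())
assert restored.is_meta_state()
assert restored.state.is_scheduled()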
Example 2
@pytest.mark.parametrize("with_task_runs", [True, False])
def test_mark_flow_as_submitted(monkeypatch, cloud_api, with_task_runs):
    agent = Agent()
    agent.client = MagicMock()
    agent._mark_flow_as_submitted(
        flow_run=GraphQLResult(
            {
                "id": "id",
                "serialized_state": Scheduled().serialize(),
                "version": 1,
                "task_runs": (
                    [
                        GraphQLResult(
                            {
                                "id": "task-id",
                                "version": 1,
                                "serialized_state": Scheduled().serialize(),
                            }
                        )
                    ]
                    if with_task_runs
                    else []
                ),
            }
        )
    )

    agent.client.set_flow_run_state.assert_called_once_with(
        flow_run_id="id",
        version=1,
        state=Submitted(message="Submitted for execution"))

    if with_task_runs:
        agent.client.set_task_run_state.assert_called_once_with(
            task_run_id="task-id",
            version=1,
            state=Submitted(message="Submitted for execution"),
        )
    else:
        agent.client.set_task_run_state.assert_not_called()
Example 3
    async def test_set_flow_run_states_submitted_lock_on_same_flow_run_id(
        self, run_query, flow_run_id
    ):
        result = await run_query(
            query=self.mutation,
            variables=dict(
                input=dict(
                    states=[
                        dict(flow_run_id=flow_run_id, state=Submitted().serialize())
                    ]
                )
            ),
        )
        bad_result = await run_query(
            query=self.mutation,
            variables=dict(
                input=dict(
                    states=[
                        dict(flow_run_id=flow_run_id, state=Submitted().serialize())
                    ]
                )
            ),
        )

        assert result.data.set_flow_run_states.states[0].status == "SUCCESS"
        assert (
            "State update failed: this run has already been submitted."
            in bad_result.errors[0].message
        )
Example 4
    async def test_state_submitted_lock_on_same_flow_run_id(self, flow_run_id):
        await api.states.set_flow_run_state(
            flow_run_id=flow_run_id, state=Submitted()
        )
        with pytest.raises(
            ValueError,
            match="State update failed: this run has already been submitted.",
        ):
            await api.states.set_flow_run_state(
                flow_run_id=flow_run_id, state=Submitted()
            )
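
Examples 3 and 4 exercise the submission lock: a second attempt to move the same flow run into Submitted raises a ValueError. The sketch below shows how a caller might tolerate losing that race; the helper name is illustrative, and `api` stands for the server-side API object used throughout these examples:

from prefect.engine.state import Submitted


async def try_mark_submitted(api, flow_run_id: str) -> bool:
    """Return True if this process won the race to submit the flow run."""
    try:
        await api.states.set_flow_run_state(
            flow_run_id=flow_run_id,
            state=Submitted(message="Submitted for execution"),
        )
        return True
    except ValueError as exc:
        # Another process already submitted the run; same tolerance pattern as Example 15
        if "State update failed" not in str(exc):
            raise
        return False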
Example 5
    def update_state(self, flow_run: GraphQLResult, deployment_info: str) -> None:
        """
        After a flow run is grabbed, this function sets the state to Submitted so it
        won't be picked up by any other processes.

        Args:
            - flow_run (GraphQLResult): A GraphQLResult flow run object
            - deployment_info (str): Identifier information related to the Flow Run
                deployment
        """
        self.logger.debug(
            "Updating states for flow run {}".format(flow_run.id)  # type: ignore
        )

        # Set flow run state to `Submitted` if it is currently `Scheduled`
        if state.StateSchema().load(flow_run.serialized_state).is_scheduled():

            self.logger.debug(
                "Flow run {} is in a Scheduled state, updating to Submitted".format(
                    flow_run.id  # type: ignore
                )
            )
            self.client.set_flow_run_state(
                flow_run_id=flow_run.id,
                version=flow_run.version,
                state=Submitted(
                    message="Submitted for execution. {}".format(deployment_info),
                    state=state.StateSchema().load(flow_run.serialized_state),
                ),
            )

        # Set task run states to `Submitted` if they are currently `Scheduled`
        for task_run in flow_run.task_runs:
            if state.StateSchema().load(task_run.serialized_state).is_scheduled():

                self.logger.debug(
                    "Task run {} is in a Scheduled state, updating to Submitted".format(
                        task_run.id  # type: ignore
                    )
                )
                self.client.set_task_run_state(
                    task_run_id=task_run.id,
                    version=task_run.version,
                    state=Submitted(
                        message="Submitted for execution. {}".format(deployment_info),
                        state=state.StateSchema().load(task_run.serialized_state),
                    ),
                )
Example 6
class TestCancelFlowRun:
    mutation = """
        mutation($input: cancel_flow_run_input!) {
            cancel_flow_run(input: $input) {
                state
            }
        }
    """

    @pytest.mark.parametrize(
        "state,res_state,version",
        [
            (Running(), "Cancelling", 3),
            (Success(), "Success", 2),
            (Submitted(), "Cancelled", 3),
        ],
    )
    async def test_cancel_flow_run(
        self, run_query, flow_run_id, state, res_state, version
    ):
        await api.states.set_flow_run_state(
            flow_run_id=flow_run_id, version=1, state=state
        )

        result = await run_query(
            query=self.mutation,
            variables={"input": {"flow_run_id": flow_run_id}},
        )

        assert result.data.cancel_flow_run.state == res_state

        fr = await models.FlowRun.where(id=flow_run_id).first({"state", "version"})
        assert fr.version == version
        assert fr.state == res_state
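
The same cancel_flow_run mutation can be issued from client code. A minimal sketch, assuming a configured Prefect 1.x Client and its graphql method (the flow run id is a placeholder):

from prefect import Client

CANCEL_MUTATION = """
    mutation($input: cancel_flow_run_input!) {
        cancel_flow_run(input: $input) {
            state
        }
    }
"""

client = Client()
result = client.graphql(
    CANCEL_MUTATION, variables={"input": {"flow_run_id": "<flow-run-id>"}}
)
# A Running flow run moves to "Cancelling"; a Submitted run goes straight to "Cancelled"
print(result.data.cancel_flow_run.state)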
Example 7
async def excess_submitted_task_runs(project_id):

    parameters = {}
    # pump up the task counter by creating artificial task runs
    flow = prefect.Flow(
        name="Test Flow",
        schedule=prefect.schedules.IntervalSchedule(
            start_date=pendulum.datetime(2018, 1, 1),
            interval=datetime.timedelta(days=1),
        ),
    )
    for i in range(config.queued_runs_returned_limit):
        flow.add_task(prefect.Parameter(f"x{i}", default=1))
        parameters.update({f"x{i}": 1})

    flow_id = await api.flows.create_flow(
        project_id=project_id, serialized_flow=flow.serialize()
    )

    flow_run = await api.runs.create_flow_run(flow_id=flow_id, parameters=parameters)
    tasks = await models.Task.where({"flow_id": {"_eq": flow_id}}).get("id")

    for task in tasks:
        task_run = await api.runs.get_or_create_task_run(
            flow_run_id=flow_run, task_id=task.id, map_index=None
        )
        await api.states.set_task_run_state(task_run_id=task_run, state=Submitted())
Example 8
    async def test_cancelling_non_running_flow_run_returns_cancelled(self, flow_run_id):
        result = await api.states.set_flow_run_state(
            flow_run_id=flow_run_id, state=Submitted()
        )
        assert result.flow_run_id == flow_run_id
        assert result.state == "Submitted"

        result = await api.states.cancel_flow_run(flow_run_id=flow_run_id)
        assert result.flow_run_id == flow_run_id
        assert result.state == "Cancelled"
Example 9
    def _mark_flow_as_submitted(self, flow_run: GraphQLResult) -> None:
        """
        After a flow run is grabbed, this function sets the state to Submitted so it
        won't be picked up by any other processes.

        Args:
            - flow_run (GraphQLResult): A GraphQLResult flow run object
        """
        # Set flow run state to `Submitted` if it is currently `Scheduled`
        if state.StateSchema().load(flow_run.serialized_state).is_scheduled():

            self.logger.debug(
                f"Updating flow run {flow_run.id} state from Scheduled -> Submitted..."
            )
            self.client.set_flow_run_state(
                flow_run_id=flow_run.id,
                version=flow_run.version,
                state=Submitted(
                    message="Submitted for execution",
                    state=state.StateSchema().load(flow_run.serialized_state),
                ),
            )

        # Set task run states to `Submitted` if they are currently `Scheduled`
        task_runs_updated = 0
        for task_run in flow_run.task_runs:
            if state.StateSchema().load(task_run.serialized_state).is_scheduled():
                task_runs_updated += 1
                self.client.set_task_run_state(
                    task_run_id=task_run.id,
                    version=task_run.version,
                    state=Submitted(
                        message="Submitted for execution.",
                        state=state.StateSchema().load(task_run.serialized_state),
                    ),
                )
        if task_runs_updated:
            self.logger.debug(
                f"Updated {task_runs_updated} task run states for flow run "
                f"{flow_run.id} from Scheduled -> Submitted"
            )
Example 10
    def test_state_type_methods_with_submitted_state(self):
        state = Submitted()
        assert not state.is_cached()
        assert not state.is_retrying()
        assert not state.is_pending()
        assert not state.is_running()
        assert not state.is_finished()
        assert not state.is_skipped()
        assert not state.is_scheduled()
        assert not state.is_successful()
        assert not state.is_failed()
        assert not state.is_mapped()
        assert state.is_meta_state()
Example 11
def test_meta_states_dont_nest():
    state = Queued(state=Pending())

    for i in range(300):
        if i % 2:
            state = Queued(state=state)
        else:
            state = Submitted(state=state)

    assert state.state.is_pending()
    assert not state.state.is_meta_state()

    new_state = StateSchema().load(state.serialize())
    assert new_state.is_meta_state()
    assert not new_state.state.is_meta_state()
Example 12
        dict(state=Failed(), assert_true={"is_finished", "is_failed"}),
        dict(state=Finished(), assert_true={"is_finished"}),
        dict(state=Looped(), assert_true={"is_finished", "is_looped"}),
        dict(state=Mapped(),
             assert_true={"is_finished", "is_mapped", "is_successful"}),
        dict(state=Paused(), assert_true={"is_pending", "is_scheduled"}),
        dict(state=Pending(), assert_true={"is_pending"}),
        dict(state=Queued(), assert_true={"is_meta_state", "is_queued"}),
        dict(state=Resume(), assert_true={"is_pending", "is_scheduled"}),
        dict(state=Retrying(),
             assert_true={"is_pending", "is_scheduled", "is_retrying"}),
        dict(state=Running(), assert_true={"is_running"}),
        dict(state=Scheduled(), assert_true={"is_pending", "is_scheduled"}),
        dict(state=Skipped(),
             assert_true={"is_finished", "is_successful", "is_skipped"}),
        dict(state=Submitted(), assert_true={"is_meta_state", "is_submitted"}),
        dict(state=Success(), assert_true={"is_finished", "is_successful"}),
        dict(state=TimedOut(), assert_true={"is_finished", "is_failed"}),
        dict(state=TriggerFailed(), assert_true={"is_finished", "is_failed"}),
    ],
)
def test_state_is_methods(state_check):
    """
    Iterates over all of the "is_*()" methods of the state, asserting that each one is
    False, unless the name of that method is provided as `assert_true`.

    For example, if `state_check == dict(state=Pending(), assert_true={'is_pending'})`,
    then this test will assert that `state.is_running()` is False, `state.is_successful()`
    is False, etc., but `state.is_pending()` is True.
    """
    state = state_check["state"]
Example 13
class TestFlowRunStates:
    async def test_set_flow_run_state(self, flow_run_id):
        result = await api.states.set_flow_run_state(
            flow_run_id=flow_run_id, state=Running()
        )

        assert result.flow_run_id == flow_run_id

        query = await models.FlowRun.where(id=flow_run_id).first(
            {"version", "state", "serialized_state"}
        )

        assert query.version == 3
        assert query.state == "Running"
        assert query.serialized_state["type"] == "Running"

    @pytest.mark.parametrize("state", [Running(), Success()])
    async def test_set_flow_run_state_fails_with_wrong_flow_run_id(self, state):
        with pytest.raises(ValueError, match="State update failed"):
            await api.states.set_flow_run_state(
                flow_run_id=str(uuid.uuid4()), state=state
            )

    async def test_trigger_failed_state_does_not_set_end_time(self, flow_run_id):
        # there is no logic in Prefect that would create this sequence of
        # events, but a user could manually do this
        await api.states.set_flow_run_state(
            flow_run_id=flow_run_id, state=TriggerFailed()
        )
        flow_run_info = await models.FlowRun.where(id=flow_run_id).first(
            {"id", "start_time", "end_time"}
        )
        assert not flow_run_info.start_time
        assert not flow_run_info.end_time

    @pytest.mark.parametrize(
        "state",
        [
            s()
            for s in State.children()
            if not s().is_running() and not s().is_submitted()
        ],
    )
    async def test_state_does_not_set_heartbeat_unless_running_or_submitted(
        self, state, flow_run_id
    ):
        flow_run = await models.FlowRun.where(id=flow_run_id).first({"heartbeat"})
        assert flow_run.heartbeat is None

        dt = pendulum.now("UTC")
        await api.states.set_flow_run_state(flow_run_id=flow_run_id, state=state)

        flow_run = await models.FlowRun.where(id=flow_run_id).first({"heartbeat"})
        assert flow_run.heartbeat is None

    @pytest.mark.parametrize("state", [Running(), Submitted()])
    async def test_running_and_submitted_state_sets_heartbeat(self, state, flow_run_id):
        """
        Both Running and Submitted states need to set heartbeats for services like Lazarus to
        function properly.
        """
        flow_run = await models.FlowRun.where(id=flow_run_id).first({"heartbeat"})
        assert flow_run.heartbeat is None

        dt = pendulum.now("UTC")
        await api.states.set_flow_run_state(flow_run_id=flow_run_id, state=state)

        flow_run = await models.FlowRun.where(id=flow_run_id).first({"heartbeat"})
        assert flow_run.heartbeat > dt

    async def test_setting_flow_run_to_cancelled_state_sets_unfinished_task_runs_to_cancelled(
        self, flow_run_id
    ):
        task_runs = await models.TaskRun.where(
            {"flow_run_id": {"_eq": flow_run_id}}
        ).get({"id"})
        task_run_ids = [run.id for run in task_runs]
        # update the state to Running
        await api.states.set_flow_run_state(flow_run_id=flow_run_id, state=Running())
        # Currently this flow_run_id fixture has at least 3 tasks, if this
        # changes the test will need to be updated
        assert len(task_run_ids) >= 3, "flow_run_id fixture has changed"
        # Set one task run to pending, one to running, and the rest to success
        pending_task_run = task_run_ids[0]
        running_task_run = task_run_ids[1]
        rest = task_run_ids[2:]
        await api.states.set_task_run_state(
            task_run_id=pending_task_run, state=Pending()
        )
        await api.states.set_task_run_state(
            task_run_id=running_task_run, state=Running()
        )
        for task_run_id in rest:
            await api.states.set_task_run_state(
                task_run_id=task_run_id, state=Success()
            )
        # set the flow run to a cancelled state
        await api.states.set_flow_run_state(flow_run_id=flow_run_id, state=Cancelled())
        # Confirm the unfinished task runs have been marked as cancelled
        task_runs = await models.TaskRun.where(
            {"flow_run_id": {"_eq": flow_run_id}}
        ).get({"id", "state"})
        new_states = {run.id: run.state for run in task_runs}
        assert new_states[pending_task_run] == "Cancelled"
        assert new_states[running_task_run] == "Cancelled"
        assert all(new_states[id] == "Success" for id in rest)
Example 14
class TestExecuteFlowRunInSubprocess:
    @pytest.fixture()
    def mocks(self, monkeypatch):
        class Mocks:
            subprocess = MagicMock()
            wait_for_flow_run_start_time = MagicMock()
            fail_flow_run = MagicMock()

        mocks = Mocks()
        monkeypatch.setattr("prefect.backend.execution.subprocess",
                            mocks.subprocess)
        monkeypatch.setattr(
            "prefect.backend.execution._wait_for_flow_run_start_time",
            mocks.wait_for_flow_run_start_time,
        )
        monkeypatch.setattr("prefect.backend.execution._fail_flow_run",
                            mocks.fail_flow_run)

        # Since the module is mocked, restore the real exception type so it can
        # still be raised and caught in try/except blocks
        mocks.subprocess.CalledProcessError = CalledProcessError

        return mocks

    def test_creates_subprocess_correctly(self, cloud_mocks, mocks):
        # Return a scheduled flow run to start
        cloud_mocks.FlowRunView.from_flow_run_id().state = Scheduled()
        # Return a finished flow run after the first iteration
        cloud_mocks.FlowRunView().get_latest().state = Success()

        execute_flow_run_in_subprocess("flow-run-id")

        # Should pass the correct flow run id to wait for
        mocks.wait_for_flow_run_start_time.assert_called_once_with("flow-run-id")

        # Calls the correct command w/ environment variables
        mocks.subprocess.run.assert_called_once_with(
            [sys.executable, "-m", "prefect", "execute", "flow-run"],
            env={
                "PREFECT__CLOUD__SEND_FLOW_RUN_LOGS": "True",
                "PREFECT__LOGGING__LEVEL": "INFO",
                "PREFECT__LOGGING__FORMAT": "[%(asctime)s] %(levelname)s - %(name)s | %(message)s",
                "PREFECT__LOGGING__DATEFMT": "%Y-%m-%d %H:%M:%S%z",
                "PREFECT__BACKEND": "cloud",
                "PREFECT__CLOUD__API": "https://api.prefect.io",
                "PREFECT__CLOUD__TENANT_ID": "",
                "PREFECT__CLOUD__API_KEY": cloud_mocks.Client().api_key,
                "PREFECT__CONTEXT__FLOW_RUN_ID": "flow-run-id",
                "PREFECT__CONTEXT__FLOW_ID": cloud_mocks.FlowRunView.from_flow_run_id().flow_id,
                "PREFECT__ENGINE__FLOW_RUNNER__DEFAULT_CLASS": "prefect.engine.cloud.CloudFlowRunner",
                "PREFECT__ENGINE__TASK_RUNNER__DEFAULT_CLASS": "prefect.engine.cloud.CloudTaskRunner",
            },
        )

        # Return code is checked
        mocks.subprocess.run().check_returncode.assert_called_once()

    @pytest.mark.parametrize("start_state", [Submitted(), Running()])
    def test_fails_immediately_if_flow_run_is_being_executed_elsewhere(
            self, cloud_mocks, start_state, mocks):
        cloud_mocks.FlowRunView.from_flow_run_id().state = start_state
        with pytest.raises(RuntimeError, match="already in state"):
            execute_flow_run_in_subprocess("flow-run-id")

    def test_handles_signal_interrupt(self, cloud_mocks, mocks):
        cloud_mocks.FlowRunView.from_flow_run_id().state = Scheduled()
        mocks.subprocess.run.side_effect = KeyboardInterrupt()

        # Keyboard interrupt should be re-raised
        with pytest.raises(KeyboardInterrupt):
            execute_flow_run_in_subprocess("flow-run-id")

        # Only tried to run once
        mocks.subprocess.run.assert_called_once()

        # Flow run is failed with the proper message
        mocks.fail_flow_run.assert_called_once_with(
            flow_run_id="flow-run-id",
            message="Flow run received an interrupt signal.")

    def test_handles_unexpected_exception(self, cloud_mocks, mocks):
        cloud_mocks.FlowRunView.from_flow_run_id().state = Scheduled()
        mocks.subprocess.run.side_effect = Exception("Foobar")

        # Re-raised as `RuntimeError`
        with pytest.raises(
            RuntimeError, match="encountered unexpected exception during execution"
        ):
            execute_flow_run_in_subprocess("flow-run-id")

        # Only tried to run once
        mocks.subprocess.run.assert_called_once()

        # Flow run is failed with the proper message
        mocks.fail_flow_run.assert_called_once_with(
            flow_run_id="flow-run-id",
            message=(
                "Flow run encountered unexpected exception during execution: "
                f"{Exception('Foobar')!r}"),
        )

    def test_handles_bad_subprocess_result(self, cloud_mocks, mocks):
        cloud_mocks.FlowRunView.from_flow_run_id().state = Scheduled()
        mocks.subprocess.run.return_value.check_returncode.side_effect = (
            CalledProcessError(cmd="foo", returncode=1))

        # Re-raised as `RuntimeError`
        with pytest.raises(RuntimeError, match="flow run process failed"):
            execute_flow_run_in_subprocess("flow-run-id")

        # Only tried to run once
        mocks.subprocess.run.assert_called_once()

        # Flow run is not failed at this time -- left to the FlowRunner
        mocks.fail_flow_run.assert_not_called()

    def test_loops_until_flow_run_is_finished(self, cloud_mocks, mocks):
        cloud_mocks.FlowRunView.from_flow_run_id().state = Scheduled()
        cloud_mocks.FlowRunView.from_flow_run_id().get_latest.side_effect = [
            MagicMock(state=Running()),
            MagicMock(state=Running()),
            MagicMock(state=Success()),
        ]

        execute_flow_run_in_subprocess("flow-run-id")

        # Ran the subprocess twice
        assert mocks.subprocess.run.call_count == 2
        # Waited each time
        assert mocks.wait_for_flow_run_start_time.call_count == 2
Example 15
    async def run_scheduled(self, flow_id=None):
        """
        Queries for any flow runs that are SCHEDULED, or any flow runs that have SCHEDULED
        task runs (provided the flow run itself is RUNNING). Sets all Scheduled runs to
        Submitted and runs the flow.

        If a flow_id is provided, only flow runs of that flow are matched.
        """
        now = pendulum.now()
        flow_runs = await models.FlowRun.where(
            {
                "_or": [
                    {"state_start_time": {"_lte": str(now)}},
                    {
                        "state": {"_eq": "Running"},
                        "task_runs": {"state_start_time": {"_lte": str(now)}},
                    },
                ],
                "flow_id": {"_eq": flow_id},
            }
        ).get(
            selection_set={
                "id": True,
                "version": True,
                "tenant_id": True,
                "state": True,
                "serialized_state": True,
                "parameters": True,
                "flow": {"id", "environment", "name", "storage"},
                with_args(
                    "task_runs", {"where": {"state_start_time": {"_lte": str(now)}}}
                ): {"id", "version", "task_id", "serialized_state"},
            },
            limit=100,
            order_by={"state_start_time": EnumValue("asc")},
        )
        for fr in flow_runs:

            skip_counter = 0

            fr_serialized_state = state_schema.load(fr.serialized_state)

            # set the flow run state to submitted, if it's scheduled
            if fr_serialized_state.is_scheduled():
                try:
                    await api.states.set_flow_run_state(
                        flow_run_id=fr.id,
                        state=Submitted(
                            message="Submitted for execution",
                            state=fr_serialized_state,
                        ),
                    )
                except ValueError as exc:
                    skip_counter += 1
                    if "State update failed" not in str(exc):
                        raise

            # set each task run state to submitted, if it's scheduled
            for tr in fr.task_runs:
                tr_serialized_state = state_schema.load(tr.serialized_state)

                try:
                    await api.states.set_task_run_state(
                        task_run_id=tr.id,
                        state=Submitted(
                            message="Submitted for execution",
                            state=tr_serialized_state,
                        ),
                    )
                except ValueError as exc:
                    skip_counter += 1
                    if "State update failed" not in str(exc):
                        raise

            # none of the states were set, so we shouldn't bother running
            if skip_counter == 1 + len(fr.task_runs):
                continue

            self.logger.info(f'Submitting flow run "{fr.id}" for execution.')

            # run the flow
            self.run_flow(
                flow_name=fr.flow.name,
                storage=storage_schema.load(fr.flow.storage),
                environment=environment_schema.load(fr.flow.environment),
                config={
                    "cloud.api": "http://localhost:4200",
                    "cloud.graphql": "http://localhost:4200",
                    "engine.flow_runner.default_class": "prefect.engine.cloud.CloudFlowRunner",
                    "engine.task_runner.default_class": "prefect.engine.cloud.CloudTaskRunner",
                    "engine.executor.default_class": "prefect.engine.executors.LocalExecutor",
                },
                context={"flow_run_id": fr.id},
            )
Example 16
    "id": "id-1",
    "name": "name-1",
    "flow_id": "flow_id-1",
    "serialized_state": Success(message="state-1").serialize(),
    "states": [
        {
            "timestamp": pendulum.now().subtract(seconds=10).isoformat(),
            "serialized_state": Running(message="past-state").serialize(),
        },
        {
            "timestamp": pendulum.now().subtract(seconds=20).isoformat(),
            "serialized_state": Submitted(message="past-state").serialize(),
        },
    ],
    "parameters": {"param": "value"},
    "context": {"foo": "bar"},
    "labels": ["label"],
    "updated": pendulum.now().isoformat(),
    "run_config": UniversalRun().serialize(),
}
FLOW_RUN_DATA_2 = {
Example 17
    state=Failed(message="state-1"),
    states=[],
    parameters={"param": "value"},
    context={"foo": "bar"},
    labels=["label"],
    updated_at=pendulum.now(),
    run_config=UniversalRun(),
)
# On `get_latest` return the same flow run view
FAILED_FLOW_RUN_VIEW.get_latest = MagicMock(return_value=FAILED_FLOW_RUN_VIEW)

SUBMITTED_FLOW_RUN_VIEW = FlowRunView(
    flow_run_id="flow-run-id",
    name="flow-run-name",
    flow_id="flow-id",
    state=Submitted(message="state-1"),
    states=[],
    parameters={"param": "value"},
    context={"foo": "bar"},
    labels=["label"],
    updated_at=pendulum.now(),
    run_config=UniversalRun(),
)
# On `get_latest` return the same flow run view
SUBMITTED_FLOW_RUN_VIEW.get_latest = MagicMock(
    return_value=SUBMITTED_FLOW_RUN_VIEW)

TEST_FLOW_RUN_VIEW = FlowRunView(
    flow_run_id="flow-run-id",
    name="flow-run-name",
    flow_id="flow-id",