Example #1
def test_execute_step_with_structured_logs():
    with get_foo_pipeline_handle() as pipeline_handle:
        runner = CliRunner()

        with instance_for_test(
            overrides={
                "compute_logs": {
                    "module": "dagster.core.storage.noop_compute_log_manager",
                    "class": "NoOpComputeLogManager",
                }
            }
        ) as instance:
            run = create_run_for_test(instance, pipeline_name="foo", run_id="new_run")

            input_json = serialize_dagster_namedtuple(
                ExecuteStepArgs(
                    pipeline_origin=pipeline_handle.get_origin(),
                    pipeline_run_id=run.run_id,
                    instance_ref=instance.get_ref(),
                )
            )

            result = runner_execute_step_with_structured_logs(runner, [input_json])

        assert "STEP_SUCCESS" in result.stdout
Example #2
def test_execute_run_cannot_load():
    with get_foo_pipeline_handle() as pipeline_handle:
        runner = CliRunner()

        with instance_for_test(
                overrides={
                    "compute_logs": {
                        "module":
                        "dagster.core.storage.noop_compute_log_manager",
                        "class": "NoOpComputeLogManager",
                    }
                }) as instance:

            input_json = serialize_dagster_namedtuple(
                ExecuteRunArgs(
                    pipeline_origin=pipeline_handle.get_python_origin(),
                    pipeline_run_id="FOOBAR",
                    instance_ref=instance.get_ref(),
                ))

            result = runner.invoke(
                api.execute_run_command,
                [input_json],
            )

            assert result.exit_code != 0

            assert "Pipeline run with id 'FOOBAR' not found for run execution" in str(
                result.exception), "no match, result: {}".format(result.stdout)
Example #3
def test_execute_run():
    with get_foo_pipeline_handle() as pipeline_handle:
        runner = CliRunner()

        with instance_for_test(
                overrides={
                    "compute_logs": {
                        "module":
                        "dagster.core.storage.noop_compute_log_manager",
                        "class": "NoOpComputeLogManager",
                    }
                }) as instance:
            run = create_run_for_test(instance,
                                      pipeline_name="foo",
                                      run_id="new_run")

            input_json = serialize_dagster_namedtuple(
                ExecuteRunArgs(
                    pipeline_origin=pipeline_handle.get_python_origin(),
                    pipeline_run_id=run.run_id,
                    instance_ref=instance.get_ref(),
                ))

            result = runner_execute_run(
                runner,
                [input_json],
            )

            assert "PIPELINE_SUCCESS" in result.stdout, "no match, result: {}".format(
                result.stdout)

            # Framework errors (e.g. running a run that has already run) still result in a non-zero error code
            result = runner.invoke(api.execute_run_command, [input_json])
            assert result.exit_code != 0
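runner_execute_run is another helper from the surrounding test module. A minimal sketch, assuming it follows the same error-reporting pattern whose tail is visible in Example #10; api.execute_run_command is taken from the runner.invoke call above, while the failure message text is illustrative:

def runner_execute_run(runner, cli_args):
    # Sketch of the assumed helper; the real one may format its failure differently.
    result = runner.invoke(api.execute_run_command, cli_args)
    if result.exit_code != 0:
        # CliRunner captures stdout, so include it when raising.
        raise Exception(
            "dagster execute_run with cli_args {cli_args} returned exit_code {exit_code} with stdout:\n{stdout}".format(
                cli_args=cli_args, exit_code=result.exit_code, stdout=result.stdout
            )
        )
    return result
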
Example #4
def create_run(instance, **kwargs):  # pylint: disable=redefined-outer-name
    with get_foo_pipeline_handle() as pipeline_handle:
        create_run_for_test(
            instance,
            external_pipeline_origin=pipeline_handle.get_external_origin(),
            pipeline_name="foo",
            **kwargs)
Example #5
def create_run(instance, **kwargs):
    with get_foo_pipeline_handle() as pipeline_handle:
        create_run_for_test(
            instance,
            external_pipeline_origin=pipeline_handle.get_external_origin(),
            pipeline_name="foo",
            **kwargs,
        )
Example #6
def create_run(instance, **kwargs):  # pylint: disable=redefined-outer-name
    with get_foo_pipeline_handle() as pipeline_handle:
        pipeline_args = merge_dicts(
            {
                "pipeline_name": "foo",
                "external_pipeline_origin": pipeline_handle.get_external_origin(),
            },
            kwargs,
        )
        yield create_run_for_test(instance, **pipeline_args)
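The create_run variants above differ mainly in how they forward keyword arguments; the generator version immediately above yields the created run, which makes it easy to reuse from a pytest fixture. A purely illustrative usage sketch (the fixture name, run_id, and test are hypothetical and not part of the original module):

@pytest.fixture(name="foo_run")
def foo_run_fixture():
    # Hypothetical fixture: delegate to the generator-style create_run above so
    # the pipeline handle stays open for the duration of the test.
    with instance_for_test() as instance:
        yield from create_run(instance, run_id="illustrative_run")


def test_created_run_defaults(foo_run):
    # Hypothetical test showing how the yielded run could be consumed.
    assert foo_run.pipeline_name == "foo"
    assert foo_run.run_id == "illustrative_run"
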
Example #7
def test_external_pipeline_from_run():
    with instance_for_test() as instance:
        with get_foo_pipeline_handle() as pipeline_handle:
            run = create_run_for_test(
                instance,
                pipeline_name=pipeline_handle.pipeline_name,
                external_pipeline_origin=pipeline_handle.get_external_origin(),
            )

            with external_pipeline_from_run(run) as external_pipeline:
                assert external_pipeline.name == pipeline_handle.pipeline_name
Example #8
# The mock_verify_step argument implies the test is wrapped in a mock.patch
# decorator; the patch target below is an assumption based on how verify_step
# is exercised in Example #11.
@mock.patch("dagster.cli.api.verify_step")
def test_execute_step_verify_step_framework_error(mock_verify_step):
    with get_foo_pipeline_handle() as pipeline_handle:
        runner = CliRunner()

        mock_verify_step.side_effect = Exception(
            "Unexpected framework error text")

        with instance_for_test(
                overrides={
                    "compute_logs": {
                        "module":
                        "dagster.core.storage.noop_compute_log_manager",
                        "class": "NoOpComputeLogManager",
                    }
                }) as instance:
            run = create_run_for_test(
                instance,
                pipeline_name="foo",
                run_id="new_run",
            )

            input_json = serialize_dagster_namedtuple(
                ExecuteStepArgs(
                    pipeline_origin=pipeline_handle.get_python_origin(),
                    pipeline_run_id=run.run_id,
                    step_keys_to_execute=["fake_step"],
                    instance_ref=instance.get_ref(),
                    should_verify_step=True,
                    known_state=KnownExecutionState(
                        {},
                        {
                            "blah": {
                                "result": ["0", "1", "2"]
                            },
                        },
                    ),
                ))
            result = runner.invoke(api.execute_step_command, [input_json])

            assert result.exit_code != 0

            # Framework error logged to event log
            logs = instance.all_logs(run.run_id)

            log_entry = logs[0]
            assert (
                log_entry.message ==
                "An exception was thrown during step execution that is likely a framework error, rather than an error in user code."
            )
            assert log_entry.step_key == "fake_step"

            assert "Unexpected framework error text" in str(
                log_entry.dagster_event.event_specific_data.error)
Example #9
def test_submit_run():
    with instance_for_test(
            overrides={
                "run_coordinator": {
                    "module": "dagster.core.test_utils",
                    "class": "MockedRunCoordinator",
                }
            }) as instance:
        with get_foo_pipeline_handle() as pipeline_handle:

            run = create_run_for_test(
                instance=instance,
                pipeline_name=pipeline_handle.pipeline_name,
                run_id="foo-bar",
                external_pipeline_origin=pipeline_handle.get_external_origin(),
            )

            instance.submit_run(run.run_id, None)

            assert len(instance.run_coordinator.queue()) == 1
            assert instance.run_coordinator.queue()[0].run_id == "foo-bar"
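The assertions above only rely on the mocked coordinator exposing submitted runs through a queue() method. The real MockedRunCoordinator lives in dagster.core.test_utils; the class below is a purely illustrative stand-in for that behavior, not its actual implementation:

class InMemoryQueueCoordinator:
    # Illustrative stand-in only: records submitted runs in memory instead of
    # launching them, which is all the assertions above depend on.
    def __init__(self):
        self._queue = []

    def submit_run(self, pipeline_run):
        # A real run coordinator would hand the run off for execution here.
        self._queue.append(pipeline_run)
        return pipeline_run

    def queue(self):
        return self._queue
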
Example #10
            "dagster runner_execute_run_with_structured_logs commands with cli_args {cli_args} "
            'returned exit_code {exit_code} with stdout:\n"{stdout}"'
            '\n exception: "\n{exception}"'
            '\n and result as string: "{result}"').format(
                cli_args=cli_args,
                exit_code=result.exit_code,
                stdout=result.stdout,
                exception=result.exception,
                result=result,
            ))
    return result


@pytest.mark.parametrize(
    "pipeline_handle",
    [get_foo_pipeline_handle()],
)
def test_execute_run_with_structured_logs(pipeline_handle):
    runner = CliRunner()

    with instance_for_test(
            overrides={
                "compute_logs": {
                    "module": "dagster.core.storage.noop_compute_log_manager",
                    "class": "NoOpComputeLogManager",
                }
            }) as instance:
        run = create_run_for_test(instance,
                                  pipeline_name="foo",
                                  run_id="new_run")
Example #11
def test_execute_step_verify_step():
    with get_foo_pipeline_handle() as pipeline_handle:
        runner = CliRunner()

        with instance_for_test(
                overrides={
                    "compute_logs": {
                        "module":
                        "dagster.core.storage.noop_compute_log_manager",
                        "class": "NoOpComputeLogManager",
                    }
                }) as instance:
            run = create_run_for_test(
                instance,
                pipeline_name="foo",
                run_id="new_run",
            )

            input_json = serialize_dagster_namedtuple(
                ExecuteStepArgs(
                    pipeline_origin=pipeline_handle.get_python_origin(),
                    pipeline_run_id=run.run_id,
                    step_keys_to_execute=None,
                    instance_ref=instance.get_ref(),
                ))

            # Check that verify succeeds for step that hasn't been run (case 3)
            retries = RetryState()
            assert verify_step(instance,
                               run,
                               retries,
                               step_keys_to_execute=["do_something"])

            # Check that verify fails when trying to retry with no original attempt (case 3)
            retries = RetryState()
            retries.mark_attempt("do_something")
            assert not verify_step(
                instance, run, retries, step_keys_to_execute=["do_something"])

            # Test trying to re-run a retry fails verify_step (case 2)
            with mock.patch("dagster.cli.api.get_step_stats_by_key"
                            ) as _step_stats_by_key:
                _step_stats_by_key.return_value = {
                    "do_something":
                    RunStepKeyStatsSnapshot(run_id=run.run_id,
                                            step_key="do_something",
                                            attempts=2)
                }

                retries = RetryState()
                retries.mark_attempt("do_something")
                assert not verify_step(instance,
                                       run,
                                       retries,
                                       step_keys_to_execute=["do_something"])

            runner_execute_step(
                runner,
                [input_json],
            )

            # Check that verify fails for step that has already run (case 1)
            retries = RetryState()
            assert not verify_step(
                instance, run, retries, step_keys_to_execute=["do_something"])
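Taken together, the four verify_step scenarios above check that a step only executes when the number of attempts already recorded for it matches the number of prior attempts the retry state expects. A deliberately simplified, illustrative summary of that decision (the real verify_step in dagster.cli.api is more involved):

def sketch_should_execute(recorded_attempts, expected_prior_attempts):
    # Illustrative only: the step is about to run as attempt
    # expected_prior_attempts + 1, so it is safe to execute exactly when that
    # many attempts are already on record.
    #   case 1: recorded > expected with no retry planned -> the step already ran
    #   case 2: recorded > expected on a planned retry    -> the retry already ran
    #   case 3: recorded < expected                       -> nothing to retry from
    return recorded_attempts == expected_prior_attempts


assert sketch_should_execute(recorded_attempts=0, expected_prior_attempts=0)      # fresh step
assert not sketch_should_execute(recorded_attempts=1, expected_prior_attempts=0)  # case 1
assert not sketch_should_execute(recorded_attempts=2, expected_prior_attempts=1)  # case 2
assert not sketch_should_execute(recorded_attempts=0, expected_prior_attempts=1)  # case 3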