Example no. 1
def resolve_is_run_config_valid(graphene_info, represented_pipeline, mode,
                                run_config):
    check.inst_param(graphene_info, 'graphene_info', ResolveInfo)
    check.inst_param(represented_pipeline, 'represented_pipeline',
                     RepresentedPipeline)
    check.str_param(mode, 'mode')
    check.dict_param(run_config, 'run_config', key_type=str)

    mode_def_snap = represented_pipeline.get_mode_def_snap(mode)

    if not mode_def_snap.root_config_key:
        # historical pipeline with unknown environment type. blindly pass validation
        return graphene_info.schema.type_named(
            'PipelineConfigValidationValid')(represented_pipeline.name)

    validated_config = validate_config_from_snap(
        represented_pipeline.config_schema_snapshot,
        mode_def_snap.root_config_key, run_config)

    if not validated_config.success:
        raise UserFacingGraphQLError(
            graphene_info.schema.type_named('PipelineConfigValidationInvalid')(
                pipeline_name=represented_pipeline.name,
                errors=[
                    graphene_info.schema.type_named(
                        'PipelineConfigValidationError').from_dagster_error(
                            represented_pipeline.config_schema_snapshot,
                            err,
                        ) for err in validated_config.errors
                ],
            ))

    return graphene_info.schema.type_named('PipelineConfigValidationValid')(
        represented_pipeline.name)
Example no. 2
def resolve_is_environment_config_valid(graphene_info, pipeline_index, mode, environment_dict):
    check.inst_param(graphene_info, 'graphene_info', ResolveInfo)
    check.inst_param(pipeline_index, 'pipeline_index', PipelineIndex)
    check.str_param(mode, 'mode')
    check.dict_param(environment_dict, 'environment_dict', key_type=str)

    mode_def_snap = pipeline_index.get_mode_def_snap(mode)

    if not mode_def_snap.root_config_key:
        # historical pipeline with unknown environment type. blindly pass validation
        return graphene_info.schema.type_named('PipelineConfigValidationValid')(pipeline_index.name)

    validated_config = validate_config_from_snap(
        pipeline_index.config_schema_snapshot, mode_def_snap.root_config_key, environment_dict
    )

    if not validated_config.success:
        raise UserFacingGraphQLError(
            graphene_info.schema.type_named('PipelineConfigValidationInvalid')(
                pipeline_name=pipeline_index.name,
                errors=[
                    graphene_info.schema.type_named(
                        'PipelineConfigValidationError'
                    ).from_dagster_error(
                        pipeline_index.config_schema_snapshot, err,
                    )
                    for err in validated_config.errors
                ],
            )
        )

    return graphene_info.schema.type_named('PipelineConfigValidationValid')(pipeline_index.name)
Example no. 3
def _start_pipeline_execution_for_created_run(graphene_info, run_id):
    check.inst_param(graphene_info, 'graphene_info', ResolveInfo)

    instance = graphene_info.context.instance
    execution_manager_settings = instance.dagit_settings.get(
        'execution_manager')
    if execution_manager_settings and execution_manager_settings.get(
            'disabled'):
        return graphene_info.schema.type_named(
            'StartPipelineRunDisabledError')()

    pipeline_run = instance.get_run_by_id(run_id)
    if not pipeline_run:
        return graphene_info.schema.type_named('PipelineRunNotFoundError')(
            run_id)

    external_pipeline = get_external_pipeline_subset_or_raise(
        graphene_info, pipeline_run.selector.name,
        pipeline_run.selector.solid_subset)

    validated_config = validate_config_from_snap(
        external_pipeline.config_schema_snapshot,
        external_pipeline.root_config_key_for_mode(pipeline_run.mode),
        pipeline_run.environment_dict,
    )
    if not validated_config.success:
        # If the config is invalid, we construct a DagsterInvalidConfigError exception and
        # insert it into the event log. We also return a PipelineConfigValidationInvalid user facing
        # graphql error.

        # We currently re-use the engine events machinery to add the error to the event log, but
        # may need to create a new event type and instance method to handle these errors.
        invalid_config_exception = DagsterInvalidConfigError(
            'Error in config for pipeline {}'.format(external_pipeline.name),
            validated_config.errors,
            pipeline_run.environment_dict,
        )

        instance.report_engine_event(
            str(invalid_config_exception.message),
            pipeline_run,
            EngineEventData.engine_error(
                SerializableErrorInfo(
                    invalid_config_exception.message,
                    [],
                    DagsterInvalidConfigError.__name__,
                    None,
                )),
        )

        instance.report_run_failed(pipeline_run)

        return DauphinPipelineConfigValidationInvalid.for_validation_errors(
            external_pipeline.pipeline_index, validated_config.errors)

    graphene_info.context.execute_pipeline(external_pipeline, pipeline_run)

    return graphene_info.schema.type_named('StartPipelineRunSuccess')(
        run=graphene_info.schema.type_named('PipelineRun')(pipeline_run))
Example no. 4
def _execute_schedule(graphene_info, external_pipeline, execution_params,
                      errors):
    check.inst_param(external_pipeline, 'external_pipeline', ExternalPipeline)

    instance = graphene_info.context.instance

    mode, environment_dict = execution_params.mode, execution_params.environment_dict

    validation_result = validate_config_from_snap(
        external_pipeline.config_schema_snapshot,
        external_pipeline.root_config_key_for_mode(mode),
        environment_dict,
    )

    external_execution_plan = (
        graphene_info.context.get_external_execution_plan(
            external_pipeline, environment_dict, mode,
            execution_params.step_keys) if validation_result.success else None)

    pipeline_run = instance.create_run(
        pipeline_name=external_pipeline.name,
        run_id=None,
        environment_dict=environment_dict,
        mode=mode,
        solid_subset=(execution_params.selector.solid_subset
                      if execution_params.selector is not None else None),
        step_keys_to_execute=None,
        status=None,
        tags=execution_params.execution_metadata.tags,
        root_run_id=None,
        parent_run_id=None,
        pipeline_snapshot=external_pipeline.pipeline_snapshot,
        execution_plan_snapshot=external_execution_plan.execution_plan_snapshot
        if external_execution_plan else None,
        parent_pipeline_snapshot=external_pipeline.parent_pipeline_snapshot,
    )

    # Inject errors into event log at this point
    if len(errors) > 0:
        for error in errors:
            instance.report_engine_event(error.message, pipeline_run,
                                         EngineEventData.engine_error(error))

    # Launch run if run launcher is defined
    run_launcher = graphene_info.context.instance.run_launcher
    if run_launcher:
        result = do_launch_for_created_run(graphene_info, pipeline_run.run_id)
    else:
        result = _synchronously_execute_run_within_hosted_user_process(
            graphene_info, pipeline_run.run_id)

    return pipeline_run, result
Example no. 5
def ensure_valid_config(external_pipeline, mode, run_config):
    check.inst_param(external_pipeline, "external_pipeline", ExternalPipeline)
    check.str_param(mode, "mode")
    # do not type check run_config so that validate_config_from_snap throws

    validated_config = validate_config_from_snap(
        config_schema_snapshot=external_pipeline.config_schema_snapshot,
        config_type_key=external_pipeline.root_config_key_for_mode(mode),
        config_value=run_config,
    )

    if not validated_config.success:
        from dagster_graphql.schema.errors import DauphinPipelineConfigValidationInvalid

        raise UserFacingGraphQLError(
            DauphinPipelineConfigValidationInvalid.for_validation_errors(
                external_pipeline, validated_config.errors))

    return validated_config
Example no. 6
def ensure_valid_config(external_pipeline, mode, environment_dict):
    check.inst_param(external_pipeline, 'external_pipeline', ExternalPipeline)
    check.str_param(mode, 'mode')
    # do not type check environment_dict so that validate_config_from_snap throws

    validated_config = validate_config_from_snap(
        config_schema_snapshot=external_pipeline.config_schema_snapshot,
        config_type_key=external_pipeline.root_config_key_for_mode(mode),
        config_value=environment_dict,
    )

    if not validated_config.success:
        from dagster_graphql.schema.errors import DauphinPipelineConfigValidationInvalid

        raise UserFacingGraphQLError(
            DauphinPipelineConfigValidationInvalid.for_validation_errors(
                external_pipeline.pipeline_index, validated_config.errors))

    return validated_config
Example no. 7
def create_possibly_invalid_run(
    graphene_info,
    external_pipeline,
    execution_params,
):
    mode, environment_dict, step_keys = (
        execution_params.mode,
        execution_params.environment_dict,
        execution_params.step_keys,
    )
    validation_result = validate_config_from_snap(
        external_pipeline.config_schema_snapshot,
        external_pipeline.root_config_key_for_mode(mode),
        environment_dict,
    )

    external_execution_plan = (
        graphene_info.context.get_external_execution_plan(
            external_pipeline, environment_dict, mode, step_keys)
        if validation_result.success else None)

    return graphene_info.context.instance.create_run(
        pipeline_name=external_pipeline.name,
        run_id=None,
        environment_dict=environment_dict,
        mode=mode,
        solids_to_execute=frozenset(execution_params.selector.solid_selection)
        if execution_params.selector.solid_selection else None,
        step_keys_to_execute=step_keys,
        status=None,
        tags=merge_dicts(external_pipeline.tags,
                         execution_params.execution_metadata.tags),
        root_run_id=None,
        parent_run_id=None,
        pipeline_snapshot=external_pipeline.pipeline_snapshot,
        execution_plan_snapshot=external_execution_plan.execution_plan_snapshot
        if external_execution_plan else None,
        parent_pipeline_snapshot=external_pipeline.parent_pipeline_snapshot,
    )
Example no. 8
def resolve_is_run_config_valid(graphene_info, represented_pipeline, mode, run_config):
    from ..schema.pipelines.config import (
        GraphenePipelineConfigValidationError,
        GrapheneRunConfigValidationInvalid,
        GraphenePipelineConfigValidationValid,
    )

    check.inst_param(graphene_info, "graphene_info", ResolveInfo)
    check.inst_param(represented_pipeline, "represented_pipeline", RepresentedPipeline)
    check.str_param(mode, "mode")
    check.dict_param(run_config, "run_config", key_type=str)

    mode_def_snap = represented_pipeline.get_mode_def_snap(mode)

    if not mode_def_snap.root_config_key:
        # historical pipeline with unknown environment type. blindly pass validation
        return GraphenePipelineConfigValidationValid(represented_pipeline.name)

    validated_config = validate_config_from_snap(
        represented_pipeline.config_schema_snapshot, mode_def_snap.root_config_key, run_config
    )

    if not validated_config.success:
        raise UserFacingGraphQLError(
            GrapheneRunConfigValidationInvalid(
                pipeline_name=represented_pipeline.name,
                errors=[
                    GraphenePipelineConfigValidationError.from_dagster_error(
                        represented_pipeline.config_schema_snapshot,
                        err,
                    )
                    for err in validated_config.errors
                ],
            )
        )

    return GraphenePipelineConfigValidationValid(represented_pipeline.name)
Example no. 9
def get_run_execution_info_for_created_run_or_error(
    graphene_info, repository_location_name, repository_name, run_id
):
    """
    Previously created run could either be created in a different process *or*
    during the launchScheduledRun call where we want to have a record of
    a run the was created but have invalid configuration
    """
    check.inst_param(graphene_info, "graphene_info", ResolveInfo)
    check.str_param(repository_location_name, "repository_location_name")
    check.str_param(repository_name, "repository_name")
    check.str_param(run_id, "run_id")

    instance = graphene_info.context.instance

    pipeline_run = instance.get_run_by_id(run_id)
    if not pipeline_run:
        return graphene_info.schema.type_named("PipelineRunNotFoundError")(run_id)

    external_pipeline = get_external_pipeline_or_raise(
        graphene_info,
        _get_selector_with_workaround(
            graphene_info.context, repository_location_name, repository_name, pipeline_run
        ),
    )

    validated_config = validate_config_from_snap(
        external_pipeline.config_schema_snapshot,
        external_pipeline.root_config_key_for_mode(pipeline_run.mode),
        pipeline_run.run_config,
    )

    if not validated_config.success:
        # If the config is invalid, we construct a DagsterInvalidConfigError exception and
        # insert it into the event log. We also return a PipelineConfigValidationInvalid user facing
        # graphql error.

        # We currently re-use the engine events machinery to add the error to the event log, but
        # may need to create a new event type and instance method to handle these errors.
        invalid_config_exception = DagsterInvalidConfigError(
            "Error in config for pipeline {}".format(external_pipeline.name),
            validated_config.errors,
            pipeline_run.run_config,
        )

        instance.report_engine_event(
            str(invalid_config_exception.message),
            pipeline_run,
            EngineEventData.engine_error(
                SerializableErrorInfo(
                    invalid_config_exception.message,
                    [],
                    DagsterInvalidConfigError.__name__,
                    None,
                )
            ),
        )

        instance.report_run_failed(pipeline_run)

        return DauphinPipelineConfigValidationInvalid.for_validation_errors(
            external_pipeline, validated_config.errors
        )

    return RunExecutionInfo(external_pipeline, pipeline_run)
Example no. 10
def do_launch_for_created_run(graphene_info, run_id):
    check.inst_param(graphene_info, 'graphene_info', ResolveInfo)
    check.str_param(run_id, 'run_id')

    # First retrieve the pipeline run
    instance = graphene_info.context.instance
    pipeline_run = instance.get_run_by_id(run_id)
    if not pipeline_run:
        raise UserFacingGraphQLError(
            graphene_info.schema.type_named('PipelineRunNotFoundError')(run_id)
        )

    external_pipeline = legacy_get_external_pipeline_or_raise(
        graphene_info, pipeline_run.pipeline_name, pipeline_run.solid_subset
    )

    # Run config validation
    # If there are any config errors, then inject them into the event log
    validated_config = validate_config_from_snap(
        external_pipeline.config_schema_snapshot,
        external_pipeline.root_config_key_for_mode(pipeline_run.mode),
        pipeline_run.environment_dict,
    )

    if not validated_config.success:
        # If the config is invalid, we construct a DagsterInvalidConfigError exception and
        # insert it into the event log. We also return a PipelineConfigValidationInvalid user facing
        # graphql error.

        # We currently re-use the engine events machinery to add the error to the event log, but
        # may need to create a new event type and instance method to handle these errors.
        invalid_config_exception = DagsterInvalidConfigError(
            'Error in config for pipeline {}'.format(external_pipeline.name),
            validated_config.errors,
            pipeline_run.environment_dict,
        )

        instance.report_engine_event(
            str(invalid_config_exception.message),
            pipeline_run,
            EngineEventData.engine_error(
                SerializableErrorInfo(
                    invalid_config_exception.message,
                    [],
                    DagsterInvalidConfigError.__name__,
                    None,
                )
            ),
        )

        instance.report_run_failed(pipeline_run)

        return DauphinPipelineConfigValidationInvalid.for_validation_errors(
            external_pipeline, validated_config.errors
        )

    try:
        launched_run = instance.launch_run(pipeline_run.run_id, external_pipeline)
        return graphene_info.schema.type_named('LaunchPipelineRunSuccess')(
            run=graphene_info.schema.type_named('PipelineRun')(launched_run)
        )
    except DagsterLaunchFailedError:
        error = serializable_error_info_from_exc_info(sys.exc_info())
        instance.report_engine_event(
            error.message, pipeline_run, EngineEventData.engine_error(error),
        )
        instance.report_run_failed(pipeline_run)
        # https://github.com/dagster-io/dagster/issues/2508
        # We should return a proper GraphQL error here
        raise
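All of the examples above share the same validate-then-branch pattern around validate_config_from_snap: resolve the root config key for the requested mode, validate the candidate config against the pipeline's config schema snapshot, and branch on the result's success flag and errors list. Below is a minimal sketch of that shared pattern, assuming only the call signature and result fields visible in the snippets; the import path and the check_run_config helper name are illustrative placeholders, not part of the quoted code.

def check_run_config(external_pipeline, mode, run_config):
    # Import path assumed; in the snippets above validate_config_from_snap is already in scope.
    from dagster.config.validate import validate_config_from_snap

    # Validate the candidate run config against the schema snapshot for the requested mode.
    result = validate_config_from_snap(
        config_schema_snapshot=external_pipeline.config_schema_snapshot,
        config_type_key=external_pipeline.root_config_key_for_mode(mode),
        config_value=run_config,
    )

    if not result.success:
        # The examples above wrap result.errors in GraphQL error types such as
        # PipelineConfigValidationInvalid; here we simply raise with an error count.
        raise ValueError(
            'Invalid config for pipeline {}: {} validation error(s)'.format(
                external_pipeline.name, len(result.errors)
            )
        )

    return result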