def _start_pipeline_execution_for_created_run(graphene_info, run_id):
    '''Start execution of a previously-created run identified by *run_id*.

    Returns one of the schema's user-facing result types:
    - StartPipelineRunDisabledError when dagit's execution manager is disabled,
    - PipelineRunNotFoundError when no run exists for run_id,
    - PipelineConfigValidationInvalid when the stored run config fails validation
      (the error is also recorded in the event log and the run marked failed),
    - StartPipelineRunSuccess when execution was kicked off.
    '''
    check.inst_param(graphene_info, 'graphene_info', ResolveInfo)

    instance = graphene_info.context.instance

    # Respect the dagit setting that disables in-process run starts entirely.
    execution_manager_settings = instance.dagit_settings.get('execution_manager')
    if execution_manager_settings and execution_manager_settings.get('disabled'):
        return graphene_info.schema.type_named('StartPipelineRunDisabledError')()

    pipeline_run = instance.get_run_by_id(run_id)
    if not pipeline_run:
        return graphene_info.schema.type_named('PipelineRunNotFoundError')(run_id)

    external_pipeline = get_external_pipeline_subset_or_raise(
        graphene_info, pipeline_run.selector.name, pipeline_run.selector.solid_subset
    )

    validated_config = validate_config_from_snap(
        external_pipeline.config_schema_snapshot,
        external_pipeline.root_config_key_for_mode(pipeline_run.mode),
        pipeline_run.environment_dict,
    )

    if not validated_config.success:
        # If the config is invalid, we construct a DagsterInvalidConfigError exception and
        # insert it into the event log. We also return a PipelineConfigValidationInvalid user
        # facing graphql error.

        # We currently re-use the engine events machinery to add the error to the event log, but
        # may need to create a new event type and instance method to handle these errors.
        invalid_config_exception = DagsterInvalidConfigError(
            'Error in config for pipeline {}'.format(external_pipeline.name),
            validated_config.errors,
            pipeline_run.environment_dict,
        )

        instance.report_engine_event(
            str(invalid_config_exception.message),
            pipeline_run,
            EngineEventData.engine_error(
                SerializableErrorInfo(
                    invalid_config_exception.message,
                    [],
                    # Fixed: `DagsterInvalidConfigError.__class__.__name__` is the name of
                    # the *metaclass* ('type'); `__name__` gives the exception class name.
                    DagsterInvalidConfigError.__name__,
                    None,
                )
            ),
        )
        instance.report_run_failed(pipeline_run)

        return DauphinPipelineConfigValidationInvalid.for_validation_errors(
            external_pipeline.pipeline_index, validated_config.errors
        )

    graphene_info.context.execute_pipeline(external_pipeline, pipeline_run)

    return graphene_info.schema.type_named('StartPipelineRunSuccess')(
        run=graphene_info.schema.type_named('PipelineRun')(pipeline_run)
    )
def get_validated_config(pipeline_def, environment_dict, mode):
    '''Validate *environment_dict* against *pipeline_def*'s config schema for *mode*.

    Returns the successful validation result, or raises a UserFacingGraphQLError
    wrapping a PipelineConfigValidationInvalid when validation fails.
    '''
    from dagster_graphql.schema.errors import DauphinPipelineConfigValidationInvalid

    check.str_param(mode, 'mode')
    check.inst_param(pipeline_def, 'pipeline_def', PipelineDefinition)

    schema = create_run_config_schema(pipeline_def, mode)
    result = validate_config(schema.environment_type, environment_dict)
    if result.success:
        return result

    raise UserFacingGraphQLError(
        DauphinPipelineConfigValidationInvalid.for_validation_errors(
            pipeline_def.get_external_pipeline(), result.errors
        )
    )
def ensure_valid_config(external_pipeline, mode, run_config):
    """Validate *run_config* against the external pipeline's schema for *mode*.

    Returns the successful validation result; raises a UserFacingGraphQLError
    wrapping a PipelineConfigValidationInvalid when validation fails.
    """
    check.inst_param(external_pipeline, "external_pipeline", ExternalPipeline)
    check.str_param(mode, "mode")

    # do not type check run_config so that validate_config_from_snap throws
    result = validate_config_from_snap(
        config_schema_snapshot=external_pipeline.config_schema_snapshot,
        config_type_key=external_pipeline.root_config_key_for_mode(mode),
        config_value=run_config,
    )
    if result.success:
        return result

    from dagster_graphql.schema.errors import DauphinPipelineConfigValidationInvalid

    raise UserFacingGraphQLError(
        DauphinPipelineConfigValidationInvalid.for_validation_errors(
            external_pipeline, result.errors
        )
    )
def ensure_valid_config(external_pipeline, mode, environment_dict):
    '''Validate *environment_dict* against the external pipeline's schema for *mode*.

    Returns the successful validation result; raises a UserFacingGraphQLError
    wrapping a PipelineConfigValidationInvalid when validation fails.
    '''
    check.inst_param(external_pipeline, 'external_pipeline', ExternalPipeline)
    check.str_param(mode, 'mode')

    # do not type check environment_dict so that validate_config_from_snap throws
    result = validate_config_from_snap(
        config_schema_snapshot=external_pipeline.config_schema_snapshot,
        config_type_key=external_pipeline.root_config_key_for_mode(mode),
        config_value=environment_dict,
    )
    if result.success:
        return result

    from dagster_graphql.schema.errors import DauphinPipelineConfigValidationInvalid

    raise UserFacingGraphQLError(
        DauphinPipelineConfigValidationInvalid.for_validation_errors(
            external_pipeline.pipeline_index, result.errors
        )
    )
def get_run_execution_info_for_created_run_or_error(
    graphene_info, repository_location_name, repository_name, run_id
):
    """Look up a previously created run and its external pipeline for execution.

    Previously created run could either be created in a different process *or*
    during the launchScheduledRun call where we want to have a record of
    a run the was created but have invalid configuration.

    Returns:
    - PipelineRunNotFoundError when no run exists for run_id,
    - PipelineConfigValidationInvalid when the stored run config fails validation
      (the error is also recorded in the event log and the run marked failed),
    - RunExecutionInfo(external_pipeline, pipeline_run) on success.
    """
    check.inst_param(graphene_info, "graphene_info", ResolveInfo)
    check.str_param(repository_location_name, "repository_location_name")
    check.str_param(repository_name, "repository_name")
    check.str_param(run_id, "run_id")

    instance = graphene_info.context.instance
    pipeline_run = instance.get_run_by_id(run_id)
    if not pipeline_run:
        return graphene_info.schema.type_named("PipelineRunNotFoundError")(run_id)

    external_pipeline = get_external_pipeline_or_raise(
        graphene_info,
        _get_selector_with_workaround(
            graphene_info.context, repository_location_name, repository_name, pipeline_run
        ),
    )

    validated_config = validate_config_from_snap(
        external_pipeline.config_schema_snapshot,
        external_pipeline.root_config_key_for_mode(pipeline_run.mode),
        pipeline_run.run_config,
    )

    if not validated_config.success:
        # If the config is invalid, we construct a DagsterInvalidConfigError exception and
        # insert it into the event log. We also return a PipelineConfigValidationInvalid user
        # facing graphql error.

        # We currently re-use the engine events machinery to add the error to the event log, but
        # may need to create a new event type and instance method to handle these errors.
        invalid_config_exception = DagsterInvalidConfigError(
            "Error in config for pipeline {}".format(external_pipeline.name),
            validated_config.errors,
            pipeline_run.run_config,
        )

        instance.report_engine_event(
            str(invalid_config_exception.message),
            pipeline_run,
            EngineEventData.engine_error(
                SerializableErrorInfo(
                    invalid_config_exception.message,
                    [],
                    # Fixed: `DagsterInvalidConfigError.__class__.__name__` is the name of
                    # the *metaclass* ('type'); `__name__` gives the exception class name.
                    DagsterInvalidConfigError.__name__,
                    None,
                )
            ),
        )
        instance.report_run_failed(pipeline_run)

        return DauphinPipelineConfigValidationInvalid.for_validation_errors(
            external_pipeline, validated_config.errors
        )

    return RunExecutionInfo(external_pipeline, pipeline_run)
def _launch_pipeline_execution_for_created_run(graphene_info, run_id):
    '''Launch (via the run launcher) a run that was created previously.

    Returns one of the schema's user-facing result types:
    - PipelineRunNotFoundError when no run exists for run_id,
    - PipelineConfigValidationInvalid when the stored run config fails validation
      (the error is also recorded in the event log and the run marked failed),
    - LaunchPipelineRunSuccess when the launcher accepted the run.

    A DagsterLaunchFailedError from the launcher is recorded as an engine error
    and the run is marked failed (the error is not re-raised).
    '''
    check.inst_param(graphene_info, 'graphene_info', ResolveInfo)
    check.str_param(run_id, 'run_id')

    # First retrieve the pipeline run
    instance = graphene_info.context.instance
    pipeline_run = instance.get_run_by_id(run_id)
    if not pipeline_run:
        return graphene_info.schema.type_named('PipelineRunNotFoundError')(run_id)

    external_pipeline = get_external_pipeline_or_raise(
        graphene_info, pipeline_run.selector.name, pipeline_run.selector.solid_subset
    )

    # Run config validation
    # If there are any config errors, then inject them into the event log
    validated_config = ensure_valid_config(
        external_pipeline, pipeline_run.mode, pipeline_run.environment_dict
    )

    if not validated_config.success:
        # If the config is invalid, we construct a DagsterInvalidConfigError exception and
        # insert it into the event log. We also return a PipelineConfigValidationInvalid user
        # facing graphql error.

        # We currently re-use the engine events machinery to add the error to the event log, but
        # may need to create a new event type and instance method to handle these errors.
        invalid_config_exception = DagsterInvalidConfigError(
            'Error in config for pipeline {}'.format(external_pipeline.name),
            validated_config.errors,
            pipeline_run.environment_dict,
        )

        instance.report_engine_event(
            str(invalid_config_exception.message),
            pipeline_run,
            EngineEventData.engine_error(
                SerializableErrorInfo(
                    invalid_config_exception.message,
                    [],
                    # Fixed: `DagsterInvalidConfigError.__class__.__name__` is the name of
                    # the *metaclass* ('type'); `__name__` gives the exception class name.
                    DagsterInvalidConfigError.__name__,
                    None,
                )
            ),
        )
        instance.report_run_failed(pipeline_run)

        return DauphinPipelineConfigValidationInvalid.for_validation_errors(
            external_pipeline, validated_config.errors
        )

    try:
        pipeline_run = instance.launch_run(pipeline_run.run_id)
    except DagsterLaunchFailedError:
        error = serializable_error_info_from_exc_info(sys.exc_info())
        instance.report_engine_event(
            error.message, pipeline_run, EngineEventData.engine_error(error),
        )
        instance.report_run_failed(pipeline_run)

    return graphene_info.schema.type_named('LaunchPipelineRunSuccess')(
        run=graphene_info.schema.type_named('PipelineRun')(pipeline_run)
    )
def _start_pipeline_execution_for_created_run(graphene_info, run_id):
    '''Start in-process execution of a previously-created run identified by *run_id*.

    Returns one of the schema's user-facing result types:
    - StartPipelineRunDisabledError when dagit's execution manager is disabled,
    - PipelineRunNotFoundError when no run exists for run_id,
    - PipelineConfigValidationInvalid when the stored run config fails validation
      (the error is also recorded in the event log),
    - StartPipelineRunSuccess when execution was handed to the execution manager.
    '''
    check.inst_param(graphene_info, 'graphene_info', ResolveInfo)

    instance = graphene_info.context.instance

    # Respect the dagit setting that disables in-process run starts entirely.
    execution_manager_settings = instance.dagit_settings.get('execution_manager')
    if execution_manager_settings and execution_manager_settings.get('disabled'):
        return graphene_info.schema.type_named('StartPipelineRunDisabledError')()

    pipeline_run = instance.get_run_by_id(run_id)
    if not pipeline_run:
        return graphene_info.schema.type_named('PipelineRunNotFoundError')(run_id)

    pipeline_def = get_pipeline_def_from_selector(graphene_info, pipeline_run.selector)

    environment_schema = create_environment_schema(pipeline_def, pipeline_run.mode)
    validated_config = validate_config(
        environment_schema.environment_type, pipeline_run.environment_dict
    )

    if not validated_config.success:
        # If the config is invalid, we construct a DagsterInvalidConfigError exception and
        # insert it into the event log. We also return a PipelineConfigValidationInvalid user
        # facing graphql error.

        # We currently re-use the engine events machinery to add the error to the event log, but
        # may need to create a new event type and instance method to handle these errors.
        invalid_config_exception = DagsterInvalidConfigError(
            'Error in config for pipeline {}'.format(pipeline_def.name),
            validated_config.errors,
            pipeline_run.environment_dict,
        )

        instance.report_engine_event(
            str(invalid_config_exception.message),
            pipeline_run,
            EngineEventData.engine_error(
                SerializableErrorInfo(
                    invalid_config_exception.message,
                    [],
                    # Fixed: `DagsterInvalidConfigError.__class__.__name__` is the name of
                    # the *metaclass* ('type'); `__name__` gives the exception class name.
                    DagsterInvalidConfigError.__name__,
                    None,
                )
            ),
        )

        # TODO: also insert a pipeline init failure event
        # https://github.com/dagster-io/dagster/issues/2385
        return DauphinPipelineConfigValidationInvalid.for_validation_errors(
            pipeline_def, validated_config.errors
        )

    # Building the execution plan here surfaces plan-construction errors before
    # we hand the run to the execution manager.
    create_execution_plan(
        pipeline_def,
        pipeline_run.environment_dict,
        mode=pipeline_run.mode,
        step_keys_to_execute=pipeline_run.step_keys_to_execute,
    )

    graphene_info.context.execution_manager.execute_pipeline(
        graphene_info.context.get_handle(), pipeline_def, pipeline_run, instance=instance,
    )

    return graphene_info.schema.type_named('StartPipelineRunSuccess')(
        run=graphene_info.schema.type_named('PipelineRun')(pipeline_run)
    )