class DauphinQuery(dauphin.ObjectType):
    class Meta(object):
        name = 'Query'

    version = dauphin.NonNull(dauphin.String)
    reloadSupported = dauphin.NonNull(dauphin.Boolean)

    pipelineOrError = dauphin.Field(
        dauphin.NonNull('PipelineOrError'), params=dauphin.NonNull('ExecutionSelector')
    )
    pipeline = dauphin.Field(
        dauphin.NonNull('Pipeline'), params=dauphin.NonNull('ExecutionSelector')
    )
    pipelinesOrError = dauphin.NonNull('PipelinesOrError')
    pipelines = dauphin.Field(dauphin.NonNull('PipelineConnection'))

    pipelineSnapshot = dauphin.Field(
        dauphin.NonNull('PipelineSnapshot'),
        snapshotId=dauphin.Argument(dauphin.NonNull(dauphin.String)),
    )

    pipelineSnapshotOrError = dauphin.Field(
        dauphin.NonNull('PipelineSnapshotOrError'),
        snapshotId=dauphin.String(),
        activePipelineName=dauphin.String(),
    )

    scheduler = dauphin.Field(dauphin.NonNull('SchedulerOrError'))
    scheduleOrError = dauphin.Field(
        dauphin.NonNull('ScheduleOrError'),
        schedule_name=dauphin.NonNull(dauphin.String),
        limit=dauphin.Int(),
    )

    partitionSetsOrError = dauphin.Field(
        dauphin.NonNull('PartitionSetsOrError'), pipelineName=dauphin.String()
    )
    partitionSetOrError = dauphin.Field(
        dauphin.NonNull('PartitionSetOrError'), partitionSetName=dauphin.String()
    )

    pipelineRunsOrError = dauphin.Field(
        dauphin.NonNull('PipelineRunsOrError'),
        filter=dauphin.Argument('PipelineRunsFilter'),
        cursor=dauphin.String(),
        limit=dauphin.Int(),
    )

    pipelineRunOrError = dauphin.Field(
        dauphin.NonNull('PipelineRunOrError'), runId=dauphin.NonNull(dauphin.ID)
    )

    pipelineRunTags = dauphin.non_null_list('PipelineTagAndValues')

    runGroupOrError = dauphin.Field(
        dauphin.NonNull('RunGroupOrError'), runId=dauphin.NonNull(dauphin.ID)
    )

    usedSolids = dauphin.Field(dauphin.non_null_list('UsedSolid'))
    usedSolid = dauphin.Field('UsedSolid', name=dauphin.NonNull(dauphin.String))

    isPipelineConfigValid = dauphin.Field(
        dauphin.NonNull('PipelineConfigValidationResult'),
        args={
            'pipeline': dauphin.Argument(dauphin.NonNull('ExecutionSelector')),
            'environmentConfigData': dauphin.Argument('EnvironmentConfigData'),
            'mode': dauphin.Argument(dauphin.NonNull(dauphin.String)),
        },
    )

    executionPlan = dauphin.Field(
        dauphin.NonNull('ExecutionPlanResult'),
        args={
            'pipeline': dauphin.Argument(dauphin.NonNull('ExecutionSelector')),
            'environmentConfigData': dauphin.Argument('EnvironmentConfigData'),
            'mode': dauphin.Argument(dauphin.NonNull(dauphin.String)),
        },
    )

    environmentSchemaOrError = dauphin.Field(
        dauphin.NonNull('EnvironmentSchemaOrError'),
        args={
            'selector': dauphin.Argument(dauphin.NonNull('ExecutionSelector')),
            'mode': dauphin.Argument(dauphin.String),
        },
        description='''Fetch an environment schema given an execution selection and a mode.
        See the description on EnvironmentSchema for more information.''',
    )

    instance = dauphin.NonNull('Instance')
    assetsOrError = dauphin.Field(dauphin.NonNull('AssetsOrError'))
    assetOrError = dauphin.Field(
        dauphin.NonNull('AssetOrError'), assetKey=dauphin.NonNull(dauphin.String)
    )

    def resolve_pipelineSnapshot(self, graphene_info, **kwargs):
        return get_pipeline_snapshot(graphene_info, kwargs['snapshotId'])

    def resolve_pipelineSnapshotOrError(self, graphene_info, **kwargs):
        snapshot_id_arg = kwargs.get('snapshotId')
        pipeline_name_arg = kwargs.get('activePipelineName')
        check.invariant(
            not (snapshot_id_arg and pipeline_name_arg),
            'Cannot pass both snapshotId and activePipelineName',
        )
        check.invariant(
            snapshot_id_arg or pipeline_name_arg,
            'Must set one of snapshotId or activePipelineName',
        )

        if pipeline_name_arg:
            return get_pipeline_snapshot_or_error_from_pipeline_name(
                graphene_info, pipeline_name_arg
            )
        else:
            return get_pipeline_snapshot_or_error_from_snapshot_id(
                graphene_info, snapshot_id_arg
            )

    def resolve_version(self, graphene_info):
        return graphene_info.context.version

    def resolve_reloadSupported(self, graphene_info):
        return graphene_info.context.is_reload_supported

    def resolve_scheduler(self, graphene_info):
        return get_scheduler_or_error(graphene_info)

    def resolve_scheduleOrError(self, graphene_info, schedule_name):
        return get_schedule_or_error(graphene_info, schedule_name)

    def resolve_pipelineOrError(self, graphene_info, **kwargs):
        return get_pipeline_or_error(graphene_info, kwargs['params'].to_selector())

    def resolve_pipeline(self, graphene_info, **kwargs):
        return get_pipeline_or_raise(graphene_info, kwargs['params'].to_selector())

    def resolve_pipelinesOrError(self, graphene_info):
        return get_pipelines_or_error(graphene_info)

    def resolve_pipelines(self, graphene_info):
        return get_pipelines_or_raise(graphene_info)

    def resolve_pipelineRunsOrError(self, graphene_info, **kwargs):
        filters = kwargs.get('filter')
        if filters is not None:
            filters = filters.to_selector()

        return graphene_info.schema.type_named('PipelineRuns')(
            results=get_runs(graphene_info, filters, kwargs.get('cursor'), kwargs.get('limit'))
        )

    def resolve_pipelineRunOrError(self, graphene_info, runId):
        return get_run_by_id(graphene_info, runId)

    def resolve_partitionSetsOrError(self, graphene_info, **kwargs):
        pipeline_name = kwargs.get('pipelineName')
        return get_partition_sets_or_error(graphene_info, pipeline_name)

    def resolve_partitionSetOrError(self, graphene_info, partitionSetName):
        return get_partition_set(graphene_info, partitionSetName)

    def resolve_pipelineRunTags(self, graphene_info):
        return get_run_tags(graphene_info)

    def resolve_runGroupOrError(self, graphene_info, runId):
        return get_run_group(graphene_info, runId)

    def resolve_usedSolid(self, graphene_info, name):
        return get_solid(graphene_info, name)

    def resolve_usedSolids(self, graphene_info):
        return get_solids(graphene_info)

    def resolve_isPipelineConfigValid(self, graphene_info, pipeline, **kwargs):
        return validate_pipeline_config(
            graphene_info,
            pipeline.to_selector(),
            kwargs.get('environmentConfigData'),
            kwargs.get('mode'),
        )

    def resolve_executionPlan(self, graphene_info, pipeline, **kwargs):
        return get_execution_plan(
            graphene_info,
            pipeline.to_selector(),
            kwargs.get('environmentConfigData'),
            kwargs.get('mode'),
        )

    def resolve_environmentSchemaOrError(self, graphene_info, **kwargs):
        return resolve_environment_schema_or_error(
            graphene_info, kwargs['selector'].to_selector(), kwargs.get('mode')
        )

    def resolve_instance(self, graphene_info):
        return graphene_info.schema.type_named('Instance')(graphene_info.context.instance)

    def resolve_assetsOrError(self, graphene_info):
        return get_assets(graphene_info)

    def resolve_assetOrError(self, graphene_info, assetKey):
        return get_asset(graphene_info, assetKey)
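# A minimal usage sketch (not part of the schema above): the either/or contract
# enforced by resolve_pipelineSnapshotOrError means a client passes exactly one
# of snapshotId or activePipelineName. Field and argument names below come from
# DauphinQuery; the operation and variable names are illustrative.
PIPELINE_SNAPSHOT_QUERY = '''
query PipelineSnapshotById($snapshotId: String) {
  pipelineSnapshotOrError(snapshotId: $snapshotId) {
    __typename
  }
}
'''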
class DauphinPartition(dauphin.ObjectType):
    class Meta:
        name = "Partition"

    name = dauphin.NonNull(dauphin.String)
    partition_set_name = dauphin.NonNull(dauphin.String)
    solid_selection = dauphin.List(dauphin.NonNull(dauphin.String))
    mode = dauphin.NonNull(dauphin.String)
    runConfigOrError = dauphin.NonNull("PartitionRunConfigOrError")
    tagsOrError = dauphin.NonNull("PartitionTagsOrError")
    runs = dauphin.Field(
        dauphin.non_null_list("PipelineRun"),
        filter=dauphin.Argument("PipelineRunsFilter"),
        cursor=dauphin.String(),
        limit=dauphin.Int(),
    )
    status = dauphin.Field("PipelineRunStatus")

    def __init__(self, external_repository_handle, external_partition_set, partition_name):
        self._external_repository_handle = check.inst_param(
            external_repository_handle, "external_repository_handle", RepositoryHandle
        )
        self._external_partition_set = check.inst_param(
            external_partition_set, "external_partition_set", ExternalPartitionSet
        )
        self._partition_name = check.str_param(partition_name, "partition_name")

        super(DauphinPartition, self).__init__(
            name=partition_name,
            partition_set_name=external_partition_set.name,
            solid_selection=external_partition_set.solid_selection,
            mode=external_partition_set.mode,
        )

    def resolve_runConfigOrError(self, graphene_info):
        return get_partition_config(
            graphene_info,
            self._external_repository_handle,
            self._external_partition_set.name,
            self._partition_name,
        )

    def resolve_tagsOrError(self, graphene_info):
        return get_partition_tags(
            graphene_info,
            self._external_repository_handle,
            self._external_partition_set.name,
            self._partition_name,
        )

    def resolve_runs(self, graphene_info, **kwargs):
        filters = kwargs.get("filter")
        partition_tags = {
            PARTITION_SET_TAG: self._external_partition_set.name,
            PARTITION_NAME_TAG: self._partition_name,
        }
        if filters is not None:
            filters = filters.to_selector()
            runs_filter = PipelineRunsFilter(
                run_ids=filters.run_ids,
                pipeline_name=filters.pipeline_name,
                statuses=filters.statuses,
                tags=merge_dicts(filters.tags, partition_tags),
            )
        else:
            runs_filter = PipelineRunsFilter(tags=partition_tags)

        return get_runs(
            graphene_info, runs_filter, cursor=kwargs.get("cursor"), limit=kwargs.get("limit")
        )
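# A hedged sketch of what resolve_runs above builds: any client-supplied filter
# is narrowed so it can only match runs belonging to this partition. The tag
# keys below stand in for the PARTITION_SET_TAG and PARTITION_NAME_TAG
# constants imported by this module; the values are illustrative.
#
#     client filter tags:  {"team": "data"}
#     partition tags:      {PARTITION_SET_TAG: "daily_set", PARTITION_NAME_TAG: "2020-01-01"}
#     merged filter tags:  {"team": "data", PARTITION_SET_TAG: "daily_set", PARTITION_NAME_TAG: "2020-01-01"}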
class DauphinScheduler(dauphin.ObjectType):
    class Meta:
        name = 'Scheduler'

    runningSchedules = dauphin.non_null_list('RunningSchedule')
class DauphinAssetKeyInput(dauphin.InputObjectType):
    class Meta(object):
        name = "AssetKeyInput"

    path = dauphin.non_null_list(dauphin.String)
class DauphinRunConfigSchema(dauphin.ObjectType):
    def __init__(self, represented_pipeline, mode):
        self._represented_pipeline = check.inst_param(
            represented_pipeline, "represented_pipeline", RepresentedPipeline
        )
        self._mode = check.str_param(mode, "mode")

    class Meta(object):
        name = "RunConfigSchema"
        description = """The run config schema represents all of the config type
        information given a certain execution selection and mode of execution of
        that selection. All config interactions (e.g. checking config validity,
        fetching all config types, fetching a particular config type) should be
        done through this type."""

    rootConfigType = dauphin.Field(
        dauphin.NonNull("ConfigType"),
        description="""Fetch the root environment type. Concretely this is the type
        that is in scope at the root of the configuration document for a particular
        execution selection. It is the type that is in scope initially with a blank
        config editor.""",
    )
    allConfigTypes = dauphin.Field(
        dauphin.non_null_list("ConfigType"),
        description="""Fetch all the named config types that are in the schema.
        Useful for things like a type browser UI, or for fetching all the types
        that are in the scope of a document so that the index can be built for the
        autocompleting editor.""",
    )

    isRunConfigValid = dauphin.Field(
        dauphin.NonNull("PipelineConfigValidationResult"),
        args={"runConfigData": dauphin.Argument("RunConfigData")},
        description="""Parse a particular run config result. The return value either
        indicates that the validation succeeded by returning
        `PipelineConfigValidationValid` or that there are configuration errors by
        returning `PipelineConfigValidationInvalid`, which contains a list of errors
        that can be rendered for the user.""",
    )

    def resolve_allConfigTypes(self, _graphene_info):
        return sorted(
            list(
                map(
                    lambda key: to_dauphin_config_type(
                        self._represented_pipeline.config_schema_snapshot, key
                    ),
                    self._represented_pipeline.config_schema_snapshot.all_config_keys,
                )
            ),
            key=lambda ct: ct.key,
        )

    def resolve_rootConfigType(self, _graphene_info):
        return to_dauphin_config_type(
            self._represented_pipeline.config_schema_snapshot,
            self._represented_pipeline.get_mode_def_snap(self._mode).root_config_key,
        )

    def resolve_isRunConfigValid(self, graphene_info, **kwargs):
        return resolve_is_run_config_valid(
            graphene_info,
            self._represented_pipeline,
            self._mode,
            kwargs.get("runConfigData", {}),
        )
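# A minimal sketch of how a client might exercise RunConfigSchema, assuming it
# is reached through the runConfigSchemaOrError root field defined on
# DauphinQuery elsewhere in this module, and assuming ConfigType exposes a
# `key` field (as the sort in resolve_allConfigTypes suggests). Operation and
# variable names are illustrative.
RUN_CONFIG_SCHEMA_QUERY = """
query RunConfigSchema($selector: PipelineSelector!, $mode: String, $runConfigData: RunConfigData) {
  runConfigSchemaOrError(selector: $selector, mode: $mode) {
    ... on RunConfigSchema {
      rootConfigType { key }
      allConfigTypes { key }
      isRunConfigValid(runConfigData: $runConfigData) { __typename }
    }
  }
}
"""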
class DauphinPipeline(dauphin.ObjectType):
    class Meta:
        name = 'Pipeline'
        interfaces = (DauphinSolidContainer, DauphinPipelineReference)

    name = dauphin.NonNull(dauphin.String)
    description = dauphin.String()
    solids = dauphin.non_null_list('Solid')
    runtime_types = dauphin.non_null_list('RuntimeType')
    runs = dauphin.non_null_list('PipelineRun')
    modes = dauphin.non_null_list('Mode')
    solid_handles = dauphin.non_null_list('SolidHandle')
    presets = dauphin.non_null_list('PipelinePreset')

    def __init__(self, pipeline):
        super(DauphinPipeline, self).__init__(name=pipeline.name, description=pipeline.description)
        self._pipeline = check.inst_param(pipeline, 'pipeline', PipelineDefinition)

    def resolve_solids(self, _graphene_info):
        return build_dauphin_solids(self._pipeline)

    def resolve_runtime_types(self, _graphene_info):
        return sorted(
            list(
                map(
                    to_dauphin_runtime_type,
                    [t for t in self._pipeline.all_runtime_types() if t.name],
                )
            ),
            key=lambda config_type: config_type.name,
        )

    def resolve_runs(self, graphene_info):
        return [
            graphene_info.schema.type_named('PipelineRun')(r)
            for r in graphene_info.context.instance.get_runs_with_pipeline_name(
                self._pipeline.name
            )
        ]

    def get_dagster_pipeline(self):
        return self._pipeline

    def get_type(self, _graphene_info, typeName):
        if self._pipeline.has_config_type(typeName):
            return to_dauphin_config_type(self._pipeline.config_type_named(typeName))
        elif self._pipeline.has_runtime_type(typeName):
            return to_dauphin_runtime_type(self._pipeline.runtime_type_named(typeName))
        else:
            check.failed('Not a config type or runtime type')

    def resolve_modes(self, graphene_info):
        return [
            graphene_info.schema.type_named('Mode')(mode_definition)
            for mode_definition in sorted(
                self._pipeline.mode_definitions, key=lambda item: item.name
            )
        ]

    def resolve_solid_handles(self, _graphene_info):
        return sorted(
            build_dauphin_solid_handles(self._pipeline), key=lambda item: str(item.handleID)
        )

    def resolve_presets(self, _graphene_info):
        return [
            DauphinPipelinePreset(preset, self._pipeline.name)
            for preset in sorted(self._pipeline.get_presets(), key=lambda item: item.name)
        ]
class DauphinPipelineRunLogsSubscriptionSuccess(dauphin.ObjectType):
    class Meta(object):
        name = "PipelineRunLogsSubscriptionSuccess"

    run = dauphin.NonNull("PipelineRun")
    messages = dauphin.non_null_list("PipelineRunEvent")
class DauphinSolidContainer(dauphin.Interface):
    class Meta(object):
        name = 'SolidContainer'

    solids = dauphin.non_null_list('Solid')
class DauphinScheduleDefinitions(dauphin.ObjectType):
    class Meta(object):
        name = "ScheduleDefinitions"

    results = dauphin.non_null_list("ScheduleDefinition")
class DauphinEnvironmentSchema(dauphin.ObjectType):
    def __init__(self, environment_schema, dagster_pipeline):
        from dagster.core.definitions.environment_schema import EnvironmentSchema
        from dagster.core.definitions.pipeline import PipelineDefinition

        self._environment_schema = check.inst_param(
            environment_schema, 'environment_schema', EnvironmentSchema
        )
        self._dagster_pipeline = check.inst_param(
            dagster_pipeline, 'dagster_pipeline', PipelineDefinition
        )

    class Meta:
        name = 'EnvironmentSchema'
        description = '''The environment schema represents all of the config type
        information given a certain execution selection and mode of execution of
        that selection. All config interactions (e.g. checking config validity,
        fetching all config types, fetching a particular config type) should be
        done through this type.'''

    rootEnvironmentType = dauphin.Field(
        dauphin.NonNull('ConfigType'),
        description='''Fetch the root environment type. Concretely this is the type
        that is in scope at the root of the configuration document for a particular
        execution selection. It is the type that is in scope initially with a blank
        config editor.''',
    )

    allConfigTypes = dauphin.Field(
        dauphin.non_null_list('ConfigType'),
        description='''Fetch all the named config types that are in the schema.
        Useful for things like a type browser UI, or for fetching all the types
        that are in the scope of a document so that the index can be built for the
        autocompleting editor.''',
    )

    configTypeOrError = dauphin.Field(
        dauphin.NonNull('ConfigTypeOrError'),
        configTypeName=dauphin.Argument(dauphin.NonNull(dauphin.String)),
        description='''Fetch a particular config type.''',
    )

    isEnvironmentConfigValid = dauphin.Field(
        dauphin.NonNull('PipelineConfigValidationResult'),
        args={'environmentConfigData': dauphin.Argument('EnvironmentConfigData')},
        description='''Parse a particular environment config result. The return
        value either indicates that the validation succeeded by returning
        `PipelineConfigValidationValid` or that there are configuration errors by
        returning `PipelineConfigValidationInvalid`, which contains a list of errors
        that can be rendered for the user.''',
    )

    def resolve_allConfigTypes(self, _graphene_info):
        return sorted(
            list(map(to_dauphin_config_type, self._environment_schema.all_config_types())),
            key=lambda ct: ct.name if ct.name else '',
        )

    def resolve_rootEnvironmentType(self, _graphene_info):
        return to_dauphin_config_type(self._environment_schema.environment_type)

    def resolve_configTypeOrError(self, graphene_info, **kwargs):
        return resolve_config_type_or_error(
            graphene_info,
            self._environment_schema,
            self._dagster_pipeline,
            kwargs['configTypeName'],
        )

    def resolve_isEnvironmentConfigValid(self, graphene_info, **kwargs):
        return resolve_is_environment_config_valid(
            graphene_info,
            self._environment_schema,
            self._dagster_pipeline,
            kwargs.get('environmentConfigData'),
        )
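# A minimal sketch of a client query against EnvironmentSchema, via the
# environmentSchemaOrError root field declared on the Query types in this file.
# Only field and argument names from the definitions above are used; the
# operation and variable names (and whether $mode is required) are illustrative.
ENVIRONMENT_SCHEMA_QUERY = '''
query EnvironmentSchema($selector: ExecutionSelector!, $mode: String) {
  environmentSchemaOrError(selector: $selector, mode: $mode) {
    ... on EnvironmentSchema {
      rootEnvironmentType { __typename }
      configTypeOrError(configTypeName: "String") { __typename }
    }
  }
}
'''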
class DauphinQuery(dauphin.ObjectType):
    class Meta:
        name = 'Query'

    version = dauphin.NonNull(dauphin.String)
    pipelineOrError = dauphin.Field(
        dauphin.NonNull('PipelineOrError'), params=dauphin.NonNull('ExecutionSelector')
    )
    pipeline = dauphin.Field(
        dauphin.NonNull('Pipeline'), params=dauphin.NonNull('ExecutionSelector')
    )
    pipelinesOrError = dauphin.NonNull('PipelinesOrError')
    pipelines = dauphin.Field(dauphin.NonNull('PipelineConnection'))

    configTypeOrError = dauphin.Field(
        dauphin.NonNull('ConfigTypeOrError'),
        pipelineName=dauphin.Argument(dauphin.NonNull(dauphin.String)),
        configTypeName=dauphin.Argument(dauphin.NonNull(dauphin.String)),
        mode=dauphin.Argument(dauphin.NonNull(dauphin.String)),
    )

    runtimeTypeOrError = dauphin.Field(
        dauphin.NonNull('RuntimeTypeOrError'),
        pipelineName=dauphin.Argument(dauphin.NonNull(dauphin.String)),
        runtimeTypeName=dauphin.Argument(dauphin.NonNull(dauphin.String)),
    )

    pipelineRuns = dauphin.non_null_list('PipelineRun')
    pipelineRunOrError = dauphin.Field(
        dauphin.NonNull('PipelineRunOrError'), runId=dauphin.NonNull(dauphin.ID)
    )

    isPipelineConfigValid = dauphin.Field(
        dauphin.NonNull('PipelineConfigValidationResult'),
        args={
            'pipeline': dauphin.Argument(dauphin.NonNull('ExecutionSelector')),
            'environmentConfigData': dauphin.Argument('EnvironmentConfigData'),
            'mode': dauphin.Argument(dauphin.NonNull(dauphin.String)),
        },
    )

    executionPlan = dauphin.Field(
        dauphin.NonNull('ExecutionPlanResult'),
        args={
            'pipeline': dauphin.Argument(dauphin.NonNull('ExecutionSelector')),
            'environmentConfigData': dauphin.Argument('EnvironmentConfigData'),
            'mode': dauphin.Argument(dauphin.NonNull(dauphin.String)),
        },
    )

    environmentSchemaOrError = dauphin.Field(
        dauphin.NonNull('EnvironmentSchemaOrError'),
        args={
            'selector': dauphin.Argument(dauphin.NonNull('ExecutionSelector')),
            'mode': dauphin.Argument(dauphin.NonNull(dauphin.String)),
        },
        description='''Fetch an environment schema given an execution selection and a mode.
        See the description on EnvironmentSchema for more information.''',
    )

    def resolve_configTypeOrError(self, graphene_info, **kwargs):
        return get_config_type(
            graphene_info, kwargs['pipelineName'], kwargs['configTypeName'], kwargs.get('mode')
        )

    def resolve_runtimeTypeOrError(self, graphene_info, **kwargs):
        return get_runtime_type(graphene_info, kwargs['pipelineName'], kwargs['runtimeTypeName'])

    def resolve_version(self, graphene_info):
        return graphene_info.context.version

    def resolve_pipelineOrError(self, graphene_info, **kwargs):
        return get_pipeline_or_error(graphene_info, kwargs['params'].to_selector())

    def resolve_pipeline(self, graphene_info, **kwargs):
        return get_pipeline_or_raise(graphene_info, kwargs['params'].to_selector())

    def resolve_pipelinesOrError(self, graphene_info):
        return get_pipelines_or_error(graphene_info)

    def resolve_pipelines(self, graphene_info):
        return get_pipelines_or_raise(graphene_info)

    def resolve_pipelineRuns(self, graphene_info):
        return get_runs(graphene_info)

    def resolve_pipelineRunOrError(self, graphene_info, runId):
        return get_run(graphene_info, runId)

    def resolve_isPipelineConfigValid(self, graphene_info, pipeline, **kwargs):
        return validate_pipeline_config(
            graphene_info,
            pipeline.to_selector(),
            kwargs.get('environmentConfigData'),
            kwargs.get('mode'),
        )

    def resolve_executionPlan(self, graphene_info, pipeline, **kwargs):
        return get_execution_plan(
            graphene_info,
            pipeline.to_selector(),
            kwargs.get('environmentConfigData'),
            kwargs.get('mode'),
        )

    def resolve_environmentSchemaOrError(self, graphene_info, **kwargs):
        return resolve_environment_schema_or_error(
            graphene_info, kwargs['selector'].to_selector(), kwargs['mode']
        )
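# A hedged sketch of a config-validation request against the Query type above.
# The input fields of ExecutionSelector are not shown in this file, so the
# variable is left opaque; field and argument names come from the definitions
# above, while the operation and variable names are illustrative.
IS_CONFIG_VALID_QUERY = '''
query IsConfigValid($pipeline: ExecutionSelector!, $environmentConfigData: EnvironmentConfigData, $mode: String!) {
  isPipelineConfigValid(pipeline: $pipeline, environmentConfigData: $environmentConfigData, mode: $mode) {
    __typename
  }
}
'''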
class DauphinPipeline(dauphin.ObjectType):
    class Meta:
        name = 'Pipeline'

    name = dauphin.NonNull(dauphin.String)
    description = dauphin.String()
    solids = dauphin.non_null_list('Solid')
    contexts = dauphin.non_null_list('PipelineContext')
    environment_type = dauphin.NonNull('ConfigType')
    config_types = dauphin.non_null_list('ConfigType')
    runtime_types = dauphin.non_null_list('RuntimeType')
    runs = dauphin.non_null_list('PipelineRun')

    def __init__(self, pipeline):
        super(DauphinPipeline, self).__init__(name=pipeline.name, description=pipeline.description)
        self._pipeline = check.inst_param(pipeline, 'pipeline', PipelineDefinition)

    def resolve_solids(self, graphene_info):
        return [
            graphene_info.schema.type_named('Solid')(
                solid,
                self._pipeline.dependency_structure.deps_of_solid_with_input(solid.name),
                self._pipeline.dependency_structure.depended_by_of_solid(solid.name),
            )
            for solid in self._pipeline.solids
        ]

    def resolve_contexts(self, graphene_info):
        return [
            graphene_info.schema.type_named('PipelineContext')(name=name, context=context)
            for name, context in self._pipeline.context_definitions.items()
        ]

    def resolve_environment_type(self, _graphene_info):
        return to_dauphin_config_type(self._pipeline.environment_type)

    def resolve_config_types(self, _graphene_info):
        return sorted(
            list(map(to_dauphin_config_type, self._pipeline.all_config_types())),
            key=lambda config_type: config_type.key,
        )

    def resolve_runtime_types(self, _graphene_info):
        return sorted(
            list(
                map(
                    to_dauphin_runtime_type,
                    [t for t in self._pipeline.all_runtime_types() if t.name],
                )
            ),
            key=lambda config_type: config_type.name,
        )

    def resolve_runs(self, graphene_info):
        return [
            graphene_info.schema.type_named('PipelineRun')(r)
            for r in graphene_info.context.pipeline_runs.all_runs_for_pipeline(
                self._pipeline.name
            )
        ]

    def get_dagster_pipeline(self):
        return self._pipeline

    def get_type(self, _graphene_info, typeName):
        if self._pipeline.has_config_type(typeName):
            return to_dauphin_config_type(self._pipeline.config_type_named(typeName))
        elif self._pipeline.has_runtime_type(typeName):
            return to_dauphin_runtime_type(self._pipeline.runtime_type_named(typeName))
        else:
            check.failed('Not a config type or runtime type')
class DauphinQuery(dauphin.ObjectType):
    class Meta(object):
        name = 'Query'

    version = dauphin.NonNull(dauphin.String)

    repositoriesOrError = dauphin.NonNull('RepositoriesOrError')
    repositoryOrError = dauphin.Field(
        dauphin.NonNull('RepositoryOrError'),
        repositorySelector=dauphin.NonNull('RepositorySelector'),
    )

    pipelineOrError = dauphin.Field(
        dauphin.NonNull('PipelineOrError'), params=dauphin.NonNull('PipelineSelector')
    )

    pipelineSnapshotOrError = dauphin.Field(
        dauphin.NonNull('PipelineSnapshotOrError'),
        snapshotId=dauphin.String(),
        activePipelineSelector=dauphin.Argument('PipelineSelector'),
    )

    scheduler = dauphin.Field(dauphin.NonNull('SchedulerOrError'))

    scheduleDefinitionOrError = dauphin.Field(
        dauphin.NonNull('ScheduleDefinitionOrError'),
        schedule_selector=dauphin.NonNull('ScheduleSelector'),
    )
    scheduleDefinitionsOrError = dauphin.Field(
        dauphin.NonNull('ScheduleDefinitionsOrError'),
        repositorySelector=dauphin.NonNull('RepositorySelector'),
    )
    scheduleStatesOrError = dauphin.Field(
        dauphin.NonNull('ScheduleStatesOrError'),
        repositorySelector=dauphin.NonNull('RepositorySelector'),
        withNoScheduleDefinition=dauphin.Boolean(),
    )

    partitionSetsOrError = dauphin.Field(
        dauphin.NonNull('PartitionSetsOrError'),
        repositorySelector=dauphin.NonNull('RepositorySelector'),
        pipelineName=dauphin.NonNull(dauphin.String),
    )
    partitionSetOrError = dauphin.Field(
        dauphin.NonNull('PartitionSetOrError'),
        repositorySelector=dauphin.NonNull('RepositorySelector'),
        partitionSetName=dauphin.String(),
    )

    pipelineRunsOrError = dauphin.Field(
        dauphin.NonNull('PipelineRunsOrError'),
        filter=dauphin.Argument('PipelineRunsFilter'),
        cursor=dauphin.String(),
        limit=dauphin.Int(),
    )
    pipelineRunOrError = dauphin.Field(
        dauphin.NonNull('PipelineRunOrError'), runId=dauphin.NonNull(dauphin.ID)
    )

    pipelineRunTags = dauphin.non_null_list('PipelineTagAndValues')

    runGroupOrError = dauphin.Field(
        dauphin.NonNull('RunGroupOrError'), runId=dauphin.NonNull(dauphin.ID)
    )
    runGroupsOrError = dauphin.Field(
        dauphin.NonNull('RunGroupsOrError'),
        filter=dauphin.Argument('PipelineRunsFilter'),
        cursor=dauphin.String(),
        limit=dauphin.Int(),
    )

    isPipelineConfigValid = dauphin.Field(
        dauphin.NonNull('PipelineConfigValidationResult'),
        args={
            'pipeline': dauphin.Argument(dauphin.NonNull('PipelineSelector')),
            'runConfigData': dauphin.Argument('RunConfigData'),
            'mode': dauphin.Argument(dauphin.NonNull(dauphin.String)),
        },
    )

    executionPlanOrError = dauphin.Field(
        dauphin.NonNull('ExecutionPlanOrError'),
        args={
            'pipeline': dauphin.Argument(dauphin.NonNull('PipelineSelector')),
            'runConfigData': dauphin.Argument('RunConfigData'),
            'mode': dauphin.Argument(dauphin.NonNull(dauphin.String)),
        },
    )

    runConfigSchemaOrError = dauphin.Field(
        dauphin.NonNull('RunConfigSchemaOrError'),
        args={
            'selector': dauphin.Argument(dauphin.NonNull('PipelineSelector')),
            'mode': dauphin.Argument(dauphin.String),
        },
        description='''Fetch an environment schema given an execution selection and a mode.
        See the description on RunConfigSchema for more information.''',
    )

    instance = dauphin.NonNull('Instance')
    assetsOrError = dauphin.Field(dauphin.NonNull('AssetsOrError'))
    assetOrError = dauphin.Field(
        dauphin.NonNull('AssetOrError'),
        assetKey=dauphin.Argument(dauphin.NonNull('AssetKeyInput')),
    )

    def resolve_repositoriesOrError(self, graphene_info):
        return fetch_repositories(graphene_info)

    def resolve_repositoryOrError(self, graphene_info, **kwargs):
        return fetch_repository(
            graphene_info,
            RepositorySelector.from_graphql_input(kwargs.get('repositorySelector')),
        )

    def resolve_pipelineSnapshotOrError(self, graphene_info, **kwargs):
        snapshot_id_arg = kwargs.get('snapshotId')
        pipeline_selector_arg = kwargs.get('activePipelineSelector')
        check.invariant(
            not (snapshot_id_arg and pipeline_selector_arg),
            'Must only pass one of snapshotId or activePipelineSelector',
        )
        check.invariant(
            snapshot_id_arg or pipeline_selector_arg,
            'Must set one of snapshotId or activePipelineSelector',
        )

        if pipeline_selector_arg:
            pipeline_selector = pipeline_selector_from_graphql(
                graphene_info.context, kwargs['activePipelineSelector']
            )
            return get_pipeline_snapshot_or_error_from_pipeline_selector(
                graphene_info, pipeline_selector
            )
        else:
            return get_pipeline_snapshot_or_error_from_snapshot_id(
                graphene_info, snapshot_id_arg
            )

    def resolve_version(self, graphene_info):
        return graphene_info.context.version

    def resolve_scheduler(self, graphene_info):
        return get_scheduler_or_error(graphene_info)

    def resolve_scheduleDefinitionOrError(self, graphene_info, schedule_selector):
        return get_schedule_definition_or_error(
            graphene_info, ScheduleSelector.from_graphql_input(schedule_selector)
        )

    def resolve_scheduleDefinitionsOrError(self, graphene_info, **kwargs):
        return get_schedule_definitions_or_error(
            graphene_info,
            RepositorySelector.from_graphql_input(kwargs.get('repositorySelector')),
        )

    def resolve_scheduleStatesOrError(self, graphene_info, **kwargs):
        return get_schedule_states_or_error(
            graphene_info,
            RepositorySelector.from_graphql_input(kwargs.get('repositorySelector')),
            kwargs.get('withNoScheduleDefinition'),
        )

    def resolve_pipelineOrError(self, graphene_info, **kwargs):
        return get_pipeline_or_error(
            graphene_info,
            pipeline_selector_from_graphql(graphene_info.context, kwargs['params']),
        )

    def resolve_pipelineRunsOrError(self, graphene_info, **kwargs):
        filters = kwargs.get('filter')
        if filters is not None:
            filters = filters.to_selector()

        return graphene_info.schema.type_named('PipelineRuns')(
            results=get_runs(graphene_info, filters, kwargs.get('cursor'), kwargs.get('limit'))
        )

    def resolve_pipelineRunOrError(self, graphene_info, runId):
        return get_run_by_id(graphene_info, runId)

    def resolve_runGroupsOrError(self, graphene_info, **kwargs):
        filters = kwargs.get('filter')
        if filters is not None:
            filters = filters.to_selector()

        return graphene_info.schema.type_named('RunGroupsOrError')(
            results=get_run_groups(
                graphene_info, filters, kwargs.get('cursor'), kwargs.get('limit')
            )
        )

    def resolve_partitionSetsOrError(self, graphene_info, **kwargs):
        return get_partition_sets_or_error(
            graphene_info,
            RepositorySelector.from_graphql_input(kwargs.get('repositorySelector')),
            kwargs.get('pipelineName'),
        )

    def resolve_partitionSetOrError(self, graphene_info, **kwargs):
        return get_partition_set(
            graphene_info,
            RepositorySelector.from_graphql_input(kwargs.get('repositorySelector')),
            kwargs.get('partitionSetName'),
        )

    def resolve_pipelineRunTags(self, graphene_info):
        return get_run_tags(graphene_info)

    def resolve_runGroupOrError(self, graphene_info, runId):
        return get_run_group(graphene_info, runId)

    def resolve_isPipelineConfigValid(self, graphene_info, pipeline, **kwargs):
        return validate_pipeline_config(
            graphene_info,
            pipeline_selector_from_graphql(graphene_info.context, pipeline),
            kwargs.get('runConfigData'),
            kwargs.get('mode'),
        )

    def resolve_executionPlanOrError(self, graphene_info, pipeline, **kwargs):
        return get_execution_plan(
            graphene_info,
            pipeline_selector_from_graphql(graphene_info.context, pipeline),
            kwargs.get('runConfigData'),
            kwargs.get('mode'),
        )

    def resolve_runConfigSchemaOrError(self, graphene_info, **kwargs):
        return resolve_run_config_schema_or_error(
            graphene_info,
            pipeline_selector_from_graphql(graphene_info.context, kwargs['selector']),
            kwargs.get('mode'),
        )

    def resolve_instance(self, graphene_info):
        return graphene_info.schema.type_named('Instance')(graphene_info.context.instance)

    def resolve_assetsOrError(self, graphene_info):
        return get_assets(graphene_info)

    def resolve_assetOrError(self, graphene_info, **kwargs):
        return get_asset(graphene_info, AssetKey.from_graphql_input(kwargs['assetKey']))
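# A hedged sketch of cursor paging through runs on the Query type above, using
# the filter/cursor/limit arguments declared on pipelineRunsOrError and the
# results field of the PipelineRuns type defined later in this module.
# Operation and variable names are illustrative.
PIPELINE_RUNS_QUERY = '''
query PaginatedRuns($filter: PipelineRunsFilter, $cursor: String, $limit: Int) {
  pipelineRunsOrError(filter: $filter, cursor: $cursor, limit: $limit) {
    ... on PipelineRuns {
      results { runId }
    }
  }
}
'''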
class DauphinPipelineRun(dauphin.ObjectType):
    class Meta(object):
        name = 'PipelineRun'

    runId = dauphin.NonNull(dauphin.String)
    # Nullable because of historical runs
    pipelineSnapshotId = dauphin.String()
    status = dauphin.NonNull('PipelineRunStatus')
    pipeline = dauphin.NonNull('PipelineReference')
    stats = dauphin.NonNull('PipelineRunStatsOrError')
    stepStats = dauphin.non_null_list('PipelineRunStepStats')
    computeLogs = dauphin.Field(
        dauphin.NonNull('ComputeLogs'),
        stepKey=dauphin.Argument(dauphin.NonNull(dauphin.String)),
        description='''
        Compute logs are the stdout/stderr logs for a given solid step computation
        ''',
    )
    executionPlan = dauphin.Field('ExecutionPlan')
    stepKeysToExecute = dauphin.List(dauphin.NonNull(dauphin.String))
    environmentConfigYaml = dauphin.NonNull(dauphin.String)
    mode = dauphin.NonNull(dauphin.String)
    tags = dauphin.non_null_list('PipelineTag')
    rootRunId = dauphin.Field(dauphin.String)
    parentRunId = dauphin.Field(dauphin.String)
    canCancel = dauphin.NonNull(dauphin.Boolean)
    executionSelection = dauphin.NonNull('ExecutionSelection')

    def __init__(self, pipeline_run):
        super(DauphinPipelineRun, self).__init__(
            runId=pipeline_run.run_id, status=pipeline_run.status, mode=pipeline_run.mode
        )
        self._pipeline_run = check.inst_param(pipeline_run, 'pipeline_run', PipelineRun)

    def resolve_pipeline(self, graphene_info):
        return get_pipeline_reference_or_raise(graphene_info, self._pipeline_run.selector)

    def resolve_pipelineSnapshotId(self, _):
        return self._pipeline_run.pipeline_snapshot_id

    def resolve_stats(self, graphene_info):
        return get_stats(graphene_info, self.run_id)

    def resolve_stepStats(self, graphene_info):
        return get_step_stats(graphene_info, self.run_id)

    def resolve_computeLogs(self, graphene_info, stepKey):
        return graphene_info.schema.type_named('ComputeLogs')(runId=self.run_id, stepKey=stepKey)

    def resolve_executionPlan(self, graphene_info):
        if not (
            self._pipeline_run.execution_plan_snapshot_id
            and self._pipeline_run.pipeline_snapshot_id
        ):
            return None

        from .execution import DauphinExecutionPlan

        instance = graphene_info.context.instance
        pipeline_snapshot = instance.get_pipeline_snapshot(
            self._pipeline_run.pipeline_snapshot_id
        )
        execution_plan_snapshot = instance.get_execution_plan_snapshot(
            self._pipeline_run.execution_plan_snapshot_id
        )
        return (
            DauphinExecutionPlan(
                ExecutionPlanIndex(
                    execution_plan_snapshot=execution_plan_snapshot,
                    pipeline_index=PipelineIndex(pipeline_snapshot),
                )
            )
            if execution_plan_snapshot and pipeline_snapshot
            else None
        )

    def resolve_stepKeysToExecute(self, _):
        return self._pipeline_run.step_keys_to_execute

    def resolve_environmentConfigYaml(self, _graphene_info):
        return yaml.dump(self._pipeline_run.environment_dict, default_flow_style=False)

    def resolve_tags(self, graphene_info):
        return [
            graphene_info.schema.type_named('PipelineTag')(key=key, value=value)
            for key, value in self._pipeline_run.tags.items()
        ]

    def resolve_rootRunId(self, _):
        return self._pipeline_run.root_run_id

    def resolve_parentRunId(self, _):
        return self._pipeline_run.parent_run_id

    @property
    def run_id(self):
        return self.runId

    def resolve_canCancel(self, graphene_info):
        return graphene_info.context.execution_manager.can_terminate(self.run_id)

    def resolve_executionSelection(self, graphene_info):
        return graphene_info.schema.type_named('ExecutionSelection')(self._pipeline_run.selector)
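# A minimal sketch of fetching compute logs for one step of a run, via the
# pipelineRunOrError root field and the computeLogs field declared above. The
# step key shown is an illustrative placeholder, not a real step.
COMPUTE_LOGS_QUERY = '''
query RunComputeLogs($runId: ID!) {
  pipelineRunOrError(runId: $runId) {
    ... on PipelineRun {
      computeLogs(stepKey: "my_solid.compute") { __typename }
    }
  }
}
'''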
class DauphinPartitions(dauphin.ObjectType):
    class Meta(object):
        name = 'Partitions'

    results = dauphin.non_null_list('Partition')
class DauphinPipelineConfigValidationError(dauphin.Interface):
    class Meta(object):
        name = "PipelineConfigValidationError"

    message = dauphin.NonNull(dauphin.String)
    path = dauphin.non_null_list(dauphin.String)
    stack = dauphin.NonNull("EvaluationStack")
    reason = dauphin.NonNull("EvaluationErrorReason")

    @staticmethod
    def from_dagster_error(config_schema_snapshot, error):
        check.inst_param(config_schema_snapshot, "config_schema_snapshot", ConfigSchemaSnapshot)
        check.inst_param(error, "error", EvaluationError)

        if isinstance(error.error_data, RuntimeMismatchErrorData):
            return DauphinRuntimeMismatchConfigError(
                message=error.message,
                path=[],  # TODO: remove
                stack=DauphinEvaluationStack(config_schema_snapshot, error.stack),
                reason=error.reason,
                value_rep=error.error_data.value_rep,
            )
        elif isinstance(error.error_data, MissingFieldErrorData):
            return DauphinMissingFieldConfigError(
                message=error.message,
                path=[],  # TODO: remove
                stack=DauphinEvaluationStack(config_schema_snapshot, error.stack),
                reason=error.reason,
                field=DauphinConfigTypeField(
                    config_schema_snapshot=config_schema_snapshot,
                    field_snap=error.error_data.field_snap,
                ),
            )
        elif isinstance(error.error_data, MissingFieldsErrorData):
            return DauphinMissingFieldsConfigError(
                message=error.message,
                path=[],  # TODO: remove
                stack=DauphinEvaluationStack(config_schema_snapshot, error.stack),
                reason=error.reason,
                fields=[
                    DauphinConfigTypeField(
                        config_schema_snapshot=config_schema_snapshot,
                        field_snap=field_snap,
                    )
                    for field_snap in error.error_data.field_snaps
                ],
            )
        elif isinstance(error.error_data, FieldNotDefinedErrorData):
            return DauphinFieldNotDefinedConfigError(
                message=error.message,
                path=[],  # TODO: remove
                stack=DauphinEvaluationStack(config_schema_snapshot, error.stack),
                reason=error.reason,
                field_name=error.error_data.field_name,
            )
        elif isinstance(error.error_data, FieldsNotDefinedErrorData):
            return DauphinFieldsNotDefinedConfigError(
                message=error.message,
                path=[],  # TODO: remove
                stack=DauphinEvaluationStack(config_schema_snapshot, error.stack),
                reason=error.reason,
                field_names=error.error_data.field_names,
            )
        elif isinstance(error.error_data, SelectorTypeErrorData):
            return DauphinSelectorTypeConfigError(
                message=error.message,
                path=[],  # TODO: remove
                stack=DauphinEvaluationStack(config_schema_snapshot, error.stack),
                reason=error.reason,
                incoming_fields=error.error_data.incoming_fields,
            )
        else:
            check.failed(
                "Error type not supported {error_data}".format(error_data=repr(error.error_data))
            )
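# A hypothetical sketch (not from this file) of how a caller might fan a list
# of dagster EvaluationErrors out through the dispatcher above; `errors` is an
# assumed iterable of EvaluationError instances produced by config evaluation.
#
#     dauphin_errors = [
#         DauphinPipelineConfigValidationError.from_dagster_error(config_schema_snapshot, err)
#         for err in errors
#     ]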
class DauphinPipelineConnection(dauphin.ObjectType):
    class Meta:
        name = 'PipelineConnection'

    nodes = dauphin.non_null_list('Pipeline')
class DauphinMissingFieldsConfigError(dauphin.ObjectType):
    class Meta(object):
        name = "MissingFieldsConfigError"
        interfaces = (DauphinPipelineConfigValidationError,)

    fields = dauphin.non_null_list("ConfigTypeField")
class DauphinAssetConnection(dauphin.ObjectType):
    class Meta(object):
        name = "AssetConnection"

    nodes = dauphin.non_null_list("Asset")
class DauphinFieldsNotDefinedConfigError(dauphin.ObjectType):
    class Meta(object):
        name = "FieldsNotDefinedConfigError"
        interfaces = (DauphinPipelineConfigValidationError,)

    field_names = dauphin.non_null_list(dauphin.String)
class DauphinPipelineRun(dauphin.ObjectType): class Meta(object): name = "PipelineRun" runId = dauphin.NonNull(dauphin.String) # Nullable because of historical runs pipelineSnapshotId = dauphin.String() status = dauphin.NonNull("PipelineRunStatus") pipeline = dauphin.NonNull("PipelineReference") pipelineName = dauphin.NonNull(dauphin.String) solidSelection = dauphin.List(dauphin.NonNull(dauphin.String)) stats = dauphin.NonNull("PipelineRunStatsOrError") stepStats = dauphin.non_null_list("PipelineRunStepStats") computeLogs = dauphin.Field( dauphin.NonNull("ComputeLogs"), stepKey=dauphin.Argument(dauphin.NonNull(dauphin.String)), description=""" Compute logs are the stdout/stderr logs for a given solid step computation """, ) executionPlan = dauphin.Field("ExecutionPlan") stepKeysToExecute = dauphin.List(dauphin.NonNull(dauphin.String)) runConfigYaml = dauphin.NonNull(dauphin.String) mode = dauphin.NonNull(dauphin.String) tags = dauphin.non_null_list("PipelineTag") rootRunId = dauphin.Field(dauphin.String) parentRunId = dauphin.Field(dauphin.String) canTerminate = dauphin.NonNull(dauphin.Boolean) assets = dauphin.non_null_list("Asset") def __init__(self, pipeline_run): super(DauphinPipelineRun, self).__init__(runId=pipeline_run.run_id, status=pipeline_run.status, mode=pipeline_run.mode) self._pipeline_run = check.inst_param(pipeline_run, "pipeline_run", PipelineRun) def resolve_pipeline(self, graphene_info): return get_pipeline_reference_or_raise( graphene_info, self._pipeline_run, ) def resolve_pipelineName(self, _graphene_info): return self._pipeline_run.pipeline_name def resolve_solidSelection(self, _graphene_info): return self._pipeline_run.solid_selection def resolve_pipelineSnapshotId(self, _): return self._pipeline_run.pipeline_snapshot_id def resolve_stats(self, graphene_info): return get_stats(graphene_info, self.run_id) def resolve_stepStats(self, graphene_info): return get_step_stats(graphene_info, self.run_id) def resolve_computeLogs(self, graphene_info, stepKey): return graphene_info.schema.type_named("ComputeLogs")( runId=self.run_id, stepKey=stepKey) def resolve_executionPlan(self, graphene_info): if not (self._pipeline_run.execution_plan_snapshot_id and self._pipeline_run.pipeline_snapshot_id): return None from .execution import DauphinExecutionPlan instance = graphene_info.context.instance historical_pipeline = instance.get_historical_pipeline( self._pipeline_run.pipeline_snapshot_id) execution_plan_snapshot = instance.get_execution_plan_snapshot( self._pipeline_run.execution_plan_snapshot_id) return (DauphinExecutionPlan( ExternalExecutionPlan( execution_plan_snapshot=execution_plan_snapshot, represented_pipeline=historical_pipeline, )) if execution_plan_snapshot and historical_pipeline else None) def resolve_stepKeysToExecute(self, _): return self._pipeline_run.step_keys_to_execute def resolve_runConfigYaml(self, _graphene_info): return yaml.dump(self._pipeline_run.run_config, default_flow_style=False) def resolve_tags(self, graphene_info): return [ graphene_info.schema.type_named("PipelineTag")(key=key, value=value) for key, value in self._pipeline_run.tags.items() if get_tag_type(key) != TagType.HIDDEN ] def resolve_rootRunId(self, _): return self._pipeline_run.root_run_id def resolve_parentRunId(self, _): return self._pipeline_run.parent_run_id @property def run_id(self): return self.runId def resolve_canTerminate(self, graphene_info): return graphene_info.context.instance.run_launcher.can_terminate( self.run_id) def resolve_assets(self, graphene_info): return 
get_assets_for_run_id(graphene_info, self.run_id)
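# A hedged sketch of a client query over the newer run fields above
# (canTerminate, assets). The Asset type's own fields are not shown in this
# file, so only __typename is selected; operation and variable names are
# illustrative.
RUN_ASSETS_QUERY = """
query RunAssets($runId: ID!) {
  pipelineRunOrError(runId: $runId) {
    ... on PipelineRun {
      canTerminate
      assets { __typename }
    }
  }
}
"""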
class DauphinSelectorTypeConfigError(dauphin.ObjectType):
    class Meta(object):
        name = "SelectorTypeConfigError"
        interfaces = (DauphinPipelineConfigValidationError,)

    incoming_fields = dauphin.non_null_list(dauphin.String)
class DauphinQuery(dauphin.ObjectType): class Meta(object): name = "Query" version = dauphin.NonNull(dauphin.String) repositoriesOrError = dauphin.NonNull("RepositoriesOrError") repositoryOrError = dauphin.Field( dauphin.NonNull("RepositoryOrError"), repositorySelector=dauphin.NonNull("RepositorySelector"), ) pipelineOrError = dauphin.Field(dauphin.NonNull("PipelineOrError"), params=dauphin.NonNull("PipelineSelector")) pipelineSnapshotOrError = dauphin.Field( dauphin.NonNull("PipelineSnapshotOrError"), snapshotId=dauphin.String(), activePipelineSelector=dauphin.Argument("PipelineSelector"), ) scheduler = dauphin.Field(dauphin.NonNull("SchedulerOrError")) scheduleDefinitionOrError = dauphin.Field( dauphin.NonNull("ScheduleDefinitionOrError"), schedule_selector=dauphin.NonNull("ScheduleSelector"), ) scheduleDefinitionsOrError = dauphin.Field( dauphin.NonNull("ScheduleDefinitionsOrError"), repositorySelector=dauphin.NonNull("RepositorySelector"), ) scheduleStatesOrError = dauphin.Field( dauphin.NonNull("ScheduleStatesOrError"), repositorySelector=dauphin.Argument("RepositorySelector"), withNoScheduleDefinition=dauphin.Boolean(), ) partitionSetsOrError = dauphin.Field( dauphin.NonNull("PartitionSetsOrError"), repositorySelector=dauphin.NonNull("RepositorySelector"), pipelineName=dauphin.NonNull(dauphin.String), ) partitionSetOrError = dauphin.Field( dauphin.NonNull("PartitionSetOrError"), repositorySelector=dauphin.NonNull("RepositorySelector"), partitionSetName=dauphin.String(), ) pipelineRunsOrError = dauphin.Field( dauphin.NonNull("PipelineRunsOrError"), filter=dauphin.Argument("PipelineRunsFilter"), cursor=dauphin.String(), limit=dauphin.Int(), ) pipelineRunOrError = dauphin.Field(dauphin.NonNull("PipelineRunOrError"), runId=dauphin.NonNull(dauphin.ID)) pipelineRunTags = dauphin.non_null_list("PipelineTagAndValues") runGroupOrError = dauphin.Field(dauphin.NonNull("RunGroupOrError"), runId=dauphin.NonNull(dauphin.ID)) runGroupsOrError = dauphin.Field( dauphin.NonNull("RunGroupsOrError"), filter=dauphin.Argument("PipelineRunsFilter"), cursor=dauphin.String(), limit=dauphin.Int(), ) isPipelineConfigValid = dauphin.Field( dauphin.NonNull("PipelineConfigValidationResult"), args={ "pipeline": dauphin.Argument(dauphin.NonNull("PipelineSelector")), "runConfigData": dauphin.Argument("RunConfigData"), "mode": dauphin.Argument(dauphin.NonNull(dauphin.String)), }, ) executionPlanOrError = dauphin.Field( dauphin.NonNull("ExecutionPlanOrError"), args={ "pipeline": dauphin.Argument(dauphin.NonNull("PipelineSelector")), "runConfigData": dauphin.Argument("RunConfigData"), "mode": dauphin.Argument(dauphin.NonNull(dauphin.String)), }, ) runConfigSchemaOrError = dauphin.Field( dauphin.NonNull("RunConfigSchemaOrError"), args={ "selector": dauphin.Argument(dauphin.NonNull("PipelineSelector")), "mode": dauphin.Argument(dauphin.String), }, description= """Fetch an environment schema given an execution selection and a mode. 
See the descripton on RunConfigSchema for more information.""", ) instance = dauphin.NonNull("Instance") assetsOrError = dauphin.Field(dauphin.NonNull("AssetsOrError")) assetOrError = dauphin.Field( dauphin.NonNull("AssetOrError"), assetKey=dauphin.Argument(dauphin.NonNull("AssetKeyInput")), ) def resolve_repositoriesOrError(self, graphene_info): return fetch_repositories(graphene_info) def resolve_repositoryOrError(self, graphene_info, **kwargs): return fetch_repository( graphene_info, RepositorySelector.from_graphql_input( kwargs.get("repositorySelector")), ) def resolve_pipelineSnapshotOrError(self, graphene_info, **kwargs): snapshot_id_arg = kwargs.get("snapshotId") pipeline_selector_arg = kwargs.get("activePipelineSelector") check.invariant( not (snapshot_id_arg and pipeline_selector_arg), "Must only pass one of snapshotId or activePipelineSelector", ) check.invariant( snapshot_id_arg or pipeline_selector_arg, "Must set one of snapshotId or activePipelineSelector", ) if pipeline_selector_arg: pipeline_selector = pipeline_selector_from_graphql( graphene_info.context, kwargs["activePipelineSelector"]) return get_pipeline_snapshot_or_error_from_pipeline_selector( graphene_info, pipeline_selector) else: return get_pipeline_snapshot_or_error_from_snapshot_id( graphene_info, snapshot_id_arg) def resolve_version(self, graphene_info): return graphene_info.context.version def resolve_scheduler(self, graphene_info): return get_scheduler_or_error(graphene_info) def resolve_scheduleDefinitionOrError(self, graphene_info, schedule_selector): return get_schedule_definition_or_error( graphene_info, ScheduleSelector.from_graphql_input(schedule_selector)) def resolve_scheduleDefinitionsOrError(self, graphene_info, **kwargs): return get_schedule_definitions_or_error( graphene_info, RepositorySelector.from_graphql_input( kwargs.get("repositorySelector"))) def resolve_scheduleStatesOrError(self, graphene_info, **kwargs): return get_schedule_states_or_error( graphene_info, RepositorySelector.from_graphql_input(kwargs["repositorySelector"]) if kwargs.get("repositorySelector") else None, kwargs.get("withNoScheduleDefinition"), ) def resolve_pipelineOrError(self, graphene_info, **kwargs): return get_pipeline_or_error( graphene_info, pipeline_selector_from_graphql(graphene_info.context, kwargs["params"]), ) def resolve_pipelineRunsOrError(self, graphene_info, **kwargs): filters = kwargs.get("filter") if filters is not None: filters = filters.to_selector() return graphene_info.schema.type_named("PipelineRuns")( results=get_runs(graphene_info, filters, kwargs.get("cursor"), kwargs.get("limit"))) def resolve_pipelineRunOrError(self, graphene_info, runId): return get_run_by_id(graphene_info, runId) def resolve_runGroupsOrError(self, graphene_info, **kwargs): filters = kwargs.get("filter") if filters is not None: filters = filters.to_selector() return graphene_info.schema.type_named("RunGroupsOrError")( results=get_run_groups(graphene_info, filters, kwargs.get( "cursor"), kwargs.get("limit"))) def resolve_partitionSetsOrError(self, graphene_info, **kwargs): return get_partition_sets_or_error( graphene_info, RepositorySelector.from_graphql_input( kwargs.get("repositorySelector")), kwargs.get("pipelineName"), ) def resolve_partitionSetOrError(self, graphene_info, **kwargs): return get_partition_set( graphene_info, RepositorySelector.from_graphql_input( kwargs.get("repositorySelector")), kwargs.get("partitionSetName"), ) def resolve_pipelineRunTags(self, graphene_info): return get_run_tags(graphene_info) def 
resolve_runGroupOrError(self, graphene_info, runId): return get_run_group(graphene_info, runId) def resolve_isPipelineConfigValid(self, graphene_info, pipeline, **kwargs): return validate_pipeline_config( graphene_info, pipeline_selector_from_graphql(graphene_info.context, pipeline), kwargs.get("runConfigData"), kwargs.get("mode"), ) def resolve_executionPlanOrError(self, graphene_info, pipeline, **kwargs): return get_execution_plan( graphene_info, pipeline_selector_from_graphql(graphene_info.context, pipeline), kwargs.get("runConfigData"), kwargs.get("mode"), ) def resolve_runConfigSchemaOrError(self, graphene_info, **kwargs): return resolve_run_config_schema_or_error( graphene_info, pipeline_selector_from_graphql(graphene_info.context, kwargs["selector"]), kwargs.get("mode"), ) def resolve_instance(self, graphene_info): return graphene_info.schema.type_named("Instance")( graphene_info.context.instance) def resolve_assetsOrError(self, graphene_info): return get_assets(graphene_info) def resolve_assetOrError(self, graphene_info, **kwargs): return get_asset(graphene_info, AssetKey.from_graphql_input(kwargs["assetKey"]))
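# A minimal sketch of a repository-scoped request against the Query type above,
# assuming RepositorySelector's input fields match what
# RepositorySelector.from_graphql_input expects. The results field comes from
# the ScheduleDefinitions type defined earlier; operation and variable names
# are illustrative.
SCHEDULE_DEFINITIONS_QUERY = """
query ScheduleDefinitions($repositorySelector: RepositorySelector!) {
  scheduleDefinitionsOrError(repositorySelector: $repositorySelector) {
    ... on ScheduleDefinitions {
      results { __typename }
    }
  }
}
"""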
class DauphinPipelineRuns(dauphin.ObjectType):
    class Meta(object):
        name = "PipelineRuns"

    results = dauphin.non_null_list("PipelineRun")
class DauphinPartitionTags(dauphin.ObjectType):
    class Meta:
        name = "PartitionTags"

    results = dauphin.non_null_list("PipelineTag")
class DauphinRunGroupsOrError(dauphin.ObjectType):
    class Meta(object):
        name = "RunGroupsOrError"
        types = ("RunGroups", DauphinPythonError)

    results = dauphin.non_null_list("RunGroup")
class DauphinPartitions(dauphin.ObjectType): class Meta: name = "Partitions" results = dauphin.non_null_list("Partition")
class DauphinPartitionBackfillSuccess(dauphin.ObjectType):
    class Meta(object):
        name = "PartitionBackfillSuccess"

    backfill_id = dauphin.NonNull(dauphin.String)
    launched_run_ids = dauphin.non_null_list(dauphin.String)
class DauphinRunningSchedule(dauphin.ObjectType):
    class Meta:
        name = 'RunningSchedule'

    schedule_id = dauphin.NonNull(dauphin.String)
    schedule_definition = dauphin.NonNull('ScheduleDefinition')
    python_path = dauphin.Field(dauphin.String)
    repository_path = dauphin.Field(dauphin.String)
    status = dauphin.NonNull('ScheduleStatus')
    runs = dauphin.Field(dauphin.non_null_list('PipelineRun'), limit=dauphin.Int())
    runs_count = dauphin.NonNull(dauphin.Int)
    attempts = dauphin.Field(dauphin.non_null_list('ScheduleAttempt'), limit=dauphin.Int())
    logs_path = dauphin.NonNull(dauphin.String)

    def __init__(self, graphene_info, schedule):
        self._schedule = check.inst_param(schedule, 'schedule', Schedule)

        super(DauphinRunningSchedule, self).__init__(
            schedule_id=schedule.schedule_id,
            schedule_definition=graphene_info.schema.type_named('ScheduleDefinition')(
                get_dagster_schedule_def(graphene_info, schedule.name)
            ),
            status=schedule.status,
            python_path=schedule.python_path,
            repository_path=schedule.repository_path,
        )

    def resolve_attempts(self, graphene_info, **kwargs):
        limit = kwargs.get('limit')

        scheduler = graphene_info.context.get_scheduler()
        log_dir = scheduler.log_path_for_schedule(self._schedule.name)

        results = glob.glob(os.path.join(log_dir, "*.result"))
        if limit is None:
            # heapq.nlargest requires an int; default to returning all results
            limit = len(results)
        latest_results = heapq.nlargest(limit, results, key=os.path.getctime)

        attempts = []
        for result_path in latest_results:
            with open(result_path, 'r') as f:
                line = f.readline()
            if not line:
                continue  # File is empty

            start_scheduled_execution_response = json.loads(line)
            json_result = start_scheduled_execution_response['data']['startScheduledExecution']
            typename = json_result['__typename']

            if typename == 'StartPipelineExecutionSuccess':
                status = DauphinScheduleAttemptStatus.SUCCESS
            elif typename == 'ScheduleExecutionBlocked':
                status = DauphinScheduleAttemptStatus.SKIPPED
            else:
                status = DauphinScheduleAttemptStatus.ERROR

            run = None
            if typename == 'StartPipelineExecutionSuccess':
                run_id = json_result['run']['runId']
                run = graphene_info.schema.type_named('PipelineRun')(
                    graphene_info.context.instance.get_run_by_id(run_id)
                )

            attempts.append(
                graphene_info.schema.type_named('ScheduleAttempt')(
                    # Call getctime on the result file; passing the bare function
                    # object here was a bug
                    time=os.path.getctime(result_path),
                    json_result=json.dumps(json_result),
                    status=status,
                    run=run,
                )
            )

        return attempts

    def resolve_logs_path(self, graphene_info):
        scheduler = graphene_info.context.get_scheduler()
        return scheduler.log_path_for_schedule(self._schedule.name)

    def resolve_runs(self, graphene_info, **kwargs):
        return [
            graphene_info.schema.type_named('PipelineRun')(r)
            for r in graphene_info.context.instance.get_runs_with_matching_tags(
                [("dagster/schedule_id", self._schedule.schedule_id)], limit=kwargs.get('limit')
            )
        ]

    def resolve_runs_count(self, graphene_info):
        return graphene_info.context.instance.get_run_count_with_matching_tags(
            [("dagster/schedule_id", self._schedule.schedule_id)]
        )
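# A hedged sketch of the first line of a "*.result" file as resolve_attempts
# parses it: one JSON document mirroring the GraphQL startScheduledExecution
# response. All values shown are illustrative.
#
#     {"data": {"startScheduledExecution": {
#         "__typename": "StartPipelineExecutionSuccess",
#         "run": {"runId": "1a2b3c"}}}}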
class DauphinScheduleState(dauphin.ObjectType):
    class Meta(object):
        name = "ScheduleState"

    schedule_origin_id = dauphin.NonNull(dauphin.String)
    schedule_name = dauphin.NonNull(dauphin.String)
    cron_schedule = dauphin.NonNull(dauphin.String)
    status = dauphin.NonNull("ScheduleStatus")
    runs = dauphin.Field(dauphin.non_null_list("PipelineRun"), limit=dauphin.Int())
    runs_count = dauphin.NonNull(dauphin.Int)
    ticks = dauphin.Field(dauphin.non_null_list("ScheduleTick"), limit=dauphin.Int())
    ticks_count = dauphin.NonNull(dauphin.Int)
    stats = dauphin.NonNull("ScheduleTickStatsSnapshot")
    logs_path = dauphin.NonNull(dauphin.String)
    running_schedule_count = dauphin.NonNull(dauphin.Int)
    repository_origin = dauphin.NonNull("RepositoryOrigin")
    repository_origin_id = dauphin.NonNull(dauphin.String)
    id = dauphin.NonNull(dauphin.ID)

    def __init__(self, _graphene_info, schedule_state):
        self._schedule_state = check.inst_param(
            schedule_state, "schedule_state", ScheduleState
        )
        self._external_schedule_origin_id = self._schedule_state.schedule_origin_id

        super(DauphinScheduleState, self).__init__(
            schedule_origin_id=schedule_state.schedule_origin_id,
            schedule_name=schedule_state.name,
            cron_schedule=schedule_state.cron_schedule,
            status=schedule_state.status,
        )

    def resolve_id(self, _graphene_info):
        return self._external_schedule_origin_id

    def resolve_running_schedule_count(self, graphene_info):
        running_schedule_count = graphene_info.context.instance.running_schedule_count(
            self._external_schedule_origin_id
        )
        return running_schedule_count

    def resolve_stats(self, graphene_info):
        stats = graphene_info.context.instance.get_schedule_tick_stats(
            self._external_schedule_origin_id
        )
        return graphene_info.schema.type_named("ScheduleTickStatsSnapshot")(stats)

    def resolve_ticks(self, graphene_info, limit=None):
        # TODO: Add cursor/limit arguments to get_schedule_ticks_by_schedule
        # https://github.com/dagster-io/dagster/issues/2291
        ticks = graphene_info.context.instance.get_schedule_ticks(
            self._external_schedule_origin_id
        )

        if not limit:
            tick_subset = ticks
        else:
            tick_subset = ticks[:limit]

        return [
            graphene_info.schema.type_named("ScheduleTick")(
                tick_id=tick.tick_id,
                status=tick.status,
                timestamp=tick.timestamp,
                tick_specific_data=tick_specific_data_from_dagster_tick(graphene_info, tick),
            )
            for tick in tick_subset
        ]

    def resolve_ticks_count(self, graphene_info):
        ticks = graphene_info.context.instance.get_schedule_ticks(
            self._external_schedule_origin_id
        )
        return len(ticks)

    def resolve_runs(self, graphene_info, **kwargs):
        return [
            graphene_info.schema.type_named("PipelineRun")(r)
            for r in graphene_info.context.instance.get_runs(
                filters=PipelineRunsFilter.for_schedule(self._schedule_state),
                limit=kwargs.get("limit"),
            )
        ]

    def resolve_runs_count(self, graphene_info):
        return graphene_info.context.instance.get_runs_count(
            filters=PipelineRunsFilter.for_schedule(self._schedule_state)
        )

    def resolve_repository_origin_id(self, _graphene_info):
        return self._schedule_state.repository_origin_id

    def resolve_repository_origin(self, graphene_info):
        origin = self._schedule_state.origin.get_repo_origin()
        if isinstance(origin, RepositoryGrpcServerOrigin):
            return graphene_info.schema.type_named("GrpcRepositoryOrigin")(origin)
        else:
            return graphene_info.schema.type_named("PythonRepositoryOrigin")(origin)
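# A hedged sketch of the per-schedule-state selection a UI might request,
# assuming dauphin applies graphene's default camel-casing to the snake_case
# fields declared above (e.g. runs_count -> runsCount). Fragment and field
# names are otherwise taken from ScheduleState.
SCHEDULE_STATE_FRAGMENT = """
fragment ScheduleStateInfo on ScheduleState {
  id
  scheduleName
  cronSchedule
  status
  runsCount
  ticksCount
}
"""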