class DauphinEventMetadataEntry(dauphin.Interface):
    """GraphQL interface for a metadata entry attached to an event.

    Concrete entry types implement this interface; only the shared
    ``label``/``description`` fields are declared here.
    """

    class Meta:
        name = 'EventMetadataEntry'

    # Required short display label for the entry.
    label = dauphin.NonNull(dauphin.String)
    # Optional longer human-readable description.
    description = dauphin.String()
class DauphinRetriesPreviousAttempts(dauphin.InputObjectType):
    """GraphQL input pairing a step key with a prior attempt count.

    NOTE(review): field names suggest retry bookkeeping when resuming
    execution (``key`` = step key, ``count`` = attempts already made) —
    confirm against the callers that consume this input.
    """

    class Meta(object):
        name = "RetriesPreviousAttempts"

    # Step key the count applies to.
    key = dauphin.String()
    # Number of previous attempts recorded for that key.
    count = dauphin.Int()
class DauphinPipeline(dauphin.ObjectType):
    """GraphQL type wrapping a ``PipelineDefinition``.

    Resolves solids, runtime types, runs, modes, solid handles, and presets
    off the wrapped definition.
    """

    class Meta(object):
        name = 'Pipeline'
        interfaces = (DauphinSolidContainer, DauphinPipelineReference)

    name = dauphin.NonNull(dauphin.String)
    description = dauphin.String()
    solids = dauphin.non_null_list('Solid')
    runtime_types = dauphin.non_null_list('RuntimeType')
    runs = dauphin.non_null_list('PipelineRun')
    modes = dauphin.non_null_list('Mode')
    # parentHandleID filters the handle list; see resolve_solid_handles.
    solid_handles = dauphin.Field(
        dauphin.non_null_list('SolidHandle'), parentHandleID=dauphin.String()
    )
    presets = dauphin.non_null_list('PipelinePreset')
    solid_handle = dauphin.Field(
        'SolidHandle',
        handleID=dauphin.Argument(dauphin.NonNull(dauphin.String)),
    )

    def __init__(self, pipeline):
        """Wrap ``pipeline`` (a PipelineDefinition) and seed scalar fields."""
        super(DauphinPipeline, self).__init__(name=pipeline.name, description=pipeline.description)
        self._pipeline = check.inst_param(pipeline, 'pipeline', PipelineDefinition)

    def resolve_solids(self, _graphene_info):
        """Build the dauphin solid list from the wrapped definition."""
        return build_dauphin_solids(self._pipeline)

    def resolve_runtime_types(self, _graphene_info):
        """Named runtime types of the pipeline, sorted by name (anonymous types skipped)."""
        return sorted(
            list(
                map(
                    to_dauphin_runtime_type,
                    [t for t in self._pipeline.all_runtime_types() if t.name],
                )),
            key=lambda config_type: config_type.name,
        )

    def resolve_runs(self, graphene_info):
        """All stored runs of this pipeline, wrapped as PipelineRun types."""
        return [
            graphene_info.schema.type_named('PipelineRun')(r)
            for r in graphene_info.context.instance.get_runs(
                filters=PipelineRunsFilter(pipeline_name=self._pipeline.name))
        ]

    def get_dagster_pipeline(self):
        """Return the wrapped PipelineDefinition."""
        return self._pipeline

    def get_type(self, _graphene_info, typeName):
        """Resolve ``typeName`` as a config type or runtime type; fail otherwise."""
        if self._pipeline.has_config_type(typeName):
            return to_dauphin_config_type(
                self._pipeline.config_type_named(typeName))
        elif self._pipeline.has_runtime_type(typeName):
            return to_dauphin_runtime_type(
                self._pipeline.runtime_type_named(typeName))
        else:
            check.failed('Not a config type or runtime type')

    def resolve_modes(self, graphene_info):
        """Mode definitions of the pipeline, sorted by name."""
        return [
            graphene_info.schema.type_named('Mode')(mode_definition)
            for mode_definition in sorted(self._pipeline.mode_definitions,
                                          key=lambda item: item.name)
        ]

    def resolve_solid_handle(self, _graphene_info, handleID):
        """Look up a single solid handle by its string id (None when absent)."""
        return _get_solid_handles(self._pipeline).get(handleID)

    def resolve_solid_handles(self, _graphene_info, **kwargs):
        """Solid handles, optionally filtered by parent handle.

        Empty-string parentHandleID selects top-level handles (no parent);
        any other non-None value selects direct children of that handle.
        """
        handles = _get_solid_handles(self._pipeline)
        parentHandleID = kwargs.get('parentHandleID')
        if parentHandleID == "":
            handles = {
                key: handle
                for key, handle in handles.items() if not handle.parent
            }
        elif parentHandleID is not None:
            handles = {
                key: handle
                for key, handle in handles.items()
                if handle.parent and handle.parent.handleID.to_string() == parentHandleID
            }
        return [handles[key] for key in sorted(handles)]

    def resolve_presets(self, _graphene_info):
        """Presets of the pipeline, sorted by name."""
        return [
            DauphinPipelinePreset(preset, self._pipeline.name)
            for preset in sorted(self._pipeline.get_presets(), key=lambda item: item.name)
        ]
class DauphinError(dauphin.Interface):
    """GraphQL interface implemented by error types in the schema."""

    class Meta(object):
        name = "Error"

    # Human-readable error message, required on every implementor.
    message = dauphin.String(required=True)
class DauphinEventMetadataEntry(dauphin.Interface):
    """GraphQL interface for a metadata entry attached to an event.

    NOTE(review): this duplicates the ``DauphinEventMetadataEntry`` defined
    earlier in this file; the later definition shadows the earlier name —
    confirm whether both are intended to exist.
    """

    class Meta(object):
        name = "EventMetadataEntry"

    # Required short display label for the entry.
    label = dauphin.NonNull(dauphin.String)
    # Optional longer human-readable description.
    description = dauphin.String()
class DauphinQuery(dauphin.ObjectType):
    """Root GraphQL Query type (ExecutionSelector-era schema).

    Each camelCase field below is paired with a ``resolve_<field>`` method
    that delegates to a fetcher helper.
    """

    class Meta:
        name = 'Query'

    version = dauphin.NonNull(dauphin.String)
    reloadSupported = dauphin.NonNull(dauphin.Boolean)
    pipelineOrError = dauphin.Field(
        dauphin.NonNull('PipelineOrError'), params=dauphin.NonNull('ExecutionSelector')
    )
    pipeline = dauphin.Field(
        dauphin.NonNull('Pipeline'), params=dauphin.NonNull('ExecutionSelector')
    )
    pipelinesOrError = dauphin.NonNull('PipelinesOrError')
    pipelines = dauphin.Field(dauphin.NonNull('PipelineConnection'))
    configTypeOrError = dauphin.Field(
        dauphin.NonNull('ConfigTypeOrError'),
        pipelineName=dauphin.Argument(dauphin.NonNull(dauphin.String)),
        configTypeName=dauphin.Argument(dauphin.NonNull(dauphin.String)),
        mode=dauphin.Argument(dauphin.NonNull(dauphin.String)),
    )
    runtimeTypeOrError = dauphin.Field(
        dauphin.NonNull('RuntimeTypeOrError'),
        pipelineName=dauphin.Argument(dauphin.NonNull(dauphin.String)),
        runtimeTypeName=dauphin.Argument(dauphin.NonNull(dauphin.String)),
    )
    scheduler = dauphin.Field(dauphin.NonNull('SchedulerOrError'))
    pipelineRunsOrError = dauphin.Field(
        dauphin.NonNull('PipelineRunsOrError'),
        filter=dauphin.Argument(dauphin.NonNull('PipelineRunsFilter')),
        cursor=dauphin.String(),
        limit=dauphin.Int(),
    )
    pipelineRunOrError = dauphin.Field(
        dauphin.NonNull('PipelineRunOrError'), runId=dauphin.NonNull(dauphin.ID)
    )
    pipelineRunTags = dauphin.non_null_list('PipelineTagAndValues')
    usedSolids = dauphin.Field(dauphin.non_null_list('UsedSolid'))
    isPipelineConfigValid = dauphin.Field(
        dauphin.NonNull('PipelineConfigValidationResult'),
        args={
            'pipeline': dauphin.Argument(dauphin.NonNull('ExecutionSelector')),
            'environmentConfigData': dauphin.Argument('EnvironmentConfigData'),
            'mode': dauphin.Argument(dauphin.NonNull(dauphin.String)),
        },
    )
    executionPlan = dauphin.Field(
        dauphin.NonNull('ExecutionPlanResult'),
        args={
            'pipeline': dauphin.Argument(dauphin.NonNull('ExecutionSelector')),
            'environmentConfigData': dauphin.Argument('EnvironmentConfigData'),
            'mode': dauphin.Argument(dauphin.NonNull(dauphin.String)),
        },
    )
    environmentSchemaOrError = dauphin.Field(
        dauphin.NonNull('EnvironmentSchemaOrError'),
        args={
            'selector': dauphin.Argument(dauphin.NonNull('ExecutionSelector')),
            'mode': dauphin.Argument(dauphin.String),
        },
        description='''Fetch an environment schema given an execution selection and a mode. See the descripton on EnvironmentSchema for more information.''',
    )
    instance = dauphin.NonNull('Instance')

    def resolve_configTypeOrError(self, graphene_info, **kwargs):
        """Look up a config type by pipeline name / type name / optional mode."""
        return get_config_type(
            graphene_info, kwargs['pipelineName'], kwargs['configTypeName'], kwargs.get('mode')
        )

    def resolve_runtimeTypeOrError(self, graphene_info, **kwargs):
        """Look up a runtime type by pipeline name and type name."""
        return get_runtime_type(graphene_info, kwargs['pipelineName'], kwargs['runtimeTypeName'])

    def resolve_version(self, graphene_info):
        """Dagit/dagster version string from the request context."""
        return graphene_info.context.version

    def resolve_reloadSupported(self, graphene_info):
        """Whether the context's reloader supports reloading."""
        return graphene_info.context.reloader.is_reload_supported

    def resolve_scheduler(self, graphene_info):
        """Configured scheduler, or an error payload."""
        return get_scheduler_or_error(graphene_info)

    def resolve_pipelineOrError(self, graphene_info, **kwargs):
        """Pipeline lookup that returns an error payload on failure."""
        return get_pipeline_or_error(graphene_info, kwargs['params'].to_selector())

    def resolve_pipeline(self, graphene_info, **kwargs):
        """Pipeline lookup that raises on failure (non-OrError variant)."""
        return get_pipeline_or_raise(graphene_info, kwargs['params'].to_selector())

    def resolve_pipelinesOrError(self, graphene_info):
        """All pipelines, or an error payload."""
        return get_pipelines_or_error(graphene_info)

    def resolve_pipelines(self, graphene_info):
        """All pipelines; raises on failure."""
        return get_pipelines_or_raise(graphene_info)

    def resolve_pipelineRunsOrError(self, graphene_info, **kwargs):
        """Runs matching the filter; rejects filters that set more than one option."""
        filters = kwargs['filter'].to_selector()
        # Only one of run_id / pipeline / tag_key / status may be provided.
        provided = [
            i for i in [filters.run_id, filters.pipeline, filters.tag_key, filters.status] if i
        ]
        if len(provided) > 1:
            return graphene_info.schema.type_named('InvalidPipelineRunsFilterError')(
                message="You may only provide one of the filter options."
            )
        return graphene_info.schema.type_named('PipelineRuns')(
            results=get_runs(graphene_info, filters, kwargs.get('cursor'), kwargs.get('limit'))
        )

    def resolve_pipelineRunOrError(self, graphene_info, runId):
        """Single run lookup by run id."""
        return get_run(graphene_info, runId)

    def resolve_pipelineRunTags(self, graphene_info):
        """All tag key/value pairs seen across runs."""
        return get_run_tags(graphene_info)

    def resolve_usedSolids(self, graphene_info):
        """Solid definitions used in the repository with their invocation sites.

        Groups every solid handle across all pipelines by definition name,
        then returns one UsedSolid per definition (sorted by name) carrying
        its sorted invocation list.
        """
        repository = graphene_info.context.repository_definition
        inv_by_def_name = defaultdict(list)
        definitions = []

        for pipeline in repository.get_all_pipelines():
            for handle in build_dauphin_solid_handles(pipeline):
                definition = handle.solid.resolve_definition(graphene_info)
                # First sighting of a definition name registers the definition.
                if definition.name not in inv_by_def_name:
                    definitions.append(definition)
                inv_by_def_name[definition.name].append(
                    DauphinSolidInvocationSite(pipeline=pipeline, solidHandle=handle)
                )

        return map(
            lambda d: DauphinUsedSolid(
                definition=d,
                invocations=sorted(inv_by_def_name[d.name], key=lambda i: i.solidHandle.handleID),
            ),
            sorted(definitions, key=lambda d: d.name),
        )

    def resolve_isPipelineConfigValid(self, graphene_info, pipeline, **kwargs):
        """Validate an environment config against a pipeline/mode."""
        return validate_pipeline_config(
            graphene_info,
            pipeline.to_selector(),
            kwargs.get('environmentConfigData'),
            kwargs.get('mode'),
        )

    def resolve_executionPlan(self, graphene_info, pipeline, **kwargs):
        """Build an execution plan for a pipeline/config/mode."""
        return get_execution_plan(
            graphene_info,
            pipeline.to_selector(),
            kwargs.get('environmentConfigData'),
            kwargs.get('mode'),
        )

    def resolve_environmentSchemaOrError(self, graphene_info, **kwargs):
        """Environment schema for a selector and optional mode."""
        return resolve_environment_schema_or_error(
            graphene_info, kwargs['selector'].to_selector(), kwargs.get('mode')
        )

    def resolve_instance(self, graphene_info):
        """The DagsterInstance wrapped as the Instance GraphQL type."""
        return graphene_info.schema.type_named('Instance')(graphene_info.context.instance)
class DauphinPartition(dauphin.ObjectType):
    """GraphQL type for one partition of an external partition set.

    Wraps the (repository handle, external partition set, partition name)
    triple and resolves run config, tags, and runs for that partition.
    """

    class Meta:
        name = "Partition"

    name = dauphin.NonNull(dauphin.String)
    partition_set_name = dauphin.NonNull(dauphin.String)
    solid_selection = dauphin.List(dauphin.NonNull(dauphin.String))
    mode = dauphin.NonNull(dauphin.String)
    runConfigOrError = dauphin.NonNull("PartitionRunConfigOrError")
    tagsOrError = dauphin.NonNull("PartitionTagsOrError")
    runs = dauphin.Field(
        dauphin.non_null_list("PipelineRun"),
        filter=dauphin.Argument("PipelineRunsFilter"),
        cursor=dauphin.String(),
        limit=dauphin.Int(),
    )
    # NOTE(review): no resolve_status is defined in this class — confirm where
    # (or whether) this field is resolved.
    status = dauphin.Field("PipelineRunStatus")

    def __init__(self, external_repository_handle, external_partition_set, partition_name):
        """Validate and store the handle/partition-set/name triple.

        Bug fix: the check param name was misspelled
        "external_respository_handle", so check failures reported a
        nonexistent argument name.
        """
        self._external_repository_handle = check.inst_param(
            external_repository_handle, "external_repository_handle", RepositoryHandle)
        self._external_partition_set = check.inst_param(
            external_partition_set, "external_partition_set", ExternalPartitionSet)
        self._partition_name = check.str_param(partition_name, "partition_name")
        super(DauphinPartition, self).__init__(
            name=partition_name,
            partition_set_name=external_partition_set.name,
            solid_selection=external_partition_set.solid_selection,
            mode=external_partition_set.mode,
        )

    def resolve_runConfigOrError(self, graphene_info):
        """Run config for this partition, or an error payload."""
        return get_partition_config(
            graphene_info,
            self._external_repository_handle,
            self._external_partition_set.name,
            self._partition_name,
        )

    def resolve_tagsOrError(self, graphene_info):
        """Tags for this partition, or an error payload."""
        return get_partition_tags(
            graphene_info,
            self._external_repository_handle,
            self._external_partition_set.name,
            self._partition_name,
        )

    def resolve_runs(self, graphene_info, **kwargs):
        """Runs for this partition, with the partition tags merged into any user filter."""
        filters = kwargs.get("filter")
        # These tags identify runs launched for this specific partition.
        partition_tags = {
            PARTITION_SET_TAG: self._external_partition_set.name,
            PARTITION_NAME_TAG: self._partition_name,
        }
        if filters is not None:
            filters = filters.to_selector()
            runs_filter = PipelineRunsFilter(
                run_ids=filters.run_ids,
                pipeline_name=filters.pipeline_name,
                statuses=filters.statuses,
                tags=merge_dicts(filters.tags, partition_tags),
            )
        else:
            runs_filter = PipelineRunsFilter(tags=partition_tags)

        return get_runs(
            graphene_info, runs_filter, cursor=kwargs.get("cursor"), limit=kwargs.get("limit")
        )
class DauphinError(dauphin.Interface):
    """GraphQL interface for errors that also carry a stack trace.

    NOTE(review): this redeclares the "Error" interface defined earlier in
    this file (without ``stack``); the later definition shadows the earlier
    Python name — confirm which variant the schema should use.
    """

    class Meta:
        name = 'Error'

    # Human-readable error message, required on every implementor.
    message = dauphin.String(required=True)
    # Stack trace lines associated with the error.
    stack = dauphin.non_null_list(dauphin.String)
class DauphinPartitionSet(dauphin.ObjectType):
    """GraphQL type for an external partition set.

    Resolves its partitions (with cursor/limit/reverse paging) and single
    partitions by name.
    """

    class Meta(object):
        name = 'PartitionSet'

    name = dauphin.NonNull(dauphin.String)
    pipeline_name = dauphin.NonNull(dauphin.String)
    solid_selection = dauphin.List(dauphin.NonNull(dauphin.String))
    mode = dauphin.NonNull(dauphin.String)
    partitions = dauphin.Field(
        dauphin.NonNull('Partitions'),
        cursor=dauphin.String(),
        limit=dauphin.Int(),
        reverse=dauphin.Boolean(),
    )
    partition = dauphin.Field('Partition', partition_name=dauphin.NonNull(dauphin.String))

    def __init__(self, external_repository_handle, external_partition_set):
        """Validate and store the repository handle and external partition set.

        Bug fix: the check param name was misspelled
        "external_respository_handle", so check failures reported a
        nonexistent argument name.
        """
        self._external_repository_handle = check.inst_param(
            external_repository_handle, 'external_repository_handle', RepositoryHandle)
        self._external_partition_set = check.inst_param(
            external_partition_set, 'external_partition_set', ExternalPartitionSet)
        super(DauphinPartitionSet, self).__init__(
            name=external_partition_set.name,
            pipeline_name=external_partition_set.pipeline_name,
            solid_selection=external_partition_set.solid_selection,
            mode=external_partition_set.mode,
        )

    def resolve_partitions(self, graphene_info, **kwargs):
        """Return a window of partitions honoring cursor, limit, and reverse.

        Forward paging starts just after the cursor; reverse paging ends just
        before it. ``limit`` then caps the window from the appropriate end.
        """
        partition_names = get_partition_names(
            self._external_repository_handle,
            self._external_partition_set.name,
        )

        cursor = kwargs.get('cursor')
        limit = kwargs.get('limit')
        reverse = kwargs.get('reverse')

        start = 0
        end = len(partition_names)

        if cursor:
            index = next(
                (idx for (idx, partition_name) in enumerate(partition_names)
                 if partition_name == cursor),
                None,
            )
            # Bug fix: an unknown cursor left index as None and crashed on
            # `index + 1` below; now an unrecognized cursor is ignored and the
            # full range is kept.
            if index is not None:
                if reverse:
                    end = index
                else:
                    start = index + 1

        if limit:
            if reverse:
                start = end - limit
            else:
                end = start + limit

        partition_names = partition_names[start:end]

        return graphene_info.schema.type_named('Partitions')(results=[
            graphene_info.schema.type_named('Partition')(
                external_partition_set=self._external_partition_set,
                external_repository_handle=self._external_repository_handle,
                partition_name=partition_name,
            )
            for partition_name in partition_names
        ])

    def resolve_partition(self, graphene_info, partition_name):
        """Look up one partition of this set by name."""
        return get_partition_by_name(
            graphene_info,
            self._external_repository_handle,
            self._external_partition_set,
            partition_name,
        )
class DauphinQuery(dauphin.ObjectType):
    """Root GraphQL Query type (PipelineSelector-era schema).

    Each camelCase field is paired with a ``resolve_<field>`` method that
    delegates to a fetcher helper.
    """

    class Meta(object):
        name = 'Query'

    version = dauphin.NonNull(dauphin.String)
    reloadSupported = dauphin.NonNull(dauphin.Boolean)
    repositoryLocationsOrError = dauphin.Field('RepositoryLocationsOrError')
    pipelineOrError = dauphin.Field(dauphin.NonNull('PipelineOrError'),
                                    params=dauphin.NonNull('PipelineSelector'))
    pipelinesOrError = dauphin.NonNull('PipelinesOrError')
    # Exactly one of snapshotId / activePipelineName must be supplied; see resolver.
    pipelineSnapshotOrError = dauphin.Field(
        dauphin.NonNull('PipelineSnapshotOrError'),
        snapshotId=dauphin.String(),
        activePipelineName=dauphin.String(),
    )
    scheduler = dauphin.Field(dauphin.NonNull('SchedulerOrError'))
    # NOTE(review): the resolver signature does not accept `limit`, so a query
    # passing it would fail — confirm whether `limit` belongs here.
    scheduleOrError = dauphin.Field(
        dauphin.NonNull('ScheduleOrError'),
        schedule_name=dauphin.NonNull(dauphin.String),
        limit=dauphin.Int(),
    )
    partitionSetsOrError = dauphin.Field(
        dauphin.NonNull('PartitionSetsOrError'), pipelineName=dauphin.String())
    partitionSetOrError = dauphin.Field(dauphin.NonNull('PartitionSetOrError'),
                                        partitionSetName=dauphin.String())
    pipelineRunsOrError = dauphin.Field(
        dauphin.NonNull('PipelineRunsOrError'),
        filter=dauphin.Argument('PipelineRunsFilter'),
        cursor=dauphin.String(),
        limit=dauphin.Int(),
    )
    pipelineRunOrError = dauphin.Field(dauphin.NonNull('PipelineRunOrError'),
                                       runId=dauphin.NonNull(dauphin.ID))
    pipelineRunTags = dauphin.non_null_list('PipelineTagAndValues')
    runGroupOrError = dauphin.Field(dauphin.NonNull('RunGroupOrError'),
                                    runId=dauphin.NonNull(dauphin.ID))
    runGroupsOrError = dauphin.Field(
        dauphin.NonNull('RunGroupsOrError'),
        filter=dauphin.Argument('PipelineRunsFilter'),
        cursor=dauphin.String(),
        limit=dauphin.Int(),
    )
    usedSolids = dauphin.Field(dauphin.non_null_list('UsedSolid'))
    usedSolid = dauphin.Field('UsedSolid', name=dauphin.NonNull(dauphin.String))
    isPipelineConfigValid = dauphin.Field(
        dauphin.NonNull('PipelineConfigValidationResult'),
        args={
            'pipeline': dauphin.Argument(dauphin.NonNull('PipelineSelector')),
            'runConfigData': dauphin.Argument('RunConfigData'),
            'mode': dauphin.Argument(dauphin.NonNull(dauphin.String)),
        },
    )
    executionPlanOrError = dauphin.Field(
        dauphin.NonNull('ExecutionPlanOrError'),
        args={
            'pipeline': dauphin.Argument(dauphin.NonNull('PipelineSelector')),
            'runConfigData': dauphin.Argument('RunConfigData'),
            'mode': dauphin.Argument(dauphin.NonNull(dauphin.String)),
        },
    )
    runConfigSchemaOrError = dauphin.Field(
        dauphin.NonNull('RunConfigSchemaOrError'),
        args={
            'selector': dauphin.Argument(dauphin.NonNull('PipelineSelector')),
            'mode': dauphin.Argument(dauphin.String),
        },
        description='''Fetch an environment schema given an execution selection and a mode. See the descripton on RunConfigSchema for more information.''',
    )
    instance = dauphin.NonNull('Instance')
    assetsOrError = dauphin.Field(dauphin.NonNull('AssetsOrError'))
    assetOrError = dauphin.Field(dauphin.NonNull('AssetOrError'),
                                 assetKey=dauphin.NonNull(dauphin.String))

    def resolve_repositoryLocationsOrError(self, graphene_info):
        """All repository locations, or an error payload."""
        return fetch_repository_locations(graphene_info)

    def resolve_pipelineSnapshotOrError(self, graphene_info, **kwargs):
        """Snapshot lookup by snapshot id XOR active pipeline name."""
        snapshot_id_arg = kwargs.get('snapshotId')
        pipeline_name_arg = kwargs.get('activePipelineName')
        check.invariant(
            not (snapshot_id_arg and pipeline_name_arg),
            'Cannot pass both snapshotId and activePipelineName',
        )
        check.invariant(snapshot_id_arg or pipeline_name_arg,
                        'Must set one of snapshotId or activePipelineName')

        if pipeline_name_arg:
            return get_pipeline_snapshot_or_error_from_pipeline_name(
                graphene_info, pipeline_name_arg)
        else:
            return get_pipeline_snapshot_or_error_from_snapshot_id(
                graphene_info, snapshot_id_arg)

    def resolve_version(self, graphene_info):
        """Dagit/dagster version string from the request context."""
        return graphene_info.context.version

    def resolve_reloadSupported(self, graphene_info):
        """Whether the legacy repository location supports reloading."""
        return graphene_info.context.legacy_location.is_reload_supported

    def resolve_scheduler(self, graphene_info):
        """Configured scheduler, or an error payload."""
        return get_scheduler_or_error(graphene_info)

    def resolve_scheduleOrError(self, graphene_info, schedule_name):
        """Schedule lookup by name."""
        return get_schedule_or_error(graphene_info, schedule_name)

    def resolve_pipelineOrError(self, graphene_info, **kwargs):
        """Pipeline lookup via PipelineSelector GraphQL input."""
        return get_pipeline_or_error(
            graphene_info,
            PipelineSelector.from_graphql_input(graphene_info.context, kwargs['params']),
        )

    def resolve_pipelinesOrError(self, graphene_info):
        """All pipelines, or an error payload."""
        return get_pipelines_or_error(graphene_info)

    def resolve_pipelineRunsOrError(self, graphene_info, **kwargs):
        """Runs matching an optional filter, cursor, and limit."""
        filters = kwargs.get('filter')
        if filters is not None:
            filters = filters.to_selector()
        return graphene_info.schema.type_named('PipelineRuns')(
            results=get_runs(graphene_info, filters, kwargs.get('cursor'), kwargs.get('limit')))

    def resolve_pipelineRunOrError(self, graphene_info, runId):
        """Single run lookup by run id."""
        return get_run_by_id(graphene_info, runId)

    def resolve_runGroupsOrError(self, graphene_info, **kwargs):
        """Run groups matching an optional filter, cursor, and limit."""
        filters = kwargs.get('filter')
        if filters is not None:
            filters = filters.to_selector()
        return graphene_info.schema.type_named('RunGroupsOrError')(
            results=get_run_groups(graphene_info, filters, kwargs.get(
                'cursor'), kwargs.get('limit')))

    def resolve_partitionSetsOrError(self, graphene_info, **kwargs):
        """Partition sets, optionally filtered by pipeline name."""
        pipeline_name = kwargs.get('pipelineName')
        return get_partition_sets_or_error(graphene_info, pipeline_name)

    def resolve_partitionSetOrError(self, graphene_info, partitionSetName):
        """Single partition set lookup by name."""
        return get_partition_set(graphene_info, partitionSetName)

    def resolve_pipelineRunTags(self, graphene_info):
        """All tag key/value pairs seen across runs."""
        return get_run_tags(graphene_info)

    def resolve_runGroupOrError(self, graphene_info, runId):
        """Run group containing the given run id."""
        return get_run_group(graphene_info, runId)

    def resolve_usedSolid(self, graphene_info, name):
        """Single used-solid lookup by definition name."""
        return get_solid(graphene_info, name)

    def resolve_usedSolids(self, graphene_info):
        """All used solids in the workspace."""
        return get_solids(graphene_info)

    def resolve_isPipelineConfigValid(self, graphene_info, pipeline, **kwargs):
        """Validate run config against a pipeline/mode."""
        return validate_pipeline_config(
            graphene_info,
            PipelineSelector.from_graphql_input(graphene_info.context, pipeline),
            kwargs.get('runConfigData'),
            kwargs.get('mode'),
        )

    def resolve_executionPlanOrError(self, graphene_info, pipeline, **kwargs):
        """Build an execution plan for a pipeline/config/mode."""
        return get_execution_plan(
            graphene_info,
            PipelineSelector.from_graphql_input(graphene_info.context, pipeline),
            kwargs.get('runConfigData'),
            kwargs.get('mode'),
        )

    def resolve_runConfigSchemaOrError(self, graphene_info, **kwargs):
        """Run config schema for a selector and optional mode."""
        return resolve_run_config_schema_or_error(
            graphene_info,
            PipelineSelector.from_graphql_input(graphene_info.context, kwargs['selector']),
            kwargs.get('mode'),
        )

    def resolve_instance(self, graphene_info):
        """The DagsterInstance wrapped as the Instance GraphQL type."""
        return graphene_info.schema.type_named('Instance')(
            graphene_info.context.instance)

    def resolve_assetsOrError(self, graphene_info):
        """All assets, or an error payload."""
        return get_assets(graphene_info)

    def resolve_assetOrError(self, graphene_info, assetKey):
        """Single asset lookup by key string."""
        return get_asset(graphene_info, assetKey)
class DauphinIPipelineSnapshotMixin:
    # Mixin this class to implement IPipelineSnapshot
    #
    # Graphene has some strange properties that make it so that you cannot
    # implement ABCs nor use properties in an overridable way. So the way
    # the mixin works is that the target classes have to have a method
    # get_represented_pipeline()
    #
    def get_represented_pipeline(self):
        """Hook for the mixing-in class: return the represented pipeline."""
        raise NotImplementedError()

    name = dauphin.NonNull(dauphin.String)
    description = dauphin.String()
    id = dauphin.NonNull(dauphin.ID)
    pipeline_snapshot_id = dauphin.NonNull(dauphin.String)
    dagster_types = dauphin.non_null_list("DagsterType")
    dagster_type_or_error = dauphin.Field(
        dauphin.NonNull("DagsterTypeOrError"),
        dagsterTypeName=dauphin.Argument(dauphin.NonNull(dauphin.String)),
    )
    solids = dauphin.non_null_list("Solid")
    modes = dauphin.non_null_list("Mode")
    solid_handles = dauphin.Field(
        dauphin.non_null_list("SolidHandle"), parentHandleID=dauphin.String()
    )
    solid_handle = dauphin.Field(
        "SolidHandle",
        handleID=dauphin.Argument(dauphin.NonNull(dauphin.String)),
    )
    tags = dauphin.non_null_list("PipelineTag")
    runs = dauphin.Field(
        dauphin.non_null_list("PipelineRun"),
        cursor=dauphin.String(),
        limit=dauphin.Int(),
    )
    schedules = dauphin.non_null_list("Schedule")
    parent_snapshot_id = dauphin.String()

    def resolve_pipeline_snapshot_id(self, _):
        """Identifying snapshot id of the represented pipeline."""
        return self.get_represented_pipeline().identifying_pipeline_snapshot_id

    def resolve_id(self, _):
        # `id` intentionally mirrors the snapshot id.
        return self.get_represented_pipeline().identifying_pipeline_snapshot_id

    def resolve_name(self, _):
        """Name of the represented pipeline."""
        return self.get_represented_pipeline().name

    def resolve_description(self, _):
        """Description of the represented pipeline."""
        return self.get_represented_pipeline().description

    def resolve_dagster_types(self, _graphene_info):
        """Named dagster types in the snapshot, sorted by name (anonymous types skipped)."""
        represented_pipeline = self.get_represented_pipeline()
        return sorted(
            list(
                map(
                    lambda dt: to_dauphin_dagster_type(
                        represented_pipeline.pipeline_snapshot, dt.key
                    ),
                    [t for t in represented_pipeline.dagster_type_snaps if t.name],
                )
            ),
            key=lambda dagster_type: dagster_type.name,
        )

    @capture_dauphin_error
    def resolve_dagster_type_or_error(self, _, **kwargs):
        """Look up a dagster type by name; raise a user-facing not-found error otherwise."""
        type_name = kwargs["dagsterTypeName"]

        represented_pipeline = self.get_represented_pipeline()

        if not represented_pipeline.has_dagster_type_named(type_name):
            # Local import presumably avoids a circular import — confirm.
            from .errors import DauphinDagsterTypeNotFoundError

            raise UserFacingGraphQLError(
                DauphinDagsterTypeNotFoundError(dagster_type_name=type_name)
            )

        return to_dauphin_dagster_type(
            represented_pipeline.pipeline_snapshot,
            represented_pipeline.get_dagster_type_by_name(type_name).key,
        )

    def resolve_solids(self, _graphene_info):
        """Build the dauphin solid list from the snapshot's dependency structure."""
        represented_pipeline = self.get_represented_pipeline()
        return build_dauphin_solids(represented_pipeline, represented_pipeline.dep_structure_index,)

    def resolve_modes(self, _):
        """Mode definition snapshots, sorted by name."""
        represented_pipeline = self.get_represented_pipeline()
        return [
            DauphinMode(represented_pipeline.config_schema_snapshot, mode_def_snap)
            for mode_def_snap in sorted(
                represented_pipeline.mode_def_snaps, key=lambda item: item.name
            )
        ]

    def resolve_solid_handle(self, _graphene_info, handleID):
        """Look up one solid handle by id (None when absent)."""
        return _get_solid_handles(self.get_represented_pipeline()).get(handleID)

    def resolve_solid_handles(self, _graphene_info, **kwargs):
        """Solid handles, optionally filtered by parent handle.

        Empty-string parentHandleID selects top-level handles; any other
        non-None value selects direct children of that handle.
        """
        handles = _get_solid_handles(self.get_represented_pipeline())
        parentHandleID = kwargs.get("parentHandleID")

        if parentHandleID == "":
            handles = {key: handle for key, handle in handles.items() if not handle.parent}
        elif parentHandleID is not None:
            handles = {
                key: handle
                for key, handle in handles.items()
                if handle.parent and handle.parent.handleID.to_string() == parentHandleID
            }

        return [handles[key] for key in sorted(handles)]

    def resolve_tags(self, graphene_info):
        """Snapshot tags as PipelineTag key/value objects."""
        represented_pipeline = self.get_represented_pipeline()
        return [
            graphene_info.schema.type_named("PipelineTag")(key=key, value=value)
            for key, value in represented_pipeline.pipeline_snapshot.tags.items()
        ]

    def resolve_solidSelection(self, _graphene_info):
        # NOTE(review): no corresponding `solidSelection` field is declared on
        # this mixin — confirm the implementing classes declare it.
        return self.get_represented_pipeline().solid_selection

    def resolve_runs(self, graphene_info, **kwargs):
        """Runs of the represented pipeline, honoring cursor and limit."""
        runs_filter = PipelineRunsFilter(pipeline_name=self.get_represented_pipeline().name)
        return get_runs(graphene_info, runs_filter, kwargs.get("cursor"), kwargs.get("limit"))

    def resolve_schedules(self, graphene_info):
        """Schedules targeting this pipeline; empty for historical snapshots."""
        represented_pipeline = self.get_represented_pipeline()
        if not isinstance(represented_pipeline, ExternalPipeline):
            # this is an historical pipeline snapshot, so there are not any associated running
            # schedules
            return []

        pipeline_selector = represented_pipeline.handle.to_selector()
        schedules = get_schedules_for_pipeline(graphene_info, pipeline_selector)
        return schedules

    def resolve_parent_snapshot_id(self, _graphene_info):
        """Parent snapshot id from the lineage snapshot, or None when there is no lineage."""
        lineage_snapshot = self.get_represented_pipeline().pipeline_snapshot.lineage_snapshot
        if lineage_snapshot:
            return lineage_snapshot.parent_snapshot_id
        else:
            return None
class DauphinStartPipelineExecution(dauphin.Interface):
    """GraphQL interface for start-pipeline-execution results carrying a message.

    NOTE(review): unlike every other type in this file, the Meta name keeps
    the 'Dauphin' prefix ('DauphinStartPipelineExecution' rather than
    'StartPipelineExecution') — confirm whether that is intentional before
    relying on the GraphQL type name.
    """

    class Meta(object):
        name = 'DauphinStartPipelineExecution'

    # Required human-readable message on every implementor.
    message = dauphin.String(required=True)
class DauphinScheduler(dauphin.ObjectType):
    """GraphQL type exposing the configured scheduler."""

    class Meta(object):
        name = 'Scheduler'

    # Name of the scheduler class (presumably its class name — confirm
    # against whatever constructs this type).
    scheduler_class = dauphin.String()
class DauphinQuery(dauphin.ObjectType):
    """Root GraphQL Query type (RepositorySelector-era schema).

    Each camelCase field is paired with a ``resolve_<field>`` method that
    delegates to a fetcher helper.
    """

    class Meta(object):
        name = "Query"

    version = dauphin.NonNull(dauphin.String)
    repositoriesOrError = dauphin.NonNull("RepositoriesOrError")
    repositoryOrError = dauphin.Field(
        dauphin.NonNull("RepositoryOrError"),
        repositorySelector=dauphin.NonNull("RepositorySelector"),
    )
    pipelineOrError = dauphin.Field(dauphin.NonNull("PipelineOrError"),
                                    params=dauphin.NonNull("PipelineSelector"))
    # Exactly one of snapshotId / activePipelineSelector must be supplied; see resolver.
    pipelineSnapshotOrError = dauphin.Field(
        dauphin.NonNull("PipelineSnapshotOrError"),
        snapshotId=dauphin.String(),
        activePipelineSelector=dauphin.Argument("PipelineSelector"),
    )
    scheduler = dauphin.Field(dauphin.NonNull("SchedulerOrError"))
    scheduleDefinitionOrError = dauphin.Field(
        dauphin.NonNull("ScheduleDefinitionOrError"),
        schedule_selector=dauphin.NonNull("ScheduleSelector"),
    )
    scheduleDefinitionsOrError = dauphin.Field(
        dauphin.NonNull("ScheduleDefinitionsOrError"),
        repositorySelector=dauphin.NonNull("RepositorySelector"),
    )
    scheduleStatesOrError = dauphin.Field(
        dauphin.NonNull("ScheduleStatesOrError"),
        repositorySelector=dauphin.Argument("RepositorySelector"),
        withNoScheduleDefinition=dauphin.Boolean(),
    )
    partitionSetsOrError = dauphin.Field(
        dauphin.NonNull("PartitionSetsOrError"),
        repositorySelector=dauphin.NonNull("RepositorySelector"),
        pipelineName=dauphin.NonNull(dauphin.String),
    )
    partitionSetOrError = dauphin.Field(
        dauphin.NonNull("PartitionSetOrError"),
        repositorySelector=dauphin.NonNull("RepositorySelector"),
        partitionSetName=dauphin.String(),
    )
    pipelineRunsOrError = dauphin.Field(
        dauphin.NonNull("PipelineRunsOrError"),
        filter=dauphin.Argument("PipelineRunsFilter"),
        cursor=dauphin.String(),
        limit=dauphin.Int(),
    )
    pipelineRunOrError = dauphin.Field(dauphin.NonNull("PipelineRunOrError"),
                                       runId=dauphin.NonNull(dauphin.ID))
    pipelineRunTags = dauphin.non_null_list("PipelineTagAndValues")
    runGroupOrError = dauphin.Field(dauphin.NonNull("RunGroupOrError"),
                                    runId=dauphin.NonNull(dauphin.ID))
    runGroupsOrError = dauphin.Field(
        dauphin.NonNull("RunGroupsOrError"),
        filter=dauphin.Argument("PipelineRunsFilter"),
        cursor=dauphin.String(),
        limit=dauphin.Int(),
    )
    isPipelineConfigValid = dauphin.Field(
        dauphin.NonNull("PipelineConfigValidationResult"),
        args={
            "pipeline": dauphin.Argument(dauphin.NonNull("PipelineSelector")),
            "runConfigData": dauphin.Argument("RunConfigData"),
            "mode": dauphin.Argument(dauphin.NonNull(dauphin.String)),
        },
    )
    executionPlanOrError = dauphin.Field(
        dauphin.NonNull("ExecutionPlanOrError"),
        args={
            "pipeline": dauphin.Argument(dauphin.NonNull("PipelineSelector")),
            "runConfigData": dauphin.Argument("RunConfigData"),
            "mode": dauphin.Argument(dauphin.NonNull(dauphin.String)),
        },
    )
    runConfigSchemaOrError = dauphin.Field(
        dauphin.NonNull("RunConfigSchemaOrError"),
        args={
            "selector": dauphin.Argument(dauphin.NonNull("PipelineSelector")),
            "mode": dauphin.Argument(dauphin.String),
        },
        description="""Fetch an environment schema given an execution selection and a mode. See the descripton on RunConfigSchema for more information.""",
    )
    instance = dauphin.NonNull("Instance")
    assetsOrError = dauphin.Field(dauphin.NonNull("AssetsOrError"))
    assetOrError = dauphin.Field(
        dauphin.NonNull("AssetOrError"),
        assetKey=dauphin.Argument(dauphin.NonNull("AssetKeyInput")),
    )

    def resolve_repositoriesOrError(self, graphene_info):
        """All repositories, or an error payload."""
        return fetch_repositories(graphene_info)

    def resolve_repositoryOrError(self, graphene_info, **kwargs):
        """Single repository lookup via RepositorySelector input."""
        return fetch_repository(
            graphene_info,
            RepositorySelector.from_graphql_input(
                kwargs.get("repositorySelector")),
        )

    def resolve_pipelineSnapshotOrError(self, graphene_info, **kwargs):
        """Snapshot lookup by snapshot id XOR active pipeline selector."""
        snapshot_id_arg = kwargs.get("snapshotId")
        pipeline_selector_arg = kwargs.get("activePipelineSelector")
        check.invariant(
            not (snapshot_id_arg and pipeline_selector_arg),
            "Must only pass one of snapshotId or activePipelineSelector",
        )
        check.invariant(
            snapshot_id_arg or pipeline_selector_arg,
            "Must set one of snapshotId or activePipelineSelector",
        )

        if pipeline_selector_arg:
            pipeline_selector = pipeline_selector_from_graphql(
                graphene_info.context, kwargs["activePipelineSelector"])
            return get_pipeline_snapshot_or_error_from_pipeline_selector(
                graphene_info, pipeline_selector)
        else:
            return get_pipeline_snapshot_or_error_from_snapshot_id(
                graphene_info, snapshot_id_arg)

    def resolve_version(self, graphene_info):
        """Dagit/dagster version string from the request context."""
        return graphene_info.context.version

    def resolve_scheduler(self, graphene_info):
        """Configured scheduler, or an error payload."""
        return get_scheduler_or_error(graphene_info)

    def resolve_scheduleDefinitionOrError(self, graphene_info, schedule_selector):
        """Schedule definition lookup via ScheduleSelector input."""
        return get_schedule_definition_or_error(
            graphene_info, ScheduleSelector.from_graphql_input(schedule_selector))

    def resolve_scheduleDefinitionsOrError(self, graphene_info, **kwargs):
        """Schedule definitions for a repository."""
        return get_schedule_definitions_or_error(
            graphene_info, RepositorySelector.from_graphql_input(
                kwargs.get("repositorySelector")))

    def resolve_scheduleStatesOrError(self, graphene_info, **kwargs):
        """Schedule states, optionally scoped to a repository."""
        return get_schedule_states_or_error(
            graphene_info,
            # Selector is optional here; None means all repositories.
            RepositorySelector.from_graphql_input(kwargs["repositorySelector"])
            if kwargs.get("repositorySelector") else None,
            kwargs.get("withNoScheduleDefinition"),
        )

    def resolve_pipelineOrError(self, graphene_info, **kwargs):
        """Pipeline lookup via PipelineSelector GraphQL input."""
        return get_pipeline_or_error(
            graphene_info,
            pipeline_selector_from_graphql(graphene_info.context, kwargs["params"]),
        )

    def resolve_pipelineRunsOrError(self, graphene_info, **kwargs):
        """Runs matching an optional filter, cursor, and limit."""
        filters = kwargs.get("filter")
        if filters is not None:
            filters = filters.to_selector()
        return graphene_info.schema.type_named("PipelineRuns")(
            results=get_runs(graphene_info, filters, kwargs.get("cursor"), kwargs.get("limit")))

    def resolve_pipelineRunOrError(self, graphene_info, runId):
        """Single run lookup by run id."""
        return get_run_by_id(graphene_info, runId)

    def resolve_runGroupsOrError(self, graphene_info, **kwargs):
        """Run groups matching an optional filter, cursor, and limit."""
        filters = kwargs.get("filter")
        if filters is not None:
            filters = filters.to_selector()
        return graphene_info.schema.type_named("RunGroupsOrError")(
            results=get_run_groups(graphene_info, filters, kwargs.get(
                "cursor"), kwargs.get("limit")))

    def resolve_partitionSetsOrError(self, graphene_info, **kwargs):
        """Partition sets for a repository and pipeline name."""
        return get_partition_sets_or_error(
            graphene_info,
            RepositorySelector.from_graphql_input(
                kwargs.get("repositorySelector")),
            kwargs.get("pipelineName"),
        )

    def resolve_partitionSetOrError(self, graphene_info, **kwargs):
        """Single partition set lookup for a repository."""
        return get_partition_set(
            graphene_info,
            RepositorySelector.from_graphql_input(
                kwargs.get("repositorySelector")),
            kwargs.get("partitionSetName"),
        )

    def resolve_pipelineRunTags(self, graphene_info):
        """All tag key/value pairs seen across runs."""
        return get_run_tags(graphene_info)

    def resolve_runGroupOrError(self, graphene_info, runId):
        """Run group containing the given run id."""
        return get_run_group(graphene_info, runId)

    def resolve_isPipelineConfigValid(self, graphene_info, pipeline, **kwargs):
        """Validate run config against a pipeline/mode."""
        return validate_pipeline_config(
            graphene_info,
            pipeline_selector_from_graphql(graphene_info.context, pipeline),
            kwargs.get("runConfigData"),
            kwargs.get("mode"),
        )

    def resolve_executionPlanOrError(self, graphene_info, pipeline, **kwargs):
        """Build an execution plan for a pipeline/config/mode."""
        return get_execution_plan(
            graphene_info,
            pipeline_selector_from_graphql(graphene_info.context, pipeline),
            kwargs.get("runConfigData"),
            kwargs.get("mode"),
        )

    def resolve_runConfigSchemaOrError(self, graphene_info, **kwargs):
        """Run config schema for a selector and optional mode."""
        return resolve_run_config_schema_or_error(
            graphene_info,
            pipeline_selector_from_graphql(graphene_info.context, kwargs["selector"]),
            kwargs.get("mode"),
        )

    def resolve_instance(self, graphene_info):
        """The DagsterInstance wrapped as the Instance GraphQL type."""
        return graphene_info.schema.type_named("Instance")(
            graphene_info.context.instance)

    def resolve_assetsOrError(self, graphene_info):
        """All assets, or an error payload."""
        return get_assets(graphene_info)

    def resolve_assetOrError(self, graphene_info, **kwargs):
        """Single asset lookup via AssetKeyInput."""
        return get_asset(graphene_info, AssetKey.from_graphql_input(kwargs["assetKey"]))
class DauphinPipelineRun(dauphin.ObjectType):
    """GraphQL 'PipelineRun' type: wraps a PipelineRun storage record.

    runId/status/mode are populated eagerly in __init__; everything else is
    resolved lazily from the wrapped PipelineRun or via this module's helper
    functions (get_stats, get_step_stats, ...).
    """

    class Meta(object):
        name = 'PipelineRun'

    runId = dauphin.NonNull(dauphin.String)
    # Nullable because of historical runs
    pipelineSnapshotId = dauphin.String()
    status = dauphin.NonNull('PipelineRunStatus')
    pipeline = dauphin.NonNull('PipelineReference')
    stats = dauphin.NonNull('PipelineRunStatsOrError')
    stepStats = dauphin.non_null_list('PipelineRunStepStats')
    computeLogs = dauphin.Field(
        dauphin.NonNull('ComputeLogs'),
        stepKey=dauphin.Argument(dauphin.NonNull(dauphin.String)),
        description=''' Compute logs are the stdout/stderr logs for a given solid step computation ''',
    )
    executionPlan = dauphin.Field('ExecutionPlan')
    stepKeysToExecute = dauphin.List(dauphin.NonNull(dauphin.String))
    environmentConfigYaml = dauphin.NonNull(dauphin.String)
    mode = dauphin.NonNull(dauphin.String)
    tags = dauphin.non_null_list('PipelineTag')
    rootRunId = dauphin.Field(dauphin.String)
    parentRunId = dauphin.Field(dauphin.String)
    canCancel = dauphin.NonNull(dauphin.Boolean)
    executionSelection = dauphin.NonNull('ExecutionSelection')

    def __init__(self, pipeline_run):
        # Scalar fields are seeded here; the rest resolve on demand.
        super(DauphinPipelineRun, self).__init__(runId=pipeline_run.run_id,
                                                 status=pipeline_run.status,
                                                 mode=pipeline_run.mode)
        self._pipeline_run = check.inst_param(pipeline_run, 'pipeline_run',
                                              PipelineRun)

    def resolve_pipeline(self, graphene_info):
        # Raises (rather than returning an error type) if the pipeline
        # referenced by the run's selector cannot be found.
        return get_pipeline_reference_or_raise(graphene_info,
                                               self._pipeline_run.selector)

    def resolve_pipelineSnapshotId(self, _):
        return self._pipeline_run.pipeline_snapshot_id

    def resolve_stats(self, graphene_info):
        return get_stats(graphene_info, self.run_id)

    def resolve_stepStats(self, graphene_info):
        return get_step_stats(graphene_info, self.run_id)

    def resolve_computeLogs(self, graphene_info, stepKey):
        return graphene_info.schema.type_named('ComputeLogs')(
            runId=self.run_id, stepKey=stepKey)

    def resolve_executionPlan(self, graphene_info):
        """Rehydrate the execution plan from stored snapshots, or None.

        Returns None when either snapshot id is missing on the run, or when
        either snapshot cannot be loaded from the instance.
        """
        if not (self._pipeline_run.execution_plan_snapshot_id
                and self._pipeline_run.pipeline_snapshot_id):
            return None
        # Local import; presumably avoids a circular import at module load —
        # TODO confirm.
        from .execution import DauphinExecutionPlan

        instance = graphene_info.context.instance
        pipeline_snapshot = instance.get_pipeline_snapshot(
            self._pipeline_run.pipeline_snapshot_id)
        execution_plan_snapshot = instance.get_execution_plan_snapshot(
            self._pipeline_run.execution_plan_snapshot_id)
        return (DauphinExecutionPlan(
            ExecutionPlanIndex(
                execution_plan_snapshot=execution_plan_snapshot,
                pipeline_index=PipelineIndex(pipeline_snapshot),
            )) if execution_plan_snapshot and pipeline_snapshot else None)

    def resolve_stepKeysToExecute(self, _):
        return self._pipeline_run.step_keys_to_execute

    def resolve_environmentConfigYaml(self, _graphene_info):
        # Serialize the run's environment dict as block-style YAML.
        return yaml.dump(self._pipeline_run.environment_dict,
                         default_flow_style=False)

    def resolve_tags(self, graphene_info):
        return [
            graphene_info.schema.type_named('PipelineTag')(key=key,
                                                           value=value)
            for key, value in self._pipeline_run.tags.items()
        ]

    def resolve_rootRunId(self, _):
        return self._pipeline_run.root_run_id

    def resolve_parentRunId(self, _):
        return self._pipeline_run.parent_run_id

    @property
    def run_id(self):
        # Convenience alias used by the resolvers above.
        return self.runId

    def resolve_canCancel(self, graphene_info):
        return graphene_info.context.execution_manager.can_terminate(
            self.run_id)

    def resolve_executionSelection(self, graphene_info):
        return graphene_info.schema.type_named('ExecutionSelection')(
            self._pipeline_run.selector)
class DauphinIPipelineSnapshotMixin(object):
    # Mixin this class to implement IPipelineSnapshot
    #
    # Graphene has some strange properties that make it so that you cannot
    # implement ABCs nor use properties in an overridable way. So the way
    # the mixin works is that the target classes have to have a method
    # get_pipeline_index()
    #
    def get_pipeline_index(self):
        raise NotImplementedError()

    name = dauphin.NonNull(dauphin.String)
    description = dauphin.String()
    pipeline_snapshot_id = dauphin.NonNull(dauphin.String)
    runtime_types = dauphin.non_null_list('RuntimeType')
    runtime_type_or_error = dauphin.Field(
        dauphin.NonNull('RuntimeTypeOrError'),
        runtimeTypeName=dauphin.Argument(dauphin.NonNull(dauphin.String)),
    )
    solids = dauphin.non_null_list('Solid')
    modes = dauphin.non_null_list('Mode')
    solid_handles = dauphin.Field(
        dauphin.non_null_list('SolidHandle'), parentHandleID=dauphin.String()
    )
    solid_handle = dauphin.Field(
        'SolidHandle',
        handleID=dauphin.Argument(dauphin.NonNull(dauphin.String)),
    )
    tags = dauphin.non_null_list('PipelineTag')

    def resolve_pipeline_snapshot_id(self, _):
        return self.get_pipeline_index().pipeline_snapshot_id

    def resolve_name(self, _):
        return self.get_pipeline_index().name

    def resolve_description(self, _):
        return self.get_pipeline_index().description

    def resolve_runtime_types(self, _graphene_info):
        # TODO yuhan rename runtime_type in schema
        # Only named dagster-type snapshots are exposed, sorted by name.
        index = self.get_pipeline_index()
        named_snaps = [snap for snap in index.get_dagster_type_snaps() if snap.name]
        dauphin_types = [
            to_dauphin_dagster_type(index.pipeline_snapshot, snap.key)
            for snap in named_snaps
        ]
        dauphin_types.sort(key=lambda dauphin_type: dauphin_type.name)
        return dauphin_types

    @capture_dauphin_error
    def resolve_runtime_type_or_error(self, _, **kwargs):
        """Look up a single dagster type by name, or surface a not-found error."""
        requested_name = kwargs['runtimeTypeName']
        index = self.get_pipeline_index()

        if not index.has_dagster_type_name(requested_name):
            from .errors import DauphinRuntimeTypeNotFoundError

            raise UserFacingGraphQLError(
                DauphinRuntimeTypeNotFoundError(runtime_type_name=requested_name)
            )

        type_key = index.get_dagster_type_from_name(requested_name).key
        return to_dauphin_dagster_type(index.pipeline_snapshot, type_key)

    def resolve_solids(self, _graphene_info):
        index = self.get_pipeline_index()
        return build_dauphin_solids(index, index.dep_structure_index)

    def resolve_modes(self, _):
        # Modes are returned sorted by mode name.
        snapshot = self.get_pipeline_index().pipeline_snapshot
        ordered = sorted(snapshot.mode_def_snaps, key=lambda snap: snap.name)
        return [
            DauphinMode(snapshot.config_schema_snapshot, snap) for snap in ordered
        ]

    def resolve_solid_handle(self, _graphene_info, handleID):
        return _get_solid_handles(self.get_pipeline_index()).get(handleID)

    def resolve_solid_handles(self, _graphene_info, **kwargs):
        """Return solid handles, optionally filtered by parent handle.

        parentHandleID == "" selects only top-level (parentless) handles;
        a non-empty value selects direct children of that handle; absent
        means no filtering. Results are sorted by handle key.
        """
        handles = _get_solid_handles(self.get_pipeline_index())
        parent_id = kwargs.get('parentHandleID')

        if parent_id == "":
            handles = {
                key: handle for key, handle in handles.items() if not handle.parent
            }
        elif parent_id is not None:
            handles = {
                key: handle
                for key, handle in handles.items()
                if handle.parent
                and handle.parent.handleID.to_string() == parent_id
            }

        return [handles[key] for key in sorted(handles)]

    def resolve_tags(self, graphene_info):
        tag_type = graphene_info.schema.type_named('PipelineTag')
        snapshot_tags = self.get_pipeline_index().pipeline_snapshot.tags
        return [tag_type(key=key, value=value) for key, value in snapshot_tags.items()]
class DauphinQuery(dauphin.ObjectType):
    """Root GraphQL 'Query' type: declares every top-level query field and
    delegates each resolver to the corresponding helper in this package."""

    class Meta:
        name = 'Query'

    version = dauphin.NonNull(dauphin.String)
    reloadSupported = dauphin.NonNull(dauphin.Boolean)
    pipelineOrError = dauphin.Field(
        dauphin.NonNull('PipelineOrError'),
        params=dauphin.NonNull('ExecutionSelector'))
    pipeline = dauphin.Field(dauphin.NonNull('Pipeline'),
                             params=dauphin.NonNull('ExecutionSelector'))
    pipelinesOrError = dauphin.NonNull('PipelinesOrError')
    pipelines = dauphin.Field(dauphin.NonNull('PipelineConnection'))
    configTypeOrError = dauphin.Field(
        dauphin.NonNull('ConfigTypeOrError'),
        pipelineName=dauphin.Argument(dauphin.NonNull(dauphin.String)),
        configTypeName=dauphin.Argument(dauphin.NonNull(dauphin.String)),
        mode=dauphin.Argument(dauphin.NonNull(dauphin.String)),
    )
    runtimeTypeOrError = dauphin.Field(
        dauphin.NonNull('RuntimeTypeOrError'),
        pipelineName=dauphin.Argument(dauphin.NonNull(dauphin.String)),
        runtimeTypeName=dauphin.Argument(dauphin.NonNull(dauphin.String)),
    )
    scheduler = dauphin.Field(dauphin.NonNull('SchedulerOrError'))
    pipelineRunsOrError = dauphin.Field(
        dauphin.NonNull('PipelineRunsOrError'),
        filter=dauphin.Argument(dauphin.NonNull('PipelineRunsFilter')),
        cursor=dauphin.String(),
        limit=dauphin.Int(),
    )
    pipelineRunOrError = dauphin.Field(dauphin.NonNull('PipelineRunOrError'),
                                       runId=dauphin.NonNull(dauphin.ID))
    pipelineRunTags = dauphin.non_null_list('PipelineTagAndValues')
    isPipelineConfigValid = dauphin.Field(
        dauphin.NonNull('PipelineConfigValidationResult'),
        args={
            'pipeline': dauphin.Argument(dauphin.NonNull('ExecutionSelector')),
            'environmentConfigData': dauphin.Argument('EnvironmentConfigData'),
            'mode': dauphin.Argument(dauphin.NonNull(dauphin.String)),
        },
    )
    executionPlan = dauphin.Field(
        dauphin.NonNull('ExecutionPlanResult'),
        args={
            'pipeline': dauphin.Argument(dauphin.NonNull('ExecutionSelector')),
            'environmentConfigData': dauphin.Argument('EnvironmentConfigData'),
            'mode': dauphin.Argument(dauphin.NonNull(dauphin.String)),
        },
    )
    environmentSchemaOrError = dauphin.Field(
        dauphin.NonNull('EnvironmentSchemaOrError'),
        args={
            'selector': dauphin.Argument(dauphin.NonNull('ExecutionSelector')),
            'mode': dauphin.Argument(dauphin.String),
        },
        description=
        '''Fetch an environment schema given an execution selection and a mode. See the descripton on EnvironmentSchema for more information.''',
    )
    instance = dauphin.NonNull('Instance')

    def resolve_configTypeOrError(self, graphene_info, **kwargs):
        return get_config_type(graphene_info, kwargs['pipelineName'],
                               kwargs['configTypeName'], kwargs.get('mode'))

    def resolve_runtimeTypeOrError(self, graphene_info, **kwargs):
        return get_runtime_type(graphene_info, kwargs['pipelineName'],
                                kwargs['runtimeTypeName'])

    def resolve_version(self, graphene_info):
        return graphene_info.context.version

    def resolve_reloadSupported(self, graphene_info):
        return graphene_info.context.reloader.is_reload_supported

    def resolve_scheduler(self, graphene_info):
        return get_scheduler_or_error(graphene_info)

    def resolve_pipelineOrError(self, graphene_info, **kwargs):
        return get_pipeline_or_error(graphene_info,
                                     kwargs['params'].to_selector())

    def resolve_pipeline(self, graphene_info, **kwargs):
        return get_pipeline_or_raise(graphene_info,
                                     kwargs['params'].to_selector())

    def resolve_pipelinesOrError(self, graphene_info):
        return get_pipelines_or_error(graphene_info)

    def resolve_pipelines(self, graphene_info):
        return get_pipelines_or_raise(graphene_info)

    def resolve_pipelineRunsOrError(self, graphene_info, **kwargs):
        """Resolve runs matching the (required) filter argument.

        At most one of run_id / pipeline / tag_key / status may be set on
        the filter; multiple options produce an
        InvalidPipelineRunsFilterError instead of results.
        """
        filters = kwargs['filter'].to_selector()
        provided = [
            i for i in
            [filters.run_id, filters.pipeline, filters.tag_key, filters.status]
            if i
        ]
        if len(provided) > 1:
            return graphene_info.schema.type_named(
                'InvalidPipelineRunsFilterError')(
                    message="You may only provide one of the filter options.")
        return graphene_info.schema.type_named('PipelineRuns')(
            results=get_runs(graphene_info, filters, kwargs.get('cursor'),
                             kwargs.get('limit')))

    def resolve_pipelineRunOrError(self, graphene_info, runId):
        return get_run(graphene_info, runId)

    def resolve_pipelineRunTags(self, graphene_info):
        return get_run_tags(graphene_info)

    def resolve_isPipelineConfigValid(self, graphene_info, pipeline, **kwargs):
        return validate_pipeline_config(
            graphene_info,
            pipeline.to_selector(),
            kwargs.get('environmentConfigData'),
            kwargs.get('mode'),
        )

    def resolve_executionPlan(self, graphene_info, pipeline, **kwargs):
        return get_execution_plan(
            graphene_info,
            pipeline.to_selector(),
            kwargs.get('environmentConfigData'),
            kwargs.get('mode'),
        )

    def resolve_environmentSchemaOrError(self, graphene_info, **kwargs):
        return resolve_environment_schema_or_error(
            graphene_info, kwargs['selector'].to_selector(),
            kwargs.get('mode'))

    def resolve_instance(self, graphene_info):
        return graphene_info.schema.type_named('Instance')(
            graphene_info.context.instance)
class DauphinPipelineRun(dauphin.ObjectType):
    """GraphQL 'PipelineRun' type: wraps a PipelineRun storage record.

    runId/status/mode are populated eagerly in __init__; the remaining
    fields resolve lazily from the wrapped PipelineRun or via this module's
    helper functions.
    """

    class Meta(object):
        name = "PipelineRun"

    runId = dauphin.NonNull(dauphin.String)
    # Nullable because of historical runs
    pipelineSnapshotId = dauphin.String()
    status = dauphin.NonNull("PipelineRunStatus")
    pipeline = dauphin.NonNull("PipelineReference")
    pipelineName = dauphin.NonNull(dauphin.String)
    solidSelection = dauphin.List(dauphin.NonNull(dauphin.String))
    stats = dauphin.NonNull("PipelineRunStatsOrError")
    stepStats = dauphin.non_null_list("PipelineRunStepStats")
    computeLogs = dauphin.Field(
        dauphin.NonNull("ComputeLogs"),
        stepKey=dauphin.Argument(dauphin.NonNull(dauphin.String)),
        description=""" Compute logs are the stdout/stderr logs for a given solid step computation """,
    )
    executionPlan = dauphin.Field("ExecutionPlan")
    stepKeysToExecute = dauphin.List(dauphin.NonNull(dauphin.String))
    runConfigYaml = dauphin.NonNull(dauphin.String)
    mode = dauphin.NonNull(dauphin.String)
    tags = dauphin.non_null_list("PipelineTag")
    rootRunId = dauphin.Field(dauphin.String)
    parentRunId = dauphin.Field(dauphin.String)
    canTerminate = dauphin.NonNull(dauphin.Boolean)
    assets = dauphin.non_null_list("Asset")

    def __init__(self, pipeline_run):
        # Scalar fields are seeded here; the rest resolve on demand.
        super(DauphinPipelineRun, self).__init__(runId=pipeline_run.run_id,
                                                 status=pipeline_run.status,
                                                 mode=pipeline_run.mode)
        self._pipeline_run = check.inst_param(pipeline_run, "pipeline_run",
                                              PipelineRun)

    def resolve_pipeline(self, graphene_info):
        # Raises (rather than returning an error type) if the referenced
        # pipeline cannot be found.
        return get_pipeline_reference_or_raise(
            graphene_info,
            self._pipeline_run,
        )

    def resolve_pipelineName(self, _graphene_info):
        return self._pipeline_run.pipeline_name

    def resolve_solidSelection(self, _graphene_info):
        return self._pipeline_run.solid_selection

    def resolve_pipelineSnapshotId(self, _):
        return self._pipeline_run.pipeline_snapshot_id

    def resolve_stats(self, graphene_info):
        return get_stats(graphene_info, self.run_id)

    def resolve_stepStats(self, graphene_info):
        return get_step_stats(graphene_info, self.run_id)

    def resolve_computeLogs(self, graphene_info, stepKey):
        return graphene_info.schema.type_named("ComputeLogs")(
            runId=self.run_id, stepKey=stepKey)

    def resolve_executionPlan(self, graphene_info):
        """Rehydrate the execution plan from stored snapshots, or None.

        Returns None when either snapshot id is missing on the run, or when
        either snapshot cannot be loaded from the instance.
        """
        if not (self._pipeline_run.execution_plan_snapshot_id
                and self._pipeline_run.pipeline_snapshot_id):
            return None
        # Local import; presumably avoids a circular import at module load —
        # TODO confirm.
        from .execution import DauphinExecutionPlan

        instance = graphene_info.context.instance
        historical_pipeline = instance.get_historical_pipeline(
            self._pipeline_run.pipeline_snapshot_id)
        execution_plan_snapshot = instance.get_execution_plan_snapshot(
            self._pipeline_run.execution_plan_snapshot_id)
        return (DauphinExecutionPlan(
            ExternalExecutionPlan(
                execution_plan_snapshot=execution_plan_snapshot,
                represented_pipeline=historical_pipeline,
            )) if execution_plan_snapshot and historical_pipeline else None)

    def resolve_stepKeysToExecute(self, _):
        return self._pipeline_run.step_keys_to_execute

    def resolve_runConfigYaml(self, _graphene_info):
        # Serialize the run's config as block-style YAML.
        return yaml.dump(self._pipeline_run.run_config,
                         default_flow_style=False)

    def resolve_tags(self, graphene_info):
        # Tags whose type is HIDDEN are excluded from the API response.
        return [
            graphene_info.schema.type_named("PipelineTag")(key=key,
                                                           value=value)
            for key, value in self._pipeline_run.tags.items()
            if get_tag_type(key) != TagType.HIDDEN
        ]

    def resolve_rootRunId(self, _):
        return self._pipeline_run.root_run_id

    def resolve_parentRunId(self, _):
        return self._pipeline_run.parent_run_id

    @property
    def run_id(self):
        # Convenience alias used by the resolvers above.
        return self.runId

    def resolve_canTerminate(self, graphene_info):
        # short circuit if the pipeline run is in a terminal state
        if self._pipeline_run.is_finished:
            return False
        return graphene_info.context.instance.run_launcher.can_terminate(
            self.run_id)

    def resolve_assets(self, graphene_info):
        return get_assets_for_run_id(graphene_info, self.run_id)
class DauphinEnumConfigValue(dauphin.ObjectType):
    """GraphQL 'EnumConfigValue' type: one allowed value of an enum config
    type, with an optional human-readable description."""

    class Meta:
        name = 'EnumConfigValue'

    # The enum value itself (required).
    value = dauphin.NonNull(dauphin.String)
    # Optional description of the value.
    description = dauphin.String()
class DauphinExecutionMetadata(dauphin.InputObjectType):
    """GraphQL 'ExecutionMetadata' input type: optional run id and tags
    supplied by clients when launching an execution."""

    class Meta:
        name = 'ExecutionMetadata'

    # Optional client-specified run id.
    runId = dauphin.String()
    # Optional list of key/value execution tags.
    tags = dauphin.List(dauphin.NonNull(DauphinExecutionTag))
class DauphinPipeline(dauphin.ObjectType):
    """GraphQL 'Pipeline' type: wraps a PipelineDefinition and resolves its
    solids, config/runtime types, runs, modes, handles, and presets."""

    class Meta:
        name = 'Pipeline'
        interfaces = [DauphinSolidContainer]

    name = dauphin.NonNull(dauphin.String)
    description = dauphin.String()
    solids = dauphin.non_null_list('Solid')
    environment_type = dauphin.Field(
        dauphin.NonNull('ConfigType'), mode=dauphin.String(required=False)
    )
    config_types = dauphin.Field(
        dauphin.non_null_list('ConfigType'), mode=dauphin.String(required=False)
    )
    runtime_types = dauphin.non_null_list('RuntimeType')
    runs = dauphin.non_null_list('PipelineRun')
    modes = dauphin.non_null_list('Mode')
    solid_handles = dauphin.non_null_list('SolidHandle')
    presets = dauphin.non_null_list('PipelinePreset')

    def __init__(self, pipeline):
        super(DauphinPipeline, self).__init__(name=pipeline.name,
                                              description=pipeline.description)
        self._pipeline = check.inst_param(pipeline, 'pipeline', PipelineDefinition)

    def resolve_solids(self, _graphene_info):
        return build_dauphin_solids(self._pipeline)

    def resolve_environment_type(self, _graphene_info, mode=None):
        env_type = create_environment_type(self._pipeline, mode)
        return to_dauphin_config_type(env_type)

    def resolve_config_types(self, _graphene_info, mode=None):
        # All config types for the (optional) mode, sorted by type key.
        schema = create_environment_schema(self._pipeline, mode)
        dauphin_types = [
            to_dauphin_config_type(config_type)
            for config_type in schema.all_config_types()
        ]
        dauphin_types.sort(key=lambda dauphin_type: dauphin_type.key)
        return dauphin_types

    def resolve_runtime_types(self, _graphene_info):
        # Only named runtime types are exposed, sorted by name.
        named_types = [t for t in self._pipeline.all_runtime_types() if t.name]
        dauphin_types = [to_dauphin_runtime_type(t) for t in named_types]
        dauphin_types.sort(key=lambda dauphin_type: dauphin_type.name)
        return dauphin_types

    def resolve_runs(self, graphene_info):
        run_type = graphene_info.schema.type_named('PipelineRun')
        stored_runs = graphene_info.context.pipeline_runs.all_runs_for_pipeline(
            self._pipeline.name
        )
        return [run_type(run) for run in stored_runs]

    def get_dagster_pipeline(self):
        return self._pipeline

    def get_type(self, _graphene_info, typeName):
        """Look up typeName among config types first, then runtime types."""
        if self._pipeline.has_config_type(typeName):
            return to_dauphin_config_type(self._pipeline.config_type_named(typeName))
        if self._pipeline.has_runtime_type(typeName):
            return to_dauphin_runtime_type(self._pipeline.runtime_type_named(typeName))
        check.failed('Not a config type or runtime type')

    def resolve_modes(self, graphene_info):
        mode_type = graphene_info.schema.type_named('Mode')
        ordered = sorted(self._pipeline.mode_definitions, key=lambda mode_def: mode_def.name)
        return [mode_type(mode_def) for mode_def in ordered]

    def resolve_solid_handles(self, _graphene_info):
        handles = build_dauphin_solid_handles(self._pipeline)
        return sorted(handles, key=lambda handle: str(handle.handleID))

    def resolve_presets(self, _graphene_info):
        ordered = sorted(self._pipeline.get_presets(), key=lambda preset: preset.name)
        return [DauphinPipelinePreset(preset, self._pipeline.name) for preset in ordered]
class DauphinQuery(dauphin.ObjectType):
    """Root GraphQL 'Query' type: declares every top-level query field and
    delegates each resolver to the corresponding helper in this package."""

    class Meta(object):
        name = 'Query'

    version = dauphin.NonNull(dauphin.String)
    reloadSupported = dauphin.NonNull(dauphin.Boolean)
    pipelineOrError = dauphin.Field(
        dauphin.NonNull('PipelineOrError'),
        params=dauphin.NonNull('ExecutionSelector'))
    pipeline = dauphin.Field(dauphin.NonNull('Pipeline'),
                             params=dauphin.NonNull('ExecutionSelector'))
    pipelinesOrError = dauphin.NonNull('PipelinesOrError')
    pipelines = dauphin.Field(dauphin.NonNull('PipelineConnection'))
    pipelineSnapshot = dauphin.Field(
        dauphin.NonNull('PipelineSnapshot'),
        snapshotId=dauphin.Argument(dauphin.NonNull(dauphin.String)),
    )
    runtimeTypeOrError = dauphin.Field(
        dauphin.NonNull('RuntimeTypeOrError'),
        pipelineName=dauphin.Argument(dauphin.NonNull(dauphin.String)),
        runtimeTypeName=dauphin.Argument(dauphin.NonNull(dauphin.String)),
    )
    scheduler = dauphin.Field(dauphin.NonNull('SchedulerOrError'))
    scheduleOrError = dauphin.Field(
        dauphin.NonNull('ScheduleOrError'),
        schedule_name=dauphin.NonNull(dauphin.String),
        limit=dauphin.Int(),
    )
    partitionSetsOrError = dauphin.Field(
        dauphin.NonNull('PartitionSetsOrError'), pipelineName=dauphin.String())
    # NOTE(review): partitionSetName is declared optional here but the
    # resolver takes it as a required positional argument — an omitted
    # argument would raise a TypeError. Confirm whether it should be NonNull.
    partitionSetOrError = dauphin.Field(dauphin.NonNull('PartitionSetOrError'),
                                        partitionSetName=dauphin.String())
    pipelineRunsOrError = dauphin.Field(
        dauphin.NonNull('PipelineRunsOrError'),
        filter=dauphin.Argument('PipelineRunsFilter'),
        cursor=dauphin.String(),
        limit=dauphin.Int(),
    )
    pipelineRunOrError = dauphin.Field(dauphin.NonNull('PipelineRunOrError'),
                                       runId=dauphin.NonNull(dauphin.ID))
    pipelineRunTags = dauphin.non_null_list('PipelineTagAndValues')
    usedSolids = dauphin.Field(dauphin.non_null_list('UsedSolid'))
    usedSolid = dauphin.Field('UsedSolid',
                              name=dauphin.NonNull(dauphin.String))
    isPipelineConfigValid = dauphin.Field(
        dauphin.NonNull('PipelineConfigValidationResult'),
        args={
            'pipeline': dauphin.Argument(dauphin.NonNull('ExecutionSelector')),
            'environmentConfigData': dauphin.Argument('EnvironmentConfigData'),
            'mode': dauphin.Argument(dauphin.NonNull(dauphin.String)),
        },
    )
    executionPlan = dauphin.Field(
        dauphin.NonNull('ExecutionPlanResult'),
        args={
            'pipeline': dauphin.Argument(dauphin.NonNull('ExecutionSelector')),
            'environmentConfigData': dauphin.Argument('EnvironmentConfigData'),
            'mode': dauphin.Argument(dauphin.NonNull(dauphin.String)),
        },
    )
    environmentSchemaOrError = dauphin.Field(
        dauphin.NonNull('EnvironmentSchemaOrError'),
        args={
            'selector': dauphin.Argument(dauphin.NonNull('ExecutionSelector')),
            'mode': dauphin.Argument(dauphin.String),
        },
        description=
        '''Fetch an environment schema given an execution selection and a mode. See the descripton on EnvironmentSchema for more information.''',
    )
    instance = dauphin.NonNull('Instance')

    def resolve_pipelineSnapshot(self, graphene_info, **kwargs):
        return get_pipeline_snapshot_or_error(graphene_info,
                                              kwargs['snapshotId'])

    def resolve_runtimeTypeOrError(self, graphene_info, **kwargs):
        return get_dagster_type(graphene_info, kwargs['pipelineName'],
                                kwargs['runtimeTypeName'])

    def resolve_version(self, graphene_info):
        return graphene_info.context.version

    def resolve_reloadSupported(self, graphene_info):
        # Snapshot-backed contexts have no reloader and never support reload.
        if isinstance(graphene_info.context, DagsterSnapshotGraphQLContext):
            return False
        return graphene_info.context.reloader.is_reload_supported

    def resolve_scheduler(self, graphene_info):
        return get_scheduler_or_error(graphene_info)

    def resolve_scheduleOrError(self, graphene_info, schedule_name):
        return get_schedule_or_error(graphene_info, schedule_name)

    def resolve_pipelineOrError(self, graphene_info, **kwargs):
        return get_pipeline_or_error(graphene_info,
                                     kwargs['params'].to_selector())

    def resolve_pipeline(self, graphene_info, **kwargs):
        return get_pipeline_or_raise(graphene_info,
                                     kwargs['params'].to_selector())

    def resolve_pipelinesOrError(self, graphene_info):
        return get_pipelines_or_error(graphene_info)

    def resolve_pipelines(self, graphene_info):
        return get_pipelines_or_raise(graphene_info)

    def resolve_pipelineRunsOrError(self, graphene_info, **kwargs):
        # The filter argument is optional here; None means "all runs".
        filters = kwargs.get('filter')
        if filters is not None:
            filters = filters.to_selector()
        return graphene_info.schema.type_named('PipelineRuns')(
            results=get_runs(graphene_info, filters, kwargs.get('cursor'),
                             kwargs.get('limit')))

    def resolve_pipelineRunOrError(self, graphene_info, runId):
        return get_run(graphene_info, runId)

    def resolve_partitionSetsOrError(self, graphene_info, **kwargs):
        pipeline_name = kwargs.get('pipelineName')
        return get_partition_sets_or_error(graphene_info, pipeline_name)

    def resolve_partitionSetOrError(self, graphene_info, partitionSetName):
        return get_partition_set(graphene_info, partitionSetName)

    def resolve_pipelineRunTags(self, graphene_info):
        return get_run_tags(graphene_info)

    def resolve_usedSolid(self, graphene_info, name):
        return get_solid(graphene_info, name)

    def resolve_usedSolids(self, graphene_info):
        return get_solids(graphene_info)

    def resolve_isPipelineConfigValid(self, graphene_info, pipeline, **kwargs):
        return validate_pipeline_config(
            graphene_info,
            pipeline.to_selector(),
            kwargs.get('environmentConfigData'),
            kwargs.get('mode'),
        )

    def resolve_executionPlan(self, graphene_info, pipeline, **kwargs):
        return get_execution_plan(
            graphene_info,
            pipeline.to_selector(),
            kwargs.get('environmentConfigData'),
            kwargs.get('mode'),
        )

    def resolve_environmentSchemaOrError(self, graphene_info, **kwargs):
        return resolve_environment_schema_or_error(
            graphene_info, kwargs['selector'].to_selector(),
            kwargs.get('mode'))

    def resolve_instance(self, graphene_info):
        return graphene_info.schema.type_named('Instance')(
            graphene_info.context.instance)
class DauphinPartitionSet(dauphin.ObjectType):
    """GraphQL 'PartitionSet' type: wraps a PartitionSetDefinition and pages
    through its partitions."""

    class Meta(object):
        name = 'PartitionSet'

    name = dauphin.NonNull(dauphin.String)
    pipeline_name = dauphin.NonNull(dauphin.String)
    solid_subset = dauphin.List(dauphin.NonNull(dauphin.String))
    mode = dauphin.NonNull(dauphin.String)
    partitions = dauphin.Field(
        dauphin.NonNull('Partitions'),
        cursor=dauphin.String(),
        limit=dauphin.Int(),
        reverse=dauphin.Boolean(),
    )

    def __init__(self, partition_set):
        self._partition_set = check.inst_param(partition_set, 'partition_set',
                                               PartitionSetDefinition)
        super(DauphinPartitionSet, self).__init__(
            name=partition_set.name,
            pipeline_name=partition_set.pipeline_name,
            solid_subset=partition_set.solid_subset,
            mode=partition_set.mode,
        )

    def resolve_partitions(self, graphene_info, **kwargs):
        """Return a page of this set's partitions.

        cursor: name of the partition to page from (exclusive).
        limit: maximum number of partitions to return.
        reverse: page backwards (toward earlier partitions) from the cursor.
        An unrecognized cursor yields an empty page.
        """
        partitions = self._partition_set.get_partitions()

        cursor = kwargs.get('cursor')
        limit = kwargs.get('limit')
        reverse = kwargs.get('reverse')

        start = 0
        end = len(partitions)

        if cursor:
            index = next(
                (idx for (idx, partition) in enumerate(partitions)
                 if partition.name == cursor),
                None,
            )
            # BUGFIX: an unknown cursor previously left index as None, which
            # raised TypeError (None + 1) when paging forward and silently
            # returned every partition when paging in reverse. Return an
            # empty page instead.
            if index is None:
                return graphene_info.schema.type_named('Partitions')(results=[])
            if reverse:
                end = index
            else:
                start = index + 1

        if limit:
            if reverse:
                # BUGFIX: clamp at 0 so a limit larger than the remaining
                # window cannot make start negative, which would wrap around
                # and slice from the tail of the list.
                start = max(end - limit, 0)
            else:
                end = start + limit

        partitions = partitions[start:end]

        return graphene_info.schema.type_named('Partitions')(results=[
            graphene_info.schema.type_named('Partition')(
                partition=partition, partition_set=self._partition_set)
            for partition in partitions
        ])