class DauphinSubscription(dauphin.ObjectType):
    """GraphQL subscription root exposing a stream of pipeline-run log events."""

    class Meta:
        name = 'Subscription'

    # Subscription for the log events of a single run, optionally resuming
    # from a cursor.
    pipelineRunLogs = dauphin.Field(
        dauphin.NonNull('PipelineRunLogsSubscriptionPayload'),
        runId=dauphin.Argument(dauphin.NonNull(dauphin.ID)),
        after=dauphin.Argument('Cursor'),
    )

    def resolve_pipelineRunLogs(self, graphene_info, runId, after=None):
        # Delegate observable construction to the shared fetcher helper.
        observable = get_pipeline_run_observable(graphene_info, runId, after)
        return observable
class DauphinDaemonHealth(dauphin.ObjectType):
    """Health report for the dagster daemons known to an instance.

    Statuses are snapshotted once at construction time from the daemon
    controller, then served by the resolvers below.
    """

    class Meta:
        name = "DaemonHealth"

    daemonStatus = dauphin.Field(
        dauphin.NonNull("DaemonStatus"), daemon_type=dauphin.Argument("DaemonType")
    )
    allDaemonStatuses = dauphin.non_null_list("DaemonStatus")

    def __init__(self, instance):
        # Imported lazily to avoid a module-level dependency cycle with the
        # daemon controller.
        from dagster.daemon.controller import get_daemon_status

        status_keys = (
            (DauphinDaemonType.SCHEDULER.value, DaemonType.SCHEDULER),  # pylint: disable=no-member
            (DauphinDaemonType.SENSOR.value, DaemonType.SENSOR),  # pylint: disable=no-member
            (
                DauphinDaemonType.QUEUED_RUN_COORDINATOR.value,  # pylint: disable=no-member
                DaemonType.QUEUED_RUN_COORDINATOR,
            ),
        )
        # Keyed by the GraphQL enum value so resolve_daemonStatus can look up
        # directly by the incoming argument.
        self._daemon_statuses = {
            key: get_daemon_status(instance, daemon_type) for key, daemon_type in status_keys
        }

    def resolve_daemonStatus(self, _graphene_info, daemon_type):
        check.str_param(daemon_type, "daemon_type")  # DauphinDaemonType
        status = self._daemon_statuses[daemon_type]
        return _graphene_info.schema.type_named("DaemonStatus")(status)

    def resolve_allDaemonStatuses(self, _graphene_info):
        make_status = _graphene_info.schema.type_named("DaemonStatus")
        return [make_status(daemon_status) for daemon_status in self._daemon_statuses.values()]
class DauphinPipelineRun(dauphin.ObjectType):
    """GraphQL representation of a single pipeline run and its artifacts."""

    class Meta:
        name = 'PipelineRun'

    runId = dauphin.NonNull(dauphin.String)
    status = dauphin.NonNull('PipelineRunStatus')
    pipeline = dauphin.NonNull('Pipeline')
    logs = dauphin.NonNull('LogMessageConnection')
    computeLogs = dauphin.Field(
        dauphin.NonNull('ComputeLogs'),
        stepKey=dauphin.Argument(dauphin.NonNull(dauphin.String)),
        description='''
  Compute logs are the stdout/stderr logs for a given solid step computation
  ''',
    )
    executionPlan = dauphin.NonNull('ExecutionPlan')
    stepKeysToExecute = dauphin.List(dauphin.String)
    environmentConfigYaml = dauphin.NonNull(dauphin.String)
    mode = dauphin.NonNull(dauphin.String)

    def __init__(self, pipeline_run):
        # Scalar fields are populated eagerly; everything else resolves lazily
        # from the retained PipelineRun.
        super(DauphinPipelineRun, self).__init__(
            runId=pipeline_run.run_id, status=pipeline_run.status, mode=pipeline_run.mode
        )
        self._pipeline_run = check.inst_param(pipeline_run, 'pipeline_run', PipelineRun)

    def resolve_pipeline(self, graphene_info):
        return get_pipeline_or_raise(graphene_info, self._pipeline_run.selector)

    def resolve_logs(self, graphene_info):
        connection_type = graphene_info.schema.type_named('LogMessageConnection')
        return connection_type(self._pipeline_run)

    def resolve_computeLogs(self, graphene_info, stepKey):
        # Fetch the latest stdout/stderr snapshot for this step from the instance.
        update = fetch_compute_logs(graphene_info.context.instance, self.run_id, stepKey)
        return graphene_info.schema.type_named('ComputeLogs')(
            runId=self.run_id,
            stepKey=stepKey,
            stdout=update.stdout,
            stderr=update.stderr,
            cursor=update.cursor,
        )

    def resolve_executionPlan(self, graphene_info):
        dauphin_pipeline = self.resolve_pipeline(graphene_info)
        run = self._pipeline_run
        # Rebuild the plan from the run's stored config and mode.
        execution_plan = create_execution_plan(
            dauphin_pipeline.get_dagster_pipeline(),
            run.environment_dict,
            RunConfig(mode=run.mode),
        )
        return graphene_info.schema.type_named('ExecutionPlan')(dauphin_pipeline, execution_plan)

    def resolve_stepKeysToExecute(self, _):
        return self._pipeline_run.step_keys_to_execute

    def resolve_environmentConfigYaml(self, _graphene_info):
        return yaml.dump(self._pipeline_run.environment_dict, default_flow_style=False)

    @property
    def run_id(self):
        # Convenience alias over the GraphQL-cased field.
        return self.runId
class DauphinIPipelineSnapshot(dauphin.Interface):
    """Interface shared by live pipelines and historical pipeline snapshots."""

    class Meta:
        name = "IPipelineSnapshot"

    name = dauphin.NonNull(dauphin.String)
    description = dauphin.String()
    pipeline_snapshot_id = dauphin.NonNull(dauphin.String)
    dagster_types = dauphin.non_null_list("DagsterType")
    dagster_type_or_error = dauphin.Field(
        dauphin.NonNull("DagsterTypeOrError"),
        dagsterTypeName=dauphin.Argument(dauphin.NonNull(dauphin.String)),
    )
    solids = dauphin.non_null_list("Solid")
    modes = dauphin.non_null_list("Mode")
    # Optionally scoped to the children of a particular handle.
    solid_handles = dauphin.Field(
        dauphin.non_null_list("SolidHandle"), parentHandleID=dauphin.String()
    )
    solid_handle = dauphin.Field(
        "SolidHandle",
        handleID=dauphin.Argument(dauphin.NonNull(dauphin.String)),
    )
    tags = dauphin.non_null_list("PipelineTag")
class DauphinIPipelineSnapshot(dauphin.Interface):
    """Interface shared by live pipelines and historical pipeline snapshots."""

    class Meta(object):
        name = 'IPipelineSnapshot'

    name = dauphin.NonNull(dauphin.String)
    description = dauphin.String()
    pipeline_snapshot_id = dauphin.NonNull(dauphin.String)
    dagster_types = dauphin.non_null_list('DagsterType')
    dagster_type_or_error = dauphin.Field(
        dauphin.NonNull('DagsterTypeOrError'),
        dagsterTypeName=dauphin.Argument(dauphin.NonNull(dauphin.String)),
    )
    solids = dauphin.non_null_list('Solid')
    modes = dauphin.non_null_list('Mode')
    # Optionally scoped to the children of a particular handle.
    solid_handles = dauphin.Field(
        dauphin.non_null_list('SolidHandle'), parentHandleID=dauphin.String()
    )
    solid_handle = dauphin.Field(
        'SolidHandle',
        handleID=dauphin.Argument(dauphin.NonNull(dauphin.String)),
    )
    tags = dauphin.non_null_list('PipelineTag')
class DauphinSubscription(dauphin.ObjectType):
    """GraphQL subscription root: run log streams and raw compute-log tails."""

    class Meta(object):
        name = 'Subscription'

    pipelineRunLogs = dauphin.Field(
        dauphin.NonNull('PipelineRunLogsSubscriptionPayload'),
        runId=dauphin.Argument(dauphin.NonNull(dauphin.ID)),
        after=dauphin.Argument('Cursor'),
    )
    # Tails the stdout or stderr file of a single step computation.
    computeLogs = dauphin.Field(
        dauphin.NonNull('ComputeLogFile'),
        runId=dauphin.Argument(dauphin.NonNull(dauphin.ID)),
        stepKey=dauphin.Argument(dauphin.NonNull(dauphin.String)),
        ioType=dauphin.Argument(dauphin.NonNull('ComputeIOType')),
        cursor=dauphin.Argument(dauphin.String),
    )

    def resolve_pipelineRunLogs(self, graphene_info, runId, after=None):
        observable = get_pipeline_run_observable(graphene_info, runId, after)
        return observable

    def resolve_computeLogs(self, graphene_info, runId, stepKey, ioType, cursor=None):
        check.str_param(ioType, 'ioType')  # need to resolve to enum
        io_type = ComputeIOType(ioType)
        return get_compute_log_observable(graphene_info, runId, stepKey, io_type, cursor)
class DauphinIPipelineSnapshot(dauphin.Interface):
    """Interface for pipeline snapshots: structure, runs, and metadata."""

    class Meta(object):
        name = 'IPipelineSnapshot'

    name = dauphin.NonNull(dauphin.String)
    description = dauphin.String()
    runtime_types = dauphin.non_null_list('RuntimeType')
    solids = dauphin.non_null_list('Solid')
    runs = dauphin.non_null_list('PipelineRun')
    modes = dauphin.non_null_list('Mode')
    # Optionally scoped to the children of a particular handle.
    solid_handles = dauphin.Field(
        dauphin.non_null_list('SolidHandle'), parentHandleID=dauphin.String()
    )
    solid_handle = dauphin.Field(
        'SolidHandle',
        handleID=dauphin.Argument(dauphin.NonNull(dauphin.String)),
    )
    tags = dauphin.non_null_list('PipelineTag')
class DauphinSubscription(dauphin.ObjectType):
    """GraphQL subscription root: run logs, compute-log tails, and
    repository-location state-change events."""

    class Meta:
        name = "Subscription"

    pipelineRunLogs = dauphin.Field(
        dauphin.NonNull("PipelineRunLogsSubscriptionPayload"),
        runId=dauphin.Argument(dauphin.NonNull(dauphin.ID)),
        after=dauphin.Argument("Cursor"),
    )
    # Tails the stdout or stderr file of a single step computation.
    computeLogs = dauphin.Field(
        dauphin.NonNull("ComputeLogFile"),
        runId=dauphin.Argument(dauphin.NonNull(dauphin.ID)),
        stepKey=dauphin.Argument(dauphin.NonNull(dauphin.String)),
        ioType=dauphin.Argument(dauphin.NonNull("ComputeIOType")),
        cursor=dauphin.Argument(dauphin.String),
    )
    locationStateChangeEvents = dauphin.Field(
        dauphin.NonNull("LocationStateChangeSubscription")
    )

    def resolve_pipelineRunLogs(self, graphene_info, runId, after=None):
        observable = get_pipeline_run_observable(graphene_info, runId, after)
        return observable

    def resolve_computeLogs(self, graphene_info, runId, stepKey, ioType, cursor=None):
        check.str_param(ioType, "ioType")  # need to resolve to enum
        io_type = ComputeIOType(ioType)
        return get_compute_log_observable(graphene_info, runId, stepKey, io_type, cursor)

    def resolve_locationStateChangeEvents(self, graphene_info):
        return get_location_state_change_observable(graphene_info)
class DauphinQuery(dauphin.ObjectType):
    """Root GraphQL query type: pipelines, runs, schedules, partitions,
    solids, config validation, and execution plans.

    Fixes: the user-facing description for ``environmentSchemaOrError``
    misspelled "description" as "descripton"; ``resolve_usedSolids`` now
    builds its result with a list comprehension instead of ``map(lambda ...)``
    (same elements, concrete list instead of a lazy iterator).
    """

    class Meta(object):
        name = 'Query'

    version = dauphin.NonNull(dauphin.String)
    reloadSupported = dauphin.NonNull(dauphin.Boolean)
    pipelineOrError = dauphin.Field(
        dauphin.NonNull('PipelineOrError'), params=dauphin.NonNull('ExecutionSelector')
    )
    pipeline = dauphin.Field(
        dauphin.NonNull('Pipeline'), params=dauphin.NonNull('ExecutionSelector')
    )
    pipelinesOrError = dauphin.NonNull('PipelinesOrError')
    pipelines = dauphin.Field(dauphin.NonNull('PipelineConnection'))
    runtimeTypeOrError = dauphin.Field(
        dauphin.NonNull('RuntimeTypeOrError'),
        pipelineName=dauphin.Argument(dauphin.NonNull(dauphin.String)),
        runtimeTypeName=dauphin.Argument(dauphin.NonNull(dauphin.String)),
    )
    scheduler = dauphin.Field(dauphin.NonNull('SchedulerOrError'))
    scheduleOrError = dauphin.Field(
        dauphin.NonNull('ScheduleOrError'),
        schedule_name=dauphin.NonNull(dauphin.String),
        limit=dauphin.Int(),
    )
    partitionSetsOrError = dauphin.Field(
        dauphin.NonNull('PartitionSetsOrError'), pipelineName=dauphin.String()
    )
    partitionSetOrError = dauphin.Field(
        dauphin.NonNull('PartitionSetOrError'), partitionSetName=dauphin.String()
    )
    pipelineRunsOrError = dauphin.Field(
        dauphin.NonNull('PipelineRunsOrError'),
        filter=dauphin.Argument('PipelineRunsFilter'),
        cursor=dauphin.String(),
        limit=dauphin.Int(),
    )
    pipelineRunOrError = dauphin.Field(
        dauphin.NonNull('PipelineRunOrError'), runId=dauphin.NonNull(dauphin.ID)
    )
    pipelineRunTags = dauphin.non_null_list('PipelineTagAndValues')
    usedSolids = dauphin.Field(dauphin.non_null_list('UsedSolid'))
    usedSolid = dauphin.Field('UsedSolid', name=dauphin.NonNull(dauphin.String))
    isPipelineConfigValid = dauphin.Field(
        dauphin.NonNull('PipelineConfigValidationResult'),
        args={
            'pipeline': dauphin.Argument(dauphin.NonNull('ExecutionSelector')),
            'environmentConfigData': dauphin.Argument('EnvironmentConfigData'),
            'mode': dauphin.Argument(dauphin.NonNull(dauphin.String)),
        },
    )
    executionPlan = dauphin.Field(
        dauphin.NonNull('ExecutionPlanResult'),
        args={
            'pipeline': dauphin.Argument(dauphin.NonNull('ExecutionSelector')),
            'environmentConfigData': dauphin.Argument('EnvironmentConfigData'),
            'mode': dauphin.Argument(dauphin.NonNull(dauphin.String)),
        },
    )
    environmentSchemaOrError = dauphin.Field(
        dauphin.NonNull('EnvironmentSchemaOrError'),
        args={
            'selector': dauphin.Argument(dauphin.NonNull('ExecutionSelector')),
            'mode': dauphin.Argument(dauphin.String),
        },
        # Typo fix: "descripton" -> "description" in this user-facing text.
        description='''Fetch an environment schema given an execution selection and a mode.
        See the description on EnvironmentSchema for more information.''',
    )
    instance = dauphin.NonNull('Instance')

    def resolve_runtimeTypeOrError(self, graphene_info, **kwargs):
        return get_dagster_type(graphene_info, kwargs['pipelineName'], kwargs['runtimeTypeName'])

    def resolve_version(self, graphene_info):
        return graphene_info.context.version

    def resolve_reloadSupported(self, graphene_info):
        return graphene_info.context.reloader.is_reload_supported

    def resolve_scheduler(self, graphene_info):
        return get_scheduler_or_error(graphene_info)

    def resolve_scheduleOrError(self, graphene_info, schedule_name):
        return get_schedule_or_error(graphene_info, schedule_name)

    def resolve_pipelineOrError(self, graphene_info, **kwargs):
        return get_pipeline_or_error(graphene_info, kwargs['params'].to_selector())

    def resolve_pipeline(self, graphene_info, **kwargs):
        return get_pipeline_or_raise(graphene_info, kwargs['params'].to_selector())

    def resolve_pipelinesOrError(self, graphene_info):
        return get_pipelines_or_error(graphene_info)

    def resolve_pipelines(self, graphene_info):
        return get_pipelines_or_raise(graphene_info)

    def resolve_pipelineRunsOrError(self, graphene_info, **kwargs):
        filters = kwargs.get('filter')
        if filters is not None:
            filters = filters.to_selector()
        return graphene_info.schema.type_named('PipelineRuns')(
            results=get_runs(graphene_info, filters, kwargs.get('cursor'), kwargs.get('limit'))
        )

    def resolve_pipelineRunOrError(self, graphene_info, runId):
        return get_run(graphene_info, runId)

    def resolve_partitionSetsOrError(self, graphene_info, **kwargs):
        pipeline_name = kwargs.get('pipelineName')
        return get_partition_sets_or_error(graphene_info, pipeline_name)

    def resolve_partitionSetOrError(self, graphene_info, partitionSetName):
        return get_partition_set(graphene_info, partitionSetName)

    def resolve_pipelineRunTags(self, graphene_info):
        return get_run_tags(graphene_info)

    def resolve_usedSolid(self, graphene_info, name):
        """Find a solid definition by name plus every pipeline site invoking it."""
        repository = graphene_info.context.repository_definition
        invocations = []
        definition = None
        for pipeline in repository.get_all_pipelines():
            for handle in build_dauphin_solid_handles(pipeline.get_pipeline_snapshot(), pipeline):
                if handle.handleID.definition_name == name:
                    # Resolve the definition lazily, once, on first match.
                    if definition is None:
                        definition = handle.solid.resolve_definition(graphene_info)
                    invocations.append(
                        DauphinSolidInvocationSite(pipeline=pipeline, solidHandle=handle)
                    )
        return DauphinUsedSolid(definition=definition, invocations=invocations)

    def resolve_usedSolids(self, graphene_info):
        """Group every solid invocation across all pipelines by definition name."""
        repository = graphene_info.context.repository_definition
        inv_by_def_name = defaultdict(list)
        definitions = []
        for pipeline in repository.get_all_pipelines():
            for handle in build_dauphin_solid_handles(pipeline.get_pipeline_snapshot(), pipeline):
                definition = handle.solid.resolve_definition(graphene_info)
                # First sighting of a definition registers it for the output.
                if definition.name not in inv_by_def_name:
                    definitions.append(definition)
                inv_by_def_name[definition.name].append(
                    DauphinSolidInvocationSite(pipeline=pipeline, solidHandle=handle)
                )
        return [
            DauphinUsedSolid(
                definition=d,
                invocations=sorted(inv_by_def_name[d.name], key=lambda i: i.solidHandle.handleID),
            )
            for d in sorted(definitions, key=lambda d: d.name)
        ]

    def resolve_isPipelineConfigValid(self, graphene_info, pipeline, **kwargs):
        return validate_pipeline_config(
            graphene_info,
            pipeline.to_selector(),
            kwargs.get('environmentConfigData'),
            kwargs.get('mode'),
        )

    def resolve_executionPlan(self, graphene_info, pipeline, **kwargs):
        return get_execution_plan(
            graphene_info,
            pipeline.to_selector(),
            kwargs.get('environmentConfigData'),
            kwargs.get('mode'),
        )

    def resolve_environmentSchemaOrError(self, graphene_info, **kwargs):
        return resolve_environment_schema_or_error(
            graphene_info, kwargs['selector'].to_selector(), kwargs.get('mode')
        )

    def resolve_instance(self, graphene_info):
        return graphene_info.schema.type_named('Instance')(graphene_info.context.instance)
class DauphinQuery(dauphin.ObjectType):
    """Root GraphQL query type (snapshot-aware variant): pipelines, snapshots,
    runs, schedules, partitions, config validation, and execution plans.

    Fixes: the user-facing description for ``environmentSchemaOrError``
    misspelled "description" as "descripton".
    """

    class Meta(object):
        name = 'Query'

    version = dauphin.NonNull(dauphin.String)
    reloadSupported = dauphin.NonNull(dauphin.Boolean)
    pipelineOrError = dauphin.Field(
        dauphin.NonNull('PipelineOrError'), params=dauphin.NonNull('ExecutionSelector')
    )
    pipeline = dauphin.Field(
        dauphin.NonNull('Pipeline'), params=dauphin.NonNull('ExecutionSelector')
    )
    pipelinesOrError = dauphin.NonNull('PipelinesOrError')
    pipelines = dauphin.Field(dauphin.NonNull('PipelineConnection'))
    pipelineSnapshot = dauphin.Field(
        dauphin.NonNull('PipelineSnapshot'),
        snapshotId=dauphin.Argument(dauphin.NonNull(dauphin.String)),
    )
    # Exactly one of snapshotId / activePipelineName must be provided; see the
    # resolver's invariants.
    pipelineSnapshotOrError = dauphin.Field(
        dauphin.NonNull('PipelineSnapshotOrError'),
        snapshotId=dauphin.String(),
        activePipelineName=dauphin.String(),
    )
    scheduler = dauphin.Field(dauphin.NonNull('SchedulerOrError'))
    scheduleOrError = dauphin.Field(
        dauphin.NonNull('ScheduleOrError'),
        schedule_name=dauphin.NonNull(dauphin.String),
        limit=dauphin.Int(),
    )
    partitionSetsOrError = dauphin.Field(
        dauphin.NonNull('PartitionSetsOrError'), pipelineName=dauphin.String()
    )
    partitionSetOrError = dauphin.Field(
        dauphin.NonNull('PartitionSetOrError'), partitionSetName=dauphin.String()
    )
    pipelineRunsOrError = dauphin.Field(
        dauphin.NonNull('PipelineRunsOrError'),
        filter=dauphin.Argument('PipelineRunsFilter'),
        cursor=dauphin.String(),
        limit=dauphin.Int(),
    )
    pipelineRunOrError = dauphin.Field(
        dauphin.NonNull('PipelineRunOrError'), runId=dauphin.NonNull(dauphin.ID)
    )
    pipelineRunTags = dauphin.non_null_list('PipelineTagAndValues')
    usedSolids = dauphin.Field(dauphin.non_null_list('UsedSolid'))
    usedSolid = dauphin.Field('UsedSolid', name=dauphin.NonNull(dauphin.String))
    isPipelineConfigValid = dauphin.Field(
        dauphin.NonNull('PipelineConfigValidationResult'),
        args={
            'pipeline': dauphin.Argument(dauphin.NonNull('ExecutionSelector')),
            'environmentConfigData': dauphin.Argument('EnvironmentConfigData'),
            'mode': dauphin.Argument(dauphin.NonNull(dauphin.String)),
        },
    )
    executionPlan = dauphin.Field(
        dauphin.NonNull('ExecutionPlanResult'),
        args={
            'pipeline': dauphin.Argument(dauphin.NonNull('ExecutionSelector')),
            'environmentConfigData': dauphin.Argument('EnvironmentConfigData'),
            'mode': dauphin.Argument(dauphin.NonNull(dauphin.String)),
        },
    )
    environmentSchemaOrError = dauphin.Field(
        dauphin.NonNull('EnvironmentSchemaOrError'),
        args={
            'selector': dauphin.Argument(dauphin.NonNull('ExecutionSelector')),
            'mode': dauphin.Argument(dauphin.String),
        },
        # Typo fix: "descripton" -> "description" in this user-facing text.
        description='''Fetch an environment schema given an execution selection and a mode.
        See the description on EnvironmentSchema for more information.''',
    )
    instance = dauphin.NonNull('Instance')

    def resolve_pipelineSnapshot(self, graphene_info, **kwargs):
        return get_pipeline_snapshot(graphene_info, kwargs['snapshotId'])

    def resolve_pipelineSnapshotOrError(self, graphene_info, **kwargs):
        """Look up a snapshot either by snapshot id or by active pipeline name."""
        snapshot_id_arg = kwargs.get('snapshotId')
        pipeline_name_arg = kwargs.get('activePipelineName')
        check.invariant(
            not (snapshot_id_arg and pipeline_name_arg),
            'Cannot pass both snapshotId and activePipelineName',
        )
        check.invariant(
            snapshot_id_arg or pipeline_name_arg,
            'Must set one of snapshotId or activePipelineName',
        )

        if pipeline_name_arg:
            return get_pipeline_snapshot_or_error_from_pipeline_name(
                graphene_info, pipeline_name_arg
            )
        else:
            return get_pipeline_snapshot_or_error_from_snapshot_id(graphene_info, snapshot_id_arg)

    def resolve_version(self, graphene_info):
        return graphene_info.context.version

    def resolve_reloadSupported(self, graphene_info):
        # Snapshot-backed contexts are read-only and can never be reloaded.
        if isinstance(graphene_info.context, DagsterSnapshotGraphQLContext):
            return False
        return graphene_info.context.reloader.is_reload_supported

    def resolve_scheduler(self, graphene_info):
        return get_scheduler_or_error(graphene_info)

    def resolve_scheduleOrError(self, graphene_info, schedule_name):
        return get_schedule_or_error(graphene_info, schedule_name)

    def resolve_pipelineOrError(self, graphene_info, **kwargs):
        return get_pipeline_or_error(graphene_info, kwargs['params'].to_selector())

    def resolve_pipeline(self, graphene_info, **kwargs):
        return get_pipeline_or_raise(graphene_info, kwargs['params'].to_selector())

    def resolve_pipelinesOrError(self, graphene_info):
        return get_pipelines_or_error(graphene_info)

    def resolve_pipelines(self, graphene_info):
        return get_pipelines_or_raise(graphene_info)

    def resolve_pipelineRunsOrError(self, graphene_info, **kwargs):
        filters = kwargs.get('filter')
        if filters is not None:
            filters = filters.to_selector()
        return graphene_info.schema.type_named('PipelineRuns')(
            results=get_runs(graphene_info, filters, kwargs.get('cursor'), kwargs.get('limit'))
        )

    def resolve_pipelineRunOrError(self, graphene_info, runId):
        return get_run_by_id(graphene_info, runId)

    def resolve_partitionSetsOrError(self, graphene_info, **kwargs):
        pipeline_name = kwargs.get('pipelineName')
        return get_partition_sets_or_error(graphene_info, pipeline_name)

    def resolve_partitionSetOrError(self, graphene_info, partitionSetName):
        return get_partition_set(graphene_info, partitionSetName)

    def resolve_pipelineRunTags(self, graphene_info):
        return get_run_tags(graphene_info)

    def resolve_usedSolid(self, graphene_info, name):
        return get_solid(graphene_info, name)

    def resolve_usedSolids(self, graphene_info):
        return get_solids(graphene_info)

    def resolve_isPipelineConfigValid(self, graphene_info, pipeline, **kwargs):
        return validate_pipeline_config(
            graphene_info,
            pipeline.to_selector(),
            kwargs.get('environmentConfigData'),
            kwargs.get('mode'),
        )

    def resolve_executionPlan(self, graphene_info, pipeline, **kwargs):
        return get_execution_plan(
            graphene_info,
            pipeline.to_selector(),
            kwargs.get('environmentConfigData'),
            kwargs.get('mode'),
        )

    def resolve_environmentSchemaOrError(self, graphene_info, **kwargs):
        return resolve_environment_schema_or_error(
            graphene_info, kwargs['selector'].to_selector(), kwargs.get('mode')
        )

    def resolve_instance(self, graphene_info):
        return graphene_info.schema.type_named('Instance')(graphene_info.context.instance)
class DauphinPipelineRun(dauphin.ObjectType):
    """GraphQL representation of a pipeline run, including stats, tags,
    lineage (root/parent run ids), and cancellation state."""

    class Meta(object):
        name = 'PipelineRun'

    runId = dauphin.NonNull(dauphin.String)
    # Nullable because of historical runs
    pipelineSnapshotId = dauphin.String()
    status = dauphin.NonNull('PipelineRunStatus')
    pipeline = dauphin.NonNull('PipelineReference')
    stats = dauphin.NonNull('PipelineRunStatsOrError')
    stepStats = dauphin.non_null_list('PipelineRunStepStats')
    logs = dauphin.NonNull('LogMessageConnection')
    computeLogs = dauphin.Field(
        dauphin.NonNull('ComputeLogs'),
        stepKey=dauphin.Argument(dauphin.NonNull(dauphin.String)),
        description='''
  Compute logs are the stdout/stderr logs for a given solid step computation
  ''',
    )
    executionPlan = dauphin.Field('ExecutionPlan')
    stepKeysToExecute = dauphin.List(dauphin.NonNull(dauphin.String))
    environmentConfigYaml = dauphin.NonNull(dauphin.String)
    mode = dauphin.NonNull(dauphin.String)
    tags = dauphin.non_null_list('PipelineTag')
    rootRunId = dauphin.Field(dauphin.String)
    parentRunId = dauphin.Field(dauphin.String)
    canCancel = dauphin.NonNull(dauphin.Boolean)
    executionSelection = dauphin.NonNull('ExecutionSelection')

    def __init__(self, pipeline_run):
        # Scalar fields are populated eagerly; everything else resolves lazily
        # from the retained PipelineRun.
        super(DauphinPipelineRun, self).__init__(
            runId=pipeline_run.run_id, status=pipeline_run.status, mode=pipeline_run.mode
        )
        self._pipeline_run = check.inst_param(pipeline_run, 'pipeline_run', PipelineRun)

    def resolve_pipeline(self, graphene_info):
        return get_pipeline_reference_or_raise(graphene_info, self._pipeline_run.selector)

    def resolve_pipelineSnapshotId(self, _):
        return self._pipeline_run.pipeline_snapshot_id

    def resolve_logs(self, graphene_info):
        connection_type = graphene_info.schema.type_named('LogMessageConnection')
        return connection_type(self._pipeline_run)

    def resolve_stats(self, graphene_info):
        return get_stats(graphene_info, self.run_id)

    def resolve_stepStats(self, graphene_info):
        return get_step_stats(graphene_info, self.run_id)

    def resolve_computeLogs(self, graphene_info, stepKey):
        return graphene_info.schema.type_named('ComputeLogs')(
            runId=self.run_id, stepKey=stepKey
        )

    def resolve_executionPlan(self, graphene_info):
        dauphin_pipeline = self.resolve_pipeline(graphene_info)
        if not isinstance(dauphin_pipeline, DauphinPipeline):
            # Only full pipelines (not bare references) carry a plan.
            return None

        run = self._pipeline_run
        pipeline_def = get_pipeline_def_from_selector(graphene_info, run.selector)
        # Only produce a plan when the stored config still validates.
        if is_config_valid(pipeline_def, run.environment_dict, run.mode):
            return get_execution_plan(graphene_info, run.selector, run.environment_dict, run.mode)
        return None

    def resolve_stepKeysToExecute(self, _):
        return self._pipeline_run.step_keys_to_execute

    def resolve_environmentConfigYaml(self, _graphene_info):
        return yaml.dump(self._pipeline_run.environment_dict, default_flow_style=False)

    def resolve_tags(self, graphene_info):
        tag_type = graphene_info.schema.type_named('PipelineTag')
        return [tag_type(key=key, value=value) for key, value in self._pipeline_run.tags.items()]

    def resolve_rootRunId(self, _):
        return self._pipeline_run.root_run_id

    def resolve_parentRunId(self, _):
        return self._pipeline_run.parent_run_id

    @property
    def run_id(self):
        # Convenience alias over the GraphQL-cased field.
        return self.runId

    def resolve_canCancel(self, graphene_info):
        return graphene_info.context.execution_manager.can_terminate(self.run_id)

    def resolve_executionSelection(self, graphene_info):
        selection_type = graphene_info.schema.type_named('ExecutionSelection')
        return selection_type(self._pipeline_run.selector)
class DauphinQuery(dauphin.ObjectType):
    """Root GraphQL query type (config-type era variant): pipelines, runs,
    config/runtime type lookups, config validation, and execution plans.

    Fix: the user-facing description for ``environmentSchemaOrError``
    misspelled "description" as "descripton".
    """

    class Meta:
        name = 'Query'

    version = dauphin.NonNull(dauphin.String)
    pipelineOrError = dauphin.Field(
        dauphin.NonNull('PipelineOrError'), params=dauphin.NonNull('ExecutionSelector')
    )
    pipeline = dauphin.Field(
        dauphin.NonNull('Pipeline'), params=dauphin.NonNull('ExecutionSelector')
    )
    pipelinesOrError = dauphin.NonNull('PipelinesOrError')
    pipelines = dauphin.Field(dauphin.NonNull('PipelineConnection'))
    configTypeOrError = dauphin.Field(
        dauphin.NonNull('ConfigTypeOrError'),
        pipelineName=dauphin.Argument(dauphin.NonNull(dauphin.String)),
        configTypeName=dauphin.Argument(dauphin.NonNull(dauphin.String)),
        mode=dauphin.Argument(dauphin.NonNull(dauphin.String)),
    )
    runtimeTypeOrError = dauphin.Field(
        dauphin.NonNull('RuntimeTypeOrError'),
        pipelineName=dauphin.Argument(dauphin.NonNull(dauphin.String)),
        runtimeTypeName=dauphin.Argument(dauphin.NonNull(dauphin.String)),
    )
    scheduler = dauphin.Field('Scheduler')
    pipelineRuns = dauphin.non_null_list('PipelineRun')
    pipelineRunOrError = dauphin.Field(
        dauphin.NonNull('PipelineRunOrError'), runId=dauphin.NonNull(dauphin.ID)
    )
    isPipelineConfigValid = dauphin.Field(
        dauphin.NonNull('PipelineConfigValidationResult'),
        args={
            'pipeline': dauphin.Argument(dauphin.NonNull('ExecutionSelector')),
            'environmentConfigData': dauphin.Argument('EnvironmentConfigData'),
            'mode': dauphin.Argument(dauphin.NonNull(dauphin.String)),
        },
    )
    executionPlan = dauphin.Field(
        dauphin.NonNull('ExecutionPlanResult'),
        args={
            'pipeline': dauphin.Argument(dauphin.NonNull('ExecutionSelector')),
            'environmentConfigData': dauphin.Argument('EnvironmentConfigData'),
            'mode': dauphin.Argument(dauphin.NonNull(dauphin.String)),
        },
    )
    environmentSchemaOrError = dauphin.Field(
        dauphin.NonNull('EnvironmentSchemaOrError'),
        args={
            'selector': dauphin.Argument(dauphin.NonNull('ExecutionSelector')),
            'mode': dauphin.Argument(dauphin.NonNull(dauphin.String)),
        },
        # Typo fix: "descripton" -> "description" in this user-facing text.
        description='''Fetch an environment schema given an execution selection and a mode.
        See the description on EnvironmentSchema for more information.''',
    )
    enabledFeatures = dauphin.non_null_list(dauphin.String)

    def resolve_configTypeOrError(self, graphene_info, **kwargs):
        return get_config_type(
            graphene_info, kwargs['pipelineName'], kwargs['configTypeName'], kwargs.get('mode')
        )

    def resolve_runtimeTypeOrError(self, graphene_info, **kwargs):
        return get_runtime_type(graphene_info, kwargs['pipelineName'], kwargs['runtimeTypeName'])

    def resolve_version(self, graphene_info):
        return graphene_info.context.version

    def resolve_scheduler(self, graphene_info):
        return get_scheduler(graphene_info)

    def resolve_pipelineOrError(self, graphene_info, **kwargs):
        return get_pipeline_or_error(graphene_info, kwargs['params'].to_selector())

    def resolve_pipeline(self, graphene_info, **kwargs):
        return get_pipeline_or_raise(graphene_info, kwargs['params'].to_selector())

    def resolve_pipelinesOrError(self, graphene_info):
        return get_pipelines_or_error(graphene_info)

    def resolve_pipelines(self, graphene_info):
        return get_pipelines_or_raise(graphene_info)

    def resolve_pipelineRuns(self, graphene_info):
        return get_runs(graphene_info)

    def resolve_pipelineRunOrError(self, graphene_info, runId):
        return get_run(graphene_info, runId)

    def resolve_isPipelineConfigValid(self, graphene_info, pipeline, **kwargs):
        return validate_pipeline_config(
            graphene_info,
            pipeline.to_selector(),
            kwargs.get('environmentConfigData'),
            kwargs.get('mode'),
        )

    def resolve_executionPlan(self, graphene_info, pipeline, **kwargs):
        return get_execution_plan(
            graphene_info,
            pipeline.to_selector(),
            kwargs.get('environmentConfigData'),
            kwargs.get('mode'),
        )

    def resolve_environmentSchemaOrError(self, graphene_info, **kwargs):
        # Note: 'mode' is a required argument here, hence kwargs['mode'].
        return resolve_environment_schema_or_error(
            graphene_info, kwargs['selector'].to_selector(), kwargs['mode']
        )

    def resolve_enabledFeatures(self, _):
        return get_enabled_features()
class DauphinQuery(dauphin.ObjectType):
    """Root GraphQL query type (early variant): pipelines, runs, type
    lookups, config validation, execution plans, and presets."""

    class Meta:
        name = 'Query'

    version = dauphin.NonNull(dauphin.String)
    pipelineOrError = dauphin.Field(
        dauphin.NonNull('PipelineOrError'), params=dauphin.NonNull('ExecutionSelector')
    )
    pipeline = dauphin.Field(
        dauphin.NonNull('Pipeline'), params=dauphin.NonNull('ExecutionSelector')
    )
    pipelinesOrError = dauphin.NonNull('PipelinesOrError')
    pipelines = dauphin.Field(dauphin.NonNull('PipelineConnection'))
    configTypeOrError = dauphin.Field(
        dauphin.NonNull('ConfigTypeOrError'),
        pipelineName=dauphin.Argument(dauphin.NonNull(dauphin.String)),
        configTypeName=dauphin.Argument(dauphin.NonNull(dauphin.String)),
    )
    runtimeTypeOrError = dauphin.Field(
        dauphin.NonNull('RuntimeTypeOrError'),
        pipelineName=dauphin.Argument(dauphin.NonNull(dauphin.String)),
        runtimeTypeName=dauphin.Argument(dauphin.NonNull(dauphin.String)),
    )
    pipelineRuns = dauphin.non_null_list('PipelineRun')
    pipelineRunOrError = dauphin.Field(
        dauphin.NonNull('PipelineRunOrError'), runId=dauphin.NonNull(dauphin.ID)
    )
    isPipelineConfigValid = dauphin.Field(
        dauphin.NonNull('PipelineConfigValidationResult'),
        args={
            'pipeline': dauphin.Argument(dauphin.NonNull('ExecutionSelector')),
            'config': dauphin.Argument('PipelineConfig'),
        },
    )
    executionPlan = dauphin.Field(
        dauphin.NonNull('ExecutionPlanResult'),
        args={
            'pipeline': dauphin.Argument(dauphin.NonNull('ExecutionSelector')),
            'config': dauphin.Argument('PipelineConfig'),
        },
    )
    presetsForPipeline = dauphin.Field(
        dauphin.List(dauphin.NonNull('PipelinePreset')),
        args={'pipelineName': dauphin.Argument(dauphin.NonNull('String'))},
    )

    def resolve_configTypeOrError(self, graphene_info, **kwargs):
        return get_config_type(graphene_info, kwargs['pipelineName'], kwargs['configTypeName'])

    def resolve_runtimeTypeOrError(self, graphene_info, **kwargs):
        return get_runtime_type(graphene_info, kwargs['pipelineName'], kwargs['runtimeTypeName'])

    def resolve_version(self, graphene_info):
        return graphene_info.context.version

    def resolve_pipelineOrError(self, graphene_info, **kwargs):
        selector = kwargs['params'].to_selector()
        return get_pipeline(graphene_info, selector)

    def resolve_pipeline(self, graphene_info, **kwargs):
        selector = kwargs['params'].to_selector()
        return get_pipeline_or_raise(graphene_info, selector)

    def resolve_pipelinesOrError(self, graphene_info):
        return get_pipelines(graphene_info)

    def resolve_pipelines(self, graphene_info):
        return get_pipelines_or_raise(graphene_info)

    def resolve_pipelineRuns(self, graphene_info):
        return get_runs(graphene_info)

    def resolve_pipelineRunOrError(self, graphene_info, runId):
        return get_run(graphene_info, runId)

    def resolve_isPipelineConfigValid(self, graphene_info, pipeline, config):
        return validate_pipeline_config(graphene_info, pipeline.to_selector(), config)

    def resolve_executionPlan(self, graphene_info, pipeline, config):
        return get_execution_plan(graphene_info, pipeline.to_selector(), config)

    def resolve_presetsForPipeline(self, graphene_info, pipelineName):
        return get_pipeline_presets(graphene_info, pipelineName)
class DauphinQuery(dauphin.ObjectType):
    """Root GraphQL ``Query`` type.

    Declares the queryable fields of the schema; each ``resolve_*`` method is a
    thin delegation to a fetcher function (``fetch_repositories``, ``get_runs``,
    ...) defined elsewhere in the package.
    """

    class Meta(object):
        name = "Query"

    version = dauphin.NonNull(dauphin.String)

    repositoriesOrError = dauphin.NonNull("RepositoriesOrError")
    repositoryOrError = dauphin.Field(
        dauphin.NonNull("RepositoryOrError"),
        repositorySelector=dauphin.NonNull("RepositorySelector"),
    )

    pipelineOrError = dauphin.Field(
        dauphin.NonNull("PipelineOrError"), params=dauphin.NonNull("PipelineSelector")
    )

    pipelineSnapshotOrError = dauphin.Field(
        dauphin.NonNull("PipelineSnapshotOrError"),
        snapshotId=dauphin.String(),
        activePipelineSelector=dauphin.Argument("PipelineSelector"),
    )

    scheduler = dauphin.Field(dauphin.NonNull("SchedulerOrError"))

    scheduleDefinitionOrError = dauphin.Field(
        dauphin.NonNull("ScheduleDefinitionOrError"),
        schedule_selector=dauphin.NonNull("ScheduleSelector"),
    )
    scheduleDefinitionsOrError = dauphin.Field(
        dauphin.NonNull("ScheduleDefinitionsOrError"),
        repositorySelector=dauphin.NonNull("RepositorySelector"),
    )
    scheduleStatesOrError = dauphin.Field(
        dauphin.NonNull("ScheduleStatesOrError"),
        repositorySelector=dauphin.Argument("RepositorySelector"),
        withNoScheduleDefinition=dauphin.Boolean(),
    )

    partitionSetsOrError = dauphin.Field(
        dauphin.NonNull("PartitionSetsOrError"),
        repositorySelector=dauphin.NonNull("RepositorySelector"),
        pipelineName=dauphin.NonNull(dauphin.String),
    )
    partitionSetOrError = dauphin.Field(
        dauphin.NonNull("PartitionSetOrError"),
        repositorySelector=dauphin.NonNull("RepositorySelector"),
        partitionSetName=dauphin.String(),
    )

    pipelineRunsOrError = dauphin.Field(
        dauphin.NonNull("PipelineRunsOrError"),
        filter=dauphin.Argument("PipelineRunsFilter"),
        cursor=dauphin.String(),
        limit=dauphin.Int(),
    )
    pipelineRunOrError = dauphin.Field(
        dauphin.NonNull("PipelineRunOrError"), runId=dauphin.NonNull(dauphin.ID)
    )
    pipelineRunTags = dauphin.non_null_list("PipelineTagAndValues")

    runGroupOrError = dauphin.Field(
        dauphin.NonNull("RunGroupOrError"), runId=dauphin.NonNull(dauphin.ID)
    )
    runGroupsOrError = dauphin.Field(
        dauphin.NonNull("RunGroupsOrError"),
        filter=dauphin.Argument("PipelineRunsFilter"),
        cursor=dauphin.String(),
        limit=dauphin.Int(),
    )

    isPipelineConfigValid = dauphin.Field(
        dauphin.NonNull("PipelineConfigValidationResult"),
        args={
            "pipeline": dauphin.Argument(dauphin.NonNull("PipelineSelector")),
            "runConfigData": dauphin.Argument("RunConfigData"),
            "mode": dauphin.Argument(dauphin.NonNull(dauphin.String)),
        },
    )

    executionPlanOrError = dauphin.Field(
        dauphin.NonNull("ExecutionPlanOrError"),
        args={
            "pipeline": dauphin.Argument(dauphin.NonNull("PipelineSelector")),
            "runConfigData": dauphin.Argument("RunConfigData"),
            "mode": dauphin.Argument(dauphin.NonNull(dauphin.String)),
        },
    )

    runConfigSchemaOrError = dauphin.Field(
        dauphin.NonNull("RunConfigSchemaOrError"),
        args={
            "selector": dauphin.Argument(dauphin.NonNull("PipelineSelector")),
            "mode": dauphin.Argument(dauphin.String),
        },
        # Fix: "descripton" -> "description" in this user-visible string.
        description="""Fetch an environment schema given an execution selection and a mode.
        See the description on RunConfigSchema for more information.""",
    )

    instance = dauphin.NonNull("Instance")
    assetsOrError = dauphin.Field(dauphin.NonNull("AssetsOrError"))
    assetOrError = dauphin.Field(
        dauphin.NonNull("AssetOrError"),
        assetKey=dauphin.Argument(dauphin.NonNull("AssetKeyInput")),
    )

    def resolve_repositoriesOrError(self, graphene_info):
        return fetch_repositories(graphene_info)

    def resolve_repositoryOrError(self, graphene_info, **kwargs):
        return fetch_repository(
            graphene_info,
            RepositorySelector.from_graphql_input(kwargs.get("repositorySelector")),
        )

    def resolve_pipelineSnapshotOrError(self, graphene_info, **kwargs):
        # Exactly one of snapshotId / activePipelineSelector must be supplied;
        # the two invariants below enforce "at most one" and "at least one".
        snapshot_id_arg = kwargs.get("snapshotId")
        pipeline_selector_arg = kwargs.get("activePipelineSelector")
        check.invariant(
            not (snapshot_id_arg and pipeline_selector_arg),
            "Must only pass one of snapshotId or activePipelineSelector",
        )
        check.invariant(
            snapshot_id_arg or pipeline_selector_arg,
            "Must set one of snapshotId or activePipelineSelector",
        )

        if pipeline_selector_arg:
            pipeline_selector = pipeline_selector_from_graphql(
                graphene_info.context, kwargs["activePipelineSelector"]
            )
            return get_pipeline_snapshot_or_error_from_pipeline_selector(
                graphene_info, pipeline_selector
            )
        else:
            return get_pipeline_snapshot_or_error_from_snapshot_id(graphene_info, snapshot_id_arg)

    def resolve_version(self, graphene_info):
        return graphene_info.context.version

    def resolve_scheduler(self, graphene_info):
        return get_scheduler_or_error(graphene_info)

    def resolve_scheduleDefinitionOrError(self, graphene_info, schedule_selector):
        return get_schedule_definition_or_error(
            graphene_info, ScheduleSelector.from_graphql_input(schedule_selector)
        )

    def resolve_scheduleDefinitionsOrError(self, graphene_info, **kwargs):
        return get_schedule_definitions_or_error(
            graphene_info, RepositorySelector.from_graphql_input(kwargs.get("repositorySelector"))
        )

    def resolve_scheduleStatesOrError(self, graphene_info, **kwargs):
        # repositorySelector is optional here: None means "all repositories".
        return get_schedule_states_or_error(
            graphene_info,
            RepositorySelector.from_graphql_input(kwargs["repositorySelector"])
            if kwargs.get("repositorySelector")
            else None,
            kwargs.get("withNoScheduleDefinition"),
        )

    def resolve_pipelineOrError(self, graphene_info, **kwargs):
        return get_pipeline_or_error(
            graphene_info,
            pipeline_selector_from_graphql(graphene_info.context, kwargs["params"]),
        )

    def resolve_pipelineRunsOrError(self, graphene_info, **kwargs):
        filters = kwargs.get("filter")
        if filters is not None:
            filters = filters.to_selector()

        return graphene_info.schema.type_named("PipelineRuns")(
            results=get_runs(graphene_info, filters, kwargs.get("cursor"), kwargs.get("limit"))
        )

    def resolve_pipelineRunOrError(self, graphene_info, runId):
        return get_run_by_id(graphene_info, runId)

    def resolve_runGroupsOrError(self, graphene_info, **kwargs):
        filters = kwargs.get("filter")
        if filters is not None:
            filters = filters.to_selector()

        return graphene_info.schema.type_named("RunGroupsOrError")(
            results=get_run_groups(graphene_info, filters, kwargs.get("cursor"), kwargs.get("limit"))
        )

    def resolve_partitionSetsOrError(self, graphene_info, **kwargs):
        return get_partition_sets_or_error(
            graphene_info,
            RepositorySelector.from_graphql_input(kwargs.get("repositorySelector")),
            kwargs.get("pipelineName"),
        )

    def resolve_partitionSetOrError(self, graphene_info, **kwargs):
        return get_partition_set(
            graphene_info,
            RepositorySelector.from_graphql_input(kwargs.get("repositorySelector")),
            kwargs.get("partitionSetName"),
        )

    def resolve_pipelineRunTags(self, graphene_info):
        return get_run_tags(graphene_info)

    def resolve_runGroupOrError(self, graphene_info, runId):
        return get_run_group(graphene_info, runId)

    def resolve_isPipelineConfigValid(self, graphene_info, pipeline, **kwargs):
        return validate_pipeline_config(
            graphene_info,
            pipeline_selector_from_graphql(graphene_info.context, pipeline),
            kwargs.get("runConfigData"),
            kwargs.get("mode"),
        )

    def resolve_executionPlanOrError(self, graphene_info, pipeline, **kwargs):
        return get_execution_plan(
            graphene_info,
            pipeline_selector_from_graphql(graphene_info.context, pipeline),
            kwargs.get("runConfigData"),
            kwargs.get("mode"),
        )

    def resolve_runConfigSchemaOrError(self, graphene_info, **kwargs):
        return resolve_run_config_schema_or_error(
            graphene_info,
            pipeline_selector_from_graphql(graphene_info.context, kwargs["selector"]),
            kwargs.get("mode"),
        )

    def resolve_instance(self, graphene_info):
        return graphene_info.schema.type_named("Instance")(graphene_info.context.instance)

    def resolve_assetsOrError(self, graphene_info):
        return get_assets(graphene_info)

    def resolve_assetOrError(self, graphene_info, **kwargs):
        return get_asset(graphene_info, AssetKey.from_graphql_input(kwargs["assetKey"]))
class Arguments(object): executionParams = dauphin.NonNull("ExecutionParams") retries = dauphin.Argument("Retries")
class DauphinQuery(dauphin.ObjectType):
    """Root GraphQL ``Query`` type (reload-capable variant).

    Declares the queryable fields of the schema; each ``resolve_*`` method is a
    thin delegation to a fetcher function (``get_config_type``, ``get_runs``,
    ...) defined elsewhere in the package.
    """

    class Meta:
        name = 'Query'

    version = dauphin.NonNull(dauphin.String)
    reloadSupported = dauphin.NonNull(dauphin.Boolean)

    pipelineOrError = dauphin.Field(
        dauphin.NonNull('PipelineOrError'), params=dauphin.NonNull('ExecutionSelector')
    )
    pipeline = dauphin.Field(
        dauphin.NonNull('Pipeline'), params=dauphin.NonNull('ExecutionSelector')
    )
    pipelinesOrError = dauphin.NonNull('PipelinesOrError')
    pipelines = dauphin.Field(dauphin.NonNull('PipelineConnection'))

    configTypeOrError = dauphin.Field(
        dauphin.NonNull('ConfigTypeOrError'),
        pipelineName=dauphin.Argument(dauphin.NonNull(dauphin.String)),
        configTypeName=dauphin.Argument(dauphin.NonNull(dauphin.String)),
        mode=dauphin.Argument(dauphin.NonNull(dauphin.String)),
    )
    runtimeTypeOrError = dauphin.Field(
        dauphin.NonNull('RuntimeTypeOrError'),
        pipelineName=dauphin.Argument(dauphin.NonNull(dauphin.String)),
        runtimeTypeName=dauphin.Argument(dauphin.NonNull(dauphin.String)),
    )

    scheduler = dauphin.Field(dauphin.NonNull('SchedulerOrError'))

    pipelineRunsOrError = dauphin.Field(
        dauphin.NonNull('PipelineRunsOrError'),
        filter=dauphin.Argument(dauphin.NonNull('PipelineRunsFilter')),
        cursor=dauphin.String(),
        limit=dauphin.Int(),
    )
    pipelineRunOrError = dauphin.Field(
        dauphin.NonNull('PipelineRunOrError'), runId=dauphin.NonNull(dauphin.ID)
    )
    pipelineRunTags = dauphin.non_null_list('PipelineTagAndValues')

    isPipelineConfigValid = dauphin.Field(
        dauphin.NonNull('PipelineConfigValidationResult'),
        args={
            'pipeline': dauphin.Argument(dauphin.NonNull('ExecutionSelector')),
            'environmentConfigData': dauphin.Argument('EnvironmentConfigData'),
            'mode': dauphin.Argument(dauphin.NonNull(dauphin.String)),
        },
    )
    executionPlan = dauphin.Field(
        dauphin.NonNull('ExecutionPlanResult'),
        args={
            'pipeline': dauphin.Argument(dauphin.NonNull('ExecutionSelector')),
            'environmentConfigData': dauphin.Argument('EnvironmentConfigData'),
            'mode': dauphin.Argument(dauphin.NonNull(dauphin.String)),
        },
    )

    environmentSchemaOrError = dauphin.Field(
        dauphin.NonNull('EnvironmentSchemaOrError'),
        args={
            'selector': dauphin.Argument(dauphin.NonNull('ExecutionSelector')),
            'mode': dauphin.Argument(dauphin.String),
        },
        # Fix: "descripton" -> "description" in this user-visible string.
        description='''Fetch an environment schema given an execution selection and a mode.
        See the description on EnvironmentSchema for more information.''',
    )

    instance = dauphin.NonNull('Instance')

    def resolve_configTypeOrError(self, graphene_info, **kwargs):
        return get_config_type(
            graphene_info, kwargs['pipelineName'], kwargs['configTypeName'], kwargs.get('mode')
        )

    def resolve_runtimeTypeOrError(self, graphene_info, **kwargs):
        return get_runtime_type(graphene_info, kwargs['pipelineName'], kwargs['runtimeTypeName'])

    def resolve_version(self, graphene_info):
        return graphene_info.context.version

    def resolve_reloadSupported(self, graphene_info):
        return graphene_info.context.reloader.is_reload_supported

    def resolve_scheduler(self, graphene_info):
        return get_scheduler_or_error(graphene_info)

    def resolve_pipelineOrError(self, graphene_info, **kwargs):
        return get_pipeline_or_error(graphene_info, kwargs['params'].to_selector())

    def resolve_pipeline(self, graphene_info, **kwargs):
        return get_pipeline_or_raise(graphene_info, kwargs['params'].to_selector())

    def resolve_pipelinesOrError(self, graphene_info):
        return get_pipelines_or_error(graphene_info)

    def resolve_pipelines(self, graphene_info):
        return get_pipelines_or_raise(graphene_info)

    def resolve_pipelineRunsOrError(self, graphene_info, **kwargs):
        filters = kwargs['filter'].to_selector()
        # The filter input is mutually exclusive: at most one of
        # run_id / pipeline / tag_key / status may be set.
        provided = [
            i for i in [filters.run_id, filters.pipeline, filters.tag_key, filters.status] if i
        ]
        if len(provided) > 1:
            return graphene_info.schema.type_named('InvalidPipelineRunsFilterError')(
                message="You may only provide one of the filter options."
            )
        return graphene_info.schema.type_named('PipelineRuns')(
            results=get_runs(graphene_info, filters, kwargs.get('cursor'), kwargs.get('limit'))
        )

    def resolve_pipelineRunOrError(self, graphene_info, runId):
        return get_run(graphene_info, runId)

    def resolve_pipelineRunTags(self, graphene_info):
        return get_run_tags(graphene_info)

    def resolve_isPipelineConfigValid(self, graphene_info, pipeline, **kwargs):
        return validate_pipeline_config(
            graphene_info,
            pipeline.to_selector(),
            kwargs.get('environmentConfigData'),
            kwargs.get('mode'),
        )

    def resolve_executionPlan(self, graphene_info, pipeline, **kwargs):
        return get_execution_plan(
            graphene_info,
            pipeline.to_selector(),
            kwargs.get('environmentConfigData'),
            kwargs.get('mode'),
        )

    def resolve_environmentSchemaOrError(self, graphene_info, **kwargs):
        return resolve_environment_schema_or_error(
            graphene_info, kwargs['selector'].to_selector(), kwargs.get('mode')
        )

    def resolve_instance(self, graphene_info):
        return graphene_info.schema.type_named('Instance')(graphene_info.context.instance)
class DauphinIPipelineSnapshotMixin(object):
    # Mixin this class to implement IPipelineSnapshot
    #
    # Graphene has some strange properties that make it so that you cannot
    # implement ABCs nor use properties in an overridable way. So the way
    # the mixin works is that the target classes have to have a method
    # get_represented_pipeline()
    #
    def get_represented_pipeline(self):
        """Hook for mixin targets: return the represented pipeline object.

        Raises:
            NotImplementedError: always, in the mixin itself; targets must
                override.
        """
        raise NotImplementedError()

    name = dauphin.NonNull(dauphin.String)
    description = dauphin.String()
    pipeline_snapshot_id = dauphin.NonNull(dauphin.String)
    dagster_types = dauphin.non_null_list('DagsterType')
    dagster_type_or_error = dauphin.Field(
        dauphin.NonNull('DagsterTypeOrError'),
        dagsterTypeName=dauphin.Argument(dauphin.NonNull(dauphin.String)),
    )
    solids = dauphin.non_null_list('Solid')
    modes = dauphin.non_null_list('Mode')
    solid_handles = dauphin.Field(
        dauphin.non_null_list('SolidHandle'), parentHandleID=dauphin.String()
    )
    solid_handle = dauphin.Field(
        'SolidHandle',
        handleID=dauphin.Argument(dauphin.NonNull(dauphin.String)),
    )
    tags = dauphin.non_null_list('PipelineTag')

    def resolve_pipeline_snapshot_id(self, _):
        return self.get_represented_pipeline().identifying_pipeline_snapshot_id

    def resolve_name(self, _):
        return self.get_represented_pipeline().name

    def resolve_description(self, _):
        return self.get_represented_pipeline().description

    def resolve_dagster_types(self, _graphene_info):
        # Only named type snaps are exposed; the result is sorted by type name.
        represented_pipeline = self.get_represented_pipeline()
        return sorted(
            list(
                map(
                    lambda dt: to_dauphin_dagster_type(
                        represented_pipeline.pipeline_snapshot, dt.key
                    ),
                    [t for t in represented_pipeline.dagster_type_snaps if t.name],
                )
            ),
            key=lambda dagster_type: dagster_type.name,
        )

    @capture_dauphin_error
    def resolve_dagster_type_or_error(self, _, **kwargs):
        type_name = kwargs['dagsterTypeName']

        represented_pipeline = self.get_represented_pipeline()

        if not represented_pipeline.has_dagster_type_named(type_name):
            # NOTE(review): imported locally, presumably to avoid an import
            # cycle with .errors — confirm.
            from .errors import DauphinDagsterTypeNotFoundError

            raise UserFacingGraphQLError(
                DauphinDagsterTypeNotFoundError(dagster_type_name=type_name)
            )

        return to_dauphin_dagster_type(
            represented_pipeline.pipeline_snapshot,
            represented_pipeline.get_dagster_type_by_name(type_name).key,
        )

    def resolve_solids(self, _graphene_info):
        represented_pipeline = self.get_represented_pipeline()
        return build_dauphin_solids(
            represented_pipeline,
            represented_pipeline.dep_structure_index,
        )

    def resolve_modes(self, _):
        represented_pipeline = self.get_represented_pipeline()
        return [
            DauphinMode(represented_pipeline.config_schema_snapshot, mode_def_snap)
            for mode_def_snap in sorted(
                represented_pipeline.mode_def_snaps, key=lambda item: item.name
            )
        ]

    def resolve_solid_handle(self, _graphene_info, handleID):
        # dict.get: resolves to None for unknown handles (field is nullable).
        return _get_solid_handles(self.get_represented_pipeline()).get(handleID)

    def resolve_solid_handles(self, _graphene_info, **kwargs):
        handles = _get_solid_handles(self.get_represented_pipeline())
        parentHandleID = kwargs.get('parentHandleID')

        # Empty string selects handles with no parent; any other non-None
        # value selects the children of that handle; omitted (None) returns
        # every handle.
        if parentHandleID == "":
            handles = {key: handle for key, handle in handles.items() if not handle.parent}
        elif parentHandleID is not None:
            handles = {
                key: handle
                for key, handle in handles.items()
                if handle.parent and handle.parent.handleID.to_string() == parentHandleID
            }

        # Deterministic ordering by handle key.
        return [handles[key] for key in sorted(handles)]

    def resolve_tags(self, graphene_info):
        represented_pipeline = self.get_represented_pipeline()
        return [
            graphene_info.schema.type_named('PipelineTag')(key=key, value=value)
            for key, value in represented_pipeline.pipeline_snapshot.tags.items()
        ]
class DauphinPipelineRun(dauphin.ObjectType):
    """GraphQL representation of a stored ``PipelineRun``.

    Wraps a ``PipelineRun`` record; resolvers read straight off the wrapped
    record or delegate to fetcher helpers.
    """

    class Meta:
        name = "PipelineRun"

    id = dauphin.NonNull(dauphin.ID)
    runId = dauphin.NonNull(dauphin.String)
    # Nullable because of historical runs
    pipelineSnapshotId = dauphin.String()
    repositoryOrigin = dauphin.Field("RepositoryOrigin")
    status = dauphin.NonNull("PipelineRunStatus")
    pipeline = dauphin.NonNull("PipelineReference")
    pipelineName = dauphin.NonNull(dauphin.String)
    solidSelection = dauphin.List(dauphin.NonNull(dauphin.String))
    stats = dauphin.NonNull("PipelineRunStatsOrError")
    stepStats = dauphin.non_null_list("PipelineRunStepStats")
    computeLogs = dauphin.Field(
        dauphin.NonNull("ComputeLogs"),
        stepKey=dauphin.Argument(dauphin.NonNull(dauphin.String)),
        description="""
        Compute logs are the stdout/stderr logs for a given solid step computation
        """,
    )
    executionPlan = dauphin.Field("ExecutionPlan")
    stepKeysToExecute = dauphin.List(dauphin.NonNull(dauphin.String))
    runConfigYaml = dauphin.NonNull(dauphin.String)
    mode = dauphin.NonNull(dauphin.String)
    tags = dauphin.non_null_list("PipelineTag")
    rootRunId = dauphin.Field(dauphin.String)
    parentRunId = dauphin.Field(dauphin.String)
    canTerminate = dauphin.NonNull(dauphin.Boolean)
    assets = dauphin.non_null_list("Asset")

    def __init__(self, pipeline_run):
        # Only the scalar fields are passed to the dauphin base; everything
        # else is resolved lazily from the wrapped record.
        super(DauphinPipelineRun, self).__init__(
            runId=pipeline_run.run_id, status=pipeline_run.status, mode=pipeline_run.mode
        )
        self._pipeline_run = check.inst_param(pipeline_run, "pipeline_run", PipelineRun)

    def resolve_id(self, _):
        # The GraphQL id is the run id.
        return self._pipeline_run.run_id

    def resolve_repositoryOrigin(self, graphene_info):
        # Only present when the run recorded an external pipeline origin;
        # otherwise the (nullable) field resolves to None.
        return (
            graphene_info.schema.type_named("RepositoryOrigin")(
                self._pipeline_run.external_pipeline_origin.external_repository_origin
            )
            if self._pipeline_run.external_pipeline_origin
            else None
        )

    def resolve_pipeline(self, graphene_info):
        return get_pipeline_reference_or_raise(graphene_info, self._pipeline_run)

    def resolve_pipelineName(self, _graphene_info):
        return self._pipeline_run.pipeline_name

    def resolve_solidSelection(self, _graphene_info):
        return self._pipeline_run.solid_selection

    def resolve_pipelineSnapshotId(self, _):
        return self._pipeline_run.pipeline_snapshot_id

    def resolve_stats(self, graphene_info):
        return get_stats(graphene_info, self.run_id)

    def resolve_stepStats(self, graphene_info):
        return get_step_stats(graphene_info, self.run_id)

    def resolve_computeLogs(self, graphene_info, stepKey):
        return graphene_info.schema.type_named("ComputeLogs")(runId=self.run_id, stepKey=stepKey)

    def resolve_executionPlan(self, graphene_info):
        # Both snapshot ids are required to reconstruct the plan; otherwise
        # this (nullable) field resolves to None.
        if not (
            self._pipeline_run.execution_plan_snapshot_id
            and self._pipeline_run.pipeline_snapshot_id
        ):
            return None

        from .execution import DauphinExecutionPlan

        instance = graphene_info.context.instance
        historical_pipeline = instance.get_historical_pipeline(
            self._pipeline_run.pipeline_snapshot_id
        )
        execution_plan_snapshot = instance.get_execution_plan_snapshot(
            self._pipeline_run.execution_plan_snapshot_id
        )
        # Either lookup may come back empty; resolve to None in that case too.
        return (
            DauphinExecutionPlan(
                ExternalExecutionPlan(
                    execution_plan_snapshot=execution_plan_snapshot,
                    represented_pipeline=historical_pipeline,
                )
            )
            if execution_plan_snapshot and historical_pipeline
            else None
        )

    def resolve_stepKeysToExecute(self, _):
        return self._pipeline_run.step_keys_to_execute

    def resolve_runConfigYaml(self, _graphene_info):
        return yaml.dump(
            self._pipeline_run.run_config, default_flow_style=False, allow_unicode=True
        )

    def resolve_tags(self, graphene_info):
        # Tags classified as HIDDEN are filtered out of the API response.
        return [
            graphene_info.schema.type_named("PipelineTag")(key=key, value=value)
            for key, value in self._pipeline_run.tags.items()
            if get_tag_type(key) != TagType.HIDDEN
        ]

    def resolve_rootRunId(self, _):
        return self._pipeline_run.root_run_id

    def resolve_parentRunId(self, _):
        return self._pipeline_run.parent_run_id

    @property
    def run_id(self):
        # Convenience accessor mirroring the runId field.
        return self.runId

    def resolve_canTerminate(self, graphene_info):
        # short circuit if the pipeline run is in a terminal state
        if self._pipeline_run.is_finished:
            return False
        return graphene_info.context.instance.run_launcher.can_terminate(self.run_id)

    def resolve_assets(self, graphene_info):
        return get_assets_for_run_id(graphene_info, self.run_id)
class DauphinPipelineRun(dauphin.ObjectType):
    """GraphQL representation of a stored ``PipelineRun`` (earlier variant).

    Wraps a ``PipelineRun`` record; resolvers read off the wrapped record or
    go through the instance on the request context.
    """

    class Meta:
        name = 'PipelineRun'

    runId = dauphin.NonNull(dauphin.String)
    status = dauphin.NonNull('PipelineRunStatus')
    pipeline = dauphin.NonNull('PipelineReference')
    stats = dauphin.NonNull('PipelineRunStatsSnapshot')
    logs = dauphin.NonNull('LogMessageConnection')
    computeLogs = dauphin.Field(
        dauphin.NonNull('ComputeLogs'),
        stepKey=dauphin.Argument(dauphin.NonNull(dauphin.String)),
        description='''
        Compute logs are the stdout/stderr logs for a given solid step computation
        ''',
    )
    executionPlan = dauphin.Field('ExecutionPlan')
    stepKeysToExecute = dauphin.List(dauphin.NonNull(dauphin.String))
    environmentConfigYaml = dauphin.NonNull(dauphin.String)
    mode = dauphin.NonNull(dauphin.String)
    tags = dauphin.List(dauphin.NonNull('PipelineTag'))

    def __init__(self, pipeline_run):
        # Only the scalar fields are passed to the dauphin base; everything
        # else is resolved lazily from the wrapped record.
        super(DauphinPipelineRun, self).__init__(
            runId=pipeline_run.run_id, status=pipeline_run.status, mode=pipeline_run.mode
        )
        self._pipeline_run = check.inst_param(pipeline_run, 'pipeline_run', PipelineRun)

    def resolve_pipeline(self, graphene_info):
        return get_pipeline_reference_or_raise(graphene_info, self._pipeline_run.selector)

    def resolve_logs(self, graphene_info):
        return graphene_info.schema.type_named('LogMessageConnection')(self._pipeline_run)

    def resolve_stats(self, graphene_info):
        stats = graphene_info.context.instance.get_run_stats(self.run_id)
        return graphene_info.schema.type_named('PipelineRunStatsSnapshot')(stats)

    def resolve_computeLogs(self, graphene_info, stepKey):
        update = graphene_info.context.instance.compute_log_manager.read_logs(
            self.run_id, stepKey
        )
        return from_compute_log_update(graphene_info, self.run_id, stepKey, update)

    def resolve_executionPlan(self, graphene_info):
        # A plan can only be built when the pipeline reference resolves to a
        # real DauphinPipeline; otherwise this (nullable) field is None.
        pipeline = self.resolve_pipeline(graphene_info)
        if isinstance(pipeline, DauphinPipeline):
            execution_plan = create_execution_plan(
                pipeline.get_dagster_pipeline(),
                self._pipeline_run.environment_dict,
                RunConfig(mode=self._pipeline_run.mode),
            )
            return graphene_info.schema.type_named('ExecutionPlan')(pipeline, execution_plan)
        else:
            return None

    def resolve_stepKeysToExecute(self, _):
        return self._pipeline_run.step_keys_to_execute

    def resolve_environmentConfigYaml(self, _graphene_info):
        return yaml.dump(self._pipeline_run.environment_dict, default_flow_style=False)

    def resolve_tags(self, graphene_info):
        return [
            graphene_info.schema.type_named('PipelineTag')(key=key, value=value)
            for key, value in self._pipeline_run.tags.items()
        ]

    @property
    def run_id(self):
        # Convenience accessor mirroring the runId field.
        return self.runId
class DauphinPartition(dauphin.ObjectType):
    """A single partition of an external partition set, plus the runs tagged
    as belonging to it."""

    class Meta:
        name = "Partition"

    name = dauphin.NonNull(dauphin.String)
    partition_set_name = dauphin.NonNull(dauphin.String)
    solid_selection = dauphin.List(dauphin.NonNull(dauphin.String))
    mode = dauphin.NonNull(dauphin.String)
    runConfigOrError = dauphin.NonNull("PartitionRunConfigOrError")
    tagsOrError = dauphin.NonNull("PartitionTagsOrError")
    runs = dauphin.Field(
        dauphin.non_null_list("PipelineRun"),
        filter=dauphin.Argument("PipelineRunsFilter"),
        cursor=dauphin.String(),
        limit=dauphin.Int(),
    )
    status = dauphin.Field("PipelineRunStatus")

    def __init__(self, external_repository_handle, external_partition_set, partition_name):
        """Wrap one named partition of ``external_partition_set``.

        Args:
            external_repository_handle (RepositoryHandle): handle to the
                repository hosting the partition set.
            external_partition_set (ExternalPartitionSet): the partition set.
            partition_name (str): name of this partition.
        """
        # Fix: the check param-name string previously had a typo
        # ("external_respository_handle"), which would surface in the
        # CheckError message on a failed type check.
        self._external_repository_handle = check.inst_param(
            external_repository_handle, "external_repository_handle", RepositoryHandle
        )
        self._external_partition_set = check.inst_param(
            external_partition_set, "external_partition_set", ExternalPartitionSet
        )
        self._partition_name = check.str_param(partition_name, "partition_name")

        super(DauphinPartition, self).__init__(
            name=partition_name,
            partition_set_name=external_partition_set.name,
            solid_selection=external_partition_set.solid_selection,
            mode=external_partition_set.mode,
        )

    def resolve_runConfigOrError(self, graphene_info):
        return get_partition_config(
            graphene_info,
            self._external_repository_handle,
            self._external_partition_set.name,
            self._partition_name,
        )

    def resolve_tagsOrError(self, graphene_info):
        return get_partition_tags(
            graphene_info,
            self._external_repository_handle,
            self._external_partition_set.name,
            self._partition_name,
        )

    def resolve_runs(self, graphene_info, **kwargs):
        filters = kwargs.get("filter")
        # Runs belonging to this partition are identified by these two tags;
        # any user-supplied filter is intersected with them.
        partition_tags = {
            PARTITION_SET_TAG: self._external_partition_set.name,
            PARTITION_NAME_TAG: self._partition_name,
        }
        if filters is not None:
            filters = filters.to_selector()
            runs_filter = PipelineRunsFilter(
                run_ids=filters.run_ids,
                pipeline_name=filters.pipeline_name,
                statuses=filters.statuses,
                tags=merge_dicts(filters.tags, partition_tags),
            )
        else:
            runs_filter = PipelineRunsFilter(tags=partition_tags)

        return get_runs(
            graphene_info, runs_filter, cursor=kwargs.get("cursor"), limit=kwargs.get("limit")
        )
class Arguments: executionParams = dauphin.NonNull('ExecutionParams') reexecutionConfig = dauphin.Argument('ReexecutionConfig')
class DauphinIPipelineSnapshotMixin:
    # Mixin this class to implement IPipelineSnapshot
    #
    # Graphene has some strange properties that make it so that you cannot
    # implement ABCs nor use properties in an overridable way. So the way
    # the mixin works is that the target classes have to have a method
    # get_represented_pipeline()
    #
    def get_represented_pipeline(self):
        """Hook for mixin targets: return the represented pipeline object.

        Raises:
            NotImplementedError: always, in the mixin itself; targets must
                override.
        """
        raise NotImplementedError()

    name = dauphin.NonNull(dauphin.String)
    description = dauphin.String()
    id = dauphin.NonNull(dauphin.ID)
    pipeline_snapshot_id = dauphin.NonNull(dauphin.String)
    dagster_types = dauphin.non_null_list("DagsterType")
    dagster_type_or_error = dauphin.Field(
        dauphin.NonNull("DagsterTypeOrError"),
        dagsterTypeName=dauphin.Argument(dauphin.NonNull(dauphin.String)),
    )
    solids = dauphin.non_null_list("Solid")
    modes = dauphin.non_null_list("Mode")
    solid_handles = dauphin.Field(
        dauphin.non_null_list("SolidHandle"), parentHandleID=dauphin.String()
    )
    solid_handle = dauphin.Field(
        "SolidHandle",
        handleID=dauphin.Argument(dauphin.NonNull(dauphin.String)),
    )
    tags = dauphin.non_null_list("PipelineTag")
    runs = dauphin.Field(
        dauphin.non_null_list("PipelineRun"),
        cursor=dauphin.String(),
        limit=dauphin.Int(),
    )
    schedules = dauphin.non_null_list("ScheduleDefinition")
    parent_snapshot_id = dauphin.String()

    def resolve_pipeline_snapshot_id(self, _):
        return self.get_represented_pipeline().identifying_pipeline_snapshot_id

    def resolve_id(self, _):
        # `id` is the identifying snapshot id — same value as
        # pipeline_snapshot_id above.
        return self.get_represented_pipeline().identifying_pipeline_snapshot_id

    def resolve_name(self, _):
        return self.get_represented_pipeline().name

    def resolve_description(self, _):
        return self.get_represented_pipeline().description

    def resolve_dagster_types(self, _graphene_info):
        # Only named type snaps are exposed; the result is sorted by type name.
        represented_pipeline = self.get_represented_pipeline()
        return sorted(
            list(
                map(
                    lambda dt: to_dauphin_dagster_type(
                        represented_pipeline.pipeline_snapshot, dt.key
                    ),
                    [t for t in represented_pipeline.dagster_type_snaps if t.name],
                )
            ),
            key=lambda dagster_type: dagster_type.name,
        )

    @capture_dauphin_error
    def resolve_dagster_type_or_error(self, _, **kwargs):
        type_name = kwargs["dagsterTypeName"]

        represented_pipeline = self.get_represented_pipeline()

        if not represented_pipeline.has_dagster_type_named(type_name):
            # NOTE(review): imported locally, presumably to avoid an import
            # cycle with .errors — confirm.
            from .errors import DauphinDagsterTypeNotFoundError

            raise UserFacingGraphQLError(
                DauphinDagsterTypeNotFoundError(dagster_type_name=type_name)
            )

        return to_dauphin_dagster_type(
            represented_pipeline.pipeline_snapshot,
            represented_pipeline.get_dagster_type_by_name(type_name).key,
        )

    def resolve_solids(self, _graphene_info):
        represented_pipeline = self.get_represented_pipeline()
        return build_dauphin_solids(
            represented_pipeline,
            represented_pipeline.dep_structure_index,
        )

    def resolve_modes(self, _):
        represented_pipeline = self.get_represented_pipeline()
        return [
            DauphinMode(represented_pipeline.config_schema_snapshot, mode_def_snap)
            for mode_def_snap in sorted(
                represented_pipeline.mode_def_snaps, key=lambda item: item.name
            )
        ]

    def resolve_solid_handle(self, _graphene_info, handleID):
        # dict.get: resolves to None for unknown handles (field is nullable).
        return _get_solid_handles(self.get_represented_pipeline()).get(handleID)

    def resolve_solid_handles(self, _graphene_info, **kwargs):
        handles = _get_solid_handles(self.get_represented_pipeline())
        parentHandleID = kwargs.get("parentHandleID")

        # Empty string selects handles with no parent; any other non-None
        # value selects the children of that handle; omitted (None) returns
        # every handle.
        if parentHandleID == "":
            handles = {key: handle for key, handle in handles.items() if not handle.parent}
        elif parentHandleID is not None:
            handles = {
                key: handle
                for key, handle in handles.items()
                if handle.parent and handle.parent.handleID.to_string() == parentHandleID
            }

        # Deterministic ordering by handle key.
        return [handles[key] for key in sorted(handles)]

    def resolve_tags(self, graphene_info):
        represented_pipeline = self.get_represented_pipeline()
        return [
            graphene_info.schema.type_named("PipelineTag")(key=key, value=value)
            for key, value in represented_pipeline.pipeline_snapshot.tags.items()
        ]

    def resolve_solidSelection(self, _graphene_info):
        # NOTE(review): no solidSelection field is declared on this mixin;
        # presumably a class mixing this in declares it — confirm.
        return self.get_represented_pipeline().solid_selection

    def resolve_runs(self, graphene_info, **kwargs):
        # All runs for this pipeline (by name), paginated via cursor/limit.
        runs_filter = PipelineRunsFilter(pipeline_name=self.get_represented_pipeline().name)
        return get_runs(graphene_info, runs_filter, kwargs.get("cursor"), kwargs.get("limit"))

    def resolve_schedules(self, graphene_info):
        represented_pipeline = self.get_represented_pipeline()
        if not isinstance(represented_pipeline, ExternalPipeline):
            # this is an historical pipeline snapshot, so there are not any associated running
            # schedules
            return []

        pipeline_selector = represented_pipeline.handle.to_selector()
        schedules = get_schedule_definitions_for_pipeline(graphene_info, pipeline_selector)
        return schedules

    def resolve_parent_snapshot_id(self, _graphene_info):
        # Nullable: only present when the snapshot records lineage.
        lineage_snapshot = self.get_represented_pipeline().pipeline_snapshot.lineage_snapshot
        if lineage_snapshot:
            return lineage_snapshot.parent_snapshot_id
        else:
            return None
class DauphinEnvironmentSchema(dauphin.ObjectType):
    """GraphQL type exposing the environment (config) schema for one
    execution selection and mode of a pipeline.

    Wraps an ``EnvironmentSchema`` together with the ``PipelineDefinition`` it
    was derived from; all resolvers operate on that pair.
    """

    def __init__(self, environment_schema, dagster_pipeline):
        # Local imports: these dagster core types are only needed for the
        # check calls in this constructor.
        from dagster.core.definitions.environment_schema import EnvironmentSchema
        from dagster.core.definitions.pipeline import PipelineDefinition

        self._environment_schema = check.inst_param(
            environment_schema, 'environment_schema', EnvironmentSchema
        )
        self._dagster_pipeline = check.inst_param(
            dagster_pipeline, 'dagster_pipeline', PipelineDefinition
        )

    class Meta:
        name = 'EnvironmentSchema'
        # Fix: grammar in user-visible description ("represents the all the",
        # "fetching in a particular config type").
        description = '''The environment schema represents all the config type
        information given a certain execution selection and mode of execution of that
        selection. All config interactions (e.g. checking config validity, fetching
        all config types, fetching a particular config type) should be done
        through this type '''

    rootEnvironmentType = dauphin.Field(
        dauphin.NonNull('ConfigType'),
        description='''Fetch the root environment type. Concretely this is the type that
        is in scope at the root of the configuration document for a particular execution selection.
        It is the type that is in scope initially with a blank config editor.''',
    )
    allConfigTypes = dauphin.Field(
        dauphin.non_null_list('ConfigType'),
        # Fix: "all the types are in the scope" -> "all the types that are in
        # the scope".
        description='''Fetch all the named config types that are in the schema. Useful
        for things like a type browser UI, or for fetching all the types that are in the
        scope of a document so that the index can be built for the autocompleting editor.
    ''',
    )

    configTypeOrError = dauphin.Field(
        dauphin.NonNull('ConfigTypeOrError'),
        configTypeName=dauphin.Argument(dauphin.NonNull(dauphin.String)),
        description='''Fetch a particular config type''',
    )

    isEnvironmentConfigValid = dauphin.Field(
        dauphin.NonNull('PipelineConfigValidationResult'),
        args={'environmentConfigData': dauphin.Argument('EnvironmentConfigData')},
        # Fix: mismatched quote around PipelineConfigValidationInvalid and
        # grammar ("which containers a list errors so that can be rendered").
        description='''Parse a particular environment config result. The return value
        either indicates that the validation succeeded by returning
        `PipelineConfigValidationValid` or that there are configuration errors
        by returning `PipelineConfigValidationInvalid` which contains a list of errors
        so that they can be rendered for the user''',
    )

    def resolve_allConfigTypes(self, _graphene_info):
        # Sorted by name; unnamed types sort first via the empty-string key.
        return sorted(
            list(map(to_dauphin_config_type, self._environment_schema.all_config_types())),
            key=lambda ct: ct.name if ct.name else '',
        )

    def resolve_rootEnvironmentType(self, _graphene_info):
        return to_dauphin_config_type(self._environment_schema.environment_type)

    def resolve_configTypeOrError(self, graphene_info, **kwargs):
        return resolve_config_type_or_error(
            graphene_info,
            self._environment_schema,
            self._dagster_pipeline,
            kwargs['configTypeName'],
        )

    def resolve_isEnvironmentConfigValid(self, graphene_info, **kwargs):
        return resolve_is_environment_config_valid(
            graphene_info,
            self._environment_schema,
            self._dagster_pipeline,
            kwargs.get('environmentConfigData'),
        )
class Arguments: pipelineName = dauphin.NonNull(dauphin.String) config = dauphin.Argument('PipelineConfig') stepKeys = dauphin.List(dauphin.NonNull(dauphin.String)) executionMetadata = dauphin.Argument(dauphin.NonNull(DauphinExecutionMetadata))
class DauphinPipelineRun(dauphin.ObjectType):
    """Dauphin wrapper around a stored ``PipelineRun`` record.

    Scalar fields (runId/status/mode) are populated eagerly in ``__init__``;
    everything else resolves lazily from the wrapped run or the instance.
    """

    class Meta(object):
        name = 'PipelineRun'

    runId = dauphin.NonNull(dauphin.String)
    # Nullable because of historical runs
    pipelineSnapshotId = dauphin.String()
    status = dauphin.NonNull('PipelineRunStatus')
    pipeline = dauphin.NonNull('PipelineReference')
    pipelineName = dauphin.NonNull(dauphin.String)
    solidSelection = dauphin.List(dauphin.NonNull(dauphin.String))
    stats = dauphin.NonNull('PipelineRunStatsOrError')
    stepStats = dauphin.non_null_list('PipelineRunStepStats')
    computeLogs = dauphin.Field(
        dauphin.NonNull('ComputeLogs'),
        stepKey=dauphin.Argument(dauphin.NonNull(dauphin.String)),
        description=''' Compute logs are the stdout/stderr logs for a given solid step computation ''',
    )
    executionPlan = dauphin.Field('ExecutionPlan')
    stepKeysToExecute = dauphin.List(dauphin.NonNull(dauphin.String))
    runConfigYaml = dauphin.NonNull(dauphin.String)
    mode = dauphin.NonNull(dauphin.String)
    tags = dauphin.non_null_list('PipelineTag')
    rootRunId = dauphin.Field(dauphin.String)
    parentRunId = dauphin.Field(dauphin.String)
    canTerminate = dauphin.NonNull(dauphin.Boolean)
    assets = dauphin.non_null_list('Asset')

    def __init__(self, pipeline_run):
        super(DauphinPipelineRun, self).__init__(
            runId=pipeline_run.run_id, status=pipeline_run.status, mode=pipeline_run.mode
        )
        self._pipeline_run = check.inst_param(pipeline_run, 'pipeline_run', PipelineRun)

    def resolve_pipeline(self, graphene_info):
        return get_pipeline_reference_or_raise(graphene_info, self._pipeline_run)

    def resolve_pipelineName(self, _graphene_info):
        return self._pipeline_run.pipeline_name

    def resolve_solidSelection(self, _graphene_info):
        return self._pipeline_run.solid_selection

    def resolve_pipelineSnapshotId(self, _):
        return self._pipeline_run.pipeline_snapshot_id

    def resolve_stats(self, graphene_info):
        return get_stats(graphene_info, self.run_id)

    def resolve_stepStats(self, graphene_info):
        return get_step_stats(graphene_info, self.run_id)

    def resolve_computeLogs(self, graphene_info, stepKey):
        compute_logs_type = graphene_info.schema.type_named('ComputeLogs')
        return compute_logs_type(runId=self.run_id, stepKey=stepKey)

    def resolve_executionPlan(self, graphene_info):
        run = self._pipeline_run
        # Both snapshot ids are required to reconstruct a historical plan.
        if not run.execution_plan_snapshot_id or not run.pipeline_snapshot_id:
            return None

        from .execution import DauphinExecutionPlan

        instance = graphene_info.context.instance
        historical_pipeline = instance.get_historical_pipeline(run.pipeline_snapshot_id)
        plan_snapshot = instance.get_execution_plan_snapshot(run.execution_plan_snapshot_id)
        # Either lookup may come back empty; only build the plan when both hit.
        if not plan_snapshot or not historical_pipeline:
            return None
        return DauphinExecutionPlan(
            ExternalExecutionPlan(
                execution_plan_snapshot=plan_snapshot,
                represented_pipeline=historical_pipeline,
            )
        )

    def resolve_stepKeysToExecute(self, _):
        return self._pipeline_run.step_keys_to_execute

    def resolve_runConfigYaml(self, _graphene_info):
        # Serialize the stored config dict back to block-style YAML.
        return yaml.dump(self._pipeline_run.environment_dict, default_flow_style=False)

    def resolve_tags(self, graphene_info):
        tag_type = graphene_info.schema.type_named('PipelineTag')
        return [tag_type(key=k, value=v) for k, v in self._pipeline_run.tags.items()]

    def resolve_rootRunId(self, _):
        return self._pipeline_run.root_run_id

    def resolve_parentRunId(self, _):
        return self._pipeline_run.parent_run_id

    @property
    def run_id(self):
        # Convenience alias for the eagerly-set `runId` field.
        return self.runId

    def resolve_canTerminate(self, graphene_info):
        return graphene_info.context.instance.run_launcher.can_terminate(self.run_id)

    def resolve_assets(self, graphene_info):
        return get_assets_for_run_id(graphene_info, self.run_id)
class DauphinRunConfigSchema(dauphin.ObjectType):
    """Dauphin wrapper exposing the run-config schema of one pipeline mode.

    Holds a ``RepresentedPipeline`` plus a mode name; resolvers read the
    pipeline's config schema snapshot.
    """

    def __init__(self, represented_pipeline, mode):
        self._represented_pipeline = check.inst_param(
            represented_pipeline, 'represented_pipeline', RepresentedPipeline
        )
        self._mode = check.str_param(mode, 'mode')

    class Meta(object):
        name = 'RunConfigSchema'
        description = '''The run config schema represents the all the config type information given a certain execution selection and mode of execution of that selection. All config interactions (e.g. checking config validity, fetching all config types, fetching in a particular config type) should be done through this type '''

    rootConfigType = dauphin.Field(
        dauphin.NonNull('ConfigType'),
        description='''Fetch the root environment type. Concretely this is the type that is in scope at the root of configuration document for a particular execution selection. It is the type that is in scope initially with a blank config editor.''',
    )

    allConfigTypes = dauphin.Field(
        dauphin.non_null_list('ConfigType'),
        description='''Fetch all the named config types that are in the schema. Useful for things like a type browser UI, or for fetching all the types are in the scope of a document so that the index can be built for the autocompleting editor. ''',
    )

    isRunConfigValid = dauphin.Field(
        dauphin.NonNull('PipelineConfigValidationResult'),
        args={'runConfigData': dauphin.Argument('RunConfigData')},
        description='''Parse a particular environment config result. The return value either indicates that the validation succeeded by returning `PipelineConfigValidationValid` or that there are configuration errors by returning `PipelineConfigValidationInvalid' which containers a list errors so that can be rendered for the user''',
    )

    def resolve_allConfigTypes(self, _graphene_info):
        # Every config key in the snapshot, converted and ordered by key.
        snapshot = self._represented_pipeline.config_schema_snapshot
        dauphin_types = [
            to_dauphin_config_type(snapshot, config_key)
            for config_key in snapshot.all_config_keys
        ]
        dauphin_types.sort(key=lambda ct: ct.key)
        return dauphin_types

    def resolve_rootConfigType(self, _graphene_info):
        # The root config key comes from this schema's mode snapshot.
        mode_def_snap = self._represented_pipeline.get_mode_def_snap(self._mode)
        return to_dauphin_config_type(
            self._represented_pipeline.config_schema_snapshot,
            mode_def_snap.root_config_key,
        )

    def resolve_isRunConfigValid(self, graphene_info, **kwargs):
        # Missing runConfigData validates an empty document.
        run_config_data = kwargs.get('runConfigData', {})
        return resolve_is_run_config_valid(
            graphene_info,
            self._represented_pipeline,
            self._mode,
            run_config_data,
        )
class Arguments(object): executionParams = dauphin.NonNull('ExecutionParams') retries = dauphin.Argument('Retries')
class DauphinIPipelineSnapshotMixin(object):
    """Shared field/resolver implementation for IPipelineSnapshot types.

    Graphene does not play well with ABCs or overridable properties, so this
    mixin takes the place of an interface implementation: a class mixing it
    in must supply ``get_pipeline_index()``, and every resolver here goes
    through that hook.
    """

    def get_pipeline_index(self):
        raise NotImplementedError()

    name = dauphin.NonNull(dauphin.String)
    description = dauphin.String()
    runtime_types = dauphin.non_null_list('RuntimeType')
    solids = dauphin.non_null_list('Solid')
    runs = dauphin.non_null_list('PipelineRun')
    modes = dauphin.non_null_list('Mode')
    solid_handles = dauphin.Field(
        dauphin.non_null_list('SolidHandle'), parentHandleID=dauphin.String()
    )
    solid_handle = dauphin.Field(
        'SolidHandle',
        handleID=dauphin.Argument(dauphin.NonNull(dauphin.String)),
    )
    tags = dauphin.non_null_list('PipelineTag')

    def resolve_name(self, _):
        return self.get_pipeline_index().name

    def resolve_description(self, _):
        return self.get_pipeline_index().description

    def resolve_runtime_types(self, _graphene_info):
        # TODO yuhan rename runtime_type in schema
        pipeline_index = self.get_pipeline_index()
        # Only named dagster types are exposed; order by name for stability.
        dauphin_types = [
            to_dauphin_dagster_type(pipeline_index.pipeline_snapshot, type_snap.key)
            for type_snap in pipeline_index.get_dagster_type_snaps()
            if type_snap.name
        ]
        return sorted(dauphin_types, key=lambda dagster_type: dagster_type.name)

    def resolve_solids(self, _graphene_info):
        pipeline_index = self.get_pipeline_index()
        return build_dauphin_solids(pipeline_index, pipeline_index.dep_structure_index)

    def resolve_runs(self, graphene_info):
        run_type = graphene_info.schema.type_named('PipelineRun')
        runs_filter = PipelineRunsFilter(pipeline_name=self.get_pipeline_index().name)
        return [
            run_type(run)
            for run in graphene_info.context.instance.get_runs(filters=runs_filter)
        ]

    def resolve_modes(self, _):
        pipeline_snapshot = self.get_pipeline_index().pipeline_snapshot
        ordered_mode_snaps = sorted(
            pipeline_snapshot.mode_def_snaps, key=lambda item: item.name
        )
        return [
            DauphinMode(pipeline_snapshot.config_schema_snapshot, mode_snap)
            for mode_snap in ordered_mode_snaps
        ]

    def resolve_solid_handle(self, _graphene_info, handleID):
        return _get_solid_handles(self.get_pipeline_index()).get(handleID)

    def resolve_solid_handles(self, _graphene_info, **kwargs):
        handles = _get_solid_handles(self.get_pipeline_index())
        parent_id = kwargs.get('parentHandleID')
        if parent_id == "":
            # Empty string selects only top-level (parentless) handles.
            handles = {key: h for key, h in handles.items() if not h.parent}
        elif parent_id is not None:
            # Non-empty selector: only direct children of that handle.
            handles = {
                key: h
                for key, h in handles.items()
                if h.parent and h.parent.handleID.to_string() == parent_id
            }
        return [handles[key] for key in sorted(handles)]

    def resolve_tags(self, graphene_info):
        tag_type = graphene_info.schema.type_named('PipelineTag')
        snapshot_tags = self.get_pipeline_index().pipeline_snapshot.tags
        return [tag_type(key=k, value=v) for k, v in snapshot_tags.items()]
class DauphinPipeline(dauphin.ObjectType):
    """Dauphin wrapper over a ``PipelineDefinition``.

    ``name`` and ``description`` are set eagerly in ``__init__``; the rest
    resolve lazily from the wrapped definition.
    """

    class Meta(object):
        name = 'Pipeline'
        interfaces = (DauphinSolidContainer, DauphinPipelineReference)

    name = dauphin.NonNull(dauphin.String)
    description = dauphin.String()
    solids = dauphin.non_null_list('Solid')
    runtime_types = dauphin.non_null_list('RuntimeType')
    runs = dauphin.non_null_list('PipelineRun')
    modes = dauphin.non_null_list('Mode')
    solid_handles = dauphin.Field(
        dauphin.non_null_list('SolidHandle'), parentHandleID=dauphin.String()
    )
    presets = dauphin.non_null_list('PipelinePreset')
    solid_handle = dauphin.Field(
        'SolidHandle',
        handleID=dauphin.Argument(dauphin.NonNull(dauphin.String)),
    )

    def __init__(self, pipeline):
        super(DauphinPipeline, self).__init__(
            name=pipeline.name, description=pipeline.description
        )
        self._pipeline = check.inst_param(pipeline, 'pipeline', PipelineDefinition)

    def resolve_solids(self, _graphene_info):
        return build_dauphin_solids(self._pipeline.get_pipeline_snapshot(), self._pipeline)

    def resolve_runtime_types(self, _graphene_info):
        # TODO yuhan rename runtime_type in schema
        # Only named dagster types are exposed; order by name for stability.
        dauphin_types = [
            to_dauphin_dagster_type(self._pipeline.get_pipeline_snapshot(), dagster_type.key)
            for dagster_type in self._pipeline.all_dagster_types()
            if dagster_type.name
        ]
        return sorted(dauphin_types, key=lambda dagster_type: dagster_type.name)

    def resolve_runs(self, graphene_info):
        run_type = graphene_info.schema.type_named('PipelineRun')
        runs_filter = PipelineRunsFilter(pipeline_name=self._pipeline.name)
        return [
            run_type(run)
            for run in graphene_info.context.instance.get_runs(filters=runs_filter)
        ]

    def resolve_modes(self, _):
        ordered_modes = sorted(self._pipeline.mode_definitions, key=lambda item: item.name)
        return [
            DauphinMode(self._pipeline.get_config_schema_snapshot(), mode_def)
            for mode_def in ordered_modes
        ]

    def resolve_solid_handle(self, _graphene_info, handleID):
        return _get_solid_handles(self._pipeline).get(handleID)

    def resolve_solid_handles(self, _graphene_info, **kwargs):
        handles = _get_solid_handles(self._pipeline)
        parent_id = kwargs.get('parentHandleID')
        if parent_id == "":
            # Empty string selects only top-level (parentless) handles.
            handles = {key: h for key, h in handles.items() if not h.parent}
        elif parent_id is not None:
            # Non-empty selector: only direct children of that handle.
            handles = {
                key: h
                for key, h in handles.items()
                if h.parent and h.parent.handleID.to_string() == parent_id
            }
        return [handles[key] for key in sorted(handles)]

    def resolve_presets(self, _graphene_info):
        ordered_presets = sorted(self._pipeline.get_presets(), key=lambda item: item.name)
        return [
            DauphinPipelinePreset(preset, self._pipeline.name)
            for preset in ordered_presets
        ]
class DauphinQuery(dauphin.ObjectType):
    """Root GraphQL query type.

    Declares every top-level query field and wires each resolver to the
    corresponding fetch/get helper. Field and argument names here define the
    public GraphQL API and must not change.
    """

    class Meta(object):
        name = 'Query'

    version = dauphin.NonNull(dauphin.String)

    # --- Repository / pipeline lookup ---
    repositoriesOrError = dauphin.NonNull('RepositoriesOrError')
    repositoryOrError = dauphin.Field(
        dauphin.NonNull('RepositoryOrError'),
        repositorySelector=dauphin.NonNull('RepositorySelector'),
    )
    pipelineOrError = dauphin.Field(
        dauphin.NonNull('PipelineOrError'), params=dauphin.NonNull('PipelineSelector')
    )
    pipelineSnapshotOrError = dauphin.Field(
        dauphin.NonNull('PipelineSnapshotOrError'),
        snapshotId=dauphin.String(),
        activePipelineSelector=dauphin.Argument('PipelineSelector'),
    )

    # --- Scheduler / schedules ---
    scheduler = dauphin.Field(dauphin.NonNull('SchedulerOrError'))
    scheduleDefinitionOrError = dauphin.Field(
        dauphin.NonNull('ScheduleDefinitionOrError'),
        schedule_selector=dauphin.NonNull('ScheduleSelector'),
    )
    scheduleDefinitionsOrError = dauphin.Field(
        dauphin.NonNull('ScheduleDefinitionsOrError'),
        repositorySelector=dauphin.NonNull('RepositorySelector'),
    )
    scheduleStatesOrError = dauphin.Field(
        dauphin.NonNull('ScheduleStatesOrError'),
        repositorySelector=dauphin.NonNull('RepositorySelector'),
        withNoScheduleDefinition=dauphin.Boolean(),
    )

    # --- Partition sets ---
    partitionSetsOrError = dauphin.Field(
        dauphin.NonNull('PartitionSetsOrError'),
        repositorySelector=dauphin.NonNull('RepositorySelector'),
        pipelineName=dauphin.NonNull(dauphin.String),
    )
    partitionSetOrError = dauphin.Field(
        dauphin.NonNull('PartitionSetOrError'),
        repositorySelector=dauphin.NonNull('RepositorySelector'),
        partitionSetName=dauphin.String(),
    )

    # --- Runs and run groups ---
    pipelineRunsOrError = dauphin.Field(
        dauphin.NonNull('PipelineRunsOrError'),
        filter=dauphin.Argument('PipelineRunsFilter'),
        cursor=dauphin.String(),
        limit=dauphin.Int(),
    )
    pipelineRunOrError = dauphin.Field(
        dauphin.NonNull('PipelineRunOrError'), runId=dauphin.NonNull(dauphin.ID)
    )
    pipelineRunTags = dauphin.non_null_list('PipelineTagAndValues')
    runGroupOrError = dauphin.Field(
        dauphin.NonNull('RunGroupOrError'), runId=dauphin.NonNull(dauphin.ID)
    )
    runGroupsOrError = dauphin.Field(
        dauphin.NonNull('RunGroupsOrError'),
        filter=dauphin.Argument('PipelineRunsFilter'),
        cursor=dauphin.String(),
        limit=dauphin.Int(),
    )

    # --- Config validation / execution planning ---
    isPipelineConfigValid = dauphin.Field(
        dauphin.NonNull('PipelineConfigValidationResult'),
        args={
            'pipeline': dauphin.Argument(dauphin.NonNull('PipelineSelector')),
            'runConfigData': dauphin.Argument('RunConfigData'),
            'mode': dauphin.Argument(dauphin.NonNull(dauphin.String)),
        },
    )
    executionPlanOrError = dauphin.Field(
        dauphin.NonNull('ExecutionPlanOrError'),
        args={
            'pipeline': dauphin.Argument(dauphin.NonNull('PipelineSelector')),
            'runConfigData': dauphin.Argument('RunConfigData'),
            'mode': dauphin.Argument(dauphin.NonNull(dauphin.String)),
        },
    )
    runConfigSchemaOrError = dauphin.Field(
        dauphin.NonNull('RunConfigSchemaOrError'),
        args={
            'selector': dauphin.Argument(dauphin.NonNull('PipelineSelector')),
            'mode': dauphin.Argument(dauphin.String),
        },
        description='''Fetch an environment schema given an execution selection and a mode. See the descripton on RunConfigSchema for more information.''',
    )

    # --- Instance and assets ---
    instance = dauphin.NonNull('Instance')
    assetsOrError = dauphin.Field(dauphin.NonNull('AssetsOrError'))
    assetOrError = dauphin.Field(
        dauphin.NonNull('AssetOrError'),
        assetKey=dauphin.Argument(dauphin.NonNull('AssetKeyInput')),
    )

    def resolve_repositoriesOrError(self, graphene_info):
        return fetch_repositories(graphene_info)

    def resolve_repositoryOrError(self, graphene_info, **kwargs):
        return fetch_repository(
            graphene_info,
            RepositorySelector.from_graphql_input(kwargs.get('repositorySelector')),
        )

    def resolve_pipelineSnapshotOrError(self, graphene_info, **kwargs):
        # Exactly one of snapshotId / activePipelineSelector must be supplied;
        # check.invariant raises on violation.
        snapshot_id_arg = kwargs.get('snapshotId')
        pipeline_selector_arg = kwargs.get('activePipelineSelector')
        check.invariant(
            not (snapshot_id_arg and pipeline_selector_arg),
            'Must only pass one of snapshotId or activePipelineSelector',
        )
        check.invariant(
            snapshot_id_arg or pipeline_selector_arg,
            'Must set one of snapshotId or activePipelineSelector',
        )

        if pipeline_selector_arg:
            pipeline_selector = pipeline_selector_from_graphql(
                graphene_info.context, kwargs['activePipelineSelector']
            )
            return get_pipeline_snapshot_or_error_from_pipeline_selector(
                graphene_info, pipeline_selector
            )
        else:
            return get_pipeline_snapshot_or_error_from_snapshot_id(
                graphene_info, snapshot_id_arg
            )

    def resolve_version(self, graphene_info):
        return graphene_info.context.version

    def resolve_scheduler(self, graphene_info):
        return get_scheduler_or_error(graphene_info)

    def resolve_scheduleDefinitionOrError(self, graphene_info, schedule_selector):
        return get_schedule_definition_or_error(
            graphene_info, ScheduleSelector.from_graphql_input(schedule_selector)
        )

    def resolve_scheduleDefinitionsOrError(self, graphene_info, **kwargs):
        return get_schedule_definitions_or_error(
            graphene_info,
            RepositorySelector.from_graphql_input(kwargs.get('repositorySelector')),
        )

    def resolve_scheduleStatesOrError(self, graphene_info, **kwargs):
        return get_schedule_states_or_error(
            graphene_info,
            RepositorySelector.from_graphql_input(kwargs.get('repositorySelector')),
            kwargs.get('withNoScheduleDefinition'),
        )

    def resolve_pipelineOrError(self, graphene_info, **kwargs):
        return get_pipeline_or_error(
            graphene_info,
            pipeline_selector_from_graphql(graphene_info.context, kwargs['params']),
        )

    def resolve_pipelineRunsOrError(self, graphene_info, **kwargs):
        # The optional GraphQL filter input is converted before querying runs.
        filters = kwargs.get('filter')
        if filters is not None:
            filters = filters.to_selector()

        return graphene_info.schema.type_named('PipelineRuns')(
            results=get_runs(
                graphene_info, filters, kwargs.get('cursor'), kwargs.get('limit')
            )
        )

    def resolve_pipelineRunOrError(self, graphene_info, runId):
        return get_run_by_id(graphene_info, runId)

    def resolve_runGroupsOrError(self, graphene_info, **kwargs):
        # Same optional-filter conversion as resolve_pipelineRunsOrError.
        filters = kwargs.get('filter')
        if filters is not None:
            filters = filters.to_selector()

        return graphene_info.schema.type_named('RunGroupsOrError')(
            results=get_run_groups(
                graphene_info, filters, kwargs.get('cursor'), kwargs.get('limit')
            )
        )

    def resolve_partitionSetsOrError(self, graphene_info, **kwargs):
        return get_partition_sets_or_error(
            graphene_info,
            RepositorySelector.from_graphql_input(kwargs.get('repositorySelector')),
            kwargs.get('pipelineName'),
        )

    def resolve_partitionSetOrError(self, graphene_info, **kwargs):
        return get_partition_set(
            graphene_info,
            RepositorySelector.from_graphql_input(kwargs.get('repositorySelector')),
            kwargs.get('partitionSetName'),
        )

    def resolve_pipelineRunTags(self, graphene_info):
        return get_run_tags(graphene_info)

    def resolve_runGroupOrError(self, graphene_info, runId):
        return get_run_group(graphene_info, runId)

    def resolve_isPipelineConfigValid(self, graphene_info, pipeline, **kwargs):
        return validate_pipeline_config(
            graphene_info,
            pipeline_selector_from_graphql(graphene_info.context, pipeline),
            kwargs.get('runConfigData'),
            kwargs.get('mode'),
        )

    def resolve_executionPlanOrError(self, graphene_info, pipeline, **kwargs):
        return get_execution_plan(
            graphene_info,
            pipeline_selector_from_graphql(graphene_info.context, pipeline),
            kwargs.get('runConfigData'),
            kwargs.get('mode'),
        )

    def resolve_runConfigSchemaOrError(self, graphene_info, **kwargs):
        return resolve_run_config_schema_or_error(
            graphene_info,
            pipeline_selector_from_graphql(graphene_info.context, kwargs['selector']),
            kwargs.get('mode'),
        )

    def resolve_instance(self, graphene_info):
        return graphene_info.schema.type_named('Instance')(graphene_info.context.instance)

    def resolve_assetsOrError(self, graphene_info):
        return get_assets(graphene_info)

    def resolve_assetOrError(self, graphene_info, **kwargs):
        return get_asset(graphene_info, AssetKey.from_graphql_input(kwargs['assetKey']))