def get_pipeline_run_observable(graphene_info, run_id, after=None):
    """Return an Observable streaming log events for the pipeline run `run_id`.

    If the run cannot be found, the observable emits a single
    'PipelineRunLogsSubscriptionFailure' payload instead of raising.

    Args:
        graphene_info (ResolveInfo): GraphQL resolve context; its
            `context.instance` is the DagsterInstance queried for the run.
        run_id (str): Id of the run to subscribe to.
        after (Optional[int]): Cursor — only events after this cursor are
            emitted (passed through as `after_cursor`).
    """
    check.inst_param(graphene_info, 'graphene_info', ResolveInfo)
    check.str_param(run_id, 'run_id')
    check.opt_int_param(after, 'after')

    instance = graphene_info.context.instance
    run = instance.get_run_by_id(run_id)

    if not run:
        # Surface the missing run as a subscription failure payload rather
        # than erroring the subscription itself.
        def _get_error_observable(observer):
            observer.on_next(
                graphene_info.schema.type_named('PipelineRunLogsSubscriptionFailure')(
                    missingRunId=run_id,
                    message='Could not load run with id {}'.format(run_id),
                )
            )

        return Observable.create(_get_error_observable)  # pylint: disable=E1101

    # pylint: disable=E1101
    return Observable.create(
        PipelineRunObservableSubscribe(instance, run_id, after_cursor=after)
    ).map(
        lambda events: graphene_info.schema.type_named('PipelineRunLogsSubscriptionSuccess')(
            run=graphene_info.schema.type_named('PipelineRun')(run),
            messages=[from_event_record(event, run.pipeline_name) for event in events],
        )
    )
def get_pipeline_run_observable(graphene_info, run_id, after=None):
    """Return an Observable streaming log events for the pipeline run `run_id`.

    Emits a 'PipelineRunLogsSubscriptionMissingRunIdFailure' payload when the
    run cannot be loaded; otherwise maps each batch of event records into a
    'PipelineRunLogsSubscriptionSuccess' payload.

    Args:
        graphene_info (ResolveInfo): GraphQL resolve context; its
            `context.instance` is the DagsterInstance queried for the run.
        run_id (str): Id of the run to subscribe to.
        after (Optional[str]): Cursor — only events after this cursor are
            emitted (passed through as `after_cursor`).
    """
    check.inst_param(graphene_info, 'graphene_info', ResolveInfo)
    check.str_param(run_id, 'run_id')
    check.opt_str_param(after, 'after')

    instance = graphene_info.context.instance
    run = instance.get_run(run_id)

    if not run:
        # Surface the missing run as a subscription failure payload rather
        # than erroring the subscription itself.
        def _get_error_observable(observer):
            observer.on_next(
                graphene_info.schema.type_named(
                    'PipelineRunLogsSubscriptionMissingRunIdFailure'
                )(missingRunId=run_id)
            )

        return Observable.create(_get_error_observable)  # pylint: disable=E1101

    # Rebuild the execution plan for the stored run so events can be resolved
    # against their plan steps.
    pipeline = get_dauphin_pipeline_from_selector(graphene_info, run.selector)
    execution_plan = create_execution_plan(
        pipeline.get_dagster_pipeline(), run.environment_dict, RunConfig(mode=run.mode)
    )

    # pylint: disable=E1101
    return Observable.create(
        PipelineRunObservableSubscribe(instance, run_id, after_cursor=after)
    ).map(
        lambda events: graphene_info.schema.type_named('PipelineRunLogsSubscriptionSuccess')(
            runId=run_id,
            messages=[
                from_event_record(graphene_info, event, pipeline, execution_plan)
                for event in events
            ],
        )
    )
def get_observable(pipeline):
    """Map the run's event stream (after the cursor) into success payloads.

    NOTE(review): closure — `run`, `after`, `graphene_info`, and `run_id` are
    captured from an enclosing scope not visible in this chunk; presumably the
    surrounding subscription resolver. Confirm against the full file.
    """
    return run.observable_after_cursor(after).map(
        lambda events: graphene_info.schema.type_named('PipelineRunLogsSubscriptionSuccess')(
            runId=run_id,
            messages=[
                from_event_record(graphene_info, event, pipeline, run.execution_plan)
                for event in events
            ],
        )
    )
def get_pipeline_run_observable(graphene_info, run_id, after=None):
    """Return an Observable streaming log events for the pipeline run `run_id`.

    Failure modes are surfaced as 'PipelineRunLogsSubscriptionFailure'
    payloads (missing run, non-watchable event log storage) rather than
    exceptions; a non-reconstructable pipeline yields an empty observable.

    Args:
        graphene_info (ResolveInfo): GraphQL resolve context; its
            `context.instance` is the DagsterInstance queried for the run.
        run_id (str): Id of the run to subscribe to.
        after (Optional[int]): Cursor — only events after this cursor are
            emitted (passed through as `after_cursor`).
    """
    check.inst_param(graphene_info, 'graphene_info', ResolveInfo)
    check.str_param(run_id, 'run_id')
    check.opt_int_param(after, 'after')

    instance = graphene_info.context.instance
    run = instance.get_run(run_id)

    if not run:
        def _get_error_observable(observer):
            observer.on_next(
                graphene_info.schema.type_named('PipelineRunLogsSubscriptionFailure')(
                    missingRunId=run_id,
                    message='Could not load run with id {}'.format(run_id),
                )
            )

        return Observable.create(_get_error_observable)  # pylint: disable=E1101

    if not instance.can_watch_events:
        # Storage backends that cannot be watched can't drive a live
        # subscription; report that explicitly to the client.
        def _get_error_observable(observer):
            observer.on_next(
                graphene_info.schema.type_named('PipelineRunLogsSubscriptionFailure')(
                    message='Event log storage on current DagsterInstance is not watchable.'
                )
            )

        return Observable.create(_get_error_observable)  # pylint: disable=E1101

    pipeline = get_dauphin_pipeline_reference_from_selector(graphene_info, run.selector)

    # Local import — presumably avoids a circular import with the schema
    # module; kept function-scoped as in the original.
    from ..schema.pipelines import DauphinPipeline

    if not isinstance(pipeline, DauphinPipeline):
        # The pipeline reference could not be resolved to a concrete
        # pipeline; nothing meaningful to stream.
        return Observable.empty()  # pylint: disable=no-member

    execution_plan = create_execution_plan(
        pipeline.get_dagster_pipeline(), run.environment_dict, RunConfig(mode=run.mode)
    )

    # pylint: disable=E1101
    return Observable.create(
        PipelineRunObservableSubscribe(instance, run_id, after_cursor=after)
    ).map(
        lambda events: graphene_info.schema.type_named('PipelineRunLogsSubscriptionSuccess')(
            runId=run_id,
            messages=[
                from_event_record(graphene_info, event, pipeline, execution_plan)
                for event in events
            ],
        )
    )
def get_observable(pipeline):
    """Rebuild the run's execution plan and map its event stream (after the
    cursor) into 'PipelineRunLogsSubscriptionSuccess' payloads.

    NOTE(review): closure — `run`, `after`, `graphene_info`, and `run_id` are
    captured from an enclosing scope not visible in this chunk; presumably the
    surrounding subscription resolver. Confirm against the full file.
    """
    execution_plan = create_execution_plan(
        pipeline.get_dagster_pipeline(), run.config, RunConfig(mode=run.mode)
    )
    return run.observable_after_cursor(after).map(
        lambda events: graphene_info.schema.type_named('PipelineRunLogsSubscriptionSuccess')(
            runId=run_id,
            messages=[
                from_event_record(graphene_info, event, pipeline, execution_plan)
                for event in events
            ],
        )
    )
def get_pipeline_run_observable(graphene_info, run_id, after=None):
    """Return an Observable streaming log events for the pipeline run `run_id`.

    A missing run is reported as a 'PipelineRunLogsSubscriptionFailure'
    payload. An ExecutionPlanIndex is built only when the pipeline resolves to
    a concrete DauphinPipeline AND the stored config is still valid; otherwise
    events are emitted with `execution_plan_index=None`.

    Args:
        graphene_info (ResolveInfo): GraphQL resolve context; its
            `context.instance` is the DagsterInstance queried for the run.
        run_id (str): Id of the run to subscribe to.
        after (Optional[int]): Cursor — only events after this cursor are
            emitted (passed through as `after_cursor`).
    """
    check.inst_param(graphene_info, 'graphene_info', ResolveInfo)
    check.str_param(run_id, 'run_id')
    check.opt_int_param(after, 'after')

    instance = graphene_info.context.instance
    run = instance.get_run_by_id(run_id)

    if not run:
        def _get_error_observable(observer):
            observer.on_next(
                graphene_info.schema.type_named('PipelineRunLogsSubscriptionFailure')(
                    missingRunId=run_id,
                    message='Could not load run with id {}'.format(run_id),
                )
            )

        return Observable.create(_get_error_observable)  # pylint: disable=E1101

    pipeline_ref = get_dauphin_pipeline_reference_from_selector(graphene_info, run.selector)
    execution_plan = None
    execution_plan_index = None

    if isinstance(pipeline_ref, DauphinPipeline):
        pipeline_def = get_pipeline_def_from_selector(graphene_info, run.selector)
        # Guard against stale/invalid stored config: only build the plan when
        # the environment dict still validates against the pipeline.
        if is_config_valid(pipeline_def, run.environment_dict, run.mode):
            execution_plan = create_execution_plan(
                pipeline_def, run.environment_dict, mode=run.mode
            )
            execution_plan_index = ExecutionPlanIndex.from_plan_and_index(
                execution_plan, pipeline_def.get_pipeline_index()
            )

    # pylint: disable=E1101
    return Observable.create(
        PipelineRunObservableSubscribe(instance, run_id, after_cursor=after)
    ).map(
        lambda events: graphene_info.schema.type_named('PipelineRunLogsSubscriptionSuccess')(
            run=graphene_info.schema.type_named('PipelineRun')(run),
            messages=[
                from_event_record(graphene_info, event, pipeline_ref, execution_plan_index)
                for event in events
            ],
        )
    )