def _core_execute_run(recon_pipeline, pipeline_run, instance):
    """Yield the event stream for ``pipeline_run``, reporting framework errors.

    Events from ``execute_run_iterator`` are yielded through. If execution
    fails with what looks like a framework error (anything other than a pure
    interrupt), an engine event is yielded and the run is marked failed on the
    instance.

    Args:
        recon_pipeline (ReconstructablePipeline): pipeline to execute.
        pipeline_run (PipelineRun): the run record being executed.
        instance (DagsterInstance): instance to report events/failures to.
    """
    check.inst_param(recon_pipeline, "recon_pipeline", ReconstructablePipeline)
    check.inst_param(pipeline_run, "pipeline_run", PipelineRun)
    check.inst_param(instance, "instance", DagsterInstance)
    try:
        for event in execute_run_iterator(recon_pipeline, pipeline_run, instance):
            yield event
    except DagsterSubprocessError as err:
        # Subprocess failures caused purely by KeyboardInterrupt indicate
        # termination, not a framework bug, so they are not reported.
        # (Idiom fix: pass a generator to all() instead of building a list.)
        if not all(
            err_info.cls_name == "KeyboardInterrupt"
            for err_info in err.subprocess_error_infos
        ):
            yield instance.report_engine_event(
                "An exception was thrown during execution that is likely a framework error, "
                "rather than an error in user code.",
                pipeline_run,
                EngineEventData.engine_error(
                    serializable_error_info_from_exc_info(sys.exc_info())),
            )
            instance.report_run_failed(pipeline_run)
    except Exception:  # pylint: disable=broad-except
        yield instance.report_engine_event(
            "An exception was thrown during execution that is likely a framework error, "
            "rather than an error in user code.",
            pipeline_run,
            EngineEventData.engine_error(
                serializable_error_info_from_exc_info(sys.exc_info())),
        )
        instance.report_run_failed(pipeline_run)
def run_one(self, instance):
    """Pop the next queued run and execute it, returning the list of events.

    Precondition: the queue is non-empty (asserted).
    """
    assert len(self._queue) > 0
    run = self._queue.pop(0)
    pipeline_def = define_repository().get_pipeline(run.pipeline_name)
    # list(...) instead of a pass-through comprehension (same result, clearer).
    return list(
        execute_run_iterator(InMemoryExecutablePipeline(pipeline_def), run, instance)
    )
def execute_pipeline(self, _, pipeline, pipeline_run, instance):
    """Execute ``pipeline_run`` synchronously, collecting all events.

    Returns:
        PipelineExecutionResult wrapping the run id and the event list.
    """
    check.inst_param(pipeline, 'pipeline', PipelineDefinition)
    # Materialize the iterator directly rather than a manual append loop.
    event_list = list(execute_run_iterator(pipeline, pipeline_run, instance))
    return PipelineExecutionResult(pipeline, pipeline_run.run_id, event_list, lambda: None)
def _execute_run_command_body(task_id, recon_pipeline, pipeline_run_id, instance,
                              write_stream_fn):
    """Execute a pipeline run for a task, streaming events via ``write_stream_fn``.

    Reports engine events to the instance for start, interrupt, framework
    error, and completion. Subprocess errors caused purely by interrupts are
    not reported as framework errors.
    """
    # we need to send but the fact that we have loaded the args so the calling
    # process knows it is safe to clean up the temp input file
    # write_stream_fn(ExecuteRunArgsLoadComplete())
    pipeline_run = instance.get_run_by_id(pipeline_run_id)
    instance.report_engine_event(
        message=f"Pipeline execution starting (task: {task_id})",
        pipeline_run=pipeline_run,
    )
    try:
        # allow_join_result: presumably permits joining sub-results from within
        # this task context (looks Celery-related) -- TODO confirm.
        with allow_join_result():
            for event in execute_run_iterator(recon_pipeline, pipeline_run, instance):
                write_stream_fn(event)
    except KeyboardInterrupt:
        instance.report_engine_event(
            message="Pipeline execution terminated by interrupt",
            pipeline_run=pipeline_run,
        )
    except DagsterSubprocessError as err:
        # Only report when something other than an interrupt caused the failure.
        if not all([
                err_info.cls_name == "KeyboardInterrupt"
                for err_info in err.subprocess_error_infos
        ]):
            instance.report_engine_event(
                "An exception was thrown during execution that is likely a framework error, "
                "rather than an error in user code.",
                pipeline_run,
                EngineEventData.engine_error(
                    serializable_error_info_from_exc_info(sys.exc_info())),
            )
    except Exception as exc:  # pylint: disable=broad-except
        # A CheckError mentioning the run already being STARTED is swallowed --
        # presumably the run was picked up elsewhere; verify against callers.
        if isinstance(
                exc, CheckError
        ) and 'in state PipelineRunStatus.STARTED, expected PipelineRunStatus.NOT_STARTED' in str(
                exc):
            # TODO Should this log?
            return
        instance.report_engine_event(
            "An exception was thrown during execution that is likely a framework error, "
            "rather than an error in user code.",
            pipeline_run,
            EngineEventData.engine_error(
                serializable_error_info_from_exc_info(sys.exc_info())),
        )
    finally:
        # Always emit a completion marker, even on the early-return path above
        # (finally still runs before the return takes effect).
        instance.report_engine_event(
            "Task for pipeline completed (task: {task_id}).".format(task_id=task_id),
            pipeline_run,
        )
def _execute_run_command_body(
    output_file,
    recon_repo,
    pipeline_run_id,
    instance_ref_json,
):
    """Execute a pipeline run in this process, streaming events over IPC.

    Sends each event to the IPC stream at ``output_file``; engine events for
    process start/exit and framework errors are reported to the instance.
    """
    with ipc_write_stream(output_file) as stream:
        instance = _get_instance(stream, instance_ref_json)
        if not instance:
            # _get_instance reports its own failure to the stream; nothing to do.
            return
        pipeline_run = instance.get_run_by_id(pipeline_run_id)
        pid = os.getpid()
        instance.report_engine_event(
            'Started process for pipeline (pid: {pid}).'.format(pid=pid),
            pipeline_run,
            EngineEventData.in_process(pid, marker_end='cli_api_subprocess_init'),
        )
        recon_pipeline = _recon_pipeline(stream, recon_repo, pipeline_run)
        # Perform setup so that termination of the execution will unwind and report to the
        # instance correctly
        setup_interrupt_support()
        try:
            for event in execute_run_iterator(recon_pipeline, pipeline_run, instance):
                stream.send(event)
        except DagsterSubprocessError as err:
            # Interrupt-only subprocess errors are termination, not framework bugs.
            if not all([
                    err_info.cls_name == 'KeyboardInterrupt'
                    for err_info in err.subprocess_error_infos
            ]):
                instance.report_engine_event(
                    'An exception was thrown during execution that is likely a framework error, '
                    'rather than an error in user code.',
                    pipeline_run,
                    EngineEventData.engine_error(
                        serializable_error_info_from_exc_info(sys.exc_info())),
                )
        except Exception:  # pylint: disable=broad-except
            instance.report_engine_event(
                'An exception was thrown during execution that is likely a framework error, '
                'rather than an error in user code.',
                pipeline_run,
                EngineEventData.engine_error(
                    serializable_error_info_from_exc_info(sys.exc_info())),
            )
        finally:
            instance.report_engine_event(
                'Process for pipeline exited (pid: {pid}).'.format(pid=pid),
                pipeline_run,
            )
def _in_mp_process(handle, pipeline_run, instance_ref, term_event):
    """
    Execute pipeline using message queue as a transport
    """
    run_id = pipeline_run.run_id
    pipeline_name = pipeline_run.pipeline_name
    # Rehydrate the instance in this child process from its serializable ref.
    instance = DagsterInstance.from_ref(instance_ref)
    instance.handle_new_event(
        build_process_started_event(run_id, pipeline_name, os.getpid()))
    # Watch term_event so an external terminate request unwinds execution.
    start_termination_thread(term_event)
    try:
        handle.build_repository_definition()
        pipeline_def = handle.with_pipeline_name(
            pipeline_name).build_pipeline_definition()
    except Exception:  # pylint: disable=broad-except
        # Loading user code failed; surface it as a synthetic pipeline error.
        repo_error = sys.exc_info()
        instance.handle_new_event(
            build_synthetic_pipeline_error_record(
                run_id, serializable_error_info_from_exc_info(repo_error),
                pipeline_name))
        return
    try:
        event_list = []
        for event in execute_run_iterator(
                pipeline_def.build_sub_pipeline(
                    pipeline_run.selector.solid_subset),
                pipeline_run,
                instance,
        ):
            event_list.append(event)
        return PipelineExecutionResult(pipeline_def, run_id, event_list,
                                       lambda: None)
    # Add a DagsterEvent for unexpected exceptions
    # Explicitly ignore KeyboardInterrupts since they are used for termination
    except DagsterSubprocessError as err:
        if not all([
                err_info.cls_name == 'KeyboardInterrupt'
                for err_info in err.subprocess_error_infos
        ]):
            error_info = serializable_error_info_from_exc_info(sys.exc_info())
            instance.handle_new_event(
                build_synthetic_pipeline_error_record(run_id, error_info,
                                                      pipeline_name))
    except Exception:  # pylint: disable=broad-except
        error_info = serializable_error_info_from_exc_info(sys.exc_info())
        instance.handle_new_event(
            build_synthetic_pipeline_error_record(run_id, error_info,
                                                  pipeline_name))
    finally:
        # Always record process exit, whatever happened above.
        instance.handle_new_event(
            build_process_exited_event(run_id, pipeline_name, os.getpid()))
def test_execute_run_iterator():
    """Closing the run iterator mid-run yields a pipeline failure and still
    tears down resources (per the CLEANING A/B log assertions below)."""
    records = []

    def event_callback(record):
        assert isinstance(record, EventRecord)
        records.append(record)

    instance = DagsterInstance.local_temp()
    pipeline = PipelineDefinition(
        name='basic_resource_pipeline',
        solid_defs=[resource_solid],
        mode_defs=[
            ModeDefinition(
                resource_defs={'a': resource_a, 'b': resource_b},
                logger_defs={'callback': construct_event_logger(event_callback)},
            )
        ],
    )
    pipeline_run = instance.create_run(
        PipelineRun(
            pipeline_name=pipeline.name,
            run_id=make_new_run_id(),
            environment_dict={'loggers': {'callback': {}}},
            mode='default',
            status=PipelineRunStatus.NOT_STARTED,
        ))
    iterator = execute_run_iterator(pipeline, pipeline_run, instance=instance)
    # Advance up to the first STEP_START, then abandon the run via close().
    event_type = None
    while event_type != 'STEP_START':
        event = next(iterator)
        event_type = event.event_type_value
    iterator.close()
    events = [record.dagster_event for record in records if record.is_dagster_event]
    messages = [record.user_message for record in records if not record.is_dagster_event]
    # The abandoned run must be reported as failed and resources cleaned up.
    assert len([event for event in events if event.is_pipeline_failure]) > 0
    assert len([message for message in messages if message == 'CLEANING A']) > 0
    assert len([message for message in messages if message == 'CLEANING B']) > 0
def test_execute_run_iterator():
    """Closing the run iterator mid-run yields a pipeline failure and still
    tears down resources (per the CLEANING A/B log assertions below)."""
    records = []

    def event_callback(record):
        assert isinstance(record, EventRecord)
        records.append(record)

    instance = DagsterInstance.local_temp()
    pipeline_def = PipelineDefinition(
        name="basic_resource_pipeline",
        solid_defs=[resource_solid],
        mode_defs=[
            ModeDefinition(
                resource_defs={"a": resource_a, "b": resource_b},
                logger_defs={"callback": construct_event_logger(event_callback)},
            )
        ],
    )
    pipeline_run = instance.create_run_for_pipeline(
        pipeline_def=pipeline_def,
        run_config={"loggers": {"callback": {}}},
        mode="default",
    )
    iterator = execute_run_iterator(
        InMemoryExecutablePipeline(pipeline_def), pipeline_run, instance=instance)
    # Advance up to the first STEP_START, then abandon the run via close().
    event_type = None
    while event_type != "STEP_START":
        event = next(iterator)
        event_type = event.event_type_value
    iterator.close()
    events = [record.dagster_event for record in records if record.is_dagster_event]
    messages = [record.user_message for record in records if not record.is_dagster_event]
    # The abandoned run must be reported as failed and resources cleaned up.
    assert len([event for event in events if event.is_pipeline_failure]) > 0
    assert len([message for message in messages if message == "CLEANING A"]) > 0
    assert len([message for message in messages if message == "CLEANING B"]) > 0
def _execute_run_command_body(recon_pipeline, pipeline_run_id, instance,
                              write_stream_fn):
    """Execute a pipeline run in this process, streaming events via
    ``write_stream_fn``; reports process start/exit and errors to the instance.
    """
    # we need to send but the fact that we have loaded the args so the calling
    # process knows it is safe to clean up the temp input file
    write_stream_fn(ExecuteRunArgsLoadComplete())
    pipeline_run = instance.get_run_by_id(pipeline_run_id)
    pid = os.getpid()
    instance.report_engine_event(
        "Started process for pipeline (pid: {pid}).".format(pid=pid),
        pipeline_run,
        EngineEventData.in_process(pid, marker_end="cli_api_subprocess_init"),
    )
    # Perform setup so that termination of the execution will unwind and report to the
    # instance correctly
    setup_windows_interrupt_support()
    try:
        for event in execute_run_iterator(recon_pipeline, pipeline_run, instance):
            write_stream_fn(event)
    except KeyboardInterrupt:
        instance.report_engine_event(
            message="Pipeline execution terminated by interrupt",
            pipeline_run=pipeline_run,
        )
    except DagsterSubprocessError as err:
        # Interrupt-only subprocess errors are termination, not framework bugs.
        if not all([
                err_info.cls_name == "KeyboardInterrupt"
                for err_info in err.subprocess_error_infos
        ]):
            instance.report_engine_event(
                "An exception was thrown during execution that is likely a framework error, "
                "rather than an error in user code.",
                pipeline_run,
                EngineEventData.engine_error(
                    serializable_error_info_from_exc_info(sys.exc_info())),
            )
    except Exception:  # pylint: disable=broad-except
        instance.report_engine_event(
            "An exception was thrown during execution that is likely a framework error, "
            "rather than an error in user code.",
            pipeline_run,
            EngineEventData.engine_error(
                serializable_error_info_from_exc_info(sys.exc_info())),
        )
    finally:
        # Always emit the exit marker, regardless of how execution ended.
        instance.report_engine_event(
            "Process for pipeline exited (pid: {pid}).".format(pid=pid),
            pipeline_run,
        )
def execute_pipeline(self, _, pipeline, pipeline_run, instance):
    """Execute ``pipeline_run``, tracking its id in ``self._active`` while it runs.

    The run id is now removed from the active set even if iteration raises
    (previously an exception left a stale entry in ``self._active``).

    Returns:
        PipelineExecutionResult wrapping the run id and the event list.
    """
    check.inst_param(pipeline, 'pipeline', PipelineDefinition)
    check.inst_param(pipeline_run, 'pipeline_run', PipelineRun)
    check.inst_param(instance, 'instance', DagsterInstance)
    event_list = []
    self._active.add(pipeline_run.run_id)
    try:
        for event in execute_run_iterator(pipeline, pipeline_run, instance):
            event_list.append(event)
    finally:
        # Guarantee cleanup of the active-run bookkeeping on any exit path.
        self._active.remove(pipeline_run.run_id)
    return PipelineExecutionResult(pipeline, pipeline_run.run_id, event_list, lambda: None)
def _execute_run_command_body(output_file, recon_pipeline, pipeline_run_id, instance_ref):
    """Execute a pipeline run in this process, streaming events over IPC to
    ``output_file``; reports process start/exit and errors to the instance."""
    with ipc_write_stream(output_file) as stream:
        # we need to send but the fact that we have loaded the args so the calling
        # process knows it is safe to clean up the temp input file
        stream.send(ExecuteRunArgsLoadComplete())
        # Rehydrate the instance in this child process from its serializable ref.
        instance = DagsterInstance.from_ref(instance_ref)
        pipeline_run = instance.get_run_by_id(pipeline_run_id)
        pid = os.getpid()
        instance.report_engine_event(
            'Started process for pipeline (pid: {pid}).'.format(pid=pid),
            pipeline_run,
            EngineEventData.in_process(pid, marker_end='cli_api_subprocess_init'),
        )
        # Perform setup so that termination of the execution will unwind and report to the
        # instance correctly
        setup_interrupt_support()
        try:
            for event in execute_run_iterator(recon_pipeline, pipeline_run, instance):
                stream.send(event)
        except DagsterSubprocessError as err:
            # Interrupt-only subprocess errors are termination, not framework bugs.
            if not all([
                    err_info.cls_name == 'KeyboardInterrupt'
                    for err_info in err.subprocess_error_infos
            ]):
                instance.report_engine_event(
                    'An exception was thrown during execution that is likely a framework error, '
                    'rather than an error in user code.',
                    pipeline_run,
                    EngineEventData.engine_error(
                        serializable_error_info_from_exc_info(sys.exc_info())),
                )
        except Exception:  # pylint: disable=broad-except
            instance.report_engine_event(
                'An exception was thrown during execution that is likely a framework error, '
                'rather than an error in user code.',
                pipeline_run,
                EngineEventData.engine_error(
                    serializable_error_info_from_exc_info(sys.exc_info())),
            )
        finally:
            # Always emit the exit marker, regardless of how execution ended.
            instance.report_engine_event(
                'Process for pipeline exited (pid: {pid}).'.format(pid=pid),
                pipeline_run,
            )
def core_execute_run(
    recon_pipeline: ReconstructablePipeline,
    pipeline_run: PipelineRun,
    instance: DagsterInstance,
    resume_from_failure: bool = False,
) -> Generator[DagsterEvent, None, None]:
    """Yield the event stream for ``pipeline_run``, reporting failures first.

    On any failure path the relevant engine event is yielded, the run is
    marked failed (if not already finished), and the exception is re-raised so
    the caller's process can exit with an error.

    Args:
        recon_pipeline: pipeline to execute.
        pipeline_run: the run record being executed.
        instance: instance to report events/failures to.
        resume_from_failure: forwarded to ``execute_run_iterator`` to resume a
            run after a run-worker crash.
    """
    check.inst_param(recon_pipeline, "recon_pipeline", ReconstructablePipeline)
    check.inst_param(pipeline_run, "pipeline_run", PipelineRun)
    check.inst_param(instance, "instance", DagsterInstance)

    # try to load the pipeline definition early
    try:
        recon_pipeline.get_definition()
    except Exception:
        # User code failed to load; report, fail the run, then propagate.
        yield instance.report_engine_event(
            "Could not load pipeline definition.",
            pipeline_run,
            EngineEventData.engine_error(
                serializable_error_info_from_exc_info(sys.exc_info())),
        )
        yield from _report_run_failed_if_not_finished(instance, pipeline_run.run_id)
        raise
    try:
        yield from execute_run_iterator(
            recon_pipeline, pipeline_run, instance,
            resume_from_failure=resume_from_failure)
    except (KeyboardInterrupt, DagsterExecutionInterruptedError):
        # Interrupts: fail the run first, then surface the interrupt event.
        yield from _report_run_failed_if_not_finished(instance, pipeline_run.run_id)
        yield instance.report_engine_event(
            message="Run execution terminated by interrupt",
            pipeline_run=pipeline_run,
        )
        raise
    except Exception:
        yield instance.report_engine_event(
            "An exception was thrown during execution that is likely a framework error, "
            "rather than an error in user code.",
            pipeline_run,
            EngineEventData.engine_error(
                serializable_error_info_from_exc_info(sys.exc_info())),
        )
        yield from _report_run_failed_if_not_finished(instance, pipeline_run.run_id)
        raise
def execute_pipeline_through_queue(handle, pipeline_run, message_queue, instance_ref):
    """
    Execute pipeline using message queue as a transport
    """
    run_id = pipeline_run.run_id
    pipeline_name = pipeline_run.pipeline_name
    message_queue.put(
        build_process_started_event(run_id, pipeline_name, os.getpid()))
    try:
        handle.build_repository_definition()
        pipeline_def = handle.with_pipeline_name(
            pipeline_name).build_pipeline_definition()
    except Exception:  # pylint: disable=broad-except
        # Loading user code failed; report via the queue and bail out.
        repo_error = sys.exc_info()
        message_queue.put(
            build_synthetic_pipeline_error_record(
                run_id, serializable_error_info_from_exc_info(repo_error),
                pipeline_name))
        return
    try:
        event_list = []
        for event in execute_run_iterator(
                pipeline_def.build_sub_pipeline(
                    pipeline_run.selector.solid_subset),
                pipeline_run,
                DagsterInstance.from_ref(instance_ref),
        ):
            event_list.append(event)
        return PipelineExecutionResult(pipeline_def, run_id, event_list,
                                       lambda: None)
    except Exception:  # pylint: disable=broad-except
        error_info = serializable_error_info_from_exc_info(sys.exc_info())
        message_queue.put(
            build_synthetic_pipeline_error_record(run_id, error_info,
                                                  pipeline_name))
    finally:
        # Always report process exit and close the queue so the parent unblocks.
        message_queue.put(
            build_process_exited_event(run_id, pipeline_name, os.getpid()))
        message_queue.close()
def core_execute_run(recon_pipeline, pipeline_run, instance):
    """Yield the event stream for ``pipeline_run``, translating failures.

    Interrupts mark the run failed and then surface an interrupt engine event;
    any other exception surfaces a framework-error engine event and then marks
    the run failed.
    """
    check.inst_param(recon_pipeline, "recon_pipeline", ReconstructablePipeline)
    check.inst_param(pipeline_run, "pipeline_run", PipelineRun)
    check.inst_param(instance, "instance", DagsterInstance)
    try:
        # Explicit loop instead of `yield from` -- behavior is identical.
        for run_event in execute_run_iterator(recon_pipeline, pipeline_run, instance):
            yield run_event
    except (KeyboardInterrupt, DagsterExecutionInterruptedError):
        # Fail the run first, then report the interrupt.
        yield from _report_run_failed_if_not_finished(instance, pipeline_run.run_id)
        yield instance.report_engine_event(
            message="Pipeline execution terminated by interrupt",
            pipeline_run=pipeline_run,
        )
    except Exception:  # pylint: disable=broad-except
        # Report the framework error first, then fail the run.
        error_info = serializable_error_info_from_exc_info(sys.exc_info())
        yield instance.report_engine_event(
            "An exception was thrown during execution that is likely a framework error, "
            "rather than an error in user code.",
            pipeline_run,
            EngineEventData.engine_error(error_info),
        )
        yield from _report_run_failed_if_not_finished(instance, pipeline_run.run_id)
def execute_pipeline(self, _, pipeline, pipeline_run, instance, raise_on_error):
    """Execute ``pipeline_run``; on error either re-raise (``raise_on_error``)
    or record a synthetic pipeline error event on the instance."""
    check.inst_param(pipeline, 'pipeline', PipelineDefinition)
    try:
        # Materialize the iterator directly rather than a manual append loop.
        event_list = list(execute_run_iterator(pipeline, pipeline_run, instance))
        return PipelineExecutionResult(pipeline, pipeline_run.run_id, event_list,
                                       lambda: None)
    except Exception:  # pylint: disable=broad-except
        if raise_on_error:
            # A bare `raise` re-raises the active exception with its original
            # traceback on both Python 2 and 3; six.reraise is unnecessary.
            raise
        instance.handle_new_event(
            build_synthetic_pipeline_error_record(
                pipeline_run.run_id,
                serializable_error_info_from_exc_info(sys.exc_info()),
                pipeline.name,
            ))
def run_one(self, instance):
    """Pop the next queued run, register it with the instance, and execute it,
    returning the list of events. Precondition: the queue is non-empty."""
    assert len(self._queue) > 0
    run = self._queue.pop(0)
    pipeline = define_repository().get_pipeline(run.pipeline_name)
    instance.create_run(run)
    # list(...) instead of a pass-through comprehension (same result, clearer).
    return list(execute_run_iterator(pipeline, run, instance))
def test_execute_run_iterator():
    """Closing the iterator mid-run fails the run and cleans up resources;
    re-running an already-SUCCESS run raises a CheckError."""
    records = []

    def event_callback(record):
        assert isinstance(record, EventRecord)
        records.append(record)

    instance = DagsterInstance.local_temp()
    pipeline_def = PipelineDefinition(
        name="basic_resource_pipeline",
        solid_defs=[resource_solid],
        mode_defs=[
            ModeDefinition(
                resource_defs={"a": resource_a, "b": resource_b},
                logger_defs={"callback": construct_event_logger(event_callback)},
            )
        ],
    )
    pipeline_run = instance.create_run_for_pipeline(
        pipeline_def=pipeline_def,
        run_config={"loggers": {"callback": {}}},
        mode="default",
    )
    iterator = execute_run_iterator(
        InMemoryPipeline(pipeline_def), pipeline_run, instance=instance)
    # Advance up to the first STEP_START, then abandon the run via close().
    event_type = None
    while event_type != "STEP_START":
        event = next(iterator)
        event_type = event.event_type_value
    iterator.close()
    events = [record.dagster_event for record in records if record.is_dagster_event]
    messages = [record.user_message for record in records if not record.is_dagster_event]
    # The abandoned run must be reported as failed and resources cleaned up.
    assert len([event for event in events if event.is_pipeline_failure]) > 0
    assert len([message for message in messages if message == "CLEANING A"]) > 0
    assert len([message for message in messages if message == "CLEANING B"]) > 0

    # A run already in a terminal (SUCCESS) state must be rejected up front.
    pipeline_run = instance.create_run_for_pipeline(
        pipeline_def=pipeline_def,
        run_config={"loggers": {"callback": {}}},
        mode="default",
    ).with_status(PipelineRunStatus.SUCCESS)
    with pytest.raises(
            check.CheckError,
            match=r"Pipeline run basic_resource_pipeline \({}\) in state"
            r" PipelineRunStatus.SUCCESS, expected PipelineRunStatus.NOT_STARTED"
            .format(pipeline_run.run_id),
    ):
        execute_run_iterator(
            InMemoryPipeline(pipeline_def), pipeline_run, instance=instance)
def in_mp_process(cls, handle, pipeline_run, instance_ref, term_event):
    """
    Execute pipeline using message queue as a transport
    """
    run_id = pipeline_run.run_id
    pipeline_name = pipeline_run.pipeline_name
    # Rehydrate the instance in this child process from its serializable ref.
    instance = DagsterInstance.from_ref(instance_ref)
    pid = os.getpid()
    instance.report_engine_event(
        'Started process for pipeline (pid: {pid}).'.format(pid=pid),
        pipeline_run,
        EngineEventData.in_process(pid, marker_end='dagit_subprocess_init'),
        cls,
    )
    # Watch term_event so an external terminate request unwinds execution.
    start_termination_thread(term_event)
    try:
        handle.build_repository_definition()
        pipeline_def = handle.with_pipeline_name(
            pipeline_name).build_pipeline_definition()
    except Exception:  # pylint: disable=broad-except
        instance.report_engine_event(
            'Failed attempting to load pipeline "{}"'.format(pipeline_name),
            pipeline_run,
            EngineEventData.engine_error(
                serializable_error_info_from_exc_info(sys.exc_info())),
            cls,
        )
        return
    try:
        event_list = []
        for event in execute_run_iterator(
                pipeline_def.build_sub_pipeline(
                    pipeline_run.selector.solid_subset),
                pipeline_run,
                instance,
        ):
            event_list.append(event)
        return PipelineExecutionResult(pipeline_def, run_id, event_list,
                                       lambda: None)
    # Add a DagsterEvent for unexpected exceptions
    # Explicitly ignore KeyboardInterrupts since they are used for termination
    except DagsterSubprocessError as err:
        if not all([
                err_info.cls_name == 'KeyboardInterrupt'
                for err_info in err.subprocess_error_infos
        ]):
            instance.report_engine_event(
                'An exception was thrown during execution that is likely a framework error, '
                'rather than an error in user code.',
                pipeline_run,
                EngineEventData.engine_error(
                    serializable_error_info_from_exc_info(sys.exc_info())),
                cls,
            )
    except Exception:  # pylint: disable=broad-except
        instance.report_engine_event(
            'An exception was thrown during execution that is likely a framework error, '
            'rather than an error in user code.',
            pipeline_run,
            EngineEventData.engine_error(
                serializable_error_info_from_exc_info(sys.exc_info())),
            cls,
        )
    finally:
        # Always emit the exit marker, regardless of how execution ended.
        instance.report_engine_event(
            'Process for pipeline exited (pid: {pid}).'.format(pid=pid),
            pipeline_run,
            cls=cls,
        )
def _execute_run(request):
    """Generator servicing an ExecuteRun RPC: yields IPC/engine events for the
    run, handling setup failures and client-side generator closure."""
    try:
        execute_run_args = deserialize_json_to_dagster_namedtuple(
            request.serialized_execute_run_args)
        check.inst_param(execute_run_args, 'execute_run_args', ExecuteRunArgs)
        recon_pipeline = recon_pipeline_from_origin(
            execute_run_args.pipeline_origin)
        instance = DagsterInstance.from_ref(execute_run_args.instance_ref)
        pipeline_run = instance.get_run_by_id(execute_run_args.pipeline_run_id)
        pid = os.getpid()
    except:  # pylint: disable=bare-except
        # Setup failed before we had an instance/run to report to; send the
        # error over IPC instead.
        yield IPCErrorMessage(
            serializable_error_info=serializable_error_info_from_exc_info(
                sys.exc_info()),
            message='Error during RPC setup for ExecuteRun',
        )
        return
    yield instance.report_engine_event(
        'Started process for pipeline (pid: {pid}).'.format(pid=pid),
        pipeline_run,
        EngineEventData.in_process(pid, marker_end='cli_api_subprocess_init'),
    )
    # This is so nasty but seemingly unavoidable
    # https://amir.rachum.com/blog/2017/03/03/generator-cleanup/
    closed = False
    try:
        for event in execute_run_iterator(recon_pipeline, pipeline_run, instance):
            yield event
    except DagsterSubprocessError as err:
        # Interrupt-only subprocess errors are termination, not framework bugs.
        if not all([
                err_info.cls_name == 'KeyboardInterrupt'
                for err_info in err.subprocess_error_infos
        ]):
            yield instance.report_engine_event(
                'An exception was thrown during execution that is likely a framework error, '
                'rather than an error in user code.',
                pipeline_run,
                EngineEventData.engine_error(
                    serializable_error_info_from_exc_info(sys.exc_info())),
            )
            instance.report_run_failed(pipeline_run)
    except GeneratorExit:
        # The consumer closed us; we must not yield again (doing so raises
        # RuntimeError), so remember this for the finally block.
        closed = True
        raise
    except Exception:  # pylint: disable=broad-except
        yield instance.report_engine_event(
            'An exception was thrown during execution that is likely a framework error, '
            'rather than an error in user code.',
            pipeline_run,
            EngineEventData.engine_error(
                serializable_error_info_from_exc_info(sys.exc_info())),
        )
        instance.report_run_failed(pipeline_run)
    finally:
        if not closed:
            yield instance.report_engine_event(
                'Process for pipeline exited (pid: {pid}).'.format(pid=pid),
                pipeline_run,
            )
def test_execute_run_iterator():
    """Covers: mid-run iterator close (exactly one GeneratorExit failure plus
    resource cleanup), rejection of re-running a SUCCESS run, run-monitoring
    duplicate handling, and the canceled-before-start short-circuit."""
    records = []

    def event_callback(record):
        assert isinstance(record, EventLogEntry)
        records.append(record)

    with instance_for_test() as instance:
        pipeline_def = PipelineDefinition(
            name="basic_resource_pipeline",
            solid_defs=[resource_solid],
            mode_defs=[
                ModeDefinition(
                    resource_defs={"a": resource_a, "b": resource_b},
                    logger_defs={"callback": construct_event_logger(event_callback)},
                )
            ],
        )
        pipeline_run = instance.create_run_for_pipeline(
            pipeline_def=pipeline_def,
            run_config={"loggers": {"callback": {}}},
            mode="default",
        )
        iterator = execute_run_iterator(
            InMemoryPipeline(pipeline_def), pipeline_run, instance=instance)
        # Advance up to the first STEP_START, then abandon the run via close().
        event_type = None
        while event_type != "STEP_START":
            event = next(iterator)
            event_type = event.event_type_value
        iterator.close()
        events = [record.dagster_event for record in records if record.is_dagster_event]
        messages = [record.user_message for record in records if not record.is_dagster_event]
        # Exactly one failure, attributed to the GeneratorExit from close().
        pipeline_failure_events = [event for event in events if event.is_pipeline_failure]
        assert len(pipeline_failure_events) == 1
        assert "GeneratorExit" in pipeline_failure_events[0].pipeline_failure_data.error.message
        assert len([message for message in messages if message == "CLEANING A"]) > 0
        assert len([message for message in messages if message == "CLEANING B"]) > 0

        # Re-running a run already in SUCCESS must be rejected.
        pipeline_run = instance.create_run_for_pipeline(
            pipeline_def=pipeline_def,
            run_config={"loggers": {"callback": {}}},
            mode="default",
        ).with_status(PipelineRunStatus.SUCCESS)
        with pytest.raises(
                Exception,
                match=r"basic_resource_pipeline \({}\) started a new "
                r"run while the run was already in state DagsterRunStatus.SUCCESS."
                .format(pipeline_run.run_id),
        ):
            execute_run_iterator(
                InMemoryPipeline(pipeline_def), pipeline_run, instance=instance)

        # With run monitoring enabled, a duplicate start is ignored with a
        # message rather than rejected ...
        with instance_for_test(
                overrides={
                    "run_launcher": {
                        "module": "dagster_tests.daemon_tests.test_monitoring_daemon",
                        "class": "TestRunLauncher",
                    },
                    "run_monitoring": {"enabled": True},
                }) as run_monitoring_instance:
            event = next(
                execute_run_iterator(
                    InMemoryPipeline(pipeline_def),
                    pipeline_run,
                    instance=run_monitoring_instance))
            assert (
                "Ignoring a duplicate run that was started from somewhere other than the run monitor daemon"
                in event.message)
            # ... but resume_from_failure requires STARTED/STARTING state.
            with pytest.raises(
                    check.CheckError,
                    match=r"in state DagsterRunStatus.SUCCESS, expected STARTED or STARTING "
                    r"because it's resuming from a run worker failure",
            ):
                execute_run_iterator(
                    InMemoryPipeline(pipeline_def),
                    pipeline_run,
                    instance=run_monitoring_instance,
                    resume_from_failure=True,
                )

        # A run canceled before start yields a single explanatory event.
        pipeline_run = instance.create_run_for_pipeline(
            pipeline_def=pipeline_def,
            run_config={"loggers": {"callback": {}}},
            mode="default",
        ).with_status(PipelineRunStatus.CANCELED)
        events = list(
            execute_run_iterator(
                InMemoryPipeline(pipeline_def), pipeline_run, instance=instance))
        assert len(events) == 1
        assert (
            events[0].message ==
            "Not starting execution since the run was canceled before execution could start")
def test_execute_canceled_state():
    """A run in CANCELED state before start: execute_run raises and logs one
    message; execute_run_iterator yields a single explanatory event."""
    def event_callback(_record):
        pass

    with instance_for_test() as instance:
        pipeline_def = PipelineDefinition(
            name="basic_resource_pipeline",
            solid_defs=[resource_solid],
            mode_defs=[
                ModeDefinition(
                    resource_defs={"a": resource_a, "b": resource_b},
                    logger_defs={"callback": construct_event_logger(event_callback)},
                )
            ],
        )
        pipeline_run = instance.create_run_for_pipeline(
            pipeline_def=pipeline_def,
            run_config={"loggers": {"callback": {}}},
            mode="default",
        ).with_status(PipelineRunStatus.CANCELED)
        # Synchronous path: raises, leaving exactly one log entry.
        with pytest.raises(DagsterInvariantViolationError):
            execute_run(
                InMemoryPipeline(pipeline_def),
                pipeline_run,
                instance=instance,
            )
        logs = instance.all_logs(pipeline_run.run_id)
        assert len(logs) == 1
        assert (
            "Not starting execution since the run was canceled before execution could start"
            in logs[0].message)
        # Iterator path: yields a single explanatory event instead of raising.
        iter_run = instance.create_run_for_pipeline(
            pipeline_def=pipeline_def,
            run_config={"loggers": {"callback": {}}},
            mode="default",
        ).with_status(PipelineRunStatus.CANCELED)
        iter_events = list(
            execute_run_iterator(
                InMemoryPipeline(pipeline_def), iter_run, instance=instance))
        assert len(iter_events) == 1
        assert (
            "Not starting execution since the run was canceled before execution could start"
            in iter_events[0].message)
def test_execute_run_iterator():
    """Covers: mid-run iterator close (exactly one GeneratorExit failure plus
    resource cleanup), rejection of re-running a SUCCESS run, and the
    canceled-before-start short-circuit."""
    records = []

    def event_callback(record):
        assert isinstance(record, EventRecord)
        records.append(record)

    with instance_for_test() as instance:
        pipeline_def = PipelineDefinition(
            name="basic_resource_pipeline",
            solid_defs=[resource_solid],
            mode_defs=[
                ModeDefinition(
                    resource_defs={"a": resource_a, "b": resource_b},
                    logger_defs={"callback": construct_event_logger(event_callback)},
                )
            ],
        )
        pipeline_run = instance.create_run_for_pipeline(
            pipeline_def=pipeline_def,
            run_config={"loggers": {"callback": {}}},
            mode="default",
        )
        iterator = execute_run_iterator(
            InMemoryPipeline(pipeline_def), pipeline_run, instance=instance)
        # Advance up to the first STEP_START, then abandon the run via close().
        event_type = None
        while event_type != "STEP_START":
            event = next(iterator)
            event_type = event.event_type_value
        iterator.close()
        events = [record.dagster_event for record in records if record.is_dagster_event]
        messages = [record.user_message for record in records if not record.is_dagster_event]
        # Exactly one failure, attributed to the GeneratorExit from close().
        pipeline_failure_events = [event for event in events if event.is_pipeline_failure]
        assert len(pipeline_failure_events) == 1
        assert "GeneratorExit" in pipeline_failure_events[0].pipeline_failure_data.error.message
        assert len([message for message in messages if message == "CLEANING A"]) > 0
        assert len([message for message in messages if message == "CLEANING B"]) > 0

        # Re-running a run already in SUCCESS must be rejected.
        pipeline_run = instance.create_run_for_pipeline(
            pipeline_def=pipeline_def,
            run_config={"loggers": {"callback": {}}},
            mode="default",
        ).with_status(PipelineRunStatus.SUCCESS)
        with pytest.raises(
                check.CheckError,
                match=r"Pipeline run basic_resource_pipeline \({}\) in state"
                r" PipelineRunStatus.SUCCESS, expected NOT_STARTED or STARTING"
                .format(pipeline_run.run_id),
        ):
            execute_run_iterator(
                InMemoryPipeline(pipeline_def), pipeline_run, instance=instance)

        # A run canceled before start yields a single explanatory event.
        pipeline_run = instance.create_run_for_pipeline(
            pipeline_def=pipeline_def,
            run_config={"loggers": {"callback": {}}},
            mode="default",
        ).with_status(PipelineRunStatus.CANCELED)
        events = list(
            execute_run_iterator(
                InMemoryPipeline(pipeline_def), pipeline_run, instance=instance))
        assert len(events) == 1
        assert (
            events[0].message ==
            "Not starting execution since the run was canceled before execution could start")