def _create_context_free_log_manager(run_config, pipeline_def):
    '''In the event of pipeline initialization failure, we want to be able to log the failure
    without a dependency on the ExecutionContext to initialize DagsterLogManager.

    Args:
        run_config (dagster.core.execution_context.RunConfig)
        pipeline_def (dagster.definitions.PipelineDefinition)
    '''
    check.inst_param(run_config, 'run_config', RunConfig)
    check.inst_param(pipeline_def, 'pipeline_def', PipelineDefinition)

    loggers = []
    # Use the default logger
    for (logger_def, logger_config) in default_system_loggers():
        loggers += [
            logger_def.logger_fn(
                InitLoggerContext(logger_config, pipeline_def, logger_def, run_config.run_id)
            )
        ]
    if run_config.event_callback:
        event_logger_def = construct_event_logger(run_config.event_callback)
        loggers += [
            event_logger_def.logger_fn(
                InitLoggerContext({}, pipeline_def, event_logger_def, run_config.run_id)
            )
        ]
    elif run_config.loggers:
        loggers += run_config.loggers

    return DagsterLogManager(run_config.run_id, get_logging_tags(run_config, pipeline_def), loggers)
def _create_context_free_log_manager(
    instance: DagsterInstance,
    pipeline_run: PipelineRun,
    pipeline_def: PipelineDefinition,
) -> DagsterLogManager:
    """In the event of pipeline initialization failure, we want to be able to log the failure
    without a dependency on the PlanExecutionContext to initialize DagsterLogManager.

    Args:
        pipeline_run (dagster.core.storage.pipeline_run.PipelineRun)
        pipeline_def (dagster.definitions.PipelineDefinition)
    """
    check.inst_param(instance, "instance", DagsterInstance)
    check.inst_param(pipeline_run, "pipeline_run", PipelineRun)
    check.inst_param(pipeline_def, "pipeline_def", PipelineDefinition)

    loggers = [instance.get_logger()]
    # Use the default logger
    for (logger_def, logger_config) in default_system_loggers():
        loggers += [
            logger_def.logger_fn(
                InitLoggerContext(
                    logger_config,
                    logger_def,
                    pipeline_def=pipeline_def,
                    run_id=pipeline_run.run_id,
                )
            )
        ]

    return DagsterLogManager(pipeline_run.run_id, get_logging_tags(pipeline_run), loggers)
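# Hedged usage sketch (not in the original source): how the context-free log
# manager above might be invoked at an initialization-failure site, before any
# execution context exists. The parameters are assumed to already be in hand;
# DagsterLogManager exposes stdlib-style level methods such as error().
def _example_log_init_failure(instance, pipeline_run, pipeline_def):
    log_manager = _create_context_free_log_manager(instance, pipeline_run, pipeline_def)
    log_manager.error("Pipeline initialization failed before the execution context was created")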
def initialize_console_manager(pipeline_run: Optional[PipelineRun]) -> DagsterLogManager:
    # initialize default colored console logger
    loggers = []
    for logger_def, logger_config in default_system_loggers():
        loggers.append(
            logger_def.logger_fn(
                InitLoggerContext(
                    logger_config, logger_def, run_id=pipeline_run.run_id if pipeline_run else None
                )
            )
        )
    return DagsterLogManager.create(loggers=loggers, pipeline_run=pipeline_run)
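# Hedged usage sketch (not in the original source): obtaining a console-only
# log manager outside of a run, e.g. from a CLI entry point. Passing None
# skips run scoping; the resulting manager logs through the default colored
# console logger.
def _example_console_logging() -> None:
    log_manager = initialize_console_manager(None)
    log_manager.info("starting up")  # routed to the default system loggers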
def create_log_manager(context_creation_data: ContextCreationData) -> DagsterLogManager:
    check.inst_param(context_creation_data, "context_creation_data", ContextCreationData)

    pipeline_def, mode_def, environment_config, pipeline_run = (
        context_creation_data.pipeline_def,
        context_creation_data.mode_def,
        context_creation_data.environment_config,
        context_creation_data.pipeline_run,
    )

    # The following logic is tightly coupled to the processing of logger config in
    # python_modules/dagster/dagster/core/system_config/objects.py#config_map_loggers
    # Changes here should be checked against that function, which applies config mapping
    # via ConfigurableDefinition (@configured) to incoming logger configs. See its docstring
    # for more details.
    loggers = []
    for logger_key, logger_def in mode_def.loggers.items() or default_loggers().items():
        if logger_key in environment_config.loggers:
            loggers.append(
                logger_def.logger_fn(
                    InitLoggerContext(
                        environment_config.loggers.get(logger_key, {}).get("config"),
                        logger_def,
                        pipeline_def=pipeline_def,
                        run_id=pipeline_run.run_id,
                    )
                )
            )

    if not loggers:
        for (logger_def, logger_config) in default_system_loggers():
            loggers.append(
                logger_def.logger_fn(
                    InitLoggerContext(
                        logger_config,
                        logger_def,
                        pipeline_def=pipeline_def,
                        run_id=pipeline_run.run_id,
                    )
                )
            )

    # should this be first in loggers list?
    loggers.append(context_creation_data.instance.get_logger())

    return DagsterLogManager(
        run_id=pipeline_run.run_id,
        logging_tags=get_logging_tags(pipeline_run),
        loggers=loggers,
    )
def create_log_manager(context_creation_data):
    check.inst_param(context_creation_data, 'context_creation_data', ContextCreationData)

    pipeline_def, mode_def, environment_config, run_config = (
        context_creation_data.pipeline_def,
        context_creation_data.mode_def,
        context_creation_data.environment_config,
        context_creation_data.run_config,
    )

    loggers = []
    for logger_key, logger_def in mode_def.loggers.items() or default_loggers().items():
        if logger_key in environment_config.loggers:
            loggers.append(
                logger_def.logger_fn(
                    InitLoggerContext(
                        environment_config.loggers.get(logger_key, {}).get('config'),
                        pipeline_def,
                        logger_def,
                        run_config.run_id,
                    )
                )
            )

    if run_config.loggers:
        for logger in run_config.loggers:
            loggers.append(logger)

    if not loggers:
        for (logger_def, logger_config) in default_system_loggers():
            loggers.append(
                logger_def.logger_fn(
                    InitLoggerContext(logger_config, pipeline_def, logger_def, run_config.run_id)
                )
            )

    if run_config.event_callback:
        init_logger_context = InitLoggerContext({}, pipeline_def, logger_def, run_config.run_id)
        loggers.append(
            construct_event_logger(run_config.event_callback).logger_fn(init_logger_context)
        )

    return DagsterLogManager(
        run_id=run_config.run_id,
        logging_tags=get_logging_tags(
            context_creation_data.run_config, context_creation_data.pipeline_def
        ),
        loggers=loggers,
    )
def create_log_manager(context_creation_data):
    check.inst_param(context_creation_data, 'context_creation_data', ContextCreationData)

    pipeline_def, mode_def, environment_config, run_config = (
        context_creation_data.pipeline_def,
        context_creation_data.mode_def,
        context_creation_data.environment_config,
        context_creation_data.run_config,
    )

    loggers = []
    for logger_key, logger_def in mode_def.loggers.items() or default_loggers().items():
        if logger_key in environment_config.loggers:
            loggers.append(
                logger_def.logger_fn(
                    InitLoggerContext(
                        environment_config.loggers.get(logger_key, {}).get('config'),
                        pipeline_def,
                        logger_def,
                        run_config.run_id,
                    )
                )
            )

    if not loggers:
        for (logger_def, logger_config) in default_system_loggers():
            loggers.append(
                logger_def.logger_fn(
                    InitLoggerContext(logger_config, pipeline_def, logger_def, run_config.run_id)
                )
            )

    # should this be first in loggers list?
    loggers.append(context_creation_data.instance.get_event_listener())

    return DagsterLogManager(
        run_id=run_config.run_id,
        logging_tags=get_logging_tags(
            context_creation_data.run_config, context_creation_data.pipeline_def
        ),
        loggers=loggers,
    )
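# Hedged illustration (not in the original source): the
# `logger_key in environment_config.loggers` gate in the variants above is
# driven by the "loggers" section of the run config. A config like the dict
# below would select the built-in console logger with an overridden level,
# while an empty section falls through to default_system_loggers(). The exact
# config fields depend on the logger definition; log_level is the field
# accepted by Dagster's console logger.
EXAMPLE_RUN_CONFIG = {
    "loggers": {
        "console": {"config": {"log_level": "DEBUG"}},
    },
}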
def _create_loggers(environment_config, run_config, pipeline_def, mode_def):
    check.inst_param(environment_config, 'environment_config', EnvironmentConfig)
    check.inst_param(run_config, 'run_config', RunConfig)
    check.inst_param(pipeline_def, 'pipeline_def', PipelineDefinition)
    check.inst_param(mode_def, 'mode_def', ModeDefinition)

    loggers = []
    for logger_key, logger_def in mode_def.loggers.items() or default_loggers().items():
        if logger_key in environment_config.loggers:
            loggers.append(
                logger_def.logger_fn(
                    InitLoggerContext(
                        environment_config.loggers.get(logger_key, {}).get('config'),
                        pipeline_def,
                        logger_def,
                        run_config.run_id,
                    )
                )
            )

    if run_config.loggers:
        for logger in run_config.loggers:
            loggers.append(logger)

    if not loggers:
        for (logger_def, logger_config) in default_system_loggers():
            loggers.append(
                logger_def.logger_fn(
                    InitLoggerContext(logger_config, pipeline_def, logger_def, run_config.run_id)
                )
            )

    if run_config.event_callback:
        init_logger_context = InitLoggerContext({}, pipeline_def, logger_def, run_config.run_id)
        loggers.append(
            construct_event_logger(run_config.event_callback).logger_fn(init_logger_context)
        )

    return loggers
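# Hedged usage sketch (not in the original source): the run_config.event_callback
# branch above routes structured event records to a caller-supplied callback via
# construct_event_logger. `collected` and `handle_event` are hypothetical names
# for illustration; the InitLoggerContext argument order matches the legacy
# positional form used in the functions above.
def _example_event_callback_logger(pipeline_def, run_config):
    collected = []

    def handle_event(event):  # hypothetical sink collecting event records
        collected.append(event)

    event_logger_def = construct_event_logger(handle_event)
    return event_logger_def.logger_fn(
        InitLoggerContext({}, pipeline_def, event_logger_def, run_config.run_id)
    )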
def host_mode_execution_context_event_generator(
    pipeline,
    execution_plan,
    run_config,
    pipeline_run,
    instance,
    raise_on_error,
    executor_defs,
    output_capture,
    resume_from_failure: bool = False,
):
    check.inst_param(execution_plan, "execution_plan", ExecutionPlan)
    check.inst_param(pipeline, "pipeline", ReconstructablePipeline)
    check.dict_param(run_config, "run_config", key_type=str)
    check.inst_param(pipeline_run, "pipeline_run", PipelineRun)
    check.inst_param(instance, "instance", DagsterInstance)
    executor_defs = check.list_param(executor_defs, "executor_defs", of_type=ExecutorDefinition)
    check.bool_param(raise_on_error, "raise_on_error")
    check.invariant(output_capture is None)

    execution_context = None

    loggers = []
    for (logger_def, logger_config) in default_system_loggers():
        loggers.append(
            logger_def.logger_fn(
                InitLoggerContext(
                    logger_config,
                    pipeline_def=None,
                    logger_def=logger_def,
                    run_id=pipeline_run.run_id,
                )
            )
        )

    log_manager = DagsterLogManager.create(
        loggers=loggers, pipeline_run=pipeline_run, instance=instance
    )

    try:
        executor = _get_host_mode_executor(pipeline, run_config, executor_defs, instance)
        execution_context = PlanOrchestrationContext(
            plan_data=PlanData(
                pipeline=pipeline,
                pipeline_run=pipeline_run,
                instance=instance,
                execution_plan=execution_plan,
                raise_on_error=raise_on_error,
                retry_mode=executor.retries,
            ),
            log_manager=log_manager,
            executor=executor,
            output_capture=None,
            resume_from_failure=resume_from_failure,
        )

        yield execution_context
    except DagsterError as dagster_error:
        if execution_context is None:
            user_facing_exc_info = (
                # pylint does not know original_exc_info exists if is_user_code_error is true
                # pylint: disable=no-member
                dagster_error.original_exc_info  # type: ignore
                if dagster_error.is_user_code_error
                else sys.exc_info()
            )
            error_info = serializable_error_info_from_exc_info(user_facing_exc_info)

            event = DagsterEvent.pipeline_failure(
                pipeline_context_or_name=pipeline_run.pipeline_name,
                context_msg=(
                    f'Pipeline failure during initialization for pipeline "{pipeline_run.pipeline_name}". '
                    "This may be due to a failure in initializing the executor or one of the loggers."
                ),
                error_info=error_info,
            )
            log_manager.log_dagster_event(
                level=logging.ERROR, msg=event.message, dagster_event=event  # type: ignore
            )
            yield event
        else:
            # pipeline teardown failure
            raise dagster_error

        if raise_on_error:
            raise dagster_error
def host_mode_execution_context_event_generator(
    pipeline,
    execution_plan,
    run_config,
    pipeline_run,
    instance,
    raise_on_error,
    get_executor_def_fn,
    output_capture,
):
    check.inst_param(execution_plan, "execution_plan", ExecutionPlan)
    check.inst_param(pipeline, "pipeline", ReconstructablePipeline)
    check.dict_param(run_config, "run_config", key_type=str)
    check.inst_param(pipeline_run, "pipeline_run", PipelineRun)
    check.inst_param(instance, "instance", DagsterInstance)
    get_executor_def_fn = check.opt_callable_param(
        get_executor_def_fn, "get_executor_def_fn", _default_get_executor_def_fn
    )
    check.bool_param(raise_on_error, "raise_on_error")
    check.invariant(output_capture is None)

    execution_context = None

    loggers = []
    for (logger_def, logger_config) in default_system_loggers():
        loggers.append(
            logger_def.logger_fn(
                InitLoggerContext(
                    logger_config,
                    pipeline_def=None,
                    logger_def=logger_def,
                    run_id=pipeline_run.run_id,
                )
            )
        )
    loggers.append(instance.get_logger())

    log_manager = DagsterLogManager(
        run_id=pipeline_run.run_id,
        logging_tags=get_logging_tags(pipeline_run),
        loggers=loggers,
    )

    try:
        executor = _get_host_mode_executor(pipeline, run_config, get_executor_def_fn, instance)
        execution_context = PlanOrchestrationContext(
            plan_data=PlanData(
                pipeline=pipeline,
                pipeline_run=pipeline_run,
                instance=instance,
                execution_plan=execution_plan,
                raise_on_error=raise_on_error,
                retry_mode=executor.retries,
            ),
            log_manager=log_manager,
            executor=executor,
            output_capture=None,
        )

        yield execution_context
    except DagsterError as dagster_error:
        if execution_context is None:
            user_facing_exc_info = (
                # pylint does not know original_exc_info exists if is_user_code_error is true
                # pylint: disable=no-member
                dagster_error.original_exc_info
                if dagster_error.is_user_code_error
                else sys.exc_info()
            )
            error_info = serializable_error_info_from_exc_info(user_facing_exc_info)

            yield DagsterEvent.pipeline_init_failure(
                pipeline_name=pipeline_run.pipeline_name,
                failure_data=PipelineInitFailureData(error=error_info),
                log_manager=log_manager,
            )
        else:
            # pipeline teardown failure
            raise dagster_error

        if raise_on_error:
            raise dagster_error
def host_mode_execution_context_event_generator(
    execution_plan, recon_pipeline, run_config, pipeline_run, instance, executor, raise_on_error
):
    check.inst_param(execution_plan, "execution_plan", ExecutionPlan)
    check.inst_param(recon_pipeline, "recon_pipeline", ReconstructablePipeline)
    check.dict_param(run_config, "run_config", key_type=str)
    check.inst_param(pipeline_run, "pipeline_run", PipelineRun)
    check.inst_param(instance, "instance", DagsterInstance)
    check.inst_param(executor, "executor", Executor)
    check.bool_param(raise_on_error, "raise_on_error")

    execution_context = None

    loggers = []

    # Should these be configurable from the run config (without loading
    # the full EnvironmentConfig??)
    for (logger_def, logger_config) in default_system_loggers():
        loggers.append(
            logger_def.logger_fn(
                InitLoggerContext(
                    logger_config,
                    pipeline_def=None,
                    logger_def=logger_def,
                    run_id=pipeline_run.run_id,
                )
            )
        )
    loggers.append(instance.get_logger())

    log_manager = DagsterLogManager(
        run_id=pipeline_run.run_id,
        logging_tags=_get_logging_tags(pipeline_run),
        loggers=loggers,
    )

    # Create an executor (again how do we pull config from run_config
    # without going through the full EnvironmentConfig.build flow)
    try:
        execution_context = HostModeRunWorkerExecutionContext(
            execution_context_data=HostModeExecutionContextData(
                pipeline_run=pipeline_run,
                recon_pipeline=recon_pipeline,
                execution_plan=execution_plan,
                instance=instance,
                raise_on_error=raise_on_error,
                retry_mode=executor.retries,
            ),
            log_manager=log_manager,
            executor=executor,
        )

        yield execution_context
    except DagsterError as dagster_error:
        if execution_context is None:
            user_facing_exc_info = (
                # pylint does not know original_exc_info exists if is_user_code_error is true
                # pylint: disable=no-member
                dagster_error.original_exc_info
                if dagster_error.is_user_code_error
                else sys.exc_info()
            )
            error_info = serializable_error_info_from_exc_info(user_facing_exc_info)

            yield DagsterEvent.pipeline_init_failure(
                pipeline_name=pipeline_run.pipeline_name,
                failure_data=PipelineInitFailureData(error=error_info),
                log_manager=log_manager,
            )
        else:
            # pipeline teardown failure
            raise dagster_error

        if raise_on_error:
            raise dagster_error
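# Hedged consumption sketch (not in the original source): in all three
# generator variants above, the first yielded value is the execution context
# when initialization succeeds, or failure event(s) when it does not. Real
# callers drive this through Dagster's execution-context-manager machinery;
# `gen` here is a hypothetical already-constructed generator.
def _example_drive_host_mode_generator(gen):
    try:
        for yielded in gen:
            # either an execution context (success) or a DagsterEvent (init failure)
            print(type(yielded).__name__)
    finally:
        gen.close()  # run generator teardown (raises GeneratorExit at the paused yield)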