Example 1
def test_logging_bad_custom_log_levels():
    with _setup_logger("test") as (_, logger):

        dl = DagsterLogManager.create(loggers=[logger],
                                      pipeline_run=PipelineRun(run_id="123"))
        with pytest.raises(check.CheckError):
            dl.log(level="test", msg="foobar")
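Note: the tests in this listing depend on a `_setup_logger` helper that is not shown. A minimal sketch consistent with how it is called above (the actual helper in the Dagster test suite may differ; every detail here is inferred from the call sites):

import logging
from contextlib import contextmanager

@contextmanager
def _setup_logger(name, log_levels=None):
    """Yield (captured_results, logger): a list collecting every logged message,
    and a logging.Logger whose level methods append to that list."""
    log_levels = log_levels or {}
    captured_results = []

    logger = logging.Logger(name)

    def log_fn(msg, *args, **kwargs):
        # Capture the fully formatted message string.
        captured_results.append(msg)

    def int_log_fn(level, msg, *args, **kwargs):
        # Capture calls made as logger.log(level, msg), e.g. with integer levels.
        captured_results.append(msg)

    # Patch the standard level methods, plus any custom level names (e.g. {"FOO": 3}).
    for level in ["debug", "info", "warning", "error", "critical"] + [
        key.lower() for key in log_levels
    ]:
        setattr(logger, level, log_fn)
    logger.log = int_log_fn

    yield (captured_results, logger)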
Example 2
def test_logging_no_loggers_registered():
    dl = DagsterLogManager.create(loggers=[])
    dl.debug("test")
    dl.info("test")
    dl.warning("test")
    dl.error("test")
    dl.critical("test")
Example 3
def test_logging_integer_log_levels():
    with _setup_logger("test", {"FOO": 3}) as (_captured_results, logger):

        dl = DagsterLogManager.create(
            loggers=[logger],
            pipeline_run=PipelineRun(pipeline_name="system", run_id="123"))
        dl.log(3, "test")  # pylint: disable=no-member
Example 4
def test_logging_custom_log_levels():
    with _setup_logger("test", {"FOO": 3}) as (_captured_results, logger):

        dl = DagsterLogManager.create(loggers=[logger],
                                      pipeline_run=PipelineRun(run_id="123"))
        with pytest.raises(AttributeError):
            dl.foo("test")  # pylint: disable=no-member
Example 5
def _create_context_free_log_manager(
    instance: DagsterInstance, pipeline_run: PipelineRun, pipeline_def: PipelineDefinition
) -> DagsterLogManager:
    """In the event of pipeline initialization failure, we want to be able to log the failure
    without a dependency on the PlanExecutionContext to initialize DagsterLogManager.
    Args:
        pipeline_run (dagster.core.storage.pipeline_run.PipelineRun)
        pipeline_def (dagster.definitions.PipelineDefinition)
    """
    check.inst_param(instance, "instance", DagsterInstance)
    check.inst_param(pipeline_run, "pipeline_run", PipelineRun)
    check.inst_param(pipeline_def, "pipeline_def", PipelineDefinition)

    loggers = []
    # Use the default system loggers
    for (logger_def, logger_config) in default_system_loggers():
        loggers += [
            logger_def.logger_fn(
                InitLoggerContext(
                    logger_config,
                    logger_def,
                    pipeline_def=pipeline_def,
                    run_id=pipeline_run.run_id,
                )
            )
        ]

    return DagsterLogManager.create(loggers=loggers, instance=instance, pipeline_run=pipeline_run)
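A hypothetical call site showing the intended failure path (the message and surrounding names are illustrative, not from the source):

# Sketch: log an initialization failure before any PlanExecutionContext exists.
log_manager = _create_context_free_log_manager(instance, pipeline_run, pipeline_def)
log_manager.error("Pipeline initialization failed before execution started")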
Example 6
def test_log_level_filtering():
    records = []
    critical_records = []

    debug_logger_def = construct_single_handler_logger("debug_handler",
                                                       "debug",
                                                       LogTestHandler(records))
    critical_logger_def = construct_single_handler_logger(
        "critical_handler", "critical", LogTestHandler(critical_records))

    loggers = [
        logger_def.logger_fn(
            InitLoggerContext(
                {},
                logger_def,
                pipeline_def=PipelineDefinition([], "test"),
                run_id="",
            )) for logger_def in [debug_logger_def, critical_logger_def]
    ]

    log_manager = DagsterLogManager.create(loggers=loggers)

    log_manager.debug("Hello, there!")

    messages = [x.dagster_meta["orig_message"] for x in records]

    assert "Hello, there!" in messages

    critical_messages = [
        x.dagster_meta["orig_message"] for x in critical_records
    ]

    assert "Hello, there!" not in critical_messages
Example 7
def test_clean_event_generator_exit():
    """Testing for generator cleanup
    (see https://amir.rachum.com/blog/2017/03/03/generator-cleanup/)
    """
    from dagster.core.execution.context.init import InitResourceContext
    from dagster.core.definitions.resource_definition import ScopedResourcesBuilder

    pipeline_def = gen_basic_resource_pipeline()
    instance = DagsterInstance.ephemeral()
    execution_plan = create_execution_plan(pipeline_def)
    pipeline_run = instance.create_run_for_pipeline(
        pipeline_def=pipeline_def, execution_plan=execution_plan)
    log_manager = DagsterLogManager.create(loggers=[],
                                           pipeline_run=pipeline_run)
    resolved_run_config = ResolvedRunConfig.build(pipeline_def)
    execution_plan = create_execution_plan(pipeline_def)

    resource_name, resource_def = next(
        iter(pipeline_def.get_default_mode().resource_defs.items()))
    resource_context = InitResourceContext(
        resource_def=resource_def,
        resources=ScopedResourcesBuilder().build(None),
        resource_config=None,
        pipeline_run=pipeline_run,
        instance=instance,
    )
    generator = single_resource_event_generator(resource_context,
                                                resource_name, resource_def)
    next(generator)
    generator.close()

    resource_defs = pipeline_def.get_mode_definition(resolved_run_config.mode)

    generator = resource_initialization_event_generator(
        resource_defs=resource_defs,
        resource_configs=resolved_run_config.resources,
        log_manager=log_manager,
        execution_plan=execution_plan,
        pipeline_run=pipeline_run,
        resource_keys_to_init={"a"},
        instance=instance,
        emit_persistent_events=True,
        pipeline_def_for_backwards_compat=pipeline_def,
    )
    next(generator)
    generator.close()

    generator = PlanExecutionContextManager(  # pylint: disable=protected-access
        pipeline=InMemoryPipeline(pipeline_def),
        execution_plan=execution_plan,
        run_config={},
        pipeline_run=pipeline_run,
        instance=instance,
        retry_mode=RetryMode.DISABLED,
        scoped_resources_builder_cm=resource_initialization_manager,
    ).get_generator()
    next(generator)
    generator.close()
Example 8
def test_logging_basic():
    with _setup_logger("test") as (captured_results, logger):

        dl = DagsterLogManager.create(loggers=[logger],
                                      pipeline_run=PipelineRun(run_id="123"))
        dl.debug("test")
        dl.info("test")
        dl.warning("test")
        dl.error("test")
        dl.critical("test")

        assert captured_results == ["system - 123 - test"] * 5
Example 9
def initialize_console_manager(pipeline_run: Optional[PipelineRun]) -> DagsterLogManager:
    # initialize default colored console logger
    loggers = []
    for logger_def, logger_config in default_system_loggers():
        loggers.append(
            logger_def.logger_fn(
                InitLoggerContext(
                    logger_config, logger_def, run_id=pipeline_run.run_id if pipeline_run else None
                )
            )
        )
    return DagsterLogManager.create(loggers=loggers, pipeline_run=pipeline_run)
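Because `pipeline_run` is optional, this can also serve ad-hoc logging outside of a run; a usage sketch based on the code above:

# No run in scope: pass None, so the InitLoggerContext gets run_id=None.
log_manager = initialize_console_manager(None)
log_manager.info("hello from the console")  # rendered by the default system loggers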
Example 10
def _setup_test_two_handler_log_mgr():
    test_formatter = logging.Formatter(fmt="%(levelname)s :: %(message)s")

    test_info_handler = logging.StreamHandler(sys.stdout)
    test_info_handler.setLevel("INFO")
    test_info_handler.setFormatter(test_formatter)

    test_warn_handler = logging.StreamHandler(sys.stdout)
    test_warn_handler.setLevel("WARN")
    test_warn_handler.setFormatter(test_formatter)

    return DagsterLogManager.create(
        loggers=[],
        handlers=[test_info_handler, test_warn_handler],
        pipeline_run=PipelineRun(pipeline_name="system", run_id="123"),
    )
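Since the two handlers sit at different levels, a single call can reach stdout zero, one, or two times; a sketch of the expected behavior (standard logging-handler semantics, with illustrative messages):

log_mgr = _setup_test_two_handler_log_mgr()

log_mgr.debug("hidden")        # below both handlers' levels: printed zero times
log_mgr.info("seen once")      # passes only the INFO handler: printed once
log_mgr.warning("seen twice")  # passes both the INFO and WARN handlers: printed twice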
Example 11
def create_log_manager(
    context_creation_data: ContextCreationData,
) -> DagsterLogManager:
    check.inst_param(context_creation_data, "context_creation_data", ContextCreationData)

    pipeline_def, mode_def, resolved_run_config, pipeline_run = (
        context_creation_data.pipeline_def,
        context_creation_data.mode_def,
        context_creation_data.resolved_run_config,
        context_creation_data.pipeline_run,
    )

    # The following logic is tightly coupled to the processing of logger config in
    # python_modules/dagster/dagster/core/system_config/objects.py#config_map_loggers
    # Changes here should be checked against that function, which applies config mapping
    # via ConfigurableDefinition (@configured) to incoming logger configs. See its docstring
    # for more details.

    loggers = []
    for logger_key, logger_def in mode_def.loggers.items() or default_loggers().items():
        if logger_key in resolved_run_config.loggers:
            loggers.append(
                logger_def.logger_fn(
                    InitLoggerContext(
                        resolved_run_config.loggers.get(logger_key, {}).get("config"),
                        logger_def,
                        pipeline_def=pipeline_def,
                        run_id=pipeline_run.run_id,
                    )
                )
            )

    if not loggers:
        for (logger_def, logger_config) in default_system_loggers():
            loggers.append(
                logger_def.logger_fn(
                    InitLoggerContext(
                        logger_config,
                        logger_def,
                        pipeline_def=pipeline_def,
                        run_id=pipeline_run.run_id,
                    )
                )
            )

    return DagsterLogManager.create(
        loggers=loggers, pipeline_run=pipeline_run, instance=context_creation_data.instance
    )
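For reference, a hypothetical run-config fragment of the shape `resolved_run_config.loggers` is resolved from (the `console` logger key and its `log_level` field are used purely for illustration):

# Illustrative only: logger config as it would appear in run config before
# config mapping via @configured is applied by config_map_loggers.
run_config = {"loggers": {"console": {"config": {"log_level": "DEBUG"}}}}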
Example 12
def test_multiline_logging_complex():
    msg = "DagsterEventType.STEP_FAILURE for step start.materialization.output.result.0"
    dagster_event = DagsterEvent(
        event_type_value="STEP_FAILURE",
        pipeline_name="error_monster",
        step_key="start.materialization.output.result.0",
        solid_handle=NodeHandle("start", None),
        step_kind_value="MATERIALIZATION_THUNK",
        logging_tags={
            "pipeline": "error_monster",
            "step_key": "start.materialization.output.result.0",
            "solid": "start",
            "solid_definition": "emit_num",
        },
        event_specific_data=StepFailureData(
            error=SerializableErrorInfo(
                message="FileNotFoundError: [Errno 2] No such file or directory: '/path/to/file'\n",
                stack=["a stack message"],
                cls_name="FileNotFoundError",
            ),
            user_failure_data=None,
        ),
    )

    with _setup_logger(DAGSTER_DEFAULT_LOGGER) as (captured_results, logger):

        dl = DagsterLogManager.create(loggers=[logger],
                                      pipeline_run=PipelineRun(
                                          run_id="123",
                                          pipeline_name="error_monster"))
        dl.log_dagster_event(logging.INFO, msg, dagster_event)

    expected_results = [
        "error_monster - 123 - STEP_FAILURE - DagsterEventType.STEP_FAILURE for step "
        "start.materialization.output.result.0",
        "",
        "FileNotFoundError: [Errno 2] No such file or directory: '/path/to/file'",
        "",
        "Stack Trace:",
        "a stack message",
    ]

    assert captured_results[0].split("\n") == expected_results
Example 13
def test_capture_handler_log_records():
    capture_handler = CaptureHandler()

    dl = DagsterLogManager.create(
        loggers=[],
        handlers=[capture_handler],
        pipeline_run=PipelineRun(run_id="123456", pipeline_name="pipeline"),
    ).with_tags(step_key="some_step")

    dl.info("info")
    dl.critical("critical error", extra={"foo": "bar"})

    assert len(capture_handler.captured) == 2

    captured_info_record = capture_handler.captured[0]
    assert captured_info_record.name == "dagster"
    assert captured_info_record.msg == "pipeline - 123456 - some_step - info"
    assert captured_info_record.levelno == logging.INFO

    captured_critical_record = capture_handler.captured[1]
    assert captured_critical_record.name == "dagster"
    assert captured_critical_record.msg == "pipeline - 123456 - some_step - critical error"
    assert captured_critical_record.levelno == logging.CRITICAL
    assert captured_critical_record.foo == "bar"
Example 14
def host_mode_execution_context_event_generator(
    pipeline,
    execution_plan,
    run_config,
    pipeline_run,
    instance,
    raise_on_error,
    executor_defs,
    output_capture,
    resume_from_failure: bool = False,
):
    check.inst_param(execution_plan, "execution_plan", ExecutionPlan)
    check.inst_param(pipeline, "pipeline", ReconstructablePipeline)

    check.dict_param(run_config, "run_config", key_type=str)
    check.inst_param(pipeline_run, "pipeline_run", PipelineRun)
    check.inst_param(instance, "instance", DagsterInstance)
    executor_defs = check.list_param(executor_defs,
                                     "executor_defs",
                                     of_type=ExecutorDefinition)
    check.bool_param(raise_on_error, "raise_on_error")
    check.invariant(output_capture is None)

    execution_context = None

    loggers = []

    for (logger_def, logger_config) in default_system_loggers():
        loggers.append(
            logger_def.logger_fn(
                InitLoggerContext(
                    logger_config,
                    pipeline_def=None,
                    logger_def=logger_def,
                    run_id=pipeline_run.run_id,
                )))

    log_manager = DagsterLogManager.create(loggers=loggers,
                                           pipeline_run=pipeline_run,
                                           instance=instance)

    try:
        executor = _get_host_mode_executor(pipeline, run_config, executor_defs,
                                           instance)
        execution_context = PlanOrchestrationContext(
            plan_data=PlanData(
                pipeline=pipeline,
                pipeline_run=pipeline_run,
                instance=instance,
                execution_plan=execution_plan,
                raise_on_error=raise_on_error,
                retry_mode=executor.retries,
            ),
            log_manager=log_manager,
            executor=executor,
            output_capture=None,
            resume_from_failure=resume_from_failure,
        )

        yield execution_context

    except DagsterError as dagster_error:
        if execution_context is None:
            user_facing_exc_info = (
                # pylint does not know original_exc_info exists if is_user_code_error is true
                # pylint: disable=no-member
                dagster_error.original_exc_info  # type: ignore
                if dagster_error.is_user_code_error else sys.exc_info())
            error_info = serializable_error_info_from_exc_info(
                user_facing_exc_info)

            event = DagsterEvent.pipeline_failure(
                pipeline_context_or_name=pipeline_run.pipeline_name,
                context_msg=(
                    f'Pipeline failure during initialization for pipeline "{pipeline_run.pipeline_name}". '
                    "This may be due to a failure in initializing the executor or one of the loggers."
                ),
                error_info=error_info,
            )
            log_manager.log_dagster_event(
                level=logging.ERROR,
                msg=event.message,
                dagster_event=event  # type: ignore
            )
            yield event
        else:
            # pipeline teardown failure
            raise dagster_error

        if raise_on_error:
            raise dagster_error