Example 1
    def test_get_logger(self):
        """
        When using `get_logger` to get a logger
            When the logger does not exist
                We create a new DDLogger
            When the logger exists
                We return the expected logger
            When a different logger is requested
                We return a new DDLogger
        """
        # Assert the logger doesn't already exist
        self.assertNotIn('test.logger', self.manager.loggerDict)

        # Fetch a new logger
        log = get_logger('test.logger')
        self.assertEqual(log.name, 'test.logger')
        self.assertEqual(log.level, logging.NOTSET)

        # Ensure it is a DDLogger
        self.assertIsInstance(log, DDLogger)
        # Make sure it is stored in all the places we expect
        self.assertEqual(self.manager.getLogger('test.logger'), log)
        self.assertEqual(self.manager.loggerDict['test.logger'], log)

        # Fetch the same logger
        same_log = get_logger('test.logger')
        # Assert we got the same logger
        self.assertEqual(log, same_log)

        # Fetch a different logger
        new_log = get_logger('new.test.logger')
        # Make sure we didn't get the same one
        self.assertNotEqual(log, new_log)
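
The caching behavior above matches a register-or-reuse lookup on the stdlib logging manager. Below is a minimal sketch of what `get_logger` plausibly does, assuming `DDLogger` subclasses `logging.Logger`; the real implementation is not shown in these tests, and `_fixupParents` is the (private but real) stdlib manager hook named in Example 5.

import logging

def get_logger_sketch(name):
    # Illustrative only; `DDLogger` is assumed to subclass logging.Logger
    manager = logging.Logger.manager
    logger = manager.loggerDict.get(name)
    if not isinstance(logger, logging.Logger):
        # Not created yet (or only a PlaceHolder): create, register, and
        # wire the new logger into the dotted-name hierarchy
        logger = DDLogger(name)
        manager.loggerDict[name] = logger
        manager._fixupParents(logger)
    return logger
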
Example 2
    def test_logger_handle_bucket_skipped_msg(self, base_handle):
        """
        When calling `DDLogger.handle`
            When a bucket exists for a previous time frame
                We pass the record once to the base `Logger.handle`
                We update the record message to include the number of skipped messages
        """
        log = get_logger('test.logger')

        # Create log record to handle
        original_msg = 'hello %s'
        original_args = (1, )
        record = self._make_record(log, msg=original_msg, args=original_args)

        # Create a bucket entry for this record
        key = (record.name, record.levelno, record.pathname, record.lineno)
        bucket = int(record.created / log.rate_limit)
        # Use the previous time bucket so the entry reads as already expired
        log.buckets[key] = DDLogger.LoggingBucket(bucket=bucket - 1,
                                                  skipped=20)

        # Handle our record
        log.handle(record)

        # We passed to base Logger.handle
        base_handle.assert_called_once_with(record)

        self.assertEqual(record.msg,
                         original_msg + ', %s additional messages skipped')
        self.assertEqual(record.args, original_args + (20, ))
        self.assertEqual(record.getMessage(),
                         'hello 1, 20 additional messages skipped')
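
The message rewrite asserted above works because `LogRecord.getMessage()` applies ordinary %-formatting, evaluating `record.msg % record.args`, so appending a `%s` placeholder to the message plus one extra argument extends the rendered output. A standalone check of that arithmetic:

# `LogRecord.getMessage()` evaluates `record.msg % record.args`
msg = 'hello %s' + ', %s additional messages skipped'
args = (1,) + (20,)
assert msg % args == 'hello 1, 20 additional messages skipped'
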
Example 3
    def test_logger_handle_bucket_limited(self, base_handle):
        """
        When calling `DDLogger.handle`
            With multiple records in a single time frame
                We pass only the first to the base `Logger.handle`
                We keep track of the number skipped
        """
        log = get_logger('test.logger')

        # Create log record and handle it
        first_record = self._make_record(log, msg='first')
        log.handle(first_record)

        for _ in range(100):
            record = self._make_record(log)
            # DEV: Use the same timestamp as `first_record` to ensure we are in the same bucket
            record.created = first_record.created
            log.handle(record)

        # We passed to base Logger.handle
        base_handle.assert_called_once_with(first_record)

        # We added a bucket entry for these records
        key = (record.name, record.levelno, record.pathname, record.lineno)
        logging_bucket = log.buckets.get(key)

        # The bucket entry is correct
        expected_bucket = int(first_record.created / log.rate_limit)
        self.assertEqual(logging_bucket.bucket, expected_bucket)
        self.assertEqual(logging_bucket.skipped, 100)
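
The bucket index is plain integer division of the record timestamp by the rate limit, so any timestamps falling inside the same window collapse to one index. For illustration, assuming a 60-second window (the actual default of `log.rate_limit` is not shown in these tests):

rate_limit = 60  # assumed window size in seconds, for illustration only
t1, t2, t3 = 960.0, 1019.9, 1020.0
assert int(t1 / rate_limit) == int(t2 / rate_limit) == 16  # same window
assert int(t3 / rate_limit) == 17  # the next window starts at 1020.0
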
Example 4
    def test_logger_handle_bucket(self, base_handle):
        """
        When calling `DDLogger.handle`
            With a record
                We pass it to the base `Logger.handle`
                We create a bucket for tracking
        """
        log = get_logger('test.logger')

        # Create log record and handle it
        record = self._make_record(log)
        log.handle(record)

        # We passed to base Logger.handle
        base_handle.assert_called_once_with(record)

        # We added a bucket entry for this record
        key = (record.name, record.levelno, record.pathname, record.lineno)
        logging_bucket = log.buckets.get(key)
        self.assertIsInstance(logging_bucket, DDLogger.LoggingBucket)

        # The bucket entry is correct
        expected_bucket = int(record.created / log.rate_limit)
        self.assertEqual(logging_bucket.bucket, expected_bucket)
        self.assertEqual(logging_bucket.skipped, 0)
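
These tests construct `DDLogger.LoggingBucket` with `bucket=` and `skipped=` keywords and read the same names back as attributes, which is consistent with a two-field namedtuple. A sketch of that shape (assumed; the actual definition is not shown here):

import collections

LoggingBucket = collections.namedtuple('LoggingBucket', ('bucket', 'skipped'))

entry = LoggingBucket(bucket=17, skipped=0)
assert entry.bucket == 17 and entry.skipped == 0
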
Example 5
    def test_get_logger_parents(self):
        """
        When using `get_logger` to get a logger
            We appropriately assign parent loggers

        DEV: This test case is to ensure we are calling `manager._fixupParents(logger)`
        """
        # Fetch a new logger
        test_log = get_logger('test')
        self.assertEqual(test_log.parent, self.root)

        # Fetch a new child log
        # Auto-associate with parent `test` logger
        child_log = get_logger('test.child')
        self.assertEqual(child_log.parent, test_log)

        # Deep child
        deep_log = get_logger('test.child.logger.from.test.case')
        self.assertEqual(deep_log.parent, child_log)
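
The parenting rule verified here is the stdlib one: `manager._fixupParents` links a logger to its nearest existing ancestor by dotted name, skipping name segments for which no logger was ever created. The same behavior can be observed with plain `logging.getLogger`:

import logging

parent = logging.getLogger('demo')
child = logging.getLogger('demo.child')
deep = logging.getLogger('demo.child.logger.from.test.case')

assert child.parent is parent
# 'demo.child.logger' and deeper intermediate names were never created as
# loggers, so the deep logger's parent is the nearest real ancestor
assert deep.parent is child
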
Example 6
    def test_logger_handle_bucket_key(self):
        """
        When calling `DDLogger.handle`
            With different log messages
                We use different buckets to limit them
        """
        log = get_logger('test.logger')

        # DEV: This function is inlined in `logger.py`
        def get_key(record):
            return (record.name, record.levelno, record.pathname,
                    record.lineno)

        # Same record signature but different message
        # DEV: These count against the same bucket
        record1 = self._make_record(log, msg='record 1')
        record2 = self._make_record(log, msg='record 2')

        # Different line number (differs from the default used by `_make_record`)
        record3 = self._make_record(log, lno=10)

        # Different pathname (default is `module.py`)
        record4 = self._make_record(log, fn='log.py')

        # Different level (default is `logging.INFO`)
        record5 = self._make_record(log, level=logging.WARN)

        # Different logger name
        record6 = self._make_record(log)
        record6.name = 'test.logger2'

        # Log all of our records
        all_records = (record1, record2, record3, record4, record5, record6)
        for record in all_records:
            log.handle(record)

        buckets = log.buckets
        # We have 6 records but only end up with 5 buckets
        self.assertEqual(len(buckets), 5)

        # Assert record1 and record2 share a bucket (record2 was counted as skipped)
        bucket1 = buckets[get_key(record1)]
        self.assertEqual(bucket1.skipped, 1)

        bucket2 = buckets[get_key(record2)]
        self.assertEqual(bucket1, bucket2)

        # Assert bucket for the remaining records
        # None of these other messages should have been grouped together
        for record in (record3, record4, record5, record6):
            bucket = buckets[get_key(record)]
            self.assertEqual(bucket.skipped, 0)
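
Because the bucket key is `(name, levelno, pathname, lineno)` and deliberately excludes the message text, records from the same call site always share a bucket regardless of what they say. A standalone illustration (the values are made up, not `_make_record` defaults):

import logging

key1 = ('test.logger', logging.INFO, 'module.py', 10)  # "record 1"
key2 = ('test.logger', logging.INFO, 'module.py', 10)  # "record 2", same call site
key3 = ('test.logger', logging.INFO, 'module.py', 99)  # different line number

assert key1 == key2  # grouped into one bucket
assert len({key1, key2, key3}) == 2
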
Example 7
    def test_logger_log(self):
        """
        When calling `DDLogger` log methods
            We call `DDLogger.handle` with the expected log record
        """
        log = get_logger('test.logger')

        # -- NOTSET
        # By default no level is set, so the root logger's WARNING default
        # applies and we only get warning, error, and critical messages
        self.assertEqual(log.level, logging.NOTSET)
        # `log.warn`, `log.warning`, `log.error`, `log.exception`, `log.critical`, `log.fatal`
        self.assert_log_records(
            log,
            ['WARNING', 'WARNING', 'ERROR', 'ERROR', 'CRITICAL', 'CRITICAL'])

        # -- CRITICAL
        log.setLevel(logging.CRITICAL)
        # `log.critical`, `log.fatal`
        self.assert_log_records(log, ['CRITICAL', 'CRITICAL'])

        # -- ERROR
        log.setLevel(logging.ERROR)
        # `log.error`, `log.exception`, `log.critical`, `log.fatal`
        self.assert_log_records(log,
                                ['ERROR', 'ERROR', 'CRITICAL', 'CRITICAL'])

        # -- WARN
        log.setLevel(logging.WARN)
        # `log.warn`, `log.warning`, `log.error`, `log.exception`, `log.critical`, `log.fatal`
        self.assert_log_records(
            log,
            ['WARNING', 'WARNING', 'ERROR', 'ERROR', 'CRITICAL', 'CRITICAL'])

        # -- INFO
        log.setLevel(logging.INFO)
        # `log.info`, `log.warn`, `log.warning`, `log.error`, `log.exception`, `log.critical`, `log.fatal`
        self.assert_log_records(log, [
            'INFO', 'WARNING', 'WARNING', 'ERROR', 'ERROR', 'CRITICAL',
            'CRITICAL'
        ])

        # -- DEBUG
        log.setLevel(logging.DEBUG)
        # `log.debug`, `log.info`, `log.warn`, `log.warning`, `log.error`, `log.exception`, `log.critical`, `log.fatal`
        self.assert_log_records(log, [
            'DEBUG', 'INFO', 'WARNING', 'WARNING', 'ERROR', 'ERROR',
            'CRITICAL', 'CRITICAL'
        ])
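
The expectations above follow stdlib level semantics: a logger at `NOTSET` defers to its ancestors, bottoming out at the root logger's default of `WARNING`, and a record is handled only if its level is at least the effective level:

import logging

lvl_log = logging.getLogger('demo.levels')
assert lvl_log.level == logging.NOTSET
# NOTSET defers upward; the root logger defaults to WARNING
assert lvl_log.getEffectiveLevel() == logging.WARNING
assert not lvl_log.isEnabledFor(logging.INFO)

lvl_log.setLevel(logging.INFO)
assert lvl_log.isEnabledFor(logging.INFO)
assert not lvl_log.isEnabledFor(logging.DEBUG)
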
Example 8
    def test_logger_handle_no_limit(self, base_handle):
        """
        Calling `DDLogger.handle`
            When no rate limit is set
                Always calls the base `Logger.handle`
        """
        # Configure an INFO logger with no rate limit
        log = get_logger('test.logger')
        log.setLevel(logging.INFO)
        log.rate_limit = 0

        # Log many times in quick succession
        for _ in range(1000):
            log.info('test')

        # Assert that we did not perform any rate limiting
        self.assertEqual(base_handle.call_count, 1000)

        # Our buckets are empty
        self.assertEqual(log.buckets, dict())
Example 9
    def test_logger_handle_debug(self, base_handle):
        """
        Calling `DDLogger.handle`
            When effective level is DEBUG
                Always calls the base `Logger.handle`
        """
        # Configure a DEBUG logger that still has a rate limit set
        log = get_logger("test.logger")
        log.setLevel(logging.DEBUG)
        assert log.rate_limit > 0

        # Log many times in quick succession at every level
        for level in ALL_LEVEL_NAMES:
            log_fn = getattr(log, level)
            for _ in range(1000):
                log_fn("test")

        # Assert that we did not perform any rate limiting
        total = 1000 * len(ALL_LEVEL_NAMES)
        self.assertEqual(base_handle.call_count, total)

        # Our buckets are empty
        self.assertEqual(log.buckets, dict())
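
Taken together, Examples 8 and 9 imply two early-outs in `DDLogger.handle`: one when `rate_limit` is falsy and one when the effective level is `DEBUG`, both of which skip the bucket bookkeeping entirely. A minimal sketch of that guard (assumed shape, not the actual source):

import logging

class DDLoggerSketch(logging.Logger):
    rate_limit = 60  # assumed default window, in seconds

    def handle(self, record):
        # Both bypasses suggested by the tests: no limit configured, or debug mode
        if not self.rate_limit or self.getEffectiveLevel() == logging.DEBUG:
            return logging.Logger.handle(self, record)
        # ... otherwise apply the time-bucket logic exercised in Examples 2-4 ...
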
Example 10
    patch(logging=True)

debug = os.environ.get("DATADOG_TRACE_DEBUG")

# Set here a default logging format for basicConfig

# DEV: Once basicConfig is called here, future calls to it cannot be used to
# change the formatter since it applies the formatter to the root handler only
# upon initializing it the first time.
# See https://github.com/python/cpython/blob/112e4afd582515fcdcc0cde5012a4866e5cfda12/Lib/logging/__init__.py#L1550
if debug and debug.lower() == "true":
    logging.basicConfig(level=logging.DEBUG, format=DD_LOG_FORMAT)
else:
    logging.basicConfig(format=DD_LOG_FORMAT)
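
# DEV: Note on the behavior described above (illustrative, not project code):
# once basicConfig has installed a handler on the root logger, later calls are
# no-ops unless Python 3.8+'s `force=True` is passed, so the format chosen here
# sticks for the life of the process:
#
#     logging.basicConfig(format="...")              # ignored: root already configured
#     logging.basicConfig(format="...", force=True)  # Python 3.8+: replaces handlers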

log = get_logger(__name__)

EXTRA_PATCHED_MODULES = {
    "bottle": True,
    "django": True,
    "falcon": True,
    "flask": True,
    "pylons": True,
    "pyramid": True,
}


def update_patched_modules():
    modules_to_patch = os.environ.get("DATADOG_PATCH_MODULES")
    if not modules_to_patch:
        return