Example No. 1
 def test_RotationMovesFilesAlong(self):
     """Rotation moves the same logfile's sequence number forward."""
     # Number of times this test will rotate out the log file after its first
     # compression.
     rotations = 3
     # The rotation loop starts at 2 because the first rollover below already
     # creates index 1.
     start_rotation = 2
     handler = servo_logging.ServodRotatingFileHandler(logdir=self.logdir,
                                                       ts=self.ts,
                                                       fmt=self.fmt,
                                                       level=self.loglevel)
     self.test_logger.addHandler(handler)
     self.test_logger.info('This is just some test content.')
     handler.doRollover()
     # At this point the compressed log-file should exist.
     rolled_fn = get_rolled_fn(handler.baseFilename, 1)
     assert os.path.exists(rolled_fn)
     md5sum = get_file_md5sum(rolled_fn)
     for i in range(start_rotation, start_rotation + rotations):
         handler.doRollover()
         rolled_fn = get_rolled_fn(handler.baseFilename, i)
         # Ensure that the file was rotated properly.
         assert os.path.exists(rolled_fn)
         # Ensure the file is the same one that started the rotation by
         # validating its checksum.
         assert md5sum == get_file_md5sum(rolled_fn)
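
These examples call two module-level test helpers, get_rolled_fn and get_file_md5sum, that are not shown above. A minimal sketch of what they might look like, assuming rotated logs are named '<baseFilename>.<index>' followed by the module's compression suffix (the real helpers and naming scheme in the test module may differ):

import hashlib

def get_rolled_fn(base_filename, index):
    # Assumed naming convention for a rotated, compressed log file; the scheme
    # actually used by ServodRotatingFileHandler may differ.
    return '%s.%d%s' % (base_filename, index, servo_logging.COMPRESSION_SUFFIX)

def get_file_md5sum(filename):
    # Read in binary mode so the digest reflects the exact on-disk bytes.
    with open(filename, 'rb') as f:
        return hashlib.md5(f.read()).hexdigest()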
Example No. 2
 def test_HandleExistingLogDir(self):
     """The output directory for a specific port already existing is fine."""
     output_dir = servo_logging._buildLogdirName(self.logdir, 9998)
     os.makedirs(output_dir)
     _ = servo_logging.ServodRotatingFileHandler(logdir=self.logdir,
                                                 ts=self.ts,
                                                 fmt=self.fmt,
                                                 level=self.loglevel)
     assert os.path.isdir(output_dir)
Example No. 3
 def test_LoggerLogsToFile(self):
     """Basic sanity that content is being output to the file."""
     test_str = 'This is a test string to make sure there is logging.'
     handler = servo_logging.ServodRotatingFileHandler(logdir=self.logdir,
                                                       ts=self.ts,
                                                       fmt=self.fmt,
                                                       level=self.loglevel)
     self.test_logger.addHandler(handler)
     self.test_logger.info(test_str)
     with open(handler.baseFilename, 'r') as log:
         assert log.read().strip() == test_str
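
Every example also relies on a shared unittest fixture providing self.logdir, self.ts, self.fmt, self.loglevel, and self.test_logger, which is not shown. A minimal sketch of such a fixture follows; the class name, logger name, and format string are placeholder assumptions (a plain '%(message)s' format is consistent with the exact-content check in the example above):

import logging
import shutil
import tempfile
import unittest

class ServodRotatingFileHandlerTest(unittest.TestCase):

    def setUp(self):
        # Fresh, throwaway log directory for every test.
        self.logdir = tempfile.mkdtemp()
        # Timestamp, format, and level passed to the handlers under test.
        self.ts = servo_logging._generateTs()
        self.fmt = '%(message)s'
        self.loglevel = logging.DEBUG
        # Dedicated logger so handlers do not attach to the root logger.
        self.test_logger = logging.getLogger('servod_logging_test')
        self.test_logger.setLevel(logging.DEBUG)

    def tearDown(self):
        # Detach any handlers added during the test before deleting the dir.
        for handler in list(self.test_logger.handlers):
            self.test_logger.removeHandler(handler)
            handler.close()
        shutil.rmtree(self.logdir)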
Example No. 4
 def test_DeleteMultipleInstancesPastBackupCount(self):
     """No more than backup count logs are kept across instances.

     Additionally, this test validates that the oldest logs get deleted.
     """
     new_count = servo_logging.UNCOMPRESSED_BACKUP_COUNT + 20
     handler = servo_logging.ServodRotatingFileHandler(
         logdir=self.logdir,
         backup_count=new_count,
         ts=self.ts,
         fmt=self.fmt,
         level=self.loglevel)
     for _ in range(new_count):
         handler.doRollover()
         # At most new_count backups (plus exempt files) should exist.
         assert len(os.listdir(
             handler.logdir)) <= (new_count + BACKUP_COUNT_EXEMPT_FILES)
     assert len(os.listdir(handler.logdir)) == (new_count +
                                                BACKUP_COUNT_EXEMPT_FILES)
     # Change the timestamp and create a new instance. Rotate out all old files.
     new_ts = servo_logging._generateTs()
     servo_logging._compressOldFiles(logdir=self.logdir)
     handler = servo_logging.ServodRotatingFileHandler(
         logdir=self.logdir,
         backup_count=new_count,
         ts=new_ts,
         fmt=self.fmt,
         level=self.loglevel)
     for _ in range(new_count):
         handler.doRollover()
         # At most new_count backups (plus exempt files) should exist.
         assert len(os.listdir(
             handler.logdir)) <= (new_count + BACKUP_COUNT_EXEMPT_FILES)
     assert len(os.listdir(handler.logdir)) == (new_count +
                                                BACKUP_COUNT_EXEMPT_FILES)
     # After two rounds of new_count rotations, the first timestamp should no
     # longer be around as it has been rotated out. Verify that.
     assert not any(self.ts in f for f in os.listdir(handler.logdir))
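
Note that BACKUP_COUNT_EXEMPT_FILES above (and in Example No. 7 below) is defined by the test module rather than by servo_logging; judging from the assertions, it counts the files in the log directory that backup-count pruning never removes, such as the currently active log file. Its real value is not shown in these examples, so the definition below is purely a hypothetical placeholder:

# Hypothetical placeholder; the test module defines the real value.
# Number of files per log directory that backup-count pruning never deletes.
BACKUP_COUNT_EXEMPT_FILES = 1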
Example No. 5
 def test_CompressOldFilesTwoLoglevels(self):
     """At most |UNCOMPRESSED_BACKUP_COUNT| are kept per loglevel."""
     self.ts = servo_logging._generateTs()
     handler = servo_logging.ServodRotatingFileHandler(logdir=self.logdir,
                                                       ts=self.ts,
                                                       fmt=self.fmt,
                                                       level=self.loglevel)
     self.test_logger.addHandler(handler)
     self.test_logger.info('Test content.')
     for _ in range(servo_logging.UNCOMPRESSED_BACKUP_COUNT):
         handler.doRollover()
     # At this point the maximum number of uncompressed files should exist.
     new_ts = servo_logging._generateTs()
     handler2 = servo_logging.ServodRotatingFileHandler(logdir=self.logdir,
                                                        ts=new_ts,
                                                        fmt=self.fmt,
                                                        level=logging.INFO)
     for _ in range(servo_logging.UNCOMPRESSED_BACKUP_COUNT):
         handler2.doRollover()
     # At this point both handlers have created the maximum number of
     # uncompressed logs. Since they use different loglevels,
     # no files should be purged or compressed.
     pre_purge_filecount = len([
         f for f in os.listdir(self.logdir)
         if servo_logging.COMPRESSION_SUFFIX not in f
     ])
     servo_logging._compressOldFiles(logdir=self.logdir)
     post_purge_filecount = len([
         f for f in os.listdir(self.logdir)
         if servo_logging.COMPRESSION_SUFFIX not in f
     ])
     assert pre_purge_filecount == post_purge_filecount
     # Ensure the file hasn't been compressed.
     assert os.path.exists(handler.baseFilename)
     cls = servo_logging.ServodRotatingFileHandler
     handler_compressed_fn = cls.getCompressedPathname(handler.baseFilename)
     assert not os.path.exists(handler_compressed_fn)
Example No. 6
 def test_CompressOldFiles(self):
     """At most |UNCOMPRESSED_BACKUP_COUNT| around after old file compression."""
     self.ts = servo_logging._generateTs()
     handler = servo_logging.ServodRotatingFileHandler(logdir=self.logdir,
                                                       ts=self.ts,
                                                       fmt=self.fmt,
                                                       level=self.loglevel)
     self.test_logger.addHandler(handler)
     self.test_logger.info('Test content.')
     for _ in range(servo_logging.UNCOMPRESSED_BACKUP_COUNT):
         handler.doRollover()
     # At this point the maximum number of uncompressed files should exist.
     new_ts = servo_logging._generateTs()
     handler2 = servo_logging.ServodRotatingFileHandler(logdir=self.logdir,
                                                        ts=new_ts,
                                                        fmt=self.fmt,
                                                        level=self.loglevel)
     for _ in range(servo_logging.UNCOMPRESSED_BACKUP_COUNT):
         handler2.doRollover()
     # At this point both handlers have created the maximum number of
     # uncompressed logs. Since they share the same suffix, compression
     # should compress all of the first handler's files and thereby reduce
     # the number of uncompressed files.
     pre_purge_filecount = len([
         f for f in os.listdir(self.logdir)
         if servo_logging.COMPRESSION_SUFFIX not in f
     ])
     servo_logging._compressOldFiles(logdir=self.logdir)
     post_purge_filecount = len([
         f for f in os.listdir(self.logdir)
         if servo_logging.COMPRESSION_SUFFIX not in f
     ])
     assert pre_purge_filecount > post_purge_filecount
     assert not os.path.exists(handler.baseFilename)
     cls = servo_logging.ServodRotatingFileHandler
     handler_compressed_fn = cls.getCompressedPathname(handler.baseFilename)
     assert os.path.exists(handler_compressed_fn)
Example No. 7
 def test_DeleteMultiplePastBackupCount(self):
     """No more than backup count logs are kept."""
     # Set the backup count so that only 3 compressed backups are kept for
     # this test.
     new_count = servo_logging.UNCOMPRESSED_BACKUP_COUNT + 3
     handler = servo_logging.ServodRotatingFileHandler(
         logdir=self.logdir,
         backup_count=new_count,
         ts=self.ts,
         fmt=self.fmt,
         level=self.loglevel)
     for _ in range(2 * new_count):
         handler.doRollover()
         # At most new_count backups (plus exempt files) should exist.
         assert len(os.listdir(
             handler.logdir)) <= (new_count + BACKUP_COUNT_EXEMPT_FILES)
Example No. 8
 def test_RotationOccursWhenFileGrowsTooLarge(self):
     """Growing log-file beyond limit causes a rotation."""
     test_max_log_bytes = 40
     setattr(servo_logging, 'MAX_LOG_BYTES', test_max_log_bytes)
     handler = servo_logging.ServodRotatingFileHandler(logdir=self.logdir,
                                                       ts=self.ts,
                                                       fmt=self.fmt,
                                                       level=self.loglevel)
     self.test_logger.addHandler(handler)
     # The first log is only 20 bytes and should not cause rotation.
     log1 = 'Here are 20 bytes la'
     # The second log is 40 bytes and should cause rotation.
     log2 = 'This is an attempt to make 40 bytes laaa'
     self.test_logger.info(log1)
     # No rollover should have occurred yet.
     assert not os.path.exists(get_rolled_fn(handler.baseFilename, 1))
     # Logging log2 pushes the file past the limit and should trigger a
     # rollover.
     self.test_logger.info(log2)
     assert os.path.exists(get_rolled_fn(handler.baseFilename, 1))
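
Patching MAX_LOG_BYTES with setattr leaves the lowered limit in place for any tests that run afterwards. A sketch of the same check using unittest.mock.patch.object, which restores the original value automatically when the block exits, is shown below; it is written as a method of the same TestCase fixture sketched after Example No. 3 and uses the same assumed get_rolled_fn helper:

from unittest import mock

def test_RotationOccursWhenFileGrowsTooLarge(self):
    """Variant of the test above; the patched limit is reverted automatically."""
    with mock.patch.object(servo_logging, 'MAX_LOG_BYTES', 40):
        handler = servo_logging.ServodRotatingFileHandler(
            logdir=self.logdir,
            ts=self.ts,
            fmt=self.fmt,
            level=self.loglevel)
        self.test_logger.addHandler(handler)
        # A 20-byte record stays under the 40-byte limit, so no rollover yet.
        self.test_logger.info('Here are 20 bytes la')
        assert not os.path.exists(get_rolled_fn(handler.baseFilename, 1))
        # A 40-byte record pushes the file past the limit and rolls it over.
        self.test_logger.info('This is an attempt to make 40 bytes laaa')
        assert os.path.exists(get_rolled_fn(handler.baseFilename, 1))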