def _maybe_compress(self): if self._compression_type: buf = self._buffer if self._compression_type == self.CODEC_GZIP: compressed = gzip_encode(buf) elif self._compression_type == self.CODEC_SNAPPY: compressed = snappy_encode(buf) elif self._compression_type == self.CODEC_LZ4: if self._magic == 0: compressed = lz4_encode_old_kafka(bytes(buf)) else: compressed = lz4_encode(bytes(buf)) compressed_size = len(compressed) size = self._size_in_bytes(key_size=0, value_size=compressed_size) if size > len(self._buffer): self._buffer = bytearray(size) else: del self._buffer[size:] self._encode_msg(self._buffer, offset=0, timestamp=0, key_size=0, key=None, value_size=compressed_size, value=compressed, attributes=self._compression_type) self._pos = size return True return False
def _maybe_compress(self): if self._compression_type: buf = self._buffer if self._compression_type == self.CODEC_GZIP: compressed = gzip_encode(buf) elif self._compression_type == self.CODEC_SNAPPY: compressed = snappy_encode(buf) elif self._compression_type == self.CODEC_LZ4: if self._magic == 0: compressed = lz4_encode_old_kafka(bytes(buf)) else: compressed = lz4_encode(bytes(buf)) compressed_size = len(compressed) size = self._size_in_bytes(key_size=0, value_size=compressed_size) if size > len(self._buffer): self._buffer = bytearray(size) else: del self._buffer[size:] self._encode_msg( self._buffer, offset=0, timestamp=0, key_size=0, key=None, value_size=compressed_size, value=compressed, attributes=self._compression_type) self._pos = size return True return False
def _maybe_compress(self): if self._compression_type: self._assert_has_codec(self._compression_type) data = bytes(self._buffer) if self._compression_type == self.CODEC_GZIP: compressed = gzip_encode(data) elif self._compression_type == self.CODEC_SNAPPY: compressed = snappy_encode(data) elif self._compression_type == self.CODEC_LZ4: if self._magic == 0: compressed = lz4_encode_old_kafka(data) else: compressed = lz4_encode(data) size = self.size_in_bytes(0, timestamp=0, key=None, value=compressed) # We will try to reuse the same buffer if we have enough space if size > len(self._buffer): self._buffer = bytearray(size) else: del self._buffer[size:] self._encode_msg(start_pos=0, offset=0, timestamp=0, key=None, value=compressed, attributes=self._compression_type) return True return False
def _maybe_compress(self): if self._compression_type: self._assert_has_codec(self._compression_type) data = bytes(self._buffer) if self._compression_type == self.CODEC_GZIP: compressed = gzip_encode(data) elif self._compression_type == self.CODEC_SNAPPY: compressed = snappy_encode(data) elif self._compression_type == self.CODEC_LZ4: if self._magic == 0: compressed = lz4_encode_old_kafka(data) else: compressed = lz4_encode(data) size = self.size_in_bytes( 0, timestamp=0, key=None, value=compressed) # We will try to reuse the same buffer if we have enough space if size > len(self._buffer): self._buffer = bytearray(size) else: del self._buffer[size:] self._encode_msg( start_pos=0, offset=0, timestamp=0, key=None, value=compressed, attributes=self._compression_type) return True return False
def test_lz4_old():
    """Round-trip random payloads through the old-kafka LZ4 framing."""
    for _ in range(1000):
        original = random_string(100).encode('utf-8')
        restored = lz4_decode_old_kafka(lz4_encode_old_kafka(original))
        assert len(restored) == len(original)
        assert restored == original
def test_lz4_old():
    """Verify lz4_decode_old_kafka inverts lz4_encode_old_kafka on random data."""
    # BUG FIX: `xrange` is a Python-2-only builtin and raises NameError on
    # Python 3 unless shimmed (e.g. via six.moves). The builtin `range`
    # iterates identically on both versions, so use it instead.
    for _ in range(1000):
        b1 = random_string(100).encode('utf-8')
        b2 = lz4_decode_old_kafka(lz4_encode_old_kafka(b1))
        assert len(b1) == len(b2)
        assert b1 == b2