def test_no_dict_id(self):
    samples = []
    for i in range(128):
        samples.append(b'foo' * 64)
        samples.append(b'bar' * 64)
        samples.append(b'foobar' * 64)

    d = zstd.train_dictionary(1024, samples)

    with_dict_id = io.BytesIO()
    cctx = zstd.ZstdCompressor(level=1, dict_data=d)
    with cctx.write_to(with_dict_id) as compressor:
        self.assertEqual(compressor.write(b'foobarfoobar'), 0)

    cctx = zstd.ZstdCompressor(level=1, dict_data=d, write_dict_id=False)
    no_dict_id = io.BytesIO()
    with cctx.write_to(no_dict_id) as compressor:
        self.assertEqual(compressor.write(b'foobarfoobar'), 0)

    no_params = zstd.get_frame_parameters(no_dict_id.getvalue())
    with_params = zstd.get_frame_parameters(with_dict_id.getvalue())

    self.assertEqual(no_params.content_size, 0)
    self.assertEqual(with_params.content_size, 0)
    self.assertEqual(no_params.dict_id, 0)
    self.assertEqual(with_params.dict_id, d.dict_id())
    self.assertFalse(no_params.has_checksum)
    self.assertFalse(with_params.has_checksum)

    # The dictionary ID occupies 4 bytes in the frame header.
    self.assertEqual(len(with_dict_id.getvalue()),
                     len(no_dict_id.getvalue()) + 4)
def test_dictionary(self):
    samples = []
    for i in range(128):
        samples.append(b'foo' * 64)
        samples.append(b'bar' * 64)
        samples.append(b'foobar' * 64)

    d = zstd.train_dictionary(8192, samples)

    buffer = io.BytesIO()
    cctx = zstd.ZstdCompressor(level=9, dict_data=d)
    with cctx.write_to(buffer) as compressor:
        # write() returns the number of bytes written to the destination
        # object; 0 means the data is still buffered internally.
        self.assertEqual(compressor.write(b'foo'), 0)
        self.assertEqual(compressor.write(b'bar'), 0)
        self.assertEqual(compressor.write(b'foo' * 16384), 634)

    compressed = buffer.getvalue()

    params = zstd.get_frame_parameters(compressed)
    self.assertEqual(params.content_size, 0)
    self.assertEqual(params.window_size, 1024)
    self.assertEqual(params.dict_id, d.dict_id())
    self.assertFalse(params.has_checksum)

    self.assertEqual(compressed[0:32],
                     b'\x28\xb5\x2f\xfd\x03\x00\x55\x7b\x6b\x5e\x54\x00'
                     b'\x00\x00\x02\xfc\xf4\xa5\xba\x23\x3f\x85\xb3\x54'
                     b'\x00\x00\x18\x6f\x6f\x66\x01\x00')

    h = hashlib.sha1(compressed).hexdigest()
    self.assertEqual(h, '1c5bcd25181bcd8c1a73ea8773323e0056129f92')
def test_compress_dict_multiple(self):
    samples = []
    for i in range(128):
        samples.append(b'foo' * 64)
        samples.append(b'bar' * 64)
        samples.append(b'foobar' * 64)

    d = zstd.train_dictionary(8192, samples)

    cctx = zstd.ZstdCompressor(level=1, dict_data=d)

    for i in range(32):
        cctx.compress(b'foo bar foobar foo bar foobar')
def test_no_dict_id(self):
    samples = []
    for i in range(128):
        samples.append(b'foo' * 64)
        samples.append(b'bar' * 64)
        samples.append(b'foobar' * 64)

    d = zstd.train_dictionary(1024, samples)

    cctx = zstd.ZstdCompressor(level=1, dict_data=d)
    with_dict_id = cctx.compress(b'foobarfoobar')

    cctx = zstd.ZstdCompressor(level=1, dict_data=d, write_dict_id=False)
    no_dict_id = cctx.compress(b'foobarfoobar')

    # Omitting the dictionary ID saves exactly 4 bytes of frame header.
    self.assertEqual(len(with_dict_id), len(no_dict_id) + 4)
def test_dictionary(self):
    samples = []
    for i in range(128):
        samples.append(b'foo' * 64)
        samples.append(b'bar' * 64)
        samples.append(b'foobar' * 64)

    d = zstd.train_dictionary(8192, samples)

    buffer = io.BytesIO()
    cctx = zstd.ZstdCompressor(level=9, dict_data=d)
    with cctx.write_to(buffer) as compressor:
        compressor.write(b'foo')
        compressor.write(b'bar')
        compressor.write(b'foo' * 16384)

    compressed = buffer.getvalue()
    h = hashlib.sha1(compressed).hexdigest()
    self.assertEqual(h, '1c5bcd25181bcd8c1a73ea8773323e0056129f92')
def test_dictionary(self):
    samples = []
    for i in range(128):
        samples.append(b'foo' * 64)
        samples.append(b'bar' * 64)
        samples.append(b'foobar' * 64)

    d = zstd.train_dictionary(8192, samples)

    orig = b'foobar' * 16384
    cctx = zstd.ZstdCompressor(level=1, dict_data=d, write_content_size=True)
    compressed = cctx.compress(orig)

    dctx = zstd.ZstdDecompressor(dict_data=d)
    decompressed = dctx.decompress(compressed)

    self.assertEqual(decompressed, orig)
def test_no_dict_id(self):
    samples = []
    for i in range(128):
        samples.append(b'foo' * 64)
        samples.append(b'bar' * 64)
        samples.append(b'foobar' * 64)

    d = zstd.train_dictionary(1024, samples)

    with_dict_id = io.BytesIO()
    cctx = zstd.ZstdCompressor(level=1, dict_data=d)
    with cctx.write_to(with_dict_id) as compressor:
        compressor.write(b'foobarfoobar')

    cctx = zstd.ZstdCompressor(level=1, dict_data=d, write_dict_id=False)
    no_dict_id = io.BytesIO()
    with cctx.write_to(no_dict_id) as compressor:
        compressor.write(b'foobarfoobar')

    # The dictionary ID occupies 4 bytes in the frame header.
    self.assertEqual(len(with_dict_id.getvalue()),
                     len(no_dict_id.getvalue()) + 4)
def test_dictionary_multiple(self):
    samples = []
    for i in range(128):
        samples.append(b'foo' * 64)
        samples.append(b'bar' * 64)
        samples.append(b'foobar' * 64)

    d = zstd.train_dictionary(8192, samples)

    sources = (b'foobar' * 8192, b'foo' * 8192, b'bar' * 8192)
    compressed = []
    cctx = zstd.ZstdCompressor(level=1, dict_data=d, write_content_size=True)
    for source in sources:
        compressed.append(cctx.compress(source))

    dctx = zstd.ZstdDecompressor(dict_data=d)
    for i in range(len(sources)):
        decompressed = dctx.decompress(compressed[i])
        self.assertEqual(decompressed, sources[i])
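The two round-trip tests above pass write_content_size=True so the frame header records the decompressed size and decompress() can size its output buffer automatically. A minimal sketch of the alternative path, assuming the same zstd module import as the tests: when the content size is not embedded, decompress() needs an explicit max_output_size hint.

def example_decompress_without_content_size():
    # Sketch only: sample data and sizes are illustrative, not from the tests.
    samples = [b'foo' * 64, b'bar' * 64, b'foobar' * 64] * 128
    d = zstd.train_dictionary(8192, samples)

    orig = b'foobar' * 8192
    cctx = zstd.ZstdCompressor(level=1, dict_data=d)  # no write_content_size
    compressed = cctx.compress(orig)

    dctx = zstd.ZstdDecompressor(dict_data=d)
    # max_output_size bounds the buffer decompress() may allocate when the
    # frame does not declare its decompressed size.
    return dctx.decompress(compressed, max_output_size=len(orig))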
def processFile(inputFileName, outputDir, dictSize):
    print('Processing %s' % inputFileName)

    # Skip files that already carry a shared compression dictionary.
    inputConn = sqlite3.connect(inputFileName)
    inputConn.isolation_level = None
    cursor = inputConn.cursor()
    cursor.execute(
        "SELECT value FROM metadata WHERE name='shared_zlib_dict'")
    if cursor.fetchone():
        print('Dictionary already applied')
        inputConn.close()
        return

    # Load a sample of tiles, thinning the rows so that roughly 50 MB of
    # tile data is used for training.
    cursor2 = inputConn.cursor()
    cursor2.execute("SELECT SUM(LENGTH(tile_data)) FROM tiles")
    size = cursor2.fetchone()[0]
    skip = int(size / (50 * 1024 * 1024))

    cursor2.execute(
        "SELECT tile_column, tile_row, zoom_level, tile_data FROM tiles")
    tiles = []
    count = 0
    for row in cursor2:
        count += 1
        if skip > 0:
            if count % skip != 0:
                continue
        x, y, zoom, tileData = row
        tiles.append(decompressData(tileData))
    inputConn.close()

    # Train the dictionary and write it out, named after the input file.
    zdict = zstd.train_dictionary(dictSize, tiles)

    outputFileName = "%s/%s.zdict" % (
        outputDir, os.path.splitext(os.path.basename(inputFileName))[0])
    with io.open(outputFileName, 'wb') as f:
        f.write(zdict.as_bytes())
    return outputFileName
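processFile only trains and writes the dictionary. A hedged sketch of the read-back side follows; the helper name and tile payload are hypothetical, and wrapping the stored bytes in zstd.ZstdCompressionDict is the assumed way to reuse a dictionary loaded from disk.

def loadDictionaryAndCompress(dictFileName, tileData):
    # Sketch only: dictFileName is a .zdict file written by processFile,
    # tileData is an uncompressed tile payload.
    with io.open(dictFileName, 'rb') as f:
        zdict = zstd.ZstdCompressionDict(f.read())

    cctx = zstd.ZstdCompressor(level=9, dict_data=zdict)
    return cctx.compress(tileData)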
def test_multithreaded_unsupported(self):
    samples = []
    for i in range(128):
        samples.append(b'foo' * 64)
        samples.append(b'bar' * 64)

    d = zstd.train_dictionary(8192, samples)

    cctx = zstd.ZstdCompressor(dict_data=d, threads=2)
    with self.assertRaisesRegexp(
            zstd.ZstdError,
            r'compress\(\) cannot be used with both dictionaries and '
            r'multi-threaded compression'):
        cctx.compress(b'foo')

    params = zstd.get_compression_parameters(3)
    cctx = zstd.ZstdCompressor(compression_params=params, threads=2)
    with self.assertRaisesRegexp(
            zstd.ZstdError,
            r'compress\(\) cannot be used with both compression parameters '
            r'and multi-threaded compression'):
        cctx.compress(b'foo')
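The errors above only forbid combining multi-threaded compress() with a dictionary or explicit compression parameters. A small sketch of the plain multi-threaded path, with level and input chosen purely for illustration:

def example_multithreaded_compress():
    # Sketch only: threads=2 without dict_data or compression_params,
    # which the exceptions in the test above do not cover.
    cctx = zstd.ZstdCompressor(level=3, threads=2)
    return cctx.compress(b'data to compress' * 4096)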
def test_dictionary(self):
    samples = []
    for i in range(128):
        samples.append(b'foo' * 64)
        samples.append(b'bar' * 64)
        samples.append(b'foobar' * 64)

    d = zstd.train_dictionary(8192, samples)

    orig = b'foobar' * 16384
    buffer = io.BytesIO()
    cctx = zstd.ZstdCompressor(dict_data=d)
    with cctx.write_to(buffer) as compressor:
        # write() returns the number of bytes written to the destination.
        self.assertEqual(compressor.write(orig), 1544)

    compressed = buffer.getvalue()
    buffer = io.BytesIO()
    dctx = zstd.ZstdDecompressor(dict_data=d)
    with dctx.write_to(buffer) as decompressor:
        self.assertEqual(decompressor.write(compressed), len(orig))

    self.assertEqual(buffer.getvalue(), orig)