def test_compress_blocksize_default(use_threads):
    """Default blocksize (argument omitted or passed as 0) resolves to a positive value."""
    data = np.arange(1000, dtype='i4')
    blosc.use_threads = use_threads
    # Request the default blocksize both ways: omit the argument entirely,
    # or pass 0 explicitly. Either way the encoder must pick something > 0.
    for extra in ((), (0,)):
        packed = blosc.compress(data, b'lz4', 1, Blosc.NOSHUFFLE, *extra)
        _, _, blocksize = blosc.cbuffer_sizes(packed)
        assert blocksize > 0
def prepare_chunk(self):
    """Read the Blosc chunk header and per-block start offsets for self.key_path.

    Populates ``self.cbytes``, ``self.buff`` (an anonymous mmap sized to hold
    the whole compressed chunk), ``self.nblocks``, ``self.start_points``,
    ``self.start_points_max`` and ``self.n_per_block``.  When the chunk fits
    in a single block, the full chunk is read eagerly via ``self.read_full()``
    instead.
    """
    assert self.buff is None
    # The first 16 bytes are the Blosc header; it yields the uncompressed
    # size, compressed size, blocksize and the element typesize.
    header = self.fs.read_block(self.key_path, 0, 16)
    nbytes, self.cbytes, blocksize = cbuffer_sizes(header)
    typesize, _shuffle, _memcpyd = cbuffer_metainfo(header)
    # Anonymous memory map large enough for the entire compressed chunk;
    # blocks can be written into it as they are fetched later.
    self.buff = mmap.mmap(-1, self.cbytes)
    self.buff[0:16] = header
    # Ceiling division in pure integer arithmetic. The previous float-based
    # round-up (nbytes / blocksize compared against its int()) could
    # miscount for very large chunks due to float precision loss.
    self.nblocks = -(-nbytes // blocksize)
    if self.nblocks == 1:
        # NOTE(review): the mmap allocated above is dropped here without
        # being closed — verify read_full() does not rely on self.buff.
        self.buff = self.read_full()
        return
    # Immediately after the header comes one int32 start offset per block.
    start_points_buffer = self.fs.read_block(
        self.key_path, 16, int(self.nblocks * 4)
    )
    self.start_points = np.frombuffer(
        start_points_buffer, count=self.nblocks, dtype=np.int32
    )
    self.start_points_max = self.start_points.max()
    self.buff[16:(16 + (self.nblocks * 4))] = start_points_buffer
    # Elements per block; deliberately kept as true (float) division to
    # preserve the original behavior — blocksize need not be an exact
    # multiple of typesize.
    self.n_per_block = blocksize / typesize
def test_compress_blocksize(use_threads, bs):
    """A custom blocksize passed to compress is reflected in the buffer header."""
    data = np.arange(1000, dtype='i4')
    blosc.use_threads = use_threads
    packed = blosc.compress(data, b'lz4', 1, Blosc.NOSHUFFLE, bs)
    _, _, actual = blosc.cbuffer_sizes(packed)
    assert actual == bs
def test_compress_blocksize():
    """Exercise default and custom blocksizes for every use_threads mode.

    NOTE(review): this definition shares its name with the parametrized
    variant elsewhere in the file; the later definition shadows the earlier.
    """
    data = np.arange(1000, dtype='i4')
    for use_threads in (True, False, None):
        blosc.use_threads = use_threads
        # Default blocksize: omit the argument or pass 0 — both must pick
        # some positive value.
        for extra in ((), (0,)):
            packed = blosc.compress(data, b'lz4', 1, Blosc.NOSHUFFLE, *extra)
            _, _, blocksize = blosc.cbuffer_sizes(packed)
            assert blocksize > 0
        # Custom blocksizes round-trip through the compressed buffer header.
        for bs in (2 ** 7, 2 ** 8):
            packed = blosc.compress(data, b'lz4', 1, Blosc.NOSHUFFLE, bs)
            _, _, blocksize = blosc.cbuffer_sizes(packed)
            assert blocksize == bs