import gzip
import os

import numpy as np
import pytest

from cloudvolume import CloudVolume, Storage, chunks
from cloudvolume.chunks import encode, decode
from cloudvolume.lib import Bbox

# Module-level scaffolding assumed by these tests: a random suffix so
# concurrent test runs don't collide on /tmp paths.
TEST_NUMBER = np.random.randint(0, 999999)


def test_fpzip():
  for N in range(0, 100):
    flts = np.array(range(N), dtype=np.float32).reshape((N, 1, 1, 1))
    compressed = encode(flts, 'fpzip')
    assert compressed != flts
    decompressed = decode(compressed, 'fpzip')
    assert np.all(decompressed == flts)

  for N in range(0, 200, 2):
    flts = np.array(range(N), dtype=np.float32).reshape((N // 2, 2, 1, 1))
    compressed = encode(flts, 'fpzip')
    assert compressed != flts
    decompressed = decode(compressed, 'fpzip')
    assert np.all(decompressed == flts)
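
# fpzip is lossless at full precision, so the round-trips above check exact
# equality. A minimal standalone sketch of the same round-trip on a random
# 4D float32 volume (the shape here is illustrative only):
def fpzip_roundtrip_sketch():
  flts = np.random.random_sample(size=(8, 8, 8, 1)).astype(np.float32)
  compressed = encode(flts, 'fpzip')
  decompressed = decode(compressed, 'fpzip')
  assert np.all(decompressed == flts)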

def encode_decode(data, format, shape=(64, 64, 64), num_channels=1):
  # Shared round-trip helper; shape/num_channels default to the values the
  # unparameterized tests use, and are overridable for the parameterized ones.
  encoded = encode(data, format)
  result = decode(
    encoded, format,
    shape=tuple(shape) + (num_channels,),
    dtype=np.uint8,
  )
  assert np.all(result.shape == data.shape)
  assert np.all(data == result)

def test_kempression():
  data = np.random.random_sample(size=1024 * 3).reshape(
    (64, 4, 4, 3)).astype(np.float32)
  encoded = encode(data, 'kempressed')
  result = decode(encoded, 'kempressed', shape=(64, 4, 4, 3), dtype=np.float32)
  assert np.all(result.shape == data.shape)
  assert np.all(np.abs(data - result) <= np.finfo(np.float32).eps)
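
# Why eps tolerance rather than exact equality: kempressed (per the
# cloud-volume chunks module) stores 2.0 + swapaxes(data, 2, 3) via fpzip,
# and the +2.0 shift can round values in [0, 1) by up to one float32 eps.
# A sketch of just the shift's rounding cost (the axis swap is exact):
def kempressed_tolerance_sketch():
  data = np.random.random_sample(size=(8, 4, 4, 3)).astype(np.float32)
  shifted_back = (np.float32(2.0) + data) - np.float32(2.0)
  assert np.all(np.abs(shifted_back - data) <= np.finfo(np.float32).eps)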

def test_http_read_brotli_image():
  fn = "2_2_50/4096-4608_4096-4608_112-128"
  bbox = Bbox.from_filename(fn)  # possible off by one error w/ exclusive bounds
  with Storage(
    "https://open-neurodata.s3.amazonaws.com/kharris15/apical/em"
  ) as stor:
    img_bytes = stor.get_file(fn)
  img = chunks.decode(img_bytes, 'raw', shape=bbox.size3(), dtype="uint8")
  assert img.shape == (512, 512, 16)
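
# Bbox.from_filename parses a neuroglancer-style chunk name of the form
# "xmin-xmax_ymin-ymax_zmin-zmax" into a bounding box whose size3() gives
# the per-axis extents. A minimal sketch of the parse used above:
def bbox_from_filename_sketch():
  bbox = Bbox.from_filename("4096-4608_4096-4608_112-128")
  # (4608 - 4096, 4608 - 4096, 128 - 112)
  assert tuple(bbox.size3()) == (512, 512, 16)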

def test_jpeg():
  data = np.zeros(shape=(64, 64, 64, 1), dtype=np.uint8)
  encode_decode(data, 'jpeg')
  encode_decode(data + 255, 'jpeg')

  # Random jpeg won't decompress to exactly the same image,
  # but it should have nearly the same average power.
  random_data = np.random.randint(255, size=(64, 64, 64, 1), dtype=np.uint8)
  pre_avg = random_data.copy().flatten().mean()
  encoded = encode(random_data, 'jpeg')
  decoded = decode(encoded, 'jpeg', shape=(64, 64, 64, 1), dtype=np.uint8)
  post_avg = decoded.copy().flatten().mean()

  assert abs(pre_avg - post_avg) < 1

# Parameterized variant of test_jpeg; renamed so it doesn't shadow the
# definition above, and decorated so pytest can supply the arguments.
@pytest.mark.parametrize("shape", [(64, 64, 64)])  # representative shape
@pytest.mark.parametrize("num_channels", [1])
def test_jpeg_shapes(shape, num_channels):
  import simplejpeg
  xshape = list(shape) + [num_channels]
  data = np.zeros(shape=xshape, dtype=np.uint8)
  encode_decode(data, 'jpeg', shape, num_channels)
  encode_decode(data + 255, 'jpeg', shape, num_channels)

  jpg = simplejpeg.decode_jpeg(
    encode(data, 'jpeg'),
    colorspace="GRAY",
  )
  assert jpg.shape[0] == shape[1] * shape[2]
  assert jpg.shape[1] == shape[0]

  # Random jpeg won't decompress to exactly the same image,
  # but it should have nearly the same average power.
  random_data = np.random.randint(255, size=xshape, dtype=np.uint8)
  pre_avg = random_data.copy().flatten().mean()
  encoded = encode(random_data, 'jpeg')
  decoded = decode(encoded, 'jpeg', shape=xshape, dtype=np.uint8)
  post_avg = decoded.copy().flatten().mean()

  assert abs(pre_avg - post_avg) < 1
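
# The two shape asserts above document the 'jpeg' codec's layout: a grayscale
# (X, Y, Z, 1) volume is written as one tall 2D JPEG, width X and height
# Y * Z. A minimal sketch of that property (values here are illustrative):
def jpeg_layout_sketch():
  import simplejpeg
  data = np.zeros(shape=(32, 16, 8, 1), dtype=np.uint8)
  jpg = simplejpeg.decode_jpeg(encode(data, 'jpeg'), colorspace="GRAY")
  assert jpg.shape[0] == 16 * 8  # height = Y * Z
  assert jpg.shape[1] == 32      # width = X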

def test_caching():
  image = np.zeros(shape=(128, 128, 128, 1), dtype=np.uint8)
  image[0:64, 0:64, 0:64] = 1
  image[64:128, 0:64, 0:64] = 2
  image[0:64, 64:128, 0:64] = 3
  image[0:64, 0:64, 64:128] = 4
  image[64:128, 64:128, 0:64] = 5
  image[64:128, 0:64, 64:128] = 6
  image[0:64, 64:128, 64:128] = 7
  image[64:128, 64:128, 64:128] = 8

  dirpath = '/tmp/cloudvolume/caching-volume-' + str(TEST_NUMBER)
  layer_path = 'file://' + dirpath

  vol = CloudVolume.from_numpy(
    image,
    voxel_offset=(0, 0, 0),
    vol_path=layer_path,
    layer_type='image',
    resolution=(1, 1, 1),
    encoding='raw',
    chunk_size=(64, 64, 64),
  )
  vol.cache.enabled = True
  vol.cache.flush()

  # Test that reading populates the cache
  read1 = vol[:, :, :]
  assert np.all(read1 == image)

  read2 = vol[:, :, :]
  assert np.all(read2 == image)
  assert len(vol.cache.list()) > 0

  files = vol.cache.list()
  validation_set = [
    '0-64_0-64_0-64',
    '64-128_0-64_0-64',
    '0-64_64-128_0-64',
    '0-64_0-64_64-128',
    '64-128_64-128_0-64',
    '64-128_0-64_64-128',
    '0-64_64-128_64-128',
    '64-128_64-128_64-128',
  ]
  assert set([os.path.splitext(fname)[0] for fname in files]) == set(validation_set)

  for i in range(8):
    fname = os.path.join(vol.cache.path, vol.key, validation_set[i]) + '.gz'
    with gzip.GzipFile(fname, mode='rb') as gfile:
      chunk = gfile.read()
    img3d = chunks.decode(chunk, 'raw', (64, 64, 64, 1), np.uint8)
    assert np.all(img3d == (i + 1))

  vol.cache.flush()
  assert not os.path.exists(vol.cache.path)

  # Test that writing populates the cache
  vol[:, :, :] = image
  assert os.path.exists(vol.cache.path)
  assert np.all(vol[:, :, :] == image)
  vol.cache.flush()

  # Test that partial reads work too
  result = vol[0:64, 0:64, :]
  assert np.all(result == image[0:64, 0:64, :])
  files = vol.cache.list()
  assert len(files) == 2
  result = vol[:, :, :]
  assert np.all(result == image)
  files = vol.cache.list()
  assert len(files) == 8
  vol.cache.flush()

  # Test Non-standard Cache Destination
  dirpath = '/tmp/cloudvolume/caching-cache-' + str(TEST_NUMBER)
  vol.cache.enabled = True
  vol.cache.path = dirpath
  vol[:, :, :] = image
  assert len(os.listdir(os.path.join(dirpath, vol.key))) == 8
  vol.cache.flush()

  # Test that caching doesn't occur when cache is not set
  vol.cache.enabled = False
  result = vol[:, :, :]
  if os.path.exists(vol.cache.path):
    files = vol.cache.list()
    assert len(files) == 0
  vol[:, :, :] = image
  if os.path.exists(vol.cache.path):
    files = vol.cache.list()
    assert len(files) == 0
  vol.cache.flush()

  # Test that deletion works too
  vol.cache.enabled = True
  vol[:, :, :] = image
  files = vol.cache.list()
  assert len(files) == 8
  vol.delete(np.s_[:, :, :])
  files = vol.cache.list()
  assert len(files) == 0
  vol.cache.flush()

  vol[:, :, :] = image
  files = vol.cache.list()
  assert len(files) == 8
  vol.cache.flush(preserve=np.s_[:, :, :])
  files = vol.cache.list()
  assert len(files) == 8
  vol.cache.flush(preserve=np.s_[:64, :64, :])
  files = vol.cache.list()
  assert len(files) == 2
  vol.cache.flush()

  vol[:, :, :] = image
  files = vol.cache.list()
  assert len(files) == 8
  vol.cache.flush_region(Bbox((50, 50, 0), (100, 100, 10)))
  files = vol.cache.list()
  assert len(files) == 4
  vol.cache.flush()

  vol[:, :, :] = image
  files = vol.cache.list()
  assert len(files) == 8
  vol.cache.flush_region(np.s_[50:100, 50:100, 0:10])
  files = vol.cache.list()
  assert len(files) == 4
  vol.cache.flush()
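
# The cache can also be switched on at construction time instead of toggling
# vol.cache.enabled afterwards. A minimal sketch (the path is illustrative
# and assumes a layer like the one written above already exists there):
def cache_at_construction_sketch():
  vol = CloudVolume(
    'file:///tmp/cloudvolume/caching-volume-example',
    cache=True,  # roughly equivalent to setting vol.cache.enabled = True
  )
  _ = vol[:, :, :]  # a read populates the on-disk cache
  assert len(vol.cache.list()) > 0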