def pack_unpack_fp(repeats, chunk_size=DEFAULT_CHUNK_SIZE, progress=False, metadata=None):
    """Round-trip a generated test array through file-pointer pack/unpack.

    Builds a test array of ``repeats`` repetitions in a StringIO, compresses
    it into a second StringIO, decompresses into a third, and verifies the
    decompressed bytes match the original via ``cmp_fp``.

    Returns the metadata read back from the compressed source.
    """
    plain_fp, comp_fp, round_trip_fp = StringIO(), StringIO(), StringIO()
    if progress:
        print("Creating test array")
    create_array_fp(repeats, plain_fp, progress=progress)
    plain_size = plain_fp.tell()
    if progress:
        print("Compressing")
    plain_fp.seek(0)
    nchunks, chunk_size, last_chunk_size = calculate_nchunks(
        plain_size, chunk_size)
    pack(PlainFPSource(plain_fp), CompressedFPSink(comp_fp),
         nchunks, chunk_size, last_chunk_size, metadata=metadata)
    comp_fp.seek(0)
    if progress:
        print("Decompressing")
    comp_source = CompressedFPSource(comp_fp)
    unpack(comp_source, PlainFPSink(round_trip_fp))
    if progress:
        print("Verifying")
    cmp_fp(plain_fp, round_trip_fp)
    # metadata is populated on the source during unpack
    return comp_source.metadata
def test_append_single_chunk():
    """Append whole, double, half and sub-chunk payloads to a one-chunk file.

    NOTE(review): this module defines ``test_append_single_chunk`` twice;
    this earlier definition is shadowed by the later identical one and is
    never collected by the test runner. Consider deleting one copy.
    """
    orig, new, dcmp = StringIO(), StringIO(), StringIO()
    create_array_fp(1, new)
    new_size = new.tell()
    new.seek(0)
    # chunk_size == total size, so the container starts with a single chunk
    chunking = calculate_nchunks(new_size, chunk_size=new_size)
    source = PlainFPSource(new)
    sink = CompressedFPSink(orig)
    pack(source, sink, *chunking)
    orig.seek(0)
    new.seek(0)
    # append a single whole chunk
    reset_append_fp(orig, new, new_size)
    bloscpack_header = reset_read_beginning(orig)[0]
    nt.assert_equal(bloscpack_header['nchunks'], 2)
    # append a large content, that amounts to two chunks
    new_content = new.read()
    new.seek(0)
    reset_append_fp(orig, StringIO(new_content * 2), new_size * 2)
    bloscpack_header = reset_read_beginning(orig)[0]
    nt.assert_equal(bloscpack_header['nchunks'], 4)
    # append half a chunk
    # NOTE(review): new_content[:len(new_content)] is the full content, not
    # half; presumably the size argument (new_size//2) limits how much is
    # actually appended — confirm against reset_append_fp's contract.
    reset_append_fp(orig, StringIO(new_content[:len(new_content)]), new_size//2)
    bloscpack_header = reset_read_beginning(orig)[0]
    nt.assert_equal(bloscpack_header['nchunks'], 5)
    # append a few bytes
    reset_append_fp(orig, StringIO(new_content[:1023]), 1024)
    # make sure it is squashed into the last chunk (nchunks unchanged)
    bloscpack_header = reset_read_beginning(orig)[0]
    nt.assert_equal(bloscpack_header['nchunks'], 5)
def test_append_into_last_chunk():
    """Two small appends: the first makes a new last chunk, the second is
    absorbed into it, and the decompressed payload reflects both."""
    compressed, plain, round_trip = StringIO(), StringIO(), StringIO()
    create_array_fp(1, plain)
    plain_size = plain.tell()
    plain.seek(0)
    # chunk_size == total size => container starts with a single chunk
    chunking = calculate_nchunks(plain_size, chunk_size=plain_size)
    pack(PlainFPSource(plain), CompressedFPSink(compressed), *chunking)
    compressed.seek(0)
    plain.seek(0)
    payload = plain.read()
    plain.seek(0)
    # append a few bytes, creating a new, smaller, last chunk
    grown_by = reset_append_fp(compressed, StringIO(payload[:1023]), 1023)
    header = reset_read_beginning(compressed)[0]
    nt.assert_equal(grown_by, 1)
    nt.assert_equal(header['last_chunk'], 1023)
    # append into that last chunk: no new chunk, last_chunk doubles
    grown_by = reset_append_fp(compressed, StringIO(payload[:1023]), 1023)
    header = reset_read_beginning(compressed)[0]
    nt.assert_equal(grown_by, 0)
    nt.assert_equal(header['last_chunk'], 2046)
    # verify the whole container by unpacking it
    unpack(CompressedFPSource(compressed), PlainFPSink(round_trip))
    round_trip.seek(0)
    plain.seek(0)
    expected = plain.read()
    actual = round_trip.read()
    nt.assert_equal(len(actual), len(expected) + 2046)
    nt.assert_equal(actual, expected + expected[:1023] * 2)
def test_append_single_chunk():
    """Track ``nchunks`` while appending whole, double, half and sub-chunk
    payloads to a container that starts with exactly one chunk."""
    compressed, plain, _unused = StringIO(), StringIO(), StringIO()
    create_array_fp(1, plain)
    plain_size = plain.tell()
    plain.seek(0)
    # chunk_size == total size => single initial chunk
    chunking = calculate_nchunks(plain_size, chunk_size=plain_size)
    pack(PlainFPSource(plain), CompressedFPSink(compressed), *chunking)
    compressed.seek(0)
    plain.seek(0)
    # append a single whole chunk
    reset_append_fp(compressed, plain, plain_size)
    header = reset_read_beginning(compressed)[0]
    nt.assert_equal(header['nchunks'], 2)
    # append content that amounts to two chunks
    payload = plain.read()
    plain.seek(0)
    reset_append_fp(compressed, StringIO(payload * 2), plain_size * 2)
    header = reset_read_beginning(compressed)[0]
    nt.assert_equal(header['nchunks'], 4)
    # append half a chunk; the size argument caps the amount consumed
    # (original passed payload[:len(payload)], which equals payload)
    reset_append_fp(compressed, StringIO(payload), plain_size // 2)
    header = reset_read_beginning(compressed)[0]
    nt.assert_equal(header['nchunks'], 5)
    # append a few bytes and make sure they are squashed into the last chunk
    reset_append_fp(compressed, StringIO(payload[:1023]), 1024)
    header = reset_read_beginning(compressed)[0]
    nt.assert_equal(header['nchunks'], 5)
def pack_unpack_mem(repeats, chunk_size=DEFAULT_CHUNK_SIZE, progress=False, metadata=None):
    """Round-trip through memory and file-pointer sinks/sources.

    The data makes a merry-go-round: plain FP -> compressed memory ->
    plain memory -> compressed FP -> plain FP, asserting the metadata
    survives each decompression, and finally comparing the bytes.

    Returns the metadata read from the last compressed source.
    """
    plain_fp, comp_fp, round_trip_fp = StringIO(), StringIO(), StringIO()
    if progress:
        print("Creating test array")
    create_array_fp(repeats, plain_fp, progress=progress)
    plain_size = plain_fp.tell()
    if progress:
        print("Compressing")
    plain_fp.seek(0)
    nchunks, chunk_size, last_chunk_size = calculate_nchunks(
        plain_size, chunk_size)
    # leg 1: plain FP -> compressed memory
    mem_sink = CompressedMemorySink()
    pack(PlainFPSource(plain_fp), mem_sink,
         nchunks, chunk_size, last_chunk_size, metadata=metadata)
    # leg 2: compressed memory -> plain memory
    mem_source = CompressedMemorySource(mem_sink)
    plain_mem_sink = PlainMemorySink()
    unpack(mem_source, plain_mem_sink)
    nt.assert_equal(metadata, mem_source.metadata)
    # leg 3: plain memory -> compressed FP
    pack(PlainMemorySource(plain_mem_sink.chunks), CompressedFPSink(comp_fp),
         nchunks, chunk_size, last_chunk_size, metadata=metadata)
    comp_fp.seek(0)
    # leg 4: compressed FP -> plain FP
    fp_source = CompressedFPSource(comp_fp)
    unpack(fp_source, PlainFPSink(round_trip_fp))
    nt.assert_equal(metadata, fp_source.metadata)
    plain_fp.seek(0)
    round_trip_fp.seek(0)
    cmp_fp(plain_fp, round_trip_fp)
    return fp_source.metadata
def test_disable_offsets():
    """Packing with ``offsets=False`` must produce an empty offsets list.

    NOTE(review): this module defines ``test_disable_offsets`` twice; this
    earlier definition is shadowed by the later identical one and is never
    collected by the test runner. Consider deleting one copy.
    """
    in_fp, out_fp, dcmp_fp = StringIO(), StringIO(), StringIO()
    create_array_fp(1, in_fp)
    in_fp_size = in_fp.tell()
    in_fp.seek(0)
    # explicitly disable the per-chunk offsets table
    bloscpack_args = BloscpackArgs(offsets=False)
    source = PlainFPSource(in_fp)
    sink = CompressedFPSink(out_fp)
    pack(source, sink, *calculate_nchunks(in_fp_size),
         bloscpack_args=bloscpack_args)
    out_fp.seek(0)
    bloscpack_header, metadata, metadata_header, offsets = _read_beginning(out_fp)
    nt.assert_true(len(offsets) == 0)
def test_offsets():
    """Check chunk offsets in a packed file, with and without max_app_chunks.

    NOTE(review): this module defines ``test_offsets`` five times; this
    definition is shadowed by later redefinitions and is never collected by
    the test runner. Only the last definition executes.
    """
    with create_tmp_files() as (tdir, in_file, out_file, dcmp_file):
        create_array(1, in_file)
        pack_file(in_file, out_file, chunk_size='2M')
        with open(out_file, 'r+b') as input_fp:
            bloscpack_header = _read_bloscpack_header(input_fp)
            total_entries = bloscpack_header.total_prospective_chunks
            offsets = _read_offsets(input_fp, bloscpack_header)
            # First chunks should start after header and offsets
            first = BLOSCPACK_HEADER_LENGTH + 8 * total_entries
            # We assume that the others are correct
            nt.assert_equal(offsets[0], first)
            # hard-coded offsets; presumably tied to a specific blosc
            # version's compressed sizes — TODO confirm
            nt.assert_equal([736, 368207, 633319, 902306,
                             1173771, 1419535, 1666981, 1913995],
                            offsets)
            # try to read the second header
            input_fp.seek(offsets[1], 0)
            blosc_header_raw = input_fp.read(BLOSC_HEADER_LENGTH)
            expected = {'versionlz': 1,
                        'blocksize': 262144,
                        'ctbytes': 265108,
                        'version': 2,
                        'flags': 1,
                        'nbytes': 2097152,
                        'typesize': 8}
            blosc_header = decode_blosc_header(blosc_header_raw)
            nt.assert_equal(expected, blosc_header)
        # now check the same thing again, but w/o any max_app_chunks
        input_fp, output_fp = StringIO(), StringIO()
        create_array_fp(1, input_fp)
        nchunks, chunk_size, last_chunk_size = \
            calculate_nchunks(input_fp.tell(), chunk_size='2M')
        input_fp.seek(0, 0)
        bloscpack_args = BloscpackArgs(max_app_chunks=0)
        source = PlainFPSource(input_fp)
        sink = CompressedFPSink(output_fp)
        pack(source, sink, nchunks, chunk_size, last_chunk_size,
             bloscpack_args=bloscpack_args)
        output_fp.seek(0, 0)
        bloscpack_header = _read_bloscpack_header(output_fp)
        nt.assert_equal(0, bloscpack_header.max_app_chunks)
        offsets = _read_offsets(output_fp, bloscpack_header)
        # with no appendable-chunk reservation the first offset shrinks
        nt.assert_equal([96, 367567, 632679, 901666,
                         1173131, 1418895, 1666341, 1913355],
                        offsets)
def prep_array_for_append(blosc_args=None, bloscpack_args=None):
    """Create a single-chunk compressed container ready for append tests.

    Parameters
    ----------
    blosc_args : BloscArgs, optional
        Compression arguments; a fresh ``BloscArgs()`` is created per call
        when omitted.
    bloscpack_args : BloscpackArgs, optional
        Container arguments; a fresh ``BloscpackArgs()`` is created per call
        when omitted.

    Returns
    -------
    (orig, new, new_size, dcmp) : the compressed fp, the plain source fp
        (rewound), the plain data size, and an empty fp for decompression.

    Fix: the previous signature used ``blosc_args=BloscArgs()`` /
    ``bloscpack_args=BloscpackArgs()`` as defaults, which are evaluated once
    at definition time — a single shared instance could leak mutated state
    between calls (mutable-default-argument pitfall). ``None`` sentinels
    give every call its own fresh instance while keeping the call interface
    backward-compatible.
    """
    blosc_args = BloscArgs() if blosc_args is None else blosc_args
    bloscpack_args = (BloscpackArgs() if bloscpack_args is None
                      else bloscpack_args)
    orig, new, dcmp = StringIO(), StringIO(), StringIO()
    create_array_fp(1, new)
    new_size = new.tell()
    new.seek(0)
    chunking = calculate_nchunks(new_size)
    source = PlainFPSource(new)
    sink = CompressedFPSink(orig)
    pack(source, sink, *chunking,
         blosc_args=blosc_args, bloscpack_args=bloscpack_args)
    orig.seek(0)
    new.seek(0)
    return orig, new, new_size, dcmp
def test_offsets():
    """Check chunk offsets in a packed file, with and without max_app_chunks.

    NOTE(review): this module defines ``test_offsets`` five times; this
    definition is shadowed by later redefinitions and is never collected by
    the test runner. Only the last definition executes.
    """
    with create_tmp_files() as (tdir, in_file, out_file, dcmp_file):
        create_array(1, in_file)
        pack_file(in_file, out_file, chunk_size='2M')
        with open(out_file, 'r+b') as input_fp:
            bloscpack_header = _read_bloscpack_header(input_fp)
            total_entries = bloscpack_header.total_prospective_chunks
            offsets = _read_offsets(input_fp, bloscpack_header)
            # First chunks should start after header and offsets
            first = BLOSCPACK_HEADER_LENGTH + 8 * total_entries
            # We assume that the others are correct
            nt.assert_equal(offsets[0], first)
            # hard-coded offsets; presumably tied to a specific blosc
            # version's compressed sizes — TODO confirm
            nt.assert_equal([736, 368207, 633319, 902306,
                             1173771, 1419535, 1666981, 1913995],
                            offsets)
            # try to read the second header
            input_fp.seek(offsets[1], 0)
            blosc_header_raw = input_fp.read(BLOSC_HEADER_LENGTH)
            expected = {'versionlz': 1,
                        'blocksize': 262144,
                        'ctbytes': 265108,
                        'version': 2,
                        'flags': 1,
                        'nbytes': 2097152,
                        'typesize': 8}
            blosc_header = decode_blosc_header(blosc_header_raw)
            nt.assert_equal(expected, blosc_header)
        # now check the same thing again, but w/o any max_app_chunks
        input_fp, output_fp = StringIO(), StringIO()
        create_array_fp(1, input_fp)
        nchunks, chunk_size, last_chunk_size = \
            calculate_nchunks(input_fp.tell(), chunk_size='2M')
        input_fp.seek(0, 0)
        bloscpack_args = BloscpackArgs(max_app_chunks=0)
        source = PlainFPSource(input_fp)
        sink = CompressedFPSink(output_fp)
        pack(source, sink, nchunks, chunk_size, last_chunk_size,
             bloscpack_args=bloscpack_args
             )
        output_fp.seek(0, 0)
        bloscpack_header = _read_bloscpack_header(output_fp)
        nt.assert_equal(0, bloscpack_header.max_app_chunks)
        offsets = _read_offsets(output_fp, bloscpack_header)
        # with no appendable-chunk reservation the first offset shrinks
        nt.assert_equal([96, 367567, 632679, 901666,
                         1173131, 1418895, 1666341, 1913355],
                        offsets)
def test_offsets():
    """Check chunk offsets in a packed file, with and without max_app_chunks.

    NOTE(review): this module defines ``test_offsets`` five times; this
    definition is shadowed by later redefinitions and is never collected by
    the test runner. Its hard-coded offsets and blosc-header values differ
    from the other copies — presumably captured against a different blosc
    version; confirm which copy should be kept.
    """
    with create_tmp_files() as (tdir, in_file, out_file, dcmp_file):
        create_array(1, in_file)
        pack_file(in_file, out_file, chunk_size='2M')
        with open(out_file, 'r+b') as input_fp:
            bloscpack_header = _read_bloscpack_header(input_fp)
            total_entries = bloscpack_header.total_prospective_chunks
            offsets = _read_offsets(input_fp, bloscpack_header)
            # First chunks should start after header and offsets
            first = BLOSCPACK_HEADER_LENGTH + 8 * total_entries
            # We assume that the others are correct
            nt.assert_equal(offsets[0], first)
            nt.assert_equal([736, 418578, 736870, 1050327,
                             1363364, 1660766, 1959218, 2257703],
                            offsets)
            # try to read the second header
            input_fp.seek(offsets[1], 0)
            blosc_header_raw = input_fp.read(BLOSC_HEADER_LENGTH)
            expected = {'versionlz': 1,
                        'blocksize': 131072,
                        'ctbytes': 318288,
                        'version': 2,
                        'flags': 1,
                        'nbytes': 2097152,
                        'typesize': 8}
            blosc_header = decode_blosc_header(blosc_header_raw)
            nt.assert_equal(expected, blosc_header)
        # now check the same thing again, but w/o any max_app_chunks
        input_fp, output_fp = StringIO(), StringIO()
        create_array_fp(1, input_fp)
        nchunks, chunk_size, last_chunk_size = \
            calculate_nchunks(input_fp.tell(), chunk_size='2M')
        input_fp.seek(0, 0)
        bloscpack_args = BloscpackArgs(max_app_chunks=0)
        source = PlainFPSource(input_fp)
        sink = CompressedFPSink(output_fp)
        pack(source, sink, nchunks, chunk_size, last_chunk_size,
             bloscpack_args=bloscpack_args
             )
        output_fp.seek(0, 0)
        bloscpack_header = _read_bloscpack_header(output_fp)
        nt.assert_equal(0, bloscpack_header.max_app_chunks)
        offsets = _read_offsets(output_fp, bloscpack_header)
        # with no appendable-chunk reservation the first offset shrinks
        nt.assert_equal([96, 417938, 736230, 1049687,
                         1362724, 1660126, 1958578, 2257063],
                        offsets)
def test_offsets():
    """Check chunk offsets in a packed file, with and without max_app_chunks.

    NOTE(review): this module defines ``test_offsets`` five times; this
    definition is shadowed by later redefinitions and is never collected by
    the test runner. It is also the only copy that calls
    ``pack_file_to_file`` instead of ``pack_file`` — presumably from a
    different API revision; confirm which copy should be kept.
    """
    with create_tmp_files() as (tdir, in_file, out_file, dcmp_file):
        create_array(1, in_file)
        pack_file_to_file(in_file, out_file, chunk_size='2M')
        with open(out_file, 'r+b') as input_fp:
            bloscpack_header = _read_bloscpack_header(input_fp)
            total_entries = bloscpack_header.total_prospective_chunks
            offsets = _read_offsets(input_fp, bloscpack_header)
            # First chunks should start after header and offsets
            first = BLOSCPACK_HEADER_LENGTH + 8 * total_entries
            # We assume that the others are correct
            nt.assert_equal(offsets[0], first)
            nt.assert_equal(736, offsets[0])
            # try to read the second header
            input_fp.seek(offsets[1], 0)
            blosc_header_raw = input_fp.read(BLOSC_HEADER_LENGTH)
            # only compare compression-independent header fields
            expected = {'versionlz': 1,
                        'version': 2,
                        'flags': 1,
                        'nbytes': 2097152,
                        'typesize': 8}
            blosc_header = decode_blosc_header(blosc_header_raw)
            blosc_header_slice = dict(
                (k, blosc_header[k]) for k in expected.keys())
            nt.assert_equal(expected, blosc_header_slice)
        # now check the same thing again, but w/o any max_app_chunks
        input_fp, output_fp = StringIO(), StringIO()
        create_array_fp(1, input_fp)
        nchunks, chunk_size, last_chunk_size = \
            calculate_nchunks(input_fp.tell(), chunk_size='2M')
        input_fp.seek(0, 0)
        bloscpack_args = BloscpackArgs(max_app_chunks=0)
        source = PlainFPSource(input_fp)
        sink = CompressedFPSink(output_fp)
        pack(source, sink, nchunks, chunk_size, last_chunk_size,
             bloscpack_args=bloscpack_args)
        output_fp.seek(0, 0)
        bloscpack_header = _read_bloscpack_header(output_fp)
        nt.assert_equal(0, bloscpack_header.max_app_chunks)
        offsets = _read_offsets(output_fp, bloscpack_header)
        # with no appendable-chunk reservation the first offset shrinks
        nt.assert_equal(96, offsets[0])
def test_disable_offsets():
    """Packing with ``offsets=False`` must yield an empty offsets list."""
    plain_fp, comp_fp, _unused = StringIO(), StringIO(), StringIO()
    create_array_fp(1, plain_fp)
    plain_size = plain_fp.tell()
    plain_fp.seek(0)
    # explicitly switch off the per-chunk offsets table
    no_offsets = BloscpackArgs(offsets=False)
    pack(PlainFPSource(plain_fp), CompressedFPSink(comp_fp),
         *calculate_nchunks(plain_size),
         bloscpack_args=no_offsets)
    comp_fp.seek(0)
    bloscpack_header, metadata, metadata_header, offsets = \
        _read_beginning(comp_fp)
    nt.assert_true(len(offsets) == 0)
def test_offsets():
    """Verify the offsets table of a packed file.

    First checks that the initial chunk starts right after the bloscpack
    header plus the offsets table and that the second chunk carries a sane
    blosc header; then repeats the exercise with ``max_app_chunks=0`` to
    confirm the table (and first offset) shrinks accordingly.
    """
    with create_tmp_files() as (tdir, in_file, out_file, dcmp_file):
        create_array(1, in_file)
        pack_file(in_file, out_file, chunk_size='2M')
        with open(out_file, 'r+b') as input_fp:
            header = _read_bloscpack_header(input_fp)
            total_entries = header.total_prospective_chunks
            offsets = _read_offsets(input_fp, header)
            # first chunk must start after header + 8 bytes per offset entry
            first_offset = BLOSCPACK_HEADER_LENGTH + 8 * total_entries
            nt.assert_equal(offsets[0], first_offset)
            nt.assert_equal(736, offsets[0])
            # inspect the blosc header of the second chunk; only compare
            # compression-independent fields
            input_fp.seek(offsets[1], 0)
            raw_header = input_fp.read(BLOSC_HEADER_LENGTH)
            expected = {'versionlz': 1,
                        'version': 2,
                        'flags': 1,
                        'nbytes': 2097152,
                        'typesize': 8}
            decoded = decode_blosc_header(raw_header)
            decoded_slice = dict((k, decoded[k]) for k in expected.keys())
            nt.assert_equal(expected, decoded_slice)
        # same check again, but without any max_app_chunks reservation
        input_fp, output_fp = StringIO(), StringIO()
        create_array_fp(1, input_fp)
        nchunks, chunk_size, last_chunk_size = calculate_nchunks(
            input_fp.tell(), chunk_size='2M')
        input_fp.seek(0, 0)
        pack(PlainFPSource(input_fp), CompressedFPSink(output_fp),
             nchunks, chunk_size, last_chunk_size,
             bloscpack_args=BloscpackArgs(max_app_chunks=0))
        output_fp.seek(0, 0)
        header = _read_bloscpack_header(output_fp)
        nt.assert_equal(0, header.max_app_chunks)
        offsets = _read_offsets(output_fp, header)
        # smaller offsets table => first chunk starts earlier
        nt.assert_equal(96, offsets[0])
def test_append_metadata():
    """Appending to a container that carries metadata must preserve the
    metadata and yield the doubled payload on decompression.

    Fix: the original did ``ans = unpack(...)`` followed by ``print(ans)``
    — leftover debug output in a test. Replaced with an explicit assertion
    on the metadata read back from the compressed source, consistent with
    the check in ``pack_unpack_mem``.
    """
    orig, new, dcmp = StringIO(), StringIO(), StringIO()
    create_array_fp(1, new)
    new_size = new.tell()
    new.seek(0)
    metadata = {"dtype": "float64", "shape": [1024], "others": []}
    # chunk_size == total size => single initial chunk
    chunking = calculate_nchunks(new_size, chunk_size=new_size)
    source = PlainFPSource(new)
    sink = CompressedFPSink(orig)
    pack(source, sink, *chunking, metadata=metadata)
    orig.seek(0)
    new.seek(0)
    # append the same content once more
    reset_append_fp(orig, new, new_size)
    source = CompressedFPSource(orig)
    sink = PlainFPSink(dcmp)
    unpack(source, sink)
    # metadata must survive the append/unpack round trip
    nt.assert_equal(metadata, source.metadata)
    dcmp.seek(0)
    new.seek(0)
    new_str = new.read()
    dcmp_str = dcmp.read()
    nt.assert_equal(len(dcmp_str), len(new_str) * 2)
    nt.assert_equal(dcmp_str, new_str * 2)