# Assumes the gitdb test-suite context: LooseObjectDB, hex_to_bin and
# fixture_path are imported at module level.
def test_decompress_reader_special_case(self):
    odb = LooseObjectDB(fixture_path('objects'))
    ostream = odb.stream(hex_to_bin('7bb839852ed5e3a069966281bb08d50012fb309b'))

    # If there is a bug, we will be missing exactly one byte!
    data = ostream.read()
    assert len(data) == ostream.size
# Assumes hashlib, BytesIO, LooseObjectDB and IStream are imported at module level.
def do_put(content_path, object_hashes, content, filename):
    """Perform a put operation. This is used in the distributed wrapper."""
    ldb = LooseObjectDB("/{}/objects/".format(content_path))

    # Store the raw content as a git blob; the loose object db computes its sha1.
    istream = IStream("blob", len(content), BytesIO(content))
    ldb.store(istream)
    content_hash = istream.hexsha

    # Record the content sha under the sha1 of the filename, so lookups can be
    # done by filename without knowing the content.
    filename_hash = hashlib.sha1(filename.encode('utf-8')).hexdigest()
    result = object_hashes[filename_hash] = content_hash.decode('utf-8')
    return result
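# A minimal usage sketch for do_put. The directory name and file contents are
# illustrative only, not part of the original code; note that do_put prefixes
# content_path with '/', so the store root below is '/tmp/do-put-demo/objects/'.
import hashlib
import os

os.makedirs('/tmp/do-put-demo/objects/', exist_ok=True)
object_hashes = {}
sha = do_put('tmp/do-put-demo', object_hashes, b'hello world\n', 'greeting.txt')

# The returned value is the hex sha1 of the stored blob, also recorded in the
# shared mapping under the sha1 of the filename.
assert object_hashes[hashlib.sha1(b'greeting.txt').hexdigest()] == sha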
def test_decompress_reader_special_case(self):
    odb = LooseObjectDB(fixture_path('objects'))
    mdb = MemoryDB()
    for sha in (b'888401851f15db0eed60eb1bc29dec5ddcace911',
                b'7bb839852ed5e3a069966281bb08d50012fb309b',):
        ostream = odb.stream(hex_to_bin(sha))

        # If there is a bug, we will be missing exactly one byte!
        data = ostream.read()
        assert len(data) == ostream.size

        # Putting it back in should yield nothing new - after all, we have
        # the very same data.
        dump = mdb.store(IStream(ostream.type, ostream.size, BytesIO(data)))
        assert dump.hexsha == sha
    # END for each loose object sha to test
# Assumes the GitPython performance-test context: sys, os, os.path as osp,
# subprocess, time, LooseObjectDB, IStream, bin_to_hex and make_memory_file
# are imported at module level.
def test_large_data_streaming(self, rwrepo):
    # TODO: This part overlaps with the same file in gitdb.test.performance.test_stream
    # It should be shared if possible
    ldb = LooseObjectDB(osp.join(rwrepo.git_dir, 'objects'))

    for randomize in range(2):
        desc = (randomize and 'random ') or ''
        print("Creating %s data ..." % desc, file=sys.stderr)
        st = time()
        size, stream = make_memory_file(self.large_data_size_bytes, randomize)
        elapsed = time() - st
        print("Done (in %f s)" % elapsed, file=sys.stderr)

        # writing - due to the compression it will seem faster than it is
        st = time()
        binsha = ldb.store(IStream('blob', size, stream)).binsha
        elapsed_add = time() - st
        assert ldb.has_object(binsha)
        db_file = ldb.readable_db_object_path(bin_to_hex(binsha))
        fsize_kib = osp.getsize(db_file) / 1000

        size_kib = size / 1000
        msg = "Added %i KiB (filesize = %i KiB) of %s data to loose odb in %f s ( %f Write KiB / s)"
        msg %= (size_kib, fsize_kib, desc, elapsed_add, size_kib / elapsed_add)
        print(msg, file=sys.stderr)

        # reading all at once
        st = time()
        ostream = ldb.stream(binsha)
        shadata = ostream.read()
        elapsed_readall = time() - st

        stream.seek(0)
        assert shadata == stream.getvalue()

        msg = "Read %i KiB of %s data at once from loose odb in %f s ( %f Read KiB / s)"
        msg %= (size_kib, desc, elapsed_readall, size_kib / elapsed_readall)
        print(msg, file=sys.stderr)

        # reading in chunks of 512 KiB
        cs = 512 * 1000
        chunks = []
        st = time()
        ostream = ldb.stream(binsha)
        while True:
            data = ostream.read(cs)
            chunks.append(data)
            if len(data) < cs:
                break
        # END read in chunks
        elapsed_readchunks = time() - st

        stream.seek(0)
        assert b''.join(chunks) == stream.getvalue()

        cs_kib = cs / 1000
        print("Read %i KiB of %s data in %i KiB chunks from loose odb in %f s ( %f Read KiB / s)"
              % (size_kib, desc, cs_kib, elapsed_readchunks, size_kib / elapsed_readchunks), file=sys.stderr)

        # delete the db file so git has something to do
        ostream = None
        import gc
        gc.collect()
        os.remove(db_file)

        # VS. CGIT
        ##########
        # CGIT! Can using the cgit programs be faster?
        proc = rwrepo.git.hash_object('-w', '--stdin', as_process=True, istream=subprocess.PIPE)

        # write file - pump everything in at once to be as fast as possible
        data = stream.getvalue()    # cache it
        st = time()
        proc.stdin.write(data)
        proc.stdin.close()
        gitsha = proc.stdout.read().strip()
        proc.wait()
        gelapsed_add = time() - st
        del data
        assert gitsha == bin_to_hex(binsha)     # we do it the same way, right?

        # as it's the same sha, we reuse our path
        fsize_kib = osp.getsize(db_file) / 1000
        msg = "Added %i KiB (filesize = %i KiB) of %s data using git-hash-object in %f s ( %f Write KiB / s)"
        msg %= (size_kib, fsize_kib, desc, gelapsed_add, size_kib / gelapsed_add)
        print(msg, file=sys.stderr)

        # compare ...
        print("Git-Python is %f %% faster than git when adding big %s files"
              % (100.0 - (elapsed_add / gelapsed_add) * 100, desc), file=sys.stderr)

        # read all
        st = time()
        hexsha, typename, size, data = rwrepo.git.get_object_data(gitsha)  # @UnusedVariable
        gelapsed_readall = time() - st
        print("Read %i KiB of %s data at once using git-cat-file in %f s ( %f Read KiB / s)"
              % (size_kib, desc, gelapsed_readall, size_kib / gelapsed_readall), file=sys.stderr)

        # compare
        print("Git-Python is %f %% faster than git when reading big %s files"
              % (100.0 - (elapsed_readall / gelapsed_readall) * 100, desc), file=sys.stderr)

        # read chunks
        st = time()
        hexsha, typename, size, stream = rwrepo.git.stream_object_data(gitsha)  # @UnusedVariable
        while True:
            data = stream.read(cs)
            if len(data) < cs:
                break
        # END read stream
        gelapsed_readchunks = time() - st
        msg = "Read %i KiB of %s data in %i KiB chunks from git-cat-file in %f s ( %f Read KiB / s)"
        msg %= (size_kib, desc, cs_kib, gelapsed_readchunks, size_kib / gelapsed_readchunks)
        print(msg, file=sys.stderr)

        # compare
        print("Git-Python is %f %% faster than git when reading big %s files in chunks"
              % (100.0 - (elapsed_readchunks / gelapsed_readchunks) * 100, desc), file=sys.stderr)
    # END for each randomization factor