def _assert_object_writing(self, db):
    """General tests to verify object writing, compatible with ObjectDBW

    **Note:** requires write access to the database"""
    # start in 'dry-run' mode, using a simple sha1 writer
    ostreams = (ZippedStoreShaWriter, None)
    for ostreamcls in ostreams:
        for data in self.all_data:
            dry_run = ostreamcls is not None
            ostream = None
            if ostreamcls is not None:
                ostream = ostreamcls()
                assert isinstance(ostream, Sha1Writer)
            # END create ostream

            prev_ostream = db.set_ostream(ostream)
            assert type(prev_ostream) in ostreams or prev_ostream in ostreams

            istream = IStream(str_blob_type, len(data), BytesIO(data))

            # store returns the same istream instance, with its new sha set
            my_istream = db.store(istream)
            sha = istream.binsha
            assert my_istream is istream
            assert db.has_object(sha) != dry_run
            assert len(sha) == 20

            # verify data - the slow way, we want to run code
            if not dry_run:
                info = db.info(sha)
                assert str_blob_type == info.type
                assert info.size == len(data)

                ostream = db.stream(sha)
                assert ostream.read() == data
                assert ostream.type == str_blob_type
                assert ostream.size == len(data)
            else:
                self.assertRaises(BadObject, db.info, sha)
                self.assertRaises(BadObject, db.stream, sha)

                # DIRECT STREAM COPY
                # Our data has been written in object format to the in-memory stream
                # we set as output stream. No physical database representation
                # was created.
                # Test direct stream copy of object streams, the result must be
                # identical to what we fed in.
                ostream.seek(0)
                istream.stream = ostream
                assert istream.binsha is not None
                prev_sha = istream.binsha

                db.set_ostream(ZippedStoreShaWriter())
                db.store(istream)
                assert istream.binsha == prev_sha
                new_ostream = db.ostream()

                # note: this only works as long as our store write uses the same
                # compression level, which is zip_best
                assert ostream.getvalue() == new_ostream.getvalue()
            # END verify data
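
# Usage sketch (not part of the original module): one way a concrete test could
# exercise the helper above against a real on-disk database. The base class name
# "TestDBBase" and its all_data fixture are assumptions about the surrounding test
# library; gitdb's LooseObjectDB is used here purely as an example target.
import tempfile

from gitdb.db import LooseObjectDB


class TestLooseDBWriting(TestDBBase):

    def test_writing(self):
        # write into a throw-away loose-object database on disk
        with tempfile.TemporaryDirectory() as rw_dir:
            self._assert_object_writing(LooseObjectDB(rw_dir))
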
def store(self, istream):
    zstream = ZippedStoreShaWriter()
    self._db.set_ostream(zstream)

    istream = self._db.store(istream)
    zstream.close()     # close to flush
    zstream.seek(0)

    # don't provide a size, the stream is written in object format, hence the
    # header needs decompression
    decomp_stream = DecompressMemMapReader(zstream.getvalue(), close_on_deletion=False)
    self._cache[istream.binsha] = OStream(istream.binsha, istream.type, istream.size, decomp_stream)

    return istream
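
# Usage sketch (assumption, not from the original source): storing a blob through a
# MemoryDB-style database whose store() caches objects as shown above, then reading
# the object back from the in-memory cache. Recent gitdb releases use bytes object
# types (b"blob"); adjust if your version expects str types.
from io import BytesIO

from gitdb.base import IStream
from gitdb.db import MemoryDB

mdb = MemoryDB()
data = b"hello gitdb"
istream = mdb.store(IStream(b"blob", len(data), BytesIO(data)))

# store() filled in binsha; stream() serves the cached, decompressed object stream
out = mdb.stream(istream.binsha)
assert out.read() == data
assert out.type == b"blob" and out.size == len(data)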