    def test_base(self):
        ldb = LooseObjectDB(os.path.join(self.gitrepopath, "objects"))

        for sha1 in ldb.sha_iter():
            oinfo = ldb.info(sha1)
            ostream = ldb.stream(sha1)
            assert oinfo[:3] == ostream[:3]

            assert len(ostream.read()) == ostream.size
            assert ldb.has_object(oinfo.binsha)
        # END for each sha in database
        # ensure all files are closed
        try:
            del ostream
            del oinfo
        except UnboundLocalError:
            pass
        # END ignore exception if there are no loose objects

        data = "my data".encode("ascii")
        istream = IStream("blob", len(data), BytesIO(data))

        # the object does not yet have a sha
        assert istream.binsha is None
        ldb.store(istream)
        # now the sha is set
        assert len(istream.binsha) == 20
        assert ldb.has_object(istream.binsha)
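
For reference, a minimal standalone sketch of the store-and-read-back round trip this test exercises. The path is hypothetical (any existing writable directory works); the imports are the public gitdb names used above.

from io import BytesIO
from gitdb import IStream, LooseObjectDB

ldb = LooseObjectDB("/tmp/objects")  # hypothetical path, assumed to exist and be writable

data = b"my data"
istream = IStream("blob", len(data), BytesIO(data))
ldb.store(istream)                   # store() computes and fills in istream.binsha

assert ldb.has_object(istream.binsha)
assert ldb.stream(istream.binsha).read() == data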
Example #4
    def test_large_data_streaming(self, path):
        ldb = LooseObjectDB(path)
        string_ios = list()         # list of streams we previously created

        # serial mode
        for randomize in range(2):
            desc = (randomize and 'random ') or ''
            print("Creating %s data ..." % desc, file=sys.stderr)
            st = time()
            size, stream = make_memory_file(self.large_data_size_bytes, randomize)
            elapsed = time() - st
            print("Done (in %f s)" % elapsed, file=sys.stderr)
            string_ios.append(stream)

            # writing - due to the compression it will seem faster than it is
            st = time()
            sha = ldb.store(IStream('blob', size, stream)).binsha
            elapsed_add = time() - st
            assert ldb.has_object(sha)
            db_file = ldb.readable_db_object_path(bin_to_hex(sha))
            fsize_kib = os.path.getsize(db_file) / 1000

            size_kib = size / 1000
            print("Added %i KiB (filesize = %i KiB) of %s data to loose odb in %f s ( %f Write KiB / s)" %
                  (size_kib, fsize_kib, desc, elapsed_add, size_kib / (elapsed_add or 1)), file=sys.stderr)

            # reading all at once
            st = time()
            ostream = ldb.stream(sha)
            shadata = ostream.read()
            elapsed_readall = time() - st

            stream.seek(0)
            assert shadata == stream.getvalue()
            print("Read %i KiB of %s data at once from loose odb in %f s ( %f Read KiB / s)" %
                  (size_kib, desc, elapsed_readall, size_kib / (elapsed_readall or 1)), file=sys.stderr)

            # reading in chunks of 512 kB
            cs = 512 * 1000
            chunks = list()
            st = time()
            ostream = ldb.stream(sha)
            while True:
                data = ostream.read(cs)
                chunks.append(data)
                if len(data) < cs:
                    break
            # END read in chunks
            elapsed_readchunks = time() - st

            stream.seek(0)
            assert b''.join(chunks) == stream.getvalue()

            cs_kib = cs / 1000
            print("Read %i KiB of %s data in %i KiB chunks from loose odb in %f s ( %f Read KiB / s)" %
                  (size_kib, desc, cs_kib, elapsed_readchunks, size_kib / (elapsed_readchunks or 1)), file=sys.stderr)

            # delete the db file so the next round has to write it again
            ostream = None  # release the file handle first (required on Windows)
            remove(db_file)
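
make_memory_file is a helper from the test support library. A plausible minimal stand-in, assuming it returns a (size, stream) pair backed by BytesIO (the test calls stream.getvalue() on it), would be:

from io import BytesIO
import os

def make_memory_file(size_in_bytes, randomize=False):
    # Random bytes defeat zlib compression, while constant bytes compress
    # extremely well - which is why the test times both variants.
    data = os.urandom(size_in_bytes) if randomize else b"\0" * size_in_bytes
    return size_in_bytes, BytesIO(data)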
Example #5
    def test_basics(self, path):
        ldb = LooseObjectDB(path)

        # write data
        self._assert_object_writing(ldb)

        # verify sha iteration and size
        shas = list(ldb.sha_iter())
        assert shas and len(shas[0]) == 20

        assert len(shas) == ldb.size()

        # verify find short object
        long_sha = bin_to_hex(shas[-1])
        for short_sha in (long_sha[:20], long_sha[:5]):
            assert bin_to_hex(ldb.partial_to_complete_sha_hex(short_sha)) == long_sha
        # END for each sha

        self.assertRaises(BadObject, ldb.partial_to_complete_sha_hex, '0000')
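
A minimal sketch of the short-sha lookup the test verifies; resolve_prefix is a hypothetical helper name, assuming a populated LooseObjectDB and the BadObject exception from gitdb.exc:

from gitdb.exc import BadObject
from gitdb.util import bin_to_hex

def resolve_prefix(ldb, prefix_hex):
    # Expand an abbreviated hex sha to the full 40-character form,
    # or return None when no loose object matches the prefix.
    try:
        return bin_to_hex(ldb.partial_to_complete_sha_hex(prefix_hex))
    except BadObject:
        return None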
Example #6
    def test_basics(self, path):
        ldb = LooseObjectDB(path)

        # write data
        self._assert_object_writing(ldb)
        self._assert_object_writing_async(ldb)

        # verify sha iteration and size
        shas = list(ldb.sha_iter())
        assert shas and len(shas[0]) == 20

        assert len(shas) == ldb.size()

        # verify find short object
        long_sha = bin_to_hex(shas[-1])
        for short_sha in (long_sha[:20], long_sha[:5]):
            assert bin_to_hex(ldb.partial_to_complete_sha_hex(short_sha)) == long_sha
        # END for each sha

        self.assertRaises(BadObject, ldb.partial_to_complete_sha_hex, '0000')
Example #7
    def test_writing(self, path):
        mdb = MemoryDB()

        # write data
        self._assert_object_writing_simple(mdb)

        # test stream copy
        ldb = LooseObjectDB(path)
        assert ldb.size() == 0
        num_streams_copied = mdb.stream_copy(mdb.sha_iter(), ldb)
        assert num_streams_copied == mdb.size()

        assert ldb.size() == mdb.size()
        for sha in mdb.sha_iter():
            assert ldb.has_object(sha)
            assert ldb.stream(sha).read() == mdb.stream(sha).read()
Example #8
	def test_base(self):
		ldb = LooseObjectDB(fixture_path("../../../.git/objects"))
		
		for sha1 in ldb.sha_iter():
			oinfo = ldb.info(sha1)
			ostream = ldb.stream(sha1)
			assert oinfo[:3] == ostream[:3]
			
			assert len(ostream.read()) == ostream.size
			assert ldb.has_object(oinfo.binsha)
		# END for each sha in database
		# ensure all files are closed
		try:
			del ostream
			del oinfo
		except UnboundLocalError:
			pass
		# END ignore exception if there are no loose objects
			
		data = "my data"
		istream = IStream("blob", len(data), StringIO(data))
		
		# the object does not yet have a sha
		assert istream.binsha is None
		ldb.store(istream)
		# now the sha is set
		assert len(istream.binsha) == 20
		assert ldb.has_object(istream.binsha)
		
		
		# async operation
		# Create a reader from an iterator
		reader = IteratorReader(ldb.sha_iter())
		
		# get reader for object streams
		info_reader = ldb.stream_async(reader)
		
		# read one
		info = info_reader.read(1)[0]
		
		# read all the rest until depletion
		ostreams = info_reader.read()
		
		# set the pool to use two threads
		pool.set_size(2)
		
		# a size of 0 switches the pool back to synchronous operation
		pool.set_size(0)
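
Note that this last example targets gitdb 0.5.x, whose IteratorReader/stream_async/pool API came from the external async package; later gitdb releases dropped that dependency. Under that assumption, the equivalent in current gitdb is a plain synchronous loop:

# Synchronous equivalent of the stream_async() block above:
# iterate the shas and open each object stream directly.
ostreams = [ldb.stream(sha1) for sha1 in ldb.sha_iter()]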