Example 1: has_object_async
    def has_object_async(self, reader):
        """Return a reader yielding information about the membership of objects
        as identified by shas.

        :param reader: Reader yielding 20-byte shas.
        :return: async.Reader yielding (sha, bool) tuples which indicate
            whether the given sha exists in the database or not"""
        # base implementation checks each sha with has_object on a worker thread
        task = ChannelThreadTask(reader, str(self.has_object_async),
                                 lambda sha: (sha, self.has_object(sha)))
        return pool.add_task(task)
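
A minimal usage sketch for the method above, assuming a database instance `db` that exposes this API, 20-byte binary `shas` to query, and the IteratorReader from the async package used in the test example further below (exact import path assumed):

# hypothetical usage sketch -- `db` and `shas` are placeholders
from async import IteratorReader            # import path assumed

result_reader = db.has_object_async(IteratorReader(iter(shas)))
for sha, present in result_reader.read(len(shas)):
    # each result is a (sha, bool) tuple, per the docstring above
    print sha.encode('hex'), present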
Example 2: stream_async
    def stream_async(self, reader):
        """Retrieve the OStream of multiple objects

        :param reader: see ``info``
        :return: async.Reader yielding OStream|InvalidOStream instances in any order
        :note: depending on the system configuration, it might not be possible to
            read all OStreams at once. Instead, read them individually using
            reader.read(x) where x is small enough."""
        # base implementation just uses the stream method repeatedly
        task = ChannelThreadTask(reader, str(self.stream_async), self.stream)
        return pool.add_task(task)
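
Per the :note: above, a consumer would typically pull the OStreams in small batches rather than all at once. A hypothetical sketch, with `db`, `shas` and IteratorReader assumed as before:

# hypothetical sketch -- read one OStream at a time to bound memory use
ostream_reader = db.stream_async(IteratorReader(iter(shas)))
for _ in xrange(len(shas)):
    for ostream in ostream_reader.read(1):  # small x, per the :note:
        data = ostream.read()               # decompression happens on read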
Example 3: store_async
    def store_async(self, reader):
        """
        Create multiple new objects in the database asynchronously. The method will
        return right away, handing back an output channel which receives the results
        as they are computed.

        :return: Channel yielding your IStream which served as input, in any order.
            The IStream's sha will be set to the sha it received during the process,
            or its error attribute will be set to the exception informing about the error.

        :param reader: async.Reader yielding IStream instances.
            The same instances that were received from the Reader will be used
            in the output channel.

        :note: As some ODB implementations implement this operation atomically, they
            might abort the whole operation if one item could not be processed. Hence
            check how many items have actually been produced."""
        # base implementation uses store to perform the work
        task = ChannelThreadTask(reader, str(self.store_async), self.store)
        return pool.add_task(task)
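
A hypothetical round-trip sketch for store_async; IStream, IteratorReader and `db` are assumed as elsewhere, and the error/binsha attributes follow the docstring above:

# hypothetical sketch -- store two blobs concurrently, then check each result
from cStringIO import StringIO

payloads = ('hello', 'world')
istreams = [IStream('blob', len(p), StringIO(p)) for p in payloads]
out = db.store_async(IteratorReader(iter(istreams)))
for istream in out.read(len(istreams)):
    if istream.error is not None:           # exception recorded per the docstring
        raise istream.error
    print istream.binsha.encode('hex')      # sha assigned during the store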
Example 4: info_async
    def info_async(self, reader):
        """Retrieve information about a multitude of objects asynchronously

        :param reader: Channel yielding the shas of the objects of interest
        :return: async.Reader yielding OInfo|InvalidOInfo, in any order"""
        # base implementation maps each sha to self.info on a worker thread
        task = ChannelThreadTask(reader, str(self.info_async), self.info)
        return pool.add_task(task)
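
A matching sketch for info_async; the OInfo attribute names (binsha, type, size) are assumptions based on gitdb conventions, and InvalidOInfo is assumed to mark shas that could not be resolved:

# hypothetical sketch -- query header information for many shas at once
info_reader = db.info_async(IteratorReader(iter(shas)))
for info in info_reader.read(len(shas)):
    # attribute names assumed; InvalidOInfo entries carry an error instead
    print info.binsha.encode('hex'), info.type, info.size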
Example 5: test_large_data_streaming
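    # NOTE: this snippet relies on module-level imports that are not shown --
    # os, sys, time.time, LooseObjectDB, IStream, IteratorReader, pool,
    # make_memory_file, bin_to_hex, str_blob_type and TestStreamReader must
    # be available in the enclosing test module.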
    def test_large_data_streaming(self, path):
        ldb = LooseObjectDB(path)
        string_ios = list()  # streams created in serial mode, reused below

        # serial mode
        for randomize in range(2):
            desc = (randomize and 'random ') or ''
            print >> sys.stderr, "Creating %s data ..." % desc
            st = time()
            size, stream = make_memory_file(self.large_data_size_bytes,
                                            randomize)
            elapsed = time() - st
            print >> sys.stderr, "Done (in %f s)" % elapsed
            string_ios.append(stream)

            # writing - due to the compression it will seem faster than it is
            st = time()
            sha = ldb.store(IStream('blob', size, stream)).binsha
            elapsed_add = time() - st
            assert ldb.has_object(sha)
            db_file = ldb.readable_db_object_path(bin_to_hex(sha))
            fsize_kib = os.path.getsize(db_file) / 1024

            size_kib = size / 1024
            print >> sys.stderr, "Added %i KiB (filesize = %i KiB) of %s data to loose odb in %f s ( %f Write KiB / s)" % (
                size_kib, fsize_kib, desc, elapsed_add, size_kib / elapsed_add)

            # reading all at once
            st = time()
            ostream = ldb.stream(sha)
            shadata = ostream.read()
            elapsed_readall = time() - st

            stream.seek(0)
            assert shadata == stream.getvalue()
            print >> sys.stderr, "Read %i KiB of %s data at once from loose odb in %f s ( %f Read KiB / s)" % (
                size_kib, desc, elapsed_readall, size_kib / elapsed_readall)

            # reading in chunks of 512 kB
            cs = 512 * 1000
            chunks = list()
            st = time()
            ostream = ldb.stream(sha)
            while True:
                data = ostream.read(cs)
                chunks.append(data)
                if len(data) < cs:
                    break
            # END read in chunks
            elapsed_readchunks = time() - st

            stream.seek(0)
            assert ''.join(chunks) == stream.getvalue()

            cs_kib = cs / 1024
            print >> sys.stderr, "Read %i KiB of %s data in %i KiB chunks from loose odb in %f s ( %f Read KiB / s)" % (
                size_kib, desc, cs_kib, elapsed_readchunks,
                size_kib / elapsed_readchunks)

            # delete the db file so the next iteration has work to do
            os.remove(db_file)
        # END for each randomization factor

        # multi-threaded mode
        # want two threads, which should be supported by most of today's CPUs
        pool.set_size(2)
        total_kib = 0
        nsios = len(string_ios)
        for stream in string_ios:
            stream.seek(0)
            total_kib += len(stream.getvalue()) / 1024
        # END rewind

        def istream_iter():
            for stream in string_ios:
                stream.seek(0)
                yield IStream(str_blob_type, len(stream.getvalue()), stream)
            # END for each stream
        # END util

        # write multiple objects at once, involving concurrent compression
        reader = IteratorReader(istream_iter())
        istream_reader = ldb.store_async(reader)
        istream_reader.task().max_chunksize = 1

        st = time()
        istreams = istream_reader.read(nsios)
        assert len(istreams) == nsios
        elapsed = time() - st

        print >> sys.stderr, "Threads(%i): Compressed %i KiB of data in loose odb in %f s ( %f Write KiB / s)" % (
            pool.size(), total_kib, elapsed, total_kib / elapsed)

        # decompress multiple objects at once by reading them back.
        # The chunk size is not important as the stream will not really be
        # decompressed until it's read.
        sha_reader = IteratorReader(iter([i.binsha for i in istreams]))
        ostream_reader = ldb.stream_async(sha_reader)

        chunk_task = TestStreamReader(ostream_reader, "chunker", None)
        output_reader = pool.add_task(chunk_task)
        output_reader.task().max_chunksize = 1

        st = time()
        assert len(output_reader.read(nsios)) == nsios
        elapsed = time() - st

        print >> sys.stderr, "Threads(%i): Decompressed %i KiB of data in loose odb in %f s ( %f Read KiB / s)" % (
            pool.size(), total_kib, elapsed, total_kib / elapsed)

        # store the files and read them back. For reading, we use a task
        # as well, chunked into one item per task. Reading everything will
        # very quickly result in two threads handling two bytestreams of
        # chained compression/decompression streams.
        reader = IteratorReader(istream_iter())
        istream_reader = ldb.store_async(reader)
        istream_reader.task().max_chunksize = 1

        istream_to_sha = lambda items: [i.binsha for i in items]
        istream_reader.set_post_cb(istream_to_sha)

        ostream_reader = ldb.stream_async(istream_reader)

        chunk_task = TestStreamReader(ostream_reader, "chunker", None)
        output_reader = pool.add_task(chunk_task)
        output_reader.task().max_chunksize = 1

        st = time()
        assert len(output_reader.read(nsios)) == nsios
        elapsed = time() - st

        print >> sys.stderr, "Threads(%i): Compressed and decompressed and read %i KiB of data in loose odb in %f s ( %f Combined KiB / s)" % (
            pool.size(), total_kib, elapsed, total_kib / elapsed)