Example no. 1
    def test_decompress_reader(self):
        for close_on_deletion in range(2):
            for with_size in range(2):
                for ds in self.data_sizes:
                    cdata = make_bytes(ds, randomize=False)

                    # zdata = zipped actual data
                    # cdata = original content data

                    # create reader
                    if with_size:
                        # need object data
                        zdata = zlib.compress(make_object(
                            str_blob_type, cdata))
                        type, size, reader = DecompressMemMapReader.new(
                            zdata, close_on_deletion)
                        assert size == len(cdata)
                        assert type == str_blob_type

                        # even if we don't set the size, it will be set automatically on first read
                        test_reader = DecompressMemMapReader(
                            zdata, close_on_deletion=False)
                        assert test_reader._s == len(cdata)
                    else:
                        # here we need content data
                        zdata = zlib.compress(cdata)
                        reader = DecompressMemMapReader(
                            zdata, close_on_deletion, len(cdata))
                        assert reader._s == len(cdata)
                    # END get reader

                    self._assert_stream_reader(reader, cdata,
                                               lambda r: r.seek(0))

                    # put in a dummy stream for closing
                    dummy = DummyStream()
                    reader._m = dummy

                    assert not dummy.closed
                    del reader
                    assert dummy.closed == close_on_deletion
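
A note on what the first example exercises, with a minimal standalone sketch: a git loose object is the header "<type> <size>\0" followed by the content, and make_object(str_blob_type, cdata) presumably builds exactly that before it is zlib-compressed. The helper below is hypothetical (it is not part of gitdb) and only illustrates how the type and size reported by DecompressMemMapReader.new can be recovered from the compressed stream using the standard library alone.

import zlib

def inspect_loose_object(zdata):
    # Hypothetical helper: decompress incrementally until the header's
    # terminating NUL byte shows up, then parse "<type> <size>".
    dco = zlib.decompressobj()
    buf = b""
    pos = 0
    while b"\0" not in buf and pos < len(zdata):
        buf += dco.decompress(zdata[pos:pos + 512])
        pos += 512
    header = buf.partition(b"\0")[0]
    obj_type, obj_size = header.split(b" ")
    return obj_type.decode(), int(obj_size)

# A blob whose content is 11 bytes long
zdata = zlib.compress(b"blob 11\0hello world")
assert inspect_loose_object(zdata) == ("blob", 11)
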
Example no. 2
	def test_compressed_writer(self):
		for ds in self.data_sizes:
			fd, path = tempfile.mkstemp()
			ostream = FDCompressedSha1Writer(fd)
			data = make_bytes(ds, randomize=False)
			
			# for now, just a single write, code doesn't care about chunking
			assert len(data) == ostream.write(data)
			ostream.close()
		
			# it's closed already
			self.failUnlessRaises(OSError, os.close, fd)
			
			# read everything back, compare to data we zip
			fd = os.open(path, os.O_RDONLY|getattr(os, 'O_BINARY', 0))
			written_data = os.read(fd, os.path.getsize(path))
			assert len(written_data) == os.path.getsize(path)
			os.close(fd)
			assert written_data == zlib.compress(data, 1)	# best speed
			
			os.remove(path)
Example no. 3
    def test_compressed_writer(self):
        for ds in self.data_sizes:
            fd, path = tempfile.mkstemp()
            ostream = FDCompressedSha1Writer(fd)
            data = make_bytes(ds, randomize=False)

            # for now, just a single write, code doesn't care about chunking
            assert len(data) == ostream.write(data)
            ostream.close()

            # it's closed already
            self.failUnlessRaises(OSError, os.close, fd)

            # read everything back, compare to data we zip
            fd = os.open(path, os.O_RDONLY | getattr(os, 'O_BINARY', 0))
            written_data = os.read(fd, os.path.getsize(path))
            assert len(written_data) == os.path.getsize(path)
            os.close(fd)
            assert written_data == zlib.compress(data, 1)  # best speed

            os.remove(path)
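
The compressed-writer examples pin down the on-disk format with their final assertion: the bytes written must equal zlib.compress(data, 1), i.e. the input compressed at level 1 (best speed). The class name suggests a SHA1 is also computed over the uncompressed input as it passes through. The sketch below is only an assumed, simplified equivalent of that write path built from hashlib and zlib; it is not gitdb's FDCompressedSha1Writer.

import hashlib
import os
import tempfile
import zlib

def write_compressed_with_sha1(fd, data):
    # Assumed behaviour: hash the raw bytes, write them zlib-compressed
    # at level 1 (best speed), and close the descriptor afterwards.
    sha1 = hashlib.sha1(data)
    compressor = zlib.compressobj(1)
    os.write(fd, compressor.compress(data))
    os.write(fd, compressor.flush())
    os.close(fd)
    return sha1.hexdigest()

fd, path = tempfile.mkstemp()
digest = write_compressed_with_sha1(fd, b"some payload")
with open(path, "rb") as fp:
    assert zlib.decompress(fp.read()) == b"some payload"
os.remove(path)
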
Example no. 4
	def test_decompress_reader(self):
		for close_on_deletion in range(2):
			for with_size in range(2):
				for ds in self.data_sizes:
					cdata = make_bytes(ds, randomize=False)
					
					# zdata = zipped actual data
					# cdata = original content data
					
					# create reader
					if with_size:
						# need object data
						zdata = zlib.compress(make_object(str_blob_type, cdata))
						type, size, reader = DecompressMemMapReader.new(zdata, close_on_deletion)
						assert size == len(cdata)
						assert type == str_blob_type
						
						# even if we don't set the size, it will be set automatically on first read
						test_reader = DecompressMemMapReader(zdata, close_on_deletion=False)
						assert test_reader._s == len(cdata)
					else:
						# here we need content data
						zdata = zlib.compress(cdata)
						reader = DecompressMemMapReader(zdata, close_on_deletion, len(cdata))
						assert reader._s == len(cdata)
					# END get reader 
					
					self._assert_stream_reader(reader, cdata, lambda r: r.seek(0))
					
					# put in a dummy stream for closing
					dummy = DummyStream()
					reader._m = dummy
					
					assert not dummy.closed
					del reader
					assert dummy.closed == close_on_deletion