def read_zlib_chunks(read_some, dec_size, buffer_size=4096):
    """Read zlib data from a buffer.

    This function requires that the buffer have additional data following the
    compressed data, which is guaranteed to be the case for git pack files.

    :param read_some: Read function that returns at least one byte, but may
        return less than the requested size
    :param dec_size: Expected size of the decompressed buffer
    :param buffer_size: Size of the read buffer
    :return: Tuple with list of chunks, length of the compressed data, and
        unused read data.
    :raise zlib.error: if a decompression error occurred.
    """
    if dec_size <= -1:
        raise ValueError("non-negative zlib data stream size expected")
    obj = zlib.decompressobj()
    ret = []
    fed = 0
    size = 0
    while obj.unused_data == b"":
        add = read_some(buffer_size)
        if not add:
            raise zlib.error("EOF before end of zlib stream")
        fed += len(add)
        decomp = obj.decompress(add)
        size += len(decomp)
        ret.append(decomp)
    if size != dec_size:
        raise zlib.error("decompressed data does not match expected size")
    comp_len = fed - len(obj.unused_data)
    return ret, comp_len, obj.unused_data
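# A minimal usage sketch of read_zlib_chunks (not from the original source).
# It exercises the documented guarantee that extra bytes follow the
# compressed stream, as they do in a git pack file.
import zlib
from io import BytesIO

payload = b"hello zlib " * 10
stream = BytesIO(zlib.compress(payload) + b"TRAILING")  # pack-style trailing bytes

chunks, comp_len, unused = read_zlib_chunks(stream.read, len(payload))
assert b"".join(chunks) == payload
assert unused.startswith(b"TRAIL")  # bytes read past the end of the zlib stream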
def read(self, size: int = -1) -> bytes:
    if size < 0:
        return self.readall()
    if not size:
        return b''
    return_data = b''
    stream_position = self._stream.tell()
    while True:
        if self._decompressor.eof:
            break
        read_size = size - len(return_data)
        if read_size == 0:
            break
        compressed_chunk = self._decompressor.unconsumed_tail or self._stream.read(
            self._buffer_size)
        if compressed_chunk == b'':
            break
        try:
            decompressed_part = self._decompressor.decompress(
                compressed_chunk, read_size)
        except zlib.error as e:
            self._stream.seek(stream_position)
            raise zlib.error('Corrupted stream') from e
        return_data += decompressed_part
    return return_data
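# A standalone sketch (not this class's code) of the unconsumed_tail /
# max_length protocol the read() method above builds on: capping the output
# of decompress() parks the unread compressed input on unconsumed_tail,
# to be fed back in on the next call.
import zlib

blob = zlib.compress(b"B" * 10000)
dec = zlib.decompressobj()
out = dec.decompress(blob, 100)        # yield at most 100 decompressed bytes
while dec.unconsumed_tail and not dec.eof:
    out += dec.decompress(dec.unconsumed_tail, 100)
assert out == b"B" * 10000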
def decompress(head_: bytes, handle_: BytesIO) -> Iterator[bytes]:
    # try the most common wbits values: zlib-wrapped, raw deflate, gzip-wrapped
    offsets = [zlib.MAX_WBITS, -zlib.MAX_WBITS, zlib.MAX_WBITS | 16]
    for offset in offsets:
        with suppress(zlib.error):
            dec = zlib.decompressobj(offset)
            first = dec.decompress(head_)
            # F (a fn.py-style composition helper) and chunk (a chunked
            # reader) come from the surrounding codebase
            return (F(chunk)
                    >> (map, dec.decompress)
                    >> (chain, [first])
                    >> (filter, bool))(handle_)
    raise zlib.error('failed decompression')
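# A self-contained illustration (not from the original source) of the three
# wbits values tried above: zlib-wrapped, raw deflate, and gzip-wrapped data.
import gzip
import zlib

data = b"zlib wbits demo " * 8
blobs = [
    (zlib.compress(data), zlib.MAX_WBITS),         # zlib header + adler32
    (zlib.compress(data)[2:-4], -zlib.MAX_WBITS),  # raw deflate stream
    (gzip.compress(data), zlib.MAX_WBITS | 16),    # gzip header + crc32
]
for blob, wbits in blobs:
    dec = zlib.decompressobj(wbits)
    assert dec.decompress(blob) + dec.flush() == data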
def decompress(self, stream):
    try:
        dec = create_decompressor()
        for chunk in stream:
            rv = dec.decompress(chunk)
            if rv:
                yield rv
            if dec.unused_data:
                # a new compressed member starts right after the current one:
                # push the leftover bytes back and restart the decompressor
                stream.unshift(dec.unused_data)
                dec = create_decompressor()
    except zlib.error as e:
        msg = str(e)
        if msg.startswith('Error -3 '):
            msg += ". Use NoDecompressor if you're using uncompressed input."
        six.reraise(zlib.error, zlib.error(msg), sys.exc_info()[2])
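# The unused_data/restart logic above handles back-to-back zlib streams.
# A minimal stdlib-only sketch of the same pattern (not this project's code):
import zlib

blob = zlib.compress(b"first") + zlib.compress(b"second")
out, dec = [], zlib.decompressobj()
while blob:
    out.append(dec.decompress(blob))
    blob = dec.unused_data            # bytes belonging to the next member
    if blob:
        dec = zlib.decompressobj()    # restart for the next stream
assert b"".join(out) == b"firstsecond"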
def test_decompress_zlib_error(self, mock_zlib):
    the_error = zlib.error("TEST ERROR")

    def raiser(*args, **kwargs):
        raise the_error

    mock_zlib.decompress.side_effect = raiser
    # put this back.
    mock_zlib.error = zlib.error
    input = mock.Mock(name='compressed')
    with self.assertRaises(cache.CacheDecodeError) as cm:
        self._cls.decompress(input)
    self.assertIs(cm.exception.from_err, the_error)
def test_decompress_pkg_index_gz_failure(self):
    """
    Test failure handling when decompressing a Packages.gz file.

    :return:
    """
    zdcmp = MagicMock(side_effect=zlib.error("Firewall currently too hot"))
    xdcmp = MagicMock(side_effect=lzma.LZMAError(""))
    with patch("spacewalk.common.repo.zlib.decompress", zdcmp) as m_zlib, \
            patch("spacewalk.common.repo.lzma.decompress", xdcmp) as m_lzma:
        with pytest.raises(GeneralRepoException) as exc:
            DpkgRepo("http://dummy_url").decompress_pkg_index()
        assert not xdcmp.called
        assert zdcmp.called
        assert "hot" in str(exc.value)
def _read_blocks(self):
    f = self.f
    self.loc = self.header_size
    data = b''
    for n in range(self.nblocks):
        block_size = b2i(f.read(WORD))
        block_size_decomp = b2i(f.read(WORD))
        self.loc += DWORD
        raw = f.read(block_size)
        # Have to use a decompression object rather than the decompress()
        # function. This avoids 'incomplete or truncated stream' errors.
        #   dat = zlib.decompress(raw, 15, block_size_decomp)
        d = zlib.decompressobj()
        dat = d.decompress(raw, block_size_decomp)
        if len(dat) != block_size_decomp:
            raise zlib.error(
                "Decompressed data size does not match expected size.")
        data += dat
    self._parse_blocks(data)
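# A quick demonstration (not from the original source) of why the comment
# above prefers a decompression object: zlib.decompress() rejects a stream
# with a missing trailer, while a decompressobj returns what it could inflate.
import zlib

blob = zlib.compress(b"A" * 1000)
truncated = blob[:-4]                     # drop the adler32 trailer
try:
    zlib.decompress(truncated)
except zlib.error as e:
    print(e)                              # incomplete or truncated stream
d = zlib.decompressobj()
assert len(d.decompress(truncated)) == 1000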
def eq4():
    irc.send(chan=channel, msg='!ep4')
    text = refresh()
    m = re.search('(?<= botep \:)(.*)', str(text))
    if m is not None:
        try:
            data = str(m.group(1))
            print(data)
            print('[+] caught ' + data)
            decode = base64.b64decode(data)
            print('[+] decoded ' + str(decode))
            uncompress = zlib.decompress(decode)
            print('[+] decompressed ' + str(uncompress))
            forge = '!ep4 -rep ' + str(uncompress)
            print('[+] Sending back ' + forge)
            irc.send(chan=channel, msg=forge)
        except zlib.error:
            print("[-] Zlib error :'(")
    time.sleep(1)
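# The bot above round-trips base64-wrapped zlib payloads; the core transform
# (minus the IRC plumbing) is just:
import base64
import zlib

wire = base64.b64encode(zlib.compress(b"ping"))
assert zlib.decompress(base64.b64decode(wire)) == b"ping"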
def bad_open(*args, **kwargs):
    raise zlib.error('bad tar')
def flush(self) -> None:
    raise zlib.error()
def good_case1():
    """zlib.error is defined in a C module."""
    import zlib
    raise zlib.error(4)
def test_generic_zlib_error(self):
    from zlib import error
    msg = "Error -3 while decompressing data: incorrect data check"
    self.assertTranslationEqual(('error', 'zlib.error', msg), error(msg))
def flush(self):
    raise zlib.error()
def compress(self, *args, **kwargs):
    raise zlib.error()
def scan_next(self, last_scan_result, current_path, next_path):
    lookup_key = "{current_path}->{next_path}".format(
        current_path=current_path, next_path=next_path)
    # print(f"testing {lookup_key}")
    conti = self.lookup.get(lookup_key, True)
    if not conti:
        return False, None, None, None, None
    else:
        is_single_header = last_scan_result[0]
        last_file_data = copy.deepcopy(last_scan_result[1])
        data_to_skip = copy.deepcopy(last_scan_result[2])
        decomp_obj = last_scan_result[3].copy()
        data = last_file_data
        total_read = 0
        with open(next_path, "rb") as f:
            try:
                while True:
                    decomp_buf = bytearray()
                    buf = f.read(self.DEFAULT_TRUNK_SIZE)
                    self.total_read_in_bytes += self.DEFAULT_TRUNK_SIZE
                    total_read += self.DEFAULT_TRUNK_SIZE
                    if len(buf) == 0:
                        # reached the end of the file
                        return True, data, data_to_skip, decomp_obj, self.TAR_BODY_MARK
                    try:
                        decomp_buf = decomp_obj.decompress(buf)
                    except zlib.error:
                        # self.lookup[lookup_key] = False
                        # return False
                        raise zlib.error("GZ decompress fail.")
                    data.extend(decomp_buf)
                    if data_to_skip > len(data):
                        continue
                    elif data_to_skip > 0:
                        data = data[data_to_skip:]
                    reach_end = False
                    while not reach_end:
                        if len(data) >= self.TAR_HEAD_SIZE:
                            header = self.parse_tar_header_simple(
                                data[0:self.TAR_HEAD_SIZE])
                            pl_size = math.ceil(
                                header['size'] / self.TAR_BLOCK_SIZE) * self.TAR_BLOCK_SIZE
                            tar_entry_size = self.TAR_HEAD_SIZE + pl_size
                            if tar_entry_size < len(data):
                                data = data[tar_entry_size:]
                            else:
                                data_to_skip = tar_entry_size - len(data)
                                data = bytearray()
                                reach_end = True
                        else:
                            data_to_skip = 0
                            reach_end = True
            except tarfile.InvalidHeaderError as err:
                if not any(data):
                    self.logger.info("reached the last file")
                    return True, bytearray(), 0, decomp_obj, self.TAR_TAIL_MARK
                else:
                    # print("looks like this file is not continuous")
                    # print(err)
                    return False, None, -1, None, None
            except zlib.error as zerr:
                # print("GZ decompress error")
                return False, None, -1, None, None
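# scan_next() resumes via last_scan_result[3].copy(): zlib decompression
# objects support copy() for in-process checkpointing. A minimal sketch
# (not this project's code):
import zlib

blob = zlib.compress(b"C" * 5000)
mid = len(blob) // 2

dec = zlib.decompressobj()
part1 = dec.decompress(blob[:mid])
checkpoint = dec.copy()                     # snapshot the inflate state
rest_a = dec.decompress(blob[mid:])
rest_b = checkpoint.decompress(blob[mid:])  # replay from the snapshot
assert rest_a == rest_b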
# For further info, check http://launchpad.net/filesync-server
"""Resumable decompression

A ctypes interface to zlib decompress/inflate functions that mimics the
zlib.decompressobj interface but also supports getting and setting the
z_stream state to suspend/serialize it and then resume the decompression
at a later time.
"""

import cPickle
import ctypes
import zlib

if zlib.ZLIB_VERSION != '1.2.3.3' and zlib.ZLIB_VERSION != '1.2.3.4':
    raise zlib.error("zlib version not supported: %s" % (zlib.ZLIB_VERSION))

if zlib.ZLIB_VERSION == '1.2.3.3':
    # from inftrees.h
    ENOUGH = 2048
elif zlib.ZLIB_VERSION == '1.2.3.4':
    ENOUGH_LENS = 852
    ENOUGH_DISTS = 592
    ENOUGH = ENOUGH_LENS + ENOUGH_DISTS

# from inflate.h
#/*
#   gzip header information passed to and from zlib routines. See RFC 1952
#   for more details on the meanings of these fields.
#*/
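# Why ctypes? The stdlib decompressor can be copy()'d within a process but
# its state cannot be serialized across processes, which is the gap this
# module fills. A short demonstration (not part of the module):
import pickle
import zlib

dec = zlib.decompressobj()
dec.decompress(zlib.compress(b"state demo")[:8])   # partially fed stream
try:
    pickle.dumps(dec)
except TypeError as e:
    print(e)       # zlib.Decompress objects cannot be pickled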