def run(self, obj, config):
    """Decompress a CWS (zlib) or ZWS (LZMA) SWF sample and add the
    uncompressed 'FWS' file back into CRITs as a related sample.

    Requires the current user to hold SampleACL.WRITE; otherwise the
    extraction is cancelled and reported.
    """
    self.config = config
    self.obj = obj
    user = self.current_task.user
    data = io.BytesIO(obj.filedata.read())
    swf = bytearray()
    try:
        # First 3 bytes are the signature, next 5 are version + file length.
        comp = data.read(3)
        header = data.read(5)
        if comp == 'CWS':
            # zlib payload starts right after the 8-byte header.
            swf = 'FWS' + header + zlib.decompress(data.read())
        if comp == 'ZWS':
            data.seek(12)  # seek to LZMA props
            swf = 'FWS' + header + pylzma.decompress(data.read())
    except Exception as exc:
        self._error("unswf: (%s)." % exc)
        return
    if swf:
        # The new sample is named by its MD5 digest.
        h = md5(str(swf)).hexdigest()
        name = h
        if not user.has_access_to(SampleACL.WRITE):
            self._info("User does not have permission to add Samples to CRITs")
            self._add_result("Extract Canceled", "User does not have permission to add Samples to CRITs")
            return
        self._info("New file: %s (%d bytes, %s)" % (name, len(swf), h))
        handle_file(name, swf, self.obj.source,
                    related_id=str(self.obj.id),
                    related_type=str(self.obj._meta['crits_type']),
                    campaign=self.obj.campaign,
                    method=self.name,
                    relationship=RelationshipTypes.RELATED_TO,
                    user=self.current_task.user)
        self._add_result("file_added", name, {'md5': h})
def _decompressSWF(f, swf_size):
    """Decompress an SWF read from file-like object *f*.

    Returns the uncompressed body prefixed with an 'FWS' header,
    "ERROR" when decompression fails, or None when *f* is not a
    compressed SWF. Scan-control exceptions are re-raised untouched.
    The handle *f* is always closed via the finally blocks.
    """
    magic = f.read(3)
    if magic == "CWS":
        try:
            header = "FWS" + f.read(5)
            # Truncate to the size declared in the SWF header (minus the
            # 8 header bytes already accounted for).
            data = zlib.decompress(f.read())[:swf_size-8]
            return header + data
        except (QuitScanException, GlobalScanTimeoutError, GlobalModuleTimeoutError):
            raise
        except Exception:
            return "ERROR"
        finally:
            logging.debug("extract_swf - closing stringio handle in decompress")
            f.close()
    elif magic == "ZWS":
        try:
            header = "FWS" + f.read(5)
            # LZMA properties begin at offset 12 in a ZWS file.
            f.seek(12)
            data = pylzma.decompress(f.read())[:swf_size-8]
            return header + data
        except (QuitScanException, GlobalScanTimeoutError, GlobalModuleTimeoutError):
            raise
        except Exception:
            return "ERROR"
        finally:
            logging.debug("extract_swf - closing stringio handle in decompress")
            f.close()
    else:
        return None
def test_compression_decompression_noeos(self):
    # call compression and decompression on random data of various sizes
    # (1 byte up to 128 KiB); with eos=0 there is no end-of-stream marker,
    # so decompress needs an explicit maxlength.
    for i in range(18):
        size = 1 << i
        original = generate_random(size)
        result = pylzma.decompress(pylzma.compress(original, eos=0), maxlength=size)
        self.assertEqual(md5(original).hexdigest(), md5(result).hexdigest())
def read(self, filename):
    """Return the decompressed contents of *filename* from the NSIS
    archive, or None if the file is unknown / cannot be decoded."""
    if filename in self.nsis_header.files:
        data = None
        (foff, ftime, extract_type) = self.nsis_header.files[filename]
        if self.case_type == 1:  # case 1: the whole installer payload was compressed as one block
            # print '#Case 1'
            # print hex(foff)
            # print hex(kavutil.get_uint32(self.body_data, foff) & 0x7fffffff)
            # Top bit of the size dword is a flag; mask it off.
            fsize = kavutil.get_uint32(self.body_data, foff) & 0x7fffffff
            return self.body_data[foff + 4:foff + 4 + fsize]
        elif self.case_type == 2:  # case 2: each file was compressed individually
            # print '#Case 2'
            # print hex(foff)
            # print hex(kavutil.get_uint32(self.body_data, foff) & 0x7fffffff)
            fsize = kavutil.get_uint32(self.body_data, foff) & 0x7fffffff
            fdata = self.body_data[foff + 4:foff + 4 + fsize]
            comp_type = self.__get_comp_type(kavutil.get_uint32(fdata, 0))
            # print comp_type
            if comp_type == self.TYPE_LZMA:
                try:  # check whether it was compressed as a whole
                    data = pylzma.decompress(fdata)
                except TypeError:
                    pass
            elif comp_type == self.TYPE_ZLIB:
                try:
                    # -15: raw deflate stream (no zlib header).
                    data = zlib.decompress(fdata, -15)
                except zlib.error:
                    pass
            return data
    else:
        return None
def load(path_stack, compression="gzip"):
    """
    Load Stack instance from .stack file.

    Parameters
    ----------
    path_stack : str
        The full path to the .stack file that should be created, including
        the extension.
    compression : {'gzip', 'lzma'} or None, default 'gzip'
        The compression type that has been used saving the file.

    Returns
    -------
    The unpickled Stack instance.
    """
    if not path_stack.endswith('.stack'):
        raise ValueError(
            "To avoid ambiguity, when using Stack.load() you must provide the full path to "
            "the stack file you want to create, including the file extension. For example: "
            "stack.load(path_stack='./output/MyStack.stack'). Your call looks like this: "
            "stack.load(path_stack='%s', ...)" % (path_stack)
        )
    if compression is None:
        f = open(path_stack, 'rb')
    elif compression.lower() == "lzma":
        # BUG FIX: pylzma.decompress() expects a byte string, not an open
        # file object, and returns bytes rather than a file handle — the
        # old code then handed that string to cPickle.load(), which needs
        # a file-like object. Read, decompress, and wrap in StringIO.
        from cStringIO import StringIO
        raw = open(path_stack, 'rb')
        try:
            f = StringIO(pylzma.decompress(raw.read()))
        finally:
            raw.close()
    else:
        f = gzip.open(path_stack, 'rb')
    new_stack = cPickle.load(f)
    f.close()
    return new_stack
def unzlib(id):
    """Decompress the SWF response body for object *id* and register the
    uncompressed copy as a new object; returns (obj_num, name) where
    obj_num is -1 on failure."""
    body, sz = get_response_and_size(id, "all")
    obj_num = -1
    name = ""
    if check_errors():
        return
    name = get_name(id)
    # decomp = gzip.GzipFile('', 'rb', 9, StringIO.StringIO(body))
    # page = decomp.read()
    inData = body
    if inData[0] == 'C':  # zlib SWF
        decompressData = zlib.decompress(inData[8:])
    elif inData[0] == 'Z':  # lzma SWF
        decompressData = pylzma.decompress(inData[12:])
    elif inData[0] == 'F':  # uncompressed SWF
        decompressData = inData[8:]
    else:
        print 'not a SWF file'
        return obj_num, name
    # Bytes 4-7 of the header hold the declared uncompressed file size.
    sigSize = struct.unpack("<I", inData[4:8])[0]
    decompressSize = len(decompressData) + 8
    if sigSize != decompressSize:
        print 'Length not correct, decompression failed'
    else:
        # Rewrite the first header byte to 'F' (uncompressed signature).
        header = list(struct.unpack("<8B", inData[0:8]))
        header[0] = ord('F')
        page = struct.pack("<8B", *header) + decompressData
        obj_num = add_object("unzlib", page, id=id)
    return obj_num, name
def read(self, filename):
    """Return the decompressed contents of *filename* from the NSIS
    archive, or None if the file is unknown / cannot be decoded."""
    if filename in self.nsis_header.files:
        data = None
        (foff, ftime, extract_type) = self.nsis_header.files[filename]
        if self.case_type == 1:  # case 1: the whole installer payload was compressed as one block
            # print '#Case 1'
            # print hex(foff)
            # print hex(kavutil.get_uint32(self.body_data, foff) & 0x7fffffff)
            # Top bit of the size dword is a flag; mask it off.
            fsize = kavutil.get_uint32(self.body_data, foff) & 0x7fffffff
            return self.body_data[foff+4:foff+4+fsize]
        elif self.case_type == 2:  # case 2: each file was compressed individually
            # print '#Case 2'
            # print hex(foff)
            # print hex(kavutil.get_uint32(self.body_data, foff) & 0x7fffffff)
            fsize = kavutil.get_uint32(self.body_data, foff) & 0x7fffffff
            fdata = self.body_data[foff+4:foff+4+fsize]
            comp_type = self.__get_comp_type(kavutil.get_uint32(fdata, 0))
            # print comp_type
            if comp_type == self.TYPE_LZMA:
                try:  # check whether it was compressed as a whole
                    data = pylzma.decompress(fdata)
                except TypeError:
                    pass
            elif comp_type == self.TYPE_ZLIB:
                try:
                    # -15: raw deflate stream (no zlib header).
                    data = zlib.decompress(fdata, -15)
                except zlib.error:
                    pass
            return data
    else:
        return None
def unpack(data):
    """
    Unpack data from ddsx and returns it.
    If data have wrong header, prints error and exit(1).
    Return unpacked dds data, ready for saving.

    :param data: ddsx data
    """
    # Four-character DDS pixel format (e.g. DXT1) at offset 0x4.
    header_format = struct.unpack_from('4s', data, 0x4)[0]
    if header_format not in ddsx_types:
        print 'wrong ddsx type:', header_format
        exit(1)
    dds_height = struct.unpack_from('H', data, 0xc)[0]
    dds_width = struct.unpack_from('H', data, 0xe)[0]
    dds_mipmapcount = struct.unpack_from('B', data, 0x10)[0]
    dds_unpacked_body_size = struct.unpack_from('I', data, 0x18)[0]
    dds_body_size = struct.unpack_from('I', data, 0x1c)[0]
    ddsx_unknown_flag_0 = struct.unpack_from('B', data, 0xa)
    if ddsx_unknown_flag_0 in [0, 1]:
        pass  # all unpack ok 11 c0 01 40, 11 40 01 40, 11 40 00 40
    # Build a fresh 128-byte DDS header from the template and patch in the
    # per-texture fields.
    # NOTE(review): width is written at 0xc and height at 0x10, which looks
    # swapped relative to the standard DDS layout (height at 0xc) — confirm
    # against known-good output before changing.
    dds_data = ctypes.create_string_buffer(0x80)
    struct.pack_into('128B', dds_data, 0, *dds_header)
    struct.pack_into('I', dds_data, 0xc, dds_width)
    struct.pack_into('I', dds_data, 0x10, dds_height)
    struct.pack_into('I', dds_data, 0x14, dds_unpacked_body_size)
    struct.pack_into('B', dds_data, 0x1c, dds_mipmapcount)
    struct.pack_into('4s', dds_data, 0x54, header_format)
    if dds_body_size == 0:  # not packed
        return dds_data.raw + data[0x20:]
    elif struct.unpack_from('I', data, 0x20)[0] == 0x1000005d:  # packed with lzma
        return dds_data.raw + pylzma.decompress(data[0x20:], maxlength=dds_unpacked_body_size)
    else:  # packed with zlib
        return dds_data.raw + zlib.decompress(data[0x20:])
def unzip(inData):
    """Return an uncompressed 'F'-signature SWF built from *inData*;
    exits the program if *inData* is not an SWF or the declared length
    does not match the decompressed size."""
    if inData[0] == 'C':  # zlib SWF
        debug('zlib compressed swf detected.')
        decompressData = zlib.decompress(inData[8:])
    elif inData[0] == 'Z':  # lzma SWF
        debug('lzma compressed swf detected.')
        decompressData = pylzma.decompress(inData[12:])
    elif inData[0] == 'F':  # uncompressed SWF
        debug('Uncompressed swf detected.')
        decompressData = inData[8:]
    else:
        exit('not a SWF file')
    # Compare the header's declared file size to what we actually got.
    sigSize = struct.unpack("<I", inData[4:8])[0]
    debug('Filesize in signature: %s' % sigSize)
    decompressSize = len(decompressData) + 8
    debug('Filesize decompressed: %s' % decompressSize)
    check((sigSize == decompressSize), 'Length not correct, decompression failed')
    # Rewrite the signature byte to 'F' and re-attach the 8-byte header.
    header = list(struct.unpack("<8B", inData[0:8]))
    header[0] = ord('F')
    debug('Generating uncompressed data')
    return struct.pack("<8B", *header) + decompressData
def parse(self, data):
    """
    Parses the SWF.

    The @data parameter can be a file object or a SWFStream. If the
    header reports compression, the remainder of the stream is inflated
    (zlib or LZMA) into a new SWFStream before the frame metadata and
    tags are read.
    """
    self._data = data = data if isinstance(data, SWFStream) else SWFStream(data)
    self._header = SWFHeader(self._data)
    if self._header.compressed:
        temp = StringIO.StringIO()
        if self._header.compressed_zlib:
            import zlib
            data = data.f.read()
            zip = zlib.decompressobj()
            temp.write(zip.decompress(data))
        else:
            import pylzma
            data.readUI32()  # consume compressed length
            temp.write(pylzma.decompress(data.f.read()))
        temp.seek(0)
        data = SWFStream(temp)
    # Frame metadata immediately follows the (decompressed) header.
    self._header._frame_size = data.readRECT()
    self._header._frame_rate = data.readFIXED8()
    self._header._frame_count = data.readUI16()
    self.parse_tags(data)
def unzip(inData):
    """Return an uncompressed 'F'-signature SWF built from *inData*.

    Aborts the program when *inData* is not an SWF or when the declared
    length does not match the decompressed size (via check()).
    """
    if inData[0] == 'C':  # zlib SWF
        debug('zlib compressed swf detected.')
        decompressData = zlib.decompress(inData[8:])
    elif inData[0] == 'Z':  # lzma SWF
        debug('lzma compressed swf detected.')
        decompressData = pylzma.decompress(inData[12:])
    elif inData[0] == 'F':  # uncompressed SWF
        debug('Uncompressed swf detected.')
        decompressData = inData[8:]
    else:
        # BUG FIX: previously this only printed a message and fell through,
        # crashing with a NameError on decompressData below. Abort instead
        # (matching the sibling implementation that calls exit()).
        exit('not a SWF file')
    # Compare the header's declared file size to what we actually got.
    sigSize = struct.unpack("<I", inData[4:8])[0]
    debug('Filesize in signature: %s' % sigSize)
    decompressSize = len(decompressData) + 8
    debug('Filesize decompressed: %s' % decompressSize)
    check((sigSize == decompressSize), 'Length not correct, decompression failed')
    # Rewrite the signature byte to 'F' and re-attach the 8-byte header.
    header = list(struct.unpack("<8B", inData[0:8]))
    header[0] = ord('F')
    debug('Generating uncompressed data')
    return struct.pack("<8B", *header) + decompressData
def load(path_stack, compression="gzip"):
    """
    Load Stack instance from .stack file.

    Parameters
    ----------
    path_stack : str
        The full path to the .stack file that should be created, including
        the extension.
    compression : {'gzip', 'lzma'}, default 'gzip'
        The compression type that has been used saving the file.

    Returns
    -------
    None
    """
    if not path_stack.endswith('.stack'):
        raise ValueError(
            "To avoid ambiguity, when using Stack.load() you must provide the full path to "
            "the stack file you want to create, including the file extension. For example: "
            "stack.load(path_stack='./output/MyStack.stack'). Your call looks like this: "
            "stack.load(path_stack='%s', ...)" % (path_stack))
    if compression is None:
        f = open(path_stack, 'rb')
    elif compression.lower() == "lzma":
        # NOTE(review): this looks broken — pylzma.decompress() expects
        # bytes (not a file object) and returns a string, not a file handle
        # for cPickle.load(); the lzma path likely raises. Confirm and fix.
        f = pylzma.decompress(open(
            path_stack,
            'rb'))  # there seems to be a problem here!
    else:
        f = gzip.open(path_stack, 'rb')
    new_stack = cPickle.load(f)
    f.close()
    return new_stack
def unzlib(id):
    """Decompress the SWF response body for object *id* and register the
    uncompressed copy as a new object; returns (obj_num, name) where
    obj_num is -1 on failure."""
    body, sz = get_response_and_size(id, "all")
    obj_num = -1
    name = ""
    if check_errors():
        return
    name = get_name(id)
    # decomp = gzip.GzipFile('', 'rb', 9, StringIO.StringIO(body))
    # page = decomp.read()
    inData = body
    if inData[0] == 'C':  # zlib SWF
        decompressData = zlib.decompress(inData[8:])
    elif inData[0] == 'Z':  # lzma SWF
        decompressData = pylzma.decompress(inData[12:])
    elif inData[0] == 'F':  # uncompressed SWF
        decompressData = inData[8:]
    else:
        print 'not a SWF file'
        return obj_num, name
    # Bytes 4-7 of the header hold the declared uncompressed file size.
    sigSize = struct.unpack("<I", inData[4:8])[0]
    decompressSize = len(decompressData) +8
    if sigSize != decompressSize:
        print 'Length not correct, decompression failed'
    else:
        # Rewrite the first header byte to 'F' (uncompressed signature).
        header = list(struct.unpack("<8B", inData[0:8]))
        header[0] = ord('F')
        page = struct.pack("<8B", *header)+decompressData
        obj_num = add_object("unzlib",page,id=id)
    return obj_num, name
def unpackHistogramCollection(buf, decode=True, decompress=True): """ Unpack a collection of histograms """ # Dencode if asked if decode: buf = base64.b64decode(buf) # Deompress if asked if decompress: buf = pylzma.decompress(buf) # Read header (ver, numHistograms) = struct.unpack("<BI", buf[:5]) p = 5 # Start piling histograms hc = HistogramCollection() for i in range(0,numHistograms): # Read histogram and offset position (histo, p) = unpackHistogram(buf,p) # Append histogram in collection hc.append( histo ) # Return collection return hc
def run(self, obj, config):
    """Decompress a CWS (zlib) or ZWS (LZMA) SWF sample and add the
    uncompressed 'FWS' file back into CRITs as a related sample."""
    self.config = config
    self.obj = obj
    data = io.BytesIO(obj.filedata.read())
    swf = bytearray()
    try:
        # First 3 bytes are the signature, next 5 are version + file length.
        comp = data.read(3)
        header = data.read(5)
        if comp == 'CWS':
            swf = 'FWS' + header + zlib.decompress(data.read())
        if comp == 'ZWS':
            data.seek(12)  # seek to LZMA props
            swf = 'FWS' + header + pylzma.decompress(data.read())
    except Exception as exc:
        self._error("unswf: (%s)." % exc)
        return
    if swf:
        # The new sample is named by its MD5 digest.
        h = md5(str(swf)).hexdigest()
        name = h
        self._info("New file: %s (%d bytes, %s)" % (name, len(swf), h))
        handle_file(name, swf, self.obj.source,
                    related_id=str(self.obj.id),
                    related_type=str(self.obj._meta['crits_type']),
                    campaign=self.obj.campaign,
                    method=self.name,
                    relationship=RelationshipTypes.RELATED_TO,
                    user=self.current_task.username)
        self._add_result("file_added", name, {'md5': h})
def run(self, obj, config):
    """Decompress a CWS (zlib) or ZWS (LZMA) SWF sample and add the
    uncompressed 'FWS' file back into CRITs as a related sample."""
    self.config = config
    self.obj = obj
    data = io.BytesIO(obj.filedata.read())
    swf = bytearray()
    try:
        # First 3 bytes are the signature, next 5 are version + file length.
        comp = data.read(3)
        header = data.read(5)
        if comp == 'CWS':
            swf = 'FWS' + header + zlib.decompress(data.read())
        if comp == 'ZWS':
            data.seek(12)  # seek to LZMA props
            swf = 'FWS' + header + pylzma.decompress(data.read())
    except Exception as exc:
        self._error("unswf: (%s)." % exc)
        return
    if swf:
        # The new sample is named by its MD5 digest.
        h = md5(str(swf)).hexdigest()
        name = h
        self._info("New file: %s (%d bytes, %s)" % (name, len(swf), h))
        handle_file(name, swf, self.obj.source,
                    related_id=str(self.obj.id),
                    campaign=self.obj.campaign,
                    method=self.name,
                    relationship='Related_To',
                    user=self.current_task.username)
        self._add_result("file_added", name, {'md5': h})
def __getitem__(self, key):
    """Fetch *key* from the backing store and decode it according to the
    configured pickle/serialization method flags.

    On any error a descriptive string is returned in place of the value
    rather than raising.
    """
    from vyperlogix.crypto.Encryptors import Encryptors
    try:
        val = self.__db[key]
        if (not self.isPickleMethodUseSafeSerializer):
            # Optional transport compression applied before serialization.
            if (self.isPickleMethodUseZLIB):
                val = zlib.decompress(val)
            elif (self.isPickleMethodUseLZMA):
                val = pylzma.decompress(val)
            if (self.isPickleMethodUseStrings):
                # Comma-separated list; '|' within an element marks a tuple.
                val = val.split(',')
                if (len(val) > 1):
                    val = [ tuple(x.split('|')) if x.find('|') > -1 else x for x in val ]
            elif (self.isPickleMethodUseBsdDbShelf):
                val = self.unPickleItem(val)
            elif (self.isPickleMethodUseMarshal):
                val = marshal.loads(hexToStr(val))
        elif (self.isPickleMethodUseSafeSerializer):
            try:
                val = loads(val, beSilent=True)
            except:
                pass
            # Dicts are upgraded to the project's HashedLists2 container.
            if (isinstance(val, dict)):
                d_val = HashedLists2()
                d_val.fromDict(val)
                val = d_val
        elif (self.isPickleMethodUseCerealizer):
            val = cerealizer.loads(val)
    except Exception as details:
        val = 'UNKNOWN value for key (%s) of type "%s" due to ERROR "%s".' % (
            key, str(key.__class__), str(details))
    return val
def uncompress(self, filename=None, data=None):
    """Load an SWF from *filename* or *data* and return it uncompressed.

    Sets self.compressed, caches the raw input on self.__data__, and
    raises SwfFormatError for non-SWF input. Returns None when neither
    argument is supplied.
    """
    # if data has already been uncompressed, return it
    if hasattr(self, '__data__'):
        return self.__data__
    if filename == None and data == None:
        return None
    if not filename == None:
        self.__data__ = open(filename, 'rb').read()
    else:
        self.__data__ = data
    if self.__data__[:3] == 'FWS':
        self.compressed = False
        return self.__data__
    if self.__data__[:3] == 'ZWS':
        self.compressed = True
        # LZMA properties begin at offset 12 in a ZWS file.
        rawdata = pylzma.decompress(self.__data__[12:])
    elif self.__data__[:3] == 'CWS':
        self.compressed = True
        rawdata = zlib.decompress(self.__data__[8:])
    else:
        raise SwfFormatError('Unexpected magic string, not a Flash file.')
    # Rebuild: 'FWS' + original version byte + recomputed total length.
    swfdata = 'FWS' + self.__data__[3] + pack('I', len(rawdata) + 8) + rawdata
    return swfdata
def _decompressSWF(f, swf_size):
    """Decompress an SWF read from file-like object *f*.

    Returns the uncompressed body prefixed with an 'FWS' header,
    "ERROR" when decompression fails, or None when *f* is not a
    compressed SWF. Scan-control exceptions are re-raised untouched.
    The handle *f* is always closed via the finally blocks.
    """
    magic = f.read(3)
    if magic == "CWS":
        try:
            header = "FWS" + f.read(5)
            # Truncate to the size declared in the SWF header (minus the
            # 8 header bytes already accounted for).
            data = zlib.decompress(f.read())[:swf_size - 8]
            return header + data
        except (QuitScanException, GlobalScanTimeoutError, GlobalModuleTimeoutError):
            raise
        except Exception:
            return "ERROR"
        finally:
            logging.debug(
                "extract_swf - closing stringio handle in decompress")
            f.close()
    elif magic == "ZWS":
        try:
            header = "FWS" + f.read(5)
            # LZMA properties begin at offset 12 in a ZWS file.
            f.seek(12)
            data = pylzma.decompress(f.read())[:swf_size - 8]
            return header + data
        except (QuitScanException, GlobalScanTimeoutError, GlobalModuleTimeoutError):
            raise
        except Exception:
            return "ERROR"
        finally:
            logging.debug(
                "extract_swf - closing stringio handle in decompress")
            f.close()
    else:
        return None
def test_compression_decompression_eos(self):
    # call compression and decompression on random data of various sizes
    # (1 byte up to 128 KiB); eos=1 embeds an end-of-stream marker, so
    # decompress needs no explicit length.
    for i in xrange(18):
        size = 1 << i
        original = generate_random(size)
        result = pylzma.decompress(pylzma.compress(original, eos=1))
        self.assertEqual(len(result), size)
        self.assertEqual(md5.new(original).hexdigest(), md5.new(result).hexdigest())
def decom_lzma_algm(file_name):
    """Decompress the LZMA file *file_name* into '<file_name>.decomp'.

    Raises whatever open()/pylzma raise on failure; both handles are
    closed by the context managers.
    """
    import pylzma
    outfilename = file_name + '.decomp'
    # BUG FIX: the original `finally: out.close()` raised a NameError when
    # open() itself failed (out was never bound) and redundantly re-closed
    # a handle the `with` statement had already closed. The context
    # managers alone provide the required cleanup.
    with open(file_name, 'rb') as f, open(outfilename, 'wb') as out:
        out.write(pylzma.decompress(f.read()))
def unLZMAzipfile(file):
    """Decompress the LZMA file at path *file* in place, replacing its
    contents with the decompressed data."""
    f = open(file, 'rb+')
    line = f.read()
    result = pylzma.decompress(line)
    # Rewind and drop the old (compressed) contents before rewriting.
    f.seek(0)
    f.truncate()
    f.write(result)
    f.close()
def _read_config(self): """Fetch the device configuration descriptor from the device.""" #Is the crc necessary here? self._ser.write(b'\\#\x00\x00\x00\x00') (clen,) = struct.unpack(">H", self._my_ser_read(2)) cbytes = self._my_ser_read(clen) self._my_ser_read(2) #read and ignore the not-yet-crc return json.JSONDecoder().decode(str(pylzma.decompress(cbytes), "UTF-8"))
def lzma_decompress(data):
    """
    LZMA decompression using pylzma.

    The LZMA header consists of 5 + 8 bytes. The first 5 bytes are
    compression parameters, the 8 following bytes specify the length of
    the uncompressed data as a little-endian unsigned 64-bit integer.
    An all-ones length field (-1 as unsigned) means the stream is
    terminated by an EOS marker instead.
    """
    # unpack the data length from the LZMA header (bytes 6-13
    # inclusively / unsigned long long int)
    coded_length = struct.unpack('<Q', data[5:13])[0]
    # BUG FIX: struct.unpack returns an int, so the original comparison
    # against the byte string '\xFF'*8 was always False and the EOS
    # branch was dead code. Compare against the unsigned -1 value.
    if coded_length == 0xFFFFFFFFFFFFFFFF:
        # no length recorded: an EOS marker marks the end of the data and
        # pylzma doesn't need the coded_length
        return pylzma.decompress(data[:5] + data[13:])
    else:
        # the length is specified, so pass it since there probably is no
        # EOS marker
        return pylzma.decompress(data[:5] + data[13:], maxlength=coded_length)
def run(self):
    """Decompress self.data with the algorithm named by self.compresslib
    ('lzma', 'zlib', 'bz2' or 'none') into self.datadecompressed."""
    if self.compresslib=="lzma":
        self.datadecompressed=pylzma.decompress(self.data)
    elif self.compresslib=="zlib":
        self.datadecompressed=zlib.decompress(self.data)
    elif self.compresslib=="bz2":
        self.datadecompressed=bz2.decompress(self.data)
    elif self.compresslib=="none":
        # Pass-through: data was stored uncompressed.
        self.datadecompressed=self.data
def checkCompression(file):
    """Open the SQLite database at path *file* and decompress every row of
    articles.contents, printing each result (a smoke test that the stored
    blobs decompress cleanly)."""
    con = dbapi2.connect(file)
    # Return BLOBs as raw strings rather than unicode.
    con.text_factory = str
    cur = con.execute('SELECT id, contents FROM articles')
    for id, contents in cur:
        #print contents
        dummy = decompress(contents)
        print dummy
def finish():
    """Attempt to decompress the accumulated buffer; on failure close the
    output file, log via ROS, publish an empty path and return False."""
    # Brief pause before finalizing — presumably lets in-flight data land;
    # TODO confirm why this delay is needed.
    time.sleep(0.5)
    try:
        decompressed = pylzma.decompress(working_on)
    except TypeError, e:
        save_to.close()
        rospy.logerr("Failed to decompress file: {}".format(e.message))
        saved.publish("")
        return False
def run(self):
    """Decompress self.data with the algorithm named by self.compresslib
    ('lzma', 'zlib', 'bz2' or 'none') into self.datadecompressed."""
    if self.compresslib == "lzma":
        self.datadecompressed = pylzma.decompress(self.data)
    elif self.compresslib == "zlib":
        self.datadecompressed = zlib.decompress(self.data)
    elif self.compresslib == "bz2":
        self.datadecompressed = bz2.decompress(self.data)
    elif self.compresslib == "none":
        # Pass-through: data was stored uncompressed.
        self.datadecompressed = self.data
def checkCompression(file):
    """Open the SQLite database at path *file* and decompress every row of
    articles.contents, printing each result (a smoke test that the stored
    blobs decompress cleanly)."""
    con=dbapi2.connect(file)
    # Return BLOBs as raw strings rather than unicode.
    con.text_factory=str
    cur=con.execute('SELECT id, contents FROM articles')
    for id, contents in cur:
        #print contents
        dummy=decompress(contents)
        print dummy
def test_matchfinders(self):
    # use different matchfinder algorithms for compression; each must
    # round-trip, and an unknown matchfinder name must raise TypeError
    matchfinders = ['bt2', 'bt3', 'hc4']
    original = 'hello world'
    for mf in matchfinders:
        result = pylzma.decompress(pylzma.compress(original, matchfinder=mf))
        self.assertEqual(original, result)
    self.failUnlessRaises(TypeError, pylzma.compress, original, matchfinder='1234')
def pickleLoad(fname):
    '''
    General routine for restoring pickled and lzma-compressed data.

    Reads *fname*, LZMA-decompresses the contents and unpickles the
    result. NOTE: unpickling is only safe on trusted files.
    '''
    with open(fname, 'rb') as f:
        pickled = f.read()
    pickled = pylzma.decompress(pickled)
    df = cPickle.loads(pickled)
    return df
def test_compression_decompression_eos(self):
    # call compression and decompression on random data of various sizes
    # (1 byte up to 128 KiB); eos=1 embeds an end-of-stream marker, so
    # decompress needs no explicit length.
    for i in range(18):
        size = 1 << i
        original = generate_random(size)
        result = pylzma.decompress(pylzma.compress(original, eos=1))
        self.assertEqual(len(result), size)
        self.assertEqual(
            md5(original).hexdigest(), md5(result).hexdigest())
def decompress(flag, compressed):
    """Decompress *compressed* according to the CFL compression *flag*.

    Returns the payload unchanged for CFLCOMPRESS_NONE, LZMA-decompresses
    for CFLCOMPRESS_LZMA, and raises InvalidCFLError for any other flag or
    for undecodable LZMA data.
    """
    if flag == CFLCOMPRESS_NONE:
        return compressed
    if flag == CFLCOMPRESS_LZMA:
        try:
            return pylzma.decompress(compressed)
        except (TypeError, ValueError) as e:
            # Wrap decoder errors in the project's exception type.
            raise InvalidCFLError(e)
    else:
        raise InvalidCFLError('Unsupported flag %r' % (flag,))
def test_compression_decompression_noeos(self):
    # call compression and decompression on random data of various sizes
    # (1 byte up to 128 KiB); with eos=0 there is no end-of-stream marker,
    # so decompress needs an explicit maxlength.
    for i in xrange(18):
        size = 1 << i
        original = generate_random(size)
        result = pylzma.decompress(pylzma.compress(original, eos=0), maxlength=size)
        self.assertEqual(
            md5.new(original).hexdigest(), md5.new(result).hexdigest())
def updateDB(self, updatedDB, type):
    """Replace the local auction DB file with the LZMA-compressed payload
    *updatedDB*, record the update time, and refresh either the search
    results (type == 1) or the character sell list (otherwise)."""
    from time import time
    buffer = pylzma.decompress(updatedDB)
    f = file("./%s/data/%s"%(GAMEROOT,self.auctionDB),"wb")
    f.write(buffer)
    f.close()
    self.lastUpdate = time()
    if (type == 1):
        #refresh search results
        self.Search(100)
    else:
        #refresh selling list results
        self.getCharacterSellList(100)
def test_compression_file(self):
    # test compressing from file-like object (C class): read the
    # compressed stream one byte at a time and verify the round-trip.
    infile = BytesIO(self.plain)
    outfile = BytesIO()
    compress = pylzma.compressfile(infile, eos=1)
    while 1:
        data = compress.read(1)
        if not data:
            break
        outfile.write(data)
    check = pylzma.decompress(outfile.getvalue())
    self.assertEqual(check, self.plain)
def decode_post(data):
    """Decode an LZMA-compressed UTF-8 JSON post and expand its
    abbreviated keys back to full field names via the inverse of
    __pmap()."""
    js = pylzma.decompress(data).decode("utf-8")
    np = json.loads(js)
    # Invert the abbreviation map: short key -> full field name.
    mape = dict((v,k) for k,v in __pmap().items())
    post = dict()
    for k,v in np.items():
        nk = k
        if(k in mape):
            nk = mape[k]
        post[nk] = v
    return post
def test_compression_file_python(self):
    # test compressing from file-like object (Python class): read the
    # compressed stream one byte at a time and verify the round-trip.
    infile = PyStringIO(self.plain)
    outfile = PyStringIO()
    compress = pylzma.compressfile(infile, eos=1)
    while 1:
        data = compress.read(1)
        if not data:
            break
        outfile.write(data)
    check = pylzma.decompress(outfile.getvalue())
    self.assertEqual(check, self.plain)
def updateDB(self, updatedDB, type):
    """Replace the local auction DB file with the LZMA-compressed payload
    *updatedDB*, record the update time, and refresh either the search
    results (type == 1) or the character sell list (otherwise)."""
    from time import time
    buffer = pylzma.decompress(updatedDB)
    f = file("./%s/data/%s" % (GAMEROOT, self.auctionDB), "wb")
    f.write(buffer)
    f.close()
    self.lastUpdate = time()
    if (type == 1):
        #refresh search results
        self.Search(100)
    else:
        #refresh selling list results
        self.getCharacterSellList(100)
def get(self, request, format=None):
    """Serve a stored image: look up *filename*, read its LZMA-compressed
    '.compr' file from IMAGES_ROOT, decompress and return it with an
    image/<ext> content type; 404 when the filename is unknown."""
    filename = request.GET['filename']
    image = Images.objects.filter(filename=filename).first()
    if image is None:
        return Response({'detail': 'Invalid Filename'}, status=404)
    # NOTE(review): the compressed blob is opened in text mode 'r+' —
    # presumably fine on the deployment platform, but binary mode would be
    # safer; confirm before changing.
    namef = open(settings.IMAGES_ROOT + '/' + filename + '.' + 'compr', 'r+')
    filecontent = namef.read()
    uncomp = pylzma.decompress(filecontent)
    # Content type derives from the original file extension.
    ext = filename.rsplit('.', 1)[1].lower()
    return HttpResponse(uncomp, content_type='image/' + ext)
def uncompress_lzma(self, data):
    """uncompress lzma compressed stream

    Skips the first 4 bytes of *data* before decompressing. Returns the
    decompressed bytes, or None when pylzma is unavailable or the data
    cannot be decoded (optionally reporting via self.show_errors).
    """
    if not self.lzma_install:
        if self.show_errors:
            print("\t[ERROR] pylzma module not installed - aborting validation/decompression")
        return None
    else:
        # Drop the 4-byte prefix preceding the LZMA stream.
        data = data[4:]
        try:
            return pylzma.decompress(data)
        except Exception:
            return None
def LzmaDec(cmpData, orgDataSize, orgBinSize):
    """Decompress *cmpData* and return the original bit string of
    *orgBinSize* bits; panics via GlobalMsg on decoder failure."""
    # cut binary data tail & decompress
    try:
        orgData = decompress(cmpData)[:orgDataSize]
    except:
        GlobalMsg.panic('LZMA internal error, decompression failed')
    # cut bin string tail (skip the '0b' prefix-equivalent 2 chars, then
    # take exactly orgBinSize bits)
    binStr = BitArray(bytes = orgData).bin[2:2+orgBinSize]
    # return original bin string
    return binStr
def decompress_json(data):
    """Replaces all short abbreviations with real field names

    *data* is urlsafe-base64-encoded, LZMA-compressed JSON text. Returns
    the expanded text, or None when decoding fails.
    """
    try:
        data = pylzma.decompress(
            base64.urlsafe_b64decode(
                str(data)
            )
        )
    except (py7zlib.ArchiveError, TypeError):
        return
    else:
        #Extend field names
        for full_field, cut_field in COMPRESS_MAPPING:
            data = data.replace(cut_field, full_field)
        return data
def test_compression_file_python(self):
    # test compressing from file-like object (Python class): read the
    # compressed stream one byte at a time and verify the round-trip.
    from StringIO import StringIO as PyStringIO
    infile = PyStringIO(self.plain)
    outfile = PyStringIO()
    compress = pylzma.compressfile(infile, eos=1)
    while 1:
        data = compress.read(1)
        if not data:
            break
        outfile.write(data)
    check = pylzma.decompress(outfile.getvalue())
    self.assertEqual(check, self.plain)
def _test_compression_streaming(self):
    # test compressing with one byte at a time...
    # XXX: disabled (leading underscore) as LZMA doesn't support streaming
    # compression yet
    compress = pylzma.compressobj(eos=1)
    infile = BytesIO(self.plain)
    outfile = BytesIO()
    while 1:
        data = infile.read(1)
        if not data:
            break
        outfile.write(compress.compress(data, 1))
    outfile.write(compress.flush())
    check = pylzma.decompress(outfile.getvalue())
    self.assertEqual(check, self.plain)
def get(self):
    '''
    Decompress the stored data and return it together with the time taken.

    Returns a tuple: the first element is the decompressed data, the
    second is the elapsed time in seconds.
    '''
    logging.info('start decompressing')
    start_time=time.time()
    data=pylzma.decompress(self.__file)
    end_time=time.time()
    use_time=end_time-start_time
    logging.info('decompress complete,using time:{}'.format(use_time))
    return data,use_time
def uncompress_lzma(self, data):
    '''uncompress lzma compressed stream

    Skips the first 4 bytes of *data* before decompressing. Returns the
    decompressed bytes, or None when pylzma is unavailable or the data
    cannot be decoded (optionally reporting via self.show_errors).
    '''
    if self.lzma_installed == False:
        if self.show_errors:
            print "\t[ERROR] pylzma module not installed - aborting validation/decompression"
        return None
    else:
        # Drop the 4-byte prefix preceding the LZMA stream.
        data = data[4:]
        try:
            import pylzma
            return pylzma.decompress(data)
        except:
            return None
def _test_compression_streaming(self):
    # XXX: this doesn't work, yet (hence the leading underscore disables it)
    # test compressing with one byte at a time...
    compress = pylzma.compressobj(eos=1)
    infile = StringIO(self.plain)
    outfile = StringIO()
    while 1:
        data = infile.read(1)
        if not data:
            break
        outfile.write(compress.compress(data, 1))
    outfile.write(compress.flush())
    check = pylzma.decompress(outfile.getvalue())
    self.assertEqual(check, self.plain)
def test_matchfinders(self):
    # use different matchfinder algorithms for compression; each must
    # round-trip, and an unknown matchfinder name must raise TypeError
    matchfinders = ['bt2', 'bt3', 'hc4']
    original = 'hello world'
    for mf in matchfinders:
        result = pylzma.decompress(
            pylzma.compress(original, matchfinder=mf))
        self.assertEqual(original, result)
    self.failUnlessRaises(TypeError, pylzma.compress, original, matchfinder='1234')
def decompress_lzma(self):
    """Decompress the LZMA payload of a ZWS flash file and return a
    BytesIO containing an equivalent uncompressed 'FWS' file
    (signature + version byte + uint32 file size + body)."""
    # LZMA properties begin at offset 12 in a ZWS file.
    self.flash.seek(12)
    self.lzma_decompressed = pylzma.decompress(self.flash.read1(self.file_factor))
    c_lzma_file = io.BytesIO()
    c_lzma_file.write(b'FWS')
    c_lzma_file.write(struct.pack("<B", self.version))
    c_lzma_file.write(struct.pack("<I", self.file_size))
    c_lzma_file.write(self.lzma_decompressed)
    # Rewind so callers can read from the start.
    c_lzma_file.seek(0)
    return c_lzma_file
def EXTRACT_SWF(s, buff):
    """Inspect *buff* as an SWF: for CWS/ZWS return the uncompressed body
    under SWF['Buffer']; for an already-uncompressed FWS record only
    SWF['Version']. Returns an empty dict for non-SWF input."""
    SWF = {}
    magic = buff[:3]
    data = ''
    if magic == 'CWS':
        SWF['Buffer'] = 'FWS' + buff[3:8] + zlib.decompress(buff[8:])
    elif magic == 'ZWS':
        # LZMA properties begin at offset 12 in a ZWS file.
        SWF['Buffer'] = 'FWS' + buff[3:8] + pylzma.decompress(buff[12:])
    elif magic == 'FWS':
        SWF['Version'] = ord(buff[3])
    return SWF
def ConvertTextMessageToMavlink(self, TextMessage):
    ######################################################################################
    #
    # Summary: Takes a text message's payload text, unBase64's it, decompresses it with LZMA,
    # parses out the multiple Mavlink messages within the buffer, and a returns a list of
    # decoded Mavlink messages. Base64 is used to make the text buffer url/sms/email safe.
    #
    # Returns None (after logging) if any step fails.
    #
    ######################################################################################
    try:
        DecodedMavlinkBuffer = base64.b64decode(TextMessage)
        DecompressedMavlinkBuffer = pylzma.decompress(DecodedMavlinkBuffer)
        ListOfMavlinkMessages = self._MavlinkHelperObject.parse_buffer(DecompressedMavlinkBuffer)
        return ListOfMavlinkMessages
    except Exception, e:
        self.Logger("Exception in ConvertTextMessagetoMavlink: "+str(e), message_importance=1)
        return None
def _file_decode(cls, data):
    """{z,b,l}{j,p}<data_len><pad><DATA>

    Decodes a framed payload: byte 0 selects the compression algorithm
    (gzip/bz2/lzma), byte 1 the serialization (json/pickle), then the
    decimal payload length up to the padding marker, then the payload
    itself. Returns the deserialized object.
    """
    alg = data[0]
    header = data[1]
    idx = data.find(cls.PADDING)
    size = int(data[2:idx])
    # Slice out exactly <size> payload bytes after the padding byte.
    data = data[idx + 1:idx + 1 + size]
    if alg == cls.GZIP:
        data = data.decode('zlib')
    elif alg == cls.BZ2:
        data = data.decode('bz2')
    elif alg == cls.LZMA:
        data = pylzma.decompress(data)
    if header == cls.JSON:
        data = json.loads(data)
    if header == cls.PICKLE:
        # NOTE: pickle.loads is unsafe on untrusted input.
        data = pickle.loads(data)
    return data