Example #1
	def get_graph(self,xp_uuid=None,xp_cfg=None,method="srtheo",tmin=0,tmax=None):
		self.cursor.execute("SELECT Custom_Graph FROM computed_data_table WHERE Id=\'"+str(xp_uuid)+"\' AND Function=\'"+method+"\'")
		tempblob = self.cursor.fetchone()
		if sys.version_info.major == 2:
			ans = pickle.loads(lzo.decompress(str(tempblob[0])))
		else:
			ans = pickle.loads(lzo.decompress(tempblob[0]))
		return ans
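The query above splices xp_uuid and method straight into the SQL string, so a stray quote in either value breaks it. A minimal sketch of the same lookup with placeholders, assuming a sqlite3-style cursor; get_graph_safe is a hypothetical name:

import pickle
import lzo

def get_graph_safe(cursor, xp_uuid, method="srtheo"):
    # Parameterized query: the driver quotes the values itself.
    cursor.execute(
        "SELECT Custom_Graph FROM computed_data_table WHERE Id=? AND Function=?",
        (str(xp_uuid), method))
    tempblob = cursor.fetchone()
    return pickle.loads(lzo.decompress(tempblob[0]))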
Example #2
def probes():
    """ Iterate through uncompressed probes generated by NavigaTor. """
    for cprobe in cprobes():
        try:
            probe = loads(decompress(cprobe))
        # Backward compatibility when bandwidth probes were not implemented.
        except TypeError:
            from NavigaTor import Probe_old as Probe
            probe = loads(decompress(cprobe))
        yield probe
Example #3
def test(src, level=1):
    a0 = lzo.adler32(src)
    c =  lzo.compress(src, level)
    u1 = lzo.decompress(c)
    a1 = lzo.adler32(u1)
    o =  lzo.optimize(c)
    u2 = lzo.decompress(o)
    a2 = lzo.adler32(u2)
    if src != u1 or src != u2:
        raise lzo.error, "internal error 1"
    if a0 != a1 or a0 != a2:
        raise lzo.error, "internal error 2"
    print "compressed %6d -> %6d" % (len(src), len(c))
Example #4
def test(src, level = 1):
    a0 = lzo.adler32(src)
    c =  lzo.compress(src,level)
    u1 = lzo.decompress(c)
    a1 = lzo.adler32(u1)
    o =  lzo.optimize(c)
    u2 = lzo.decompress(o)
    a2 = lzo.adler32(u2)
    if cmp(src,u1) != 0 or cmp(src,u2) != 0:
        raise lzo.error, "internal error 1"
    if cmp(a0,a1) != 0 or cmp(a0,a2) != 0:
        raise lzo.error, "internal error 2"
    print "compressed %6d -> %6d" % (len(src), len(c))
Example #5
 def get_experiment(self,
                    xp_uuid=None,
                    force_new=False,
                    blacklist=[],
                    pattern=None,
                    tmax=0,
                    **xp_cfg):
     if force_new:
         tempexp = Experiment(database=self, **xp_cfg)
         tempexp.commit_to_db()
     elif xp_uuid is not None:
         if self.id_in_db(xp_uuid):
             self.cursor.execute(
                 "SELECT Experiment_object FROM main_table WHERE Id=\'" +
                 str(xp_uuid) + "\'")
             tempblob = self.cursor.fetchone()
             if sys.version_info.major == 2:
                 tempexp = pickle.loads(lzo.decompress(str(tempblob[0])))
             else:
                 tempexp = pickle.loads(lzo.decompress(tempblob[0]))
             tempexp.db = self
         else:
             print("ID doesn't exist in DB")
             return self.get_experiment(blacklist=blacklist,
                                        pattern=pattern,
                                        tmax=tmax,
                                        **xp_cfg)
     else:
         templist = self.get_id_list(pattern=pattern, tmax=tmax, **xp_cfg)
         for elt in blacklist:
             try:
                 templist.remove(elt)
             except ValueError:
                 pass
         temptmax = -1
         for xp_uuid in templist:
             t = int(self.get_param(param='Tmax', xp_uuid=xp_uuid))
             temptmax = max(temptmax, min(t, tmax))
         # rebuild the list rather than calling remove() while iterating,
         # which skips elements
         templist = [u for u in templist
                     if int(self.get_param(param='Tmax', xp_uuid=u)) >= temptmax]
         if templist:
             i = random.randint(0, len(templist) - 1)
             tempexp = self.get_experiment(xp_uuid=templist[i])
             tempexp.db = self
         else:
             tempexp = Experiment(database=self, **xp_cfg)
             tempexp.commit_to_db()
     return tempexp
Example #6
def gen(src, level=1):
    a0 = lzo.adler32(src)
    c =  lzo.compress(src, level)
    u1 = lzo.decompress(c)
    a1 = lzo.adler32(u1)
    o =  lzo.optimize(c)
    u2 = lzo.decompress(o)
    a2 = lzo.adler32(u2)
    if src != u1:
        raise lzo.error("internal error 1: %r %r" % (src, u1))
    if src != u2:
        raise lzo.error("internal error 1: %r %r" % (src, u2))
    if a0 != a1 or a0 != a2:
        raise lzo.error("internal error 2")
    print("compressed %6d -> %6d" % (len(src), len(c)))
Example #7
def gen_raw(src, level=1):
    a0 = lzo.adler32(src)
    c =  lzo.compress(src, level, False)
    u1 = lzo.decompress(c, False, len(src))
    a1 = lzo.adler32(u1)
    o =  lzo.optimize(c, False, len(src))
    u2 = lzo.decompress(o, False, len(src))
    a2 = lzo.adler32(u2)
    # make sure it still works when you overstate the output buffer length
    u3 = lzo.decompress(c, False, len(src) + 100)
    if src != u1 or src != u2 or src != u3:
        raise lzo.error("internal error 1")
    if a0 != a1 or a0 != a2:
        raise lzo.error("internal error 2")
    print("compressed %6d -> %6d" % (len(src), len(c)))
Example #8
 def get_graph(self,
               xp_uuid=None,
               xp_cfg=None,
               method="srtheo",
               tmin=0,
               tmax=None):
     self.cursor.execute(
         "SELECT Custom_Graph FROM computed_data_table WHERE Id=\'" +
         str(xp_uuid) + "\' AND Function=\'" + method + "\'")
     tempblob = self.cursor.fetchone()
     if sys.version_info.major == 2:
         ans = pickle.loads(lzo.decompress(str(tempblob[0])))
     else:
         ans = pickle.loads(lzo.decompress(tempblob[0]))
     return ans
Example #9
 def uncompressLZO(data):
     try:
         import lzo
     except ImportError:
         raise LoadException("can't load LZO saves, no LZO")
     else:
         return lzo.decompress(data)
Example #10
 def _decode_key_block(self, key_block_compressed, key_block_info_list):
     key_list = []
     i = 0
     for compressed_size, decompressed_size in key_block_info_list:
         start = i
         end = i + compressed_size
         # 4 bytes : compression type
         key_block_type = key_block_compressed[start:start+4]
         if key_block_type == b'\x00\x00\x00\x00':
             # extract one single key block into a key list
             key_list += self._split_key_block(key_block_compressed[start+8:end])
         elif key_block_type == b'\x01\x00\x00\x00':
             if not HAVE_LZO:
                 log.error("LZO compression is not supported")
                 break
             # 4 bytes as adler32 checksum
             adler32 = unpack('>I', key_block_compressed[start+4:start+8])[0]
             # decompress key block
             header = b'\xf0' + pack('>I', decompressed_size)
             key_block = lzo.decompress(header + key_block_compressed[start+8:end])
             # notice that lzo 1.x return signed value
             assert(adler32 == lzo.adler32(key_block) & 0xffffffff)
             # extract one single key block into a key list
             key_list += self._split_key_block(key_block)
         elif key_block_type == b'\x02\x00\x00\x00':
             # 4 bytes same as end of block
             assert(key_block_compressed[start+4:start+8] == key_block_compressed[end-4:end])
             # decompress key block
             key_block = zlib.decompress(key_block_compressed[start+self._number_width:end])
             # extract one single key block into a key list
             key_list += self._split_key_block(key_block)
         i += compressed_size
     return key_list
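Several readers in this collection prepend a synthetic header (one 0xF0 byte plus the big-endian uncompressed length) so that python-lzo accepts a headerless block. A sketch of that trick in isolation; decompress_headerless is a hypothetical helper:

from struct import pack
import lzo

def decompress_headerless(block, decompressed_size):
    # python-lzo's default mode expects 5 header bytes in front of the raw
    # LZO1X data: the 0xF0 magic byte, then the uncompressed length as a
    # big-endian 32-bit integer.
    header = b'\xf0' + pack('>I', decompressed_size)
    return lzo.decompress(header + block)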
Example #11
 def get_mdx_by_index(self, fmdx, index):
     fmdx.seek(index['file_pos'])
     record_block_compressed = fmdx.read(index['compressed_size'])
     record_block_type = record_block_compressed[:4]
     record_block_type = index['record_block_type']
     #adler32 = unpack('>I', record_block_compressed[4:8])[0]
     if record_block_type == 0:
         _record_block = record_block_compressed[8:]
     # lzo compression
     elif record_block_type == 1:
         if lzo is None:
             print("LZO compression is not supported")
         # decompress
         header = b'\xf0' + pack('>I', index['decompressed_size'])
         _record_block = lzo.decompress(header +
                                        record_block_compressed[8:])
     # zlib compression
     elif record_block_type == 2:
         # decompress
         _record_block = zlib.decompress(record_block_compressed[8:])
     record = _record_block[index['record_start'] -
                            index['offset']:index['record_end'] -
                            index['offset']]
     record = record.decode(
         self._encoding, errors='ignore').strip(u'\x00').encode('utf-8')
     if self._stylesheet:
         record = self._replace_stylesheet(record)
     record = record.decode('utf-8')
     return record
Example #12
def main(args):
    # display version information and module documentation
    print "LZO version %s (0x%x), %s" % (lzo.LZO_VERSION_STRING, lzo.LZO_VERSION, lzo.LZO_VERSION_DATE)
    print lzo.__file__
    print
    print lzo.__doc__

    # display additional module information
    ## print dir(lzo)
    ## print_modinfo()

    # compress some simple strings
    test("aaaaaaaaaaaaaaaaaaaaaaaa")
    test("abcabcabcabcabcabcabcabc")
    test("abcabcabcabcabcabcabcabc", level=9)
    test(" " * 131072)
    test("")
    print "Simple compression test passed."

    # force an exception (because of invalid compressed data)
    assert issubclass(lzo.error, Exception)
    try:
        x = lzo.decompress("xx")
    except lzo.error, ex:
        ## print ex
        pass
Example #13
def decompress(body):
    # DWORD header_size;
    # DWORD body_size;
    # DWORD decompressed_size;
    # BYTE compressor_id[16];
    header_size = u32(body[0:4])
    body_size = u32(body[4:8])
    plain_size = u32(body[8:12])
    compressor_id = body[12:28].encode('hex')

    header = body[:header_size]
    body = body[header_size:header_size + body_size]

    if compressor_id == "f37126ad88a5617eaf06000d424c5a21":  # default
        return body
    elif compressor_id == "5fd8ea0e9d0a92cbe425109690ce7da2":  # zlib
        import zlib
        return zlib.decompress(body)
    elif compressor_id == "503b6412c75a7c7558d1c92683225449":  # aplib
        import aplib
        return aplib.depack_safe(body)
    elif compressor_id == "0a7874d2478a7713705e13dd9b31a6b1":  # lzo1x
        plain_size = u32(header[8:12])
        import lzo
        return lzo.decompress(body, False, plain_size)
    else:
        log.warning("Unknown Compressor: %s" % compressor_id)
Example #14
def mproto_load(mproto):
    pack, unpack = struct.pack, struct.unpack
    mp_f1 = open(mproto, "rb")
    mph = {}
    mph['magic'] = mp_f1.read(4)
    mph['count'] = unpack("I", mp_f1.read(4))[0]
    mph['esize'] = unpack("I", mp_f1.read(4))[0]

    import cStringIO
    mp_data1 = cStringIO.StringIO(mp_f1.read(mph['esize']))
    mp_f1.close()

    mpph = {}
    mpph['magic'] = unpack("I", mp_data1.read(4))[0]
    mpph['esize'] = unpack("I", mp_data1.read(4))[0]
    mpph['csize'] = unpack("I", mp_data1.read(4))[0]
    mpph['dsize'] = unpack("I", mp_data1.read(4))[0]

    global MT2_XTEAKEY_MPX
    mp_data2 = _xtea.decrypt_all(mp_data1.read(mpph['esize']), MT2_XTEAKEY_MPX)
    if EXT_DEBUG_MODE:
        ttt = open(mproto + ".unxtea", "wb")
        ttt.write(mp_data2)
        ttt.close()

    mp_data3 = lzo.decompress("\xf0" + pack("!L", mpph['dsize']) +
                              mp_data2[4:mpph['csize'] + 4])
    if EXT_DEBUG_MODE:
        ttt = open(mproto + ".unlzo", "wb")
        ttt.write(mp_data3)
        ttt.close()
    return mph, mp_data3
Example #15
 def _decode_key_block(self, key_block_compressed, key_block_info_list):
     key_list = []
     i = 0
     for compressed_size, decompressed_size in key_block_info_list:
         start = i
         end = i + compressed_size
         # 4 bytes : compression type
         key_block_type = key_block_compressed[start:start+4]
         if key_block_type == '\x00\x00\x00\x00':
             # extract one single key block into a key list
             key_list += self._split_key_block(key_block_compressed[start+8:end])
         elif key_block_type == '\x01\x00\x00\x00':
             if not HAVE_LZO:
                 print "LZO compression is not supported"
                 break
             # 4 bytes as adler32 checksum
             adler32 = unpack('>I', key_block_compressed[start+4:start+8])[0]
             # decompress key block
             header = '\xf0' + pack('>I', decompressed_size)
             key_block = lzo.decompress(header + key_block_compressed[start+8:end])
             # notice that lzo 1.x return signed value
             assert(adler32 == lzo.adler32(key_block) & 0xffffffff)
             # extract one single key block into a key list
             key_list += self._split_key_block(key_block)
         elif key_block_type == '\x02\x00\x00\x00':
             # 4 bytes same as end of block
             assert(key_block_compressed[start+4:start+8] == key_block_compressed[end-4:end])
             # decompress key block
             key_block = zlib.decompress(key_block_compressed[start+self._number_width:end])
             # extract one single key block into a key list
             key_list += self._split_key_block(key_block)
         i += compressed_size
     return key_list
Example #16
 def get_data_by_index(fmdx, index):
     fmdx.seek(index['file_pos'])
     record_block_compressed = fmdx.read(index['compressed_size'])
     record_block_type = record_block_compressed[:4]
     record_block_type = index['record_block_type']
     decompressed_size = index['decompressed_size']
     # adler32 = unpack('>I', record_block_compressed[4:8])[0]
     if record_block_type == 0:
         _record_block = record_block_compressed[8:]
     # lzo compression
     elif record_block_type == 1:
         if lzo is None:
             print("LZO compression is not supported")
         # decompress (this lzo variant takes the sizes as keywords,
         # so no synthetic header is needed)
         _record_block = lzo.decompress(record_block_compressed[8:],
                                        initSize=decompressed_size,
                                        blockSize=1308672)
     # zlib compression
     elif record_block_type == 2:
         # decompress
         _record_block = zlib.decompress(record_block_compressed[8:])
     data = _record_block[index['record_start'] -
                          index['offset']:index['record_end'] -
                          index['offset']]
     return data
Example #17
    def _decompress(
        self, data
    ):  # a decompression function like lrzip in spirit: lzma < bz2 < zlib < lzo < lz4
        try:
            data = lzma.decompress(data)
        except:
            pass
        try:
            data = bz2.decompress(data)
        except:
            pass
        try:
            data = zlib.decompress(data)
        except:
            pass
        try:
            data = data.decode('zlib')
        except:
            pass
        try:
            data = lzo.decompress(data)
        except:
            pass
        try:
            data = lz4.decompress(data)
        except:
            pass

        if self.shuffle == True:
            try:
                print "unshuffling..."
                data = buff_unshuffle(data)
                print "data unshuffled..."
            except:
                pass

        return data
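The bare except: clauses above also swallow unrelated errors. A sketch of the same cascade with per-codec exception types, assuming Python 3 standard-library behaviour (lzma raises LZMAError, bz2 raises OSError, zlib raises zlib.error on foreign data) plus python-lzo's lzo.error; lz4 is left out because its error type differs between lz4 package versions, and try_decompress is a hypothetical name:

import bz2
import lzma
import zlib

import lzo

def try_decompress(data):
    # Apply each codec in lrzip-like order; a codec that does not
    # recognize the data raises its own error type and is skipped.
    stages = ((lzma.decompress, lzma.LZMAError),
              (bz2.decompress, OSError),
              (zlib.decompress, zlib.error),
              (lzo.decompress, lzo.error))
    for decomp, err in stages:
        try:
            data = decomp(data)
        except err:
            pass
    return data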
Example #18
def main(args):
    # display version information and module documentation
    print "LZO version %s (0x%x), %s" % (lzo.LZO_VERSION_STRING,
                                         lzo.LZO_VERSION, lzo.LZO_VERSION_DATE)
    print lzo.__file__
    print
    print lzo.__doc__

    # display additional module information
    ## print dir(lzo)
    ## print_modinfo()

    # compress some simple strings
    test("aaaaaaaaaaaaaaaaaaaaaaaa")
    test("abcabcabcabcabcabcabcabc")
    test("abcabcabcabcabcabcabcabc", level=9)
    test(" " * 131072)
    test("")
    print "Simple compression test passed."

    # force an exception (because of invalid compressed data)
    assert issubclass(lzo.error, Exception)
    try:
        x = lzo.decompress("xx")
    except lzo.error:
        pass
    else:
        print "Exception handling does NOT work !"
    return 0
Example #19
def processChunk(filename):
    # Get row and column from filename
    column = int(filename.rsplit('.', 1)[0].split('~')[0])
    row = int(filename.rsplit('.', 1)[0].split('~')[1]) + 1
    chunk_tilesize = {"x": tilesize, "y": tilesize}

    # Account for columns or rows that are too short
    if (column + 1) == columns:
        chunk_tilesize['x'] = tilesize - differencex
    if row == rows:
        chunk_tilesize['y'] = tilesize - differencey

    # read the actual data and create an image
    file = zipref.read(composite_key + '/' + filename)
    # 262144 is the final byte size of the pixel data for 256x256 square.
    # This is based on 256*256*4 (width * height * 4 bytes per pixel)
    # finalsize is chunk width * chunk height * 4 bytes per pixel
    finalsize = chunk_tilesize['x'] * chunk_tilesize['y'] * 4
    decompressed = lzo.decompress(file, False, finalsize)
    # Will need to know how big each tile is instead of just saying 256
    tile = Image.frombytes('RGBA', (chunk_tilesize['x'], chunk_tilesize['y']),
                           decompressed)
    # Tile starts upside down, flip it
    tile = tile.transpose(Image.FLIP_TOP_BOTTOM)

    # Calculate pixel position of tile
    positionx = column * tilesize
    positiony = (imagesize[1] - (row * tilesize))
    if (row == rows):
        positiony = 0

    canvas.paste(tile, (positionx, positiony))
Example #20
def test_chunk_write_partial_offset(tmpdir):
    store = Store(str(tmpdir / 'store'))
    f = File(str(tmpdir / 'asdf'), store)

    chunk = Chunk(f, 1, store, None)
    # Write data that fits exactly into this chunk. Nothing remains
    # to be written.
    result = chunk.write(0, SPACE_CHUNK)
    assert result == (Chunk.CHUNK_SIZE, b'')
    # Write data that doesn't fit exactly into this chunk. This means
    # we have remaining data that needs to go into another chunk.
    result = chunk.write(10, SPACE_CHUNK)
    assert result == (Chunk.CHUNK_SIZE - 10, b' ' * 10)

    chunk.flush()
    assert chunk.hash == SPACE_CHUNK_HASH
    store_state = os.stat(store.chunk_path(SPACE_CHUNK_HASH))

    with open(store.chunk_path(chunk.hash), 'rb') as store_file:
        data = store_file.read()
        data = lzo.decompress(data)
        assert data == SPACE_CHUNK

    # Check that we can edit and flush again. Check that the store file
    # wasn't touched.
    chunk.write(0, b'      ')
    chunk.flush()
    assert store_state == os.stat(store.chunk_path(SPACE_CHUNK_HASH))
Example #21
File: readmdict.py Project: ltf/lab
    def _decode_key_block(self, key_block_compressed, key_block_info_list):
        key_list = []
        i = 0
        for compressed_size, decompressed_size in key_block_info_list:
            start = i
            end = i + compressed_size
            # 4 bytes : compression type
            key_block_type = key_block_compressed[start:start+4]
            # 4 bytes : adler checksum of decompressed key block
            adler32 = unpack('>I', key_block_compressed[start+4:start+8])[0]
            if key_block_type == b'\x00\x00\x00\x00':
                key_block = key_block_compressed[start+8:end]
            elif key_block_type == b'\x01\x00\x00\x00':
                if lzo is None:
                    print("LZO compression is not supported")
                    break
                # decompress key block
                header = b'\xf0' + pack('>I', decompressed_size)
                key_block = lzo.decompress(header + key_block_compressed[start+8:end])
            elif key_block_type == b'\x02\x00\x00\x00':
                # decompress key block
                key_block = zlib.decompress(key_block_compressed[start+8:end])
            # extract one single key block into a key list
            key_list += self._split_key_block(key_block)
            # notice that adler32 returns signed value
            assert(adler32 == zlib.adler32(key_block) & 0xffffffff)

            i += compressed_size
        return key_list
Example #22
 def _decompress(
     self, data
 ):  # a decompression function like lrzip in spirit: lzma < bz2 < zlib < lzo < lz4
     try:
         data = lzma.decompress(data)
         sys.stderr.write("lzma ok\n")
     except:
         sys.stderr.write("lzma err\n")
         pass
     try:
         data = bz2.decompress(data)
         sys.stderr.write("bz2 ok\n")
     except:
         sys.stderr.write("bz2 err\n")
         pass
     try:
         data = zlib.decompress(data)
         sys.stderr.write("zlib ok\n")
     except:
         sys.stderr.write("zlib err\n")
         pass
     try:
         data = lzo.decompress(data)
         sys.stderr.write("lzo ok\n")
     except:
         sys.stderr.write("lzo err\n")
         pass
     try:
         data = lz4.block.decompress(data)
         sys.stderr.write("lz4 ok\n")
     except:
         sys.stderr.write("lz4 err\n")
         pass
     return data
Example #23
def receive_frame(counter):
    # Get the size of the frame to receive
    sizeData = con.recv(4)
    byteLength = int.from_bytes(sizeData, "big")

    # Get the data
    print("----------------------------------")
    print("Frame " + str(counter) + " byte length: " + str(byteLength))

    data = b''

    while len(data) < byteLength:
        chunk = con.recv(byteLength - len(data))
        print("Chunk size = " + str(len(chunk)))
        if chunk == b'':
            raise RuntimeError("Connexió de socket caiguda")
        data = data + chunk

    print("Bytes received: " + str(len(data)))

    # Decompress the bytearray; decompressed, it must take up
    # two bytes per position of the array
    global z_positions
    data = lzo.decompress(data, False, z_positions * 2)

    # Build the depth matrix from the bytearray
    global x_positions, y_positions
    Z = np.frombuffer(data, dtype=np.uint16)
    Z = Z.reshape(y_positions, x_positions)
    return Z
Example #24
def main(args):
    # display version information and module documentation
    print("LZO version %s (0x%x), %s" % (lzo.LZO_VERSION_STRING, lzo.LZO_VERSION, lzo.LZO_VERSION_DATE))
    print(lzo.__file__)
    print()
    print(lzo.__doc__)

    # display additional module information
    ## print dir(lzo)
    ## print_modinfo()

    # compress some simple strings
    gen(b"aaaaaaaaaaaaaaaaaaaaaaaa")
    gen_raw(b"aaaaaaaaaaaaaaaaaaaaaaaa")
    gen(b"abcabcabcabcabcabcabcabc")
    gen_raw(b"abcabcabcabcabcabcabcabc")
    gen(b"abcabcabcabcabcabcabcabc", level=9)
    gen_raw(b"abcabcabcabcabcabcabcabc", level=9)
    gen(b" " * 131072)
    gen_raw(b" " * 131072)
    gen(b"")
    gen_raw(b"")
    print("Simple compression test passed.")

    test_version()

    # force an exception (because of invalid compressed data)
    assert issubclass(lzo.error, Exception)
    try:
        x = lzo.decompress("xx")
    except lzo.error:
        pass
    else:
        print("Exception handling does NOT work !")
    return 0
Example #25
def decompress(ctype, unc_len, data):
    if ctype == UBIFS_COMPR_LZO:
        return lzo.decompress(''.join(('\xf0', struct.pack('>I', unc_len), data)))
    elif ctype == UBIFS_COMPR_ZLIB:
        return zlib.decompress(data, -11)
    else:
        return data
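The ''.join(('\xf0', ...)) form relies on Python 2, where str is the byte type. A bytes-safe sketch for Python 3, assuming the same UBIFS_COMPR_* constants are in scope; decompress_py3 is a hypothetical name:

import struct
import zlib

import lzo

def decompress_py3(ctype, unc_len, data):
    # Build python-lzo's 5-byte header from bytes objects so the
    # concatenation also works on Python 3.
    if ctype == UBIFS_COMPR_LZO:    # constant assumed defined as above
        return lzo.decompress(b'\xf0' + struct.pack('>I', unc_len) + data)
    elif ctype == UBIFS_COMPR_ZLIB:
        return zlib.decompress(data, -11)
    return data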
Example #26
    def _decode_key_block(self, key_block_compressed, key_block_info_list):
        key_list = []
        i = 0
        for compressed_size, decompressed_size in key_block_info_list:
            start = i
            end = i + compressed_size
            # 4 bytes : compression type
            key_block_type = key_block_compressed[start:start + 4]
            # 4 bytes : adler checksum of decompressed key block
            adler32 = unpack('>I',
                             key_block_compressed[start + 4:start + 8])[0]
            if key_block_type == b'\x00\x00\x00\x00':
                key_block = key_block_compressed[start + 8:end]
            elif key_block_type == b'\x01\x00\x00\x00':
                if lzo is None:
                    print("LZO compression is not supported")
                    break
                # decompress key block
                header = b'\xf0' + pack('>I', decompressed_size)
                key_block = lzo.decompress(key_block_compressed[start + 8:end],
                                           initSize=decompressed_size,
                                           blockSize=1308672)
            elif key_block_type == b'\x02\x00\x00\x00':
                # decompress key block
                key_block = zlib.decompress(key_block_compressed[start +
                                                                 8:end])
            # extract one single key block into a key list
            key_list += self._split_key_block(key_block)
            # notice that adler32 returns signed value
            assert (adler32 == zlib.adler32(key_block) & 0xffffffff)

            i += compressed_size
        return key_list
Example #27
	def get_experiment(self, xp_uuid=None, force_new=False, blacklist=[], pattern=None, tmax=0, **xp_cfg):
		if force_new:
			tempexp = Experiment(database=self,**xp_cfg)
			tempexp.commit_to_db()
		elif xp_uuid is not None:
			if self.id_in_db(xp_uuid):
				self.cursor.execute("SELECT Experiment_object FROM main_table WHERE Id=\'"+str(xp_uuid)+"\'")
				tempblob = self.cursor.fetchone()
				if sys.version_info.major == 2:
					try:
						tempexp = pickle.loads(lzo.decompress(str(tempblob[0])))
					except TypeError:
						tempexp = pickle.loads(lzo.decompress(str(tempblob[0].tobytes())))
				else:
					try:
						tempexp = pickle.loads(lzo.decompress(tempblob[0]))
					except TypeError:
						tempexp = pickle.loads(lzo.decompress(tempblob[0].tobytes()))

				tempexp.db = self
			else:
				print("ID doesn't exist in DB")
				return self.get_experiment(blacklist=blacklist,pattern=pattern,tmax=tmax, **xp_cfg)
		else:
			templist=self.get_id_list(pattern=pattern, tmax=tmax, **xp_cfg)
			for elt in blacklist:
				try:
					templist.remove(elt)
				except ValueError:
					pass
			temptmax = -1
			for xp_uuid in templist:
				t = int(self.get_param(param='Tmax', xp_uuid=xp_uuid))
				temptmax = max(temptmax, min(t, tmax))
			# rebuild the list rather than calling remove() while iterating, which skips elements
			templist = [u for u in templist if int(self.get_param(param='Tmax', xp_uuid=u)) >= temptmax]
			if templist:
				i=random.randint(0,len(templist)-1)
				tempexp = self.get_experiment(xp_uuid=templist[i])
				tempexp.db=self
			else:
				tempexp = Experiment(database=self,**xp_cfg)
				tempexp.commit_to_db()
		return tempexp
Example #28
 def decompress(self, data, dsize=None):
     import lzo
     result = ''
     try:
         result = lzo.decompress(data, False, dsize)
         return result
     except lzo.error as e:
         log.debug(e)
Example #29
 def frame_copy(n: int, f: VideoFrame) -> VideoFrame:
     fout = f.copy()
     frame_data, frame_props = self.get_frame(n, pipe=False)
     if self.compression_method == 'lzo':
         frame_data = pickle.loads(lzo.decompress(frame_data))
     for p in range(fout.format.num_planes):
         np.asarray(fout.get_write_array(p))[:] = frame_data[p]
     for i in frame_props:
         fout.props[i] = frame_props[i]
     return fout
Example #30
def Decompress(Input):
    Output = '.'.join(Input.split('.')[0:-1])
    file_in = file(Input, "r")
    c_data = file_in.read()

    file_out = file(Output, "w")
    data = lzo.decompress(c_data)
    file_out.write(data)
    file_out.close()

    file_in.close()
Example #31
    def _decode_record_block(self):
        f = open(self._fname, 'rb')
        record_block_data_offset = self._record_block_offset + self._number_width * 4 \
                                   + self._number_width * 2 * len(self._record_block_info_list)
        f.seek(record_block_data_offset)

        # actual record block
        offset = 0
        i = 0
        size_counter = 0

        for compressed_size, decompressed_size in self._record_block_info_list:
            record_block_compressed = f.read(compressed_size)
            # 4 bytes: compression type
            record_block_type = record_block_compressed[:4]
            # 4 bytes: adler32 checksum of decompressed record block
            adler32 = unpack('>I', record_block_compressed[4:8])[0]
            if record_block_type == b'\x00\x00\x00\x00':
                record_block = record_block_compressed[8:]
            elif record_block_type == b'\x01\x00\x00\x00':
                if lzo is None:
                    print("LZO compression is not supported")
                    break
                # decompress
                header = b'\xf0' + pack('>I', decompressed_size)
                record_block = lzo.decompress(header +
                                              record_block_compressed[8:])
            elif record_block_type == b'\x02\x00\x00\x00':
                # decompress
                record_block = zlib.decompress(record_block_compressed[8:])

            # notice that adler32 return signed value
            assert (adler32 == zlib.adler32(record_block) & 0xffffffff)

            assert (len(record_block) == decompressed_size)
            # split record block according to the offset info from key block
            while i < len(self._key_list):
                record_start, key_text = self._key_list[i]
                # reach the end of current record block
                if record_start - offset >= len(record_block):
                    break
                # record end index
                if i < len(self._key_list) - 1:
                    record_end = self._key_list[i + 1][0]
                else:
                    record_end = len(record_block) + offset
                i += 1
                data = record_block[record_start - offset:record_end - offset]
                yield key_text, data
            offset += len(record_block)
            size_counter += compressed_size
        assert (size_counter == self._record_block_size)

        f.close()
Example #32
    def get_file_content(self, name, node):
        # try:
        #     mkdir_p(os.path.dirname(name))
        # except OSError:
        #     logger.critical("Failed to created directory for {name}".format(name=name))
        #     return
        content = str2byt("")

        # with open(name, "wb") as wfd:
        inodes = self.inodes[node]
        sorted_nodes = sorted(inodes, key=lambda item: item[3])
        ts = 0
        for inode in sorted_nodes:
            (version, isize, mtime, offset, csize, dsize, compr, dataidx) = inode
            ts = mtime
            if compr == JFFS2_COMPR_NONE:
                # wfd.write(self.image[dataidx:dataidx + csize])
                content += self.image[dataidx:dataidx + csize]
            elif compr == JFFS2_COMPR_ZLIB:
                try:
                    decompr = zlib.decompress(self.image[dataidx:dataidx + csize])
                    # wfd.write(decompr)
                    content += decompr
                except zlib.error:
                    logger.critical("Failed to decompress zlib, dumping raw")
                    # wfd.write(self.image[dataidx:dataidx + csize])
                    content += self.image[dataidx:dataidx + csize]
            elif compr == JFFS2_COMPR_RTIME:
                try:
                    decompr = rtime_decompress(self.image[dataidx:dataidx + csize], dsize)
                    # wfd.write(decompr)
                    content += decompr
                except IndexError:
                    logger.critical("rtime failed, dumping")
                    # wfd.write(self.image[dataidx:dataidx + csize])
                    content += self.image[dataidx:dataidx + csize]
            elif compr == JFFS2_COMPR_LZO:
                if lzo is None:
                    logger.critical("No lzo installed!")
                try:
                    compressed = '\xf0' + struct.pack('!L', dsize) + self.image[dataidx:dataidx + csize]
                    decompr = lzo.decompress(compressed)
                    # wfd.write(decompr)
                    content += decompr
                except lzo.error as e:
                    logger.critical("Failed to decompress lzo, dumping raw (%s)" % str(e))
                    # wfd.write(self.image[dataidx:dataidx + csize])
                    content += self.image[dataidx:dataidx + csize]
            else:
                logger.critical("Unknown compression %d" % compr)
        # os.utime(name, (ts, ts))

        return content
Example #33
def retrieveFromDatabase(cur, inputFile, outputFile):

    cur.execute("SELECT compression,data_file FROM map WHERE name = ?", (inputFile,))
    row = cur.fetchone()
    data = row[1]

    if row[0] > 0:
        ldata = lzo.decompress(data)
        data = ldata
    
    i = open(outputFile, 'wb')
    i.write(data)
    i.close()
Example #34
 def decompress(self):
     sig = self.file.readStruct('4s')
     if sig == b"LZO\0":
         version = self.file.readu32()
         decLen = self.file.readu32()
         compLen = self.file.readu32()
     else:
         raise TypeError("Not a LZO file (header: " + str(sig) + ")")
     #result = bytearray(decLen)
     compData = self.file.readBytes(compLen)
     result = lzo.decompress(compData)
     assert len(result) == decLen
     return result
Example #35
 def load_block(self, index):
     if self.cache is not None and index in self.cache:
         return self.cache[index]
     else:
         offset, csize, size = self.block_info[index]
         # Get the block of compressed data
         self.file.seek(offset)
         data = self.file.read(csize)
         # Need to prepend a header for python-lzo module (silly)
         data = ''.join(('\xf0', struct.pack("!I", size), data))
         value = lzo.decompress(data)
         if self.cache is not None:
             self.cache[index] = value
         return value
Example #36
def get_tile_size(reader: ChkDirReader, chunks: list[ChunkRange]) -> int:
    """Gets the size of a square tile from an unknown chunk"""
    for chunk in chunks:
        data: bytes = deflate_range(reader, chunk.start, chunk.end, True)
        if data is None:
            continue
        try:
            decompressed: bytes = lzo.decompress(data, False, MAX_BUFFER_LEN)
            pixel_count: float = len(decompressed) / 4  # RGBA per-pixel
            tilesize = math.sqrt(pixel_count)  # square edge length
            return int(tilesize)
        except:  # pylint: disable=bare-except
            continue
    return -1
Example #37
def lloads(value):
    ''' 
        load serialized string from ldumps
        
        input: 
            - value: serialized string
        
        output:
            - python variable
    '''
    import lzo
    try:
        return pickle.loads(lzo.decompress(value))
    except:
        return pickle.loads(value)
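The matching ldumps is not shown here; a plausible sketch, assuming it simply pickles and then LZO-compresses (which would explain lloads's fallback to plain pickle for uncompressed values):

import pickle
import lzo

def ldumps(value):
    # Hypothetical inverse of lloads: serialize first, then compress.
    return lzo.compress(pickle.dumps(value))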
Example #38
def read_data_conn(cursor, filepath, label=None):
    if not os.path.isfile(filepath):
        xz_decompress(filepath + '.xz')
        os.remove(filepath + '.xz')
    if label is None:
        cursor.execute('SELECT max(T) FROM main_table')
        max_T = cursor.fetchone()[0]
        label = max_T
    cursor.execute("SELECT Population_object FROM main_table WHERE T=\'" +
                   str(label) + "\'")
    blob = cursor.fetchone()
    lz_data = blob[0]
    pickled_data = lzo.decompress(lz_data)
    data = pickle.loads(pickled_data)
    return data
Example #39
	def read_data(self,label=None):
		conn = psycopg2.connect(self.conn_info)
		with conn:
			cursor = conn.cursor()
			if label is None:
				cursor.execute('SELECT max(T) FROM '+self.db_id)
				max_T = cursor.fetchone()[0]
				label = max_T
			cursor.execute("SELECT Population_object FROM "+self.db_id+" WHERE T=\'"+str(label)+"\'")
			blob = cursor.fetchone()
		if blob is None:
			raise IOError('No row in database ' + self.db_id + ' for label: '+str(label))
		lz_data = blob[0]
		pickled_data = lzo.decompress(bytes(lz_data))
		data = pickle.loads(pickled_data)
		return data
Example #40
def decompress(ctype, unc_len, data):
    """Decompress data.

    Arguments:
    Int:ctype    -- Compression type LZO, ZLIB (*currently unused*).
    Int:unc_len  -- Uncompressed data length.
    Str:data     -- Data to be uncompressed.

    Returns:
    Uncompressed Data.
    """
    if ctype == UBIFS_COMPR_LZO:
        try:
            return lzo.decompress(''.join(('\xf0', struct.pack('>I', unc_len), data)))
        except Exception, e:
            error(decompress, 'Warn', 'LZO Error: %s' % e)
Example #41
    def process(self):
        while True:
            ret = self.sqs.receive_message(
                QueueUrl=self.queue_url,
                MaxNumberOfMessages=10,
                WaitTimeSeconds=1
            )

            if 'Messages' not in ret:
                continue
            
            for msg in ret['Messages']:
                key = msg['Body']
                record = self.s3.get_object(Bucket='samuel-html', Key=key)
                #pack['Body'] botocore.response.StreamingBody
                pack = json.loads(lzo.decompress(record['Body'].read()).decode('utf-8'))
            #    response = self.client.delete_message(
            #        QueueUrl=self.queue_url,
            #        ReceiptHandle=msg['ReceiptHandle']
            #    )
            #    print(response)

                self.bloom_filter.add(pack['url'])
                if pack.get('code') == 200:
                    url = pack['url']
                    ret = self.extractor.extract(pack)
                    for link in ret['links']:
                        if not self.bloom_filter.add(link['url']):
                            seed(link)
                        else:
                            #print 'already crawled', link['url']
                            pass
                    #save pack to tbl_link
                    self.dynamodb.Table('link').put_item(
                        Item = {
                            'url': url,
                            'ctime': Decimal(str(time.time())),
                            'utime': Decimal(str(time.time()))
                        }
                    )
                    logger.info("%s ok" % (pack['url']))
                else:
                    logger.warn("%s not ok code:%d" % (pack['url'], pack.get('code')))
                response = self.sqs.delete_message(
                    QueueUrl=self.queue_url,
                    ReceiptHandle=msg['ReceiptHandle']
                )
Example #42
def decompress(ctype, unc_len, data):
    """Decompress data.

    Arguments:
    Int:ctype    -- Compression type LZO, ZLIB (*currently unused*).
    Int:unc_len  -- Uncompressed data length.
    Str:data     -- Data to be uncompressed.

    Returns:
    Uncompressed Data.
    """
    if ctype == UBIFS_COMPR_LZO:
        return lzo.decompress(''.join(('\xf0', struct.pack('>I', unc_len), data)))
    elif ctype == UBIFS_COMPR_ZLIB:
        return zlib.decompress(data, -11)
    else:
        return data
Example #43
    def dump_file(self, name, node):
        logger.info("Writing file %s" % name)
        try:
            mkdir_p(os.path.dirname(name))
        except OSError:
            logger.critical("Failed to created directory for {name}".format(name=name))
            return

        with open(name, "wb") as wfd:
            inodes = self.inodes[node]
            sorted_nodes = sorted(inodes, key=lambda item: item[3])
            ts = 0
            for inode in sorted_nodes:
                (version, isize, mtime, offset, csize, dsize, compr, dataidx) = inode
                ts = mtime
                if compr == JFFS2_COMPR_NONE:
                    wfd.write(self.image[dataidx:dataidx+csize])
                elif compr == JFFS2_COMPR_ZLIB:
                    try:
                        decompr = zlib.decompress(self.image[dataidx:dataidx+csize])
                        wfd.write(decompr)
                    except zlib.error:
                        logger.critical("Failed to decompress zlib, dumping raw")
                        wfd.write(self.image[dataidx:dataidx+csize])
                elif compr == JFFS2_COMPR_RTIME:
                    try:
                        decompr = rtime_decompress(self.image[dataidx:dataidx+csize], dsize)
                        wfd.write(decompr)
                    except IndexError:
                        logger.critical("rtime failed, dumping")
                        wfd.write(self.image[dataidx:dataidx+csize])
                elif compr == JFFS2_COMPR_LZO:
                    if lzo is None:
                        logger.critical("No lzo installed!")
                    try:
                        compressed = '\xf0' + struct.pack('!L', dsize) + self.image[dataidx:dataidx+csize]
                        decompr = lzo.decompress(compressed)
                        wfd.write(decompr)
                    except lzo.error as e:
                        logger.critical("Failed to decompress lzo, dumping raw (%s)" % str(e))
                        wfd.write(self.image[dataidx:dataidx+csize])
                else:
                    logger.critical("Unknown compression %d" % compr)
        os.utime(name, (ts, ts))
Example #44
def decode_body(buf, decompress=True):
    """given the bytes from a .dat file, decode it"""
    head = buf[:0x88]
    key = decode_key(buf[0x88:0x88 + 0x80])
    body = xor_data(key, buf[0x108:])
    expected_pack_size = len(buf) - 0x110
    packed_size, unpacked_size = unpack_from('<L I', body)
    if expected_pack_size != packed_size:
        raise Exception('Wrong packed size')
    if body[-3:] != TAIL:
        raise Exception('Trailing 3 bytes not correct')
    # this is needed to play nice with the lzo api
    if decompress:
        magic = b'\xf0' + unpacked_size.to_bytes(4, 'big')
        data = lzo.decompress(magic + body[8:])
        return head, data
    else:
        return head, body
Example #45
	def read_data(self,label=None):
		if not os.path.isfile(self.filepath):
			if not os.path.isfile(self.filepath+'.xz'):
				raise IOError('No file for poplist: '+self.filepath+' . You should call init_db before adding elements')
			else:
				xz_decompress(self.filepath+'.xz')
		conn = sql.connect(self.filepath)
		with conn:
			cursor = conn.cursor()
			if label is None:
				cursor.execute('SELECT max(T) FROM main_table')
				max_T = cursor.fetchone()[0]
				label = max_T
			cursor.execute("SELECT Population_object FROM main_table WHERE T=\'"+str(label)+"\'")
			blob = cursor.fetchone()
		if blob is None:
			raise IOError('No row in database ' + str(self.filepath) + ' for label: '+str(label))
		lz_data = blob[0]
		pickled_data = lzo.decompress(bytes(lz_data))
		data = pickle.loads(pickled_data)
		return data
Example #46
def main(args):
    ## print_modinfo()

    # print version information and module documentation
    print "LZO version %s (0x%x), %s" % (lzo.LZO_VERSION_STRING, lzo.LZO_VERSION, lzo.LZO_VERSION_DATE)
    print
    print lzo.__doc__

    # compress some simple strings
    test("aaaaaaaaaaaaaaaaaaaaaaaa")
    test("abcabcabcabcabcabcabcabc")
    test("abcabcabcabcabcabcabcabc",9)
    test(" " * 131072)
    test("")
    print "Simple compression test passed."

    # force an exception
    try:
        x = lzo.decompress("xx")
    except lzo.error, msg:
        ## print msg
        pass
Example #47
def unwrap_player_data(data):
    if data[:20] != hashlib.sha1(data[20:]).digest():
        raise BL2Error("Invalid save file")

    data = lzo.decompress("\xf0" + data[20:])
    size, wsg, version = struct.unpack(">I3sI", data[:11])
    if version != 2 and version != 0x02000000:
        raise BL2Error("Unknown save version " + str(version))

    if version == 2:
        crc, size = struct.unpack(">II", data[11:19])
    else:
        crc, size = struct.unpack("<II", data[11:19])

    bitstream = ReadBitstream(data[19:])
    tree = read_huffman_tree(bitstream)
    player = huffman_decompress(tree, bitstream, size)

    if (lzo.crc32(player) & 0xffffffff) != crc:
        raise BL2Error("CRC check failed")

    return player
Example #48
 def decompress(self, data):
     return lzo.decompress(data)
Example #49
    def _decode_record_block(self):
        f = open(self._fname, 'rb')
        f.seek(self._record_block_offset)

        num_record_blocks       = self._read_number(f)
        num_entries             = self._read_number(f)
        assert(num_entries == self._num_entries)
        record_block_info_size  = self._read_number(f)
        record_block_size       = self._read_number(f)

        # record block info section
        record_block_info_list = []
        size_counter = 0
        for i in range(num_record_blocks):
            compressed_size     = self._read_number(f)
            decompressed_size   = self._read_number(f)
            record_block_info_list += [(compressed_size, decompressed_size)]
            size_counter += self._number_width * 2
        assert(size_counter == record_block_info_size)

        # actual record block data
        offset = 0
        i = 0
        size_counter = 0
        for compressed_size, decompressed_size in record_block_info_list:
            record_block_compressed = f.read(compressed_size)
            # 4 bytes indicates block compression type
            record_block_type = record_block_compressed[:4]
            # no compression
            if record_block_type == b'\x00\x00\x00\x00':
                record_block = record_block_compressed[8:]
            # lzo compression
            elif record_block_type == b'\x01\x00\x00\x00':
                if not HAVE_LZO:
                    log.error("LZO compression is not supported")
                    break
                # 4 bytes as adler32 checksum
                adler32 = unpack('>I', record_block_compressed[4:8])[0]
                # decompress
                header = b'\xf0' + pack('>I', decompressed_size)
                record_block = lzo.decompress(header + record_block_compressed[8:])
                # notice that lzo 1.x return signed value
                assert(adler32 == lzo.adler32(record_block) & 0xffffffff)
            # zlib compression
            elif record_block_type == b'\x02\x00\x00\x00':
                # 4 bytes as checksum
                assert(record_block_compressed[4:8] == record_block_compressed[-4:])
                # compressed contents
                record_block = zlib.decompress(record_block_compressed[8:])
            assert(len(record_block) == decompressed_size)
            # split record block according to the offset info from key block
            while i < len(self._key_list):
                record_start, key_text = self._key_list[i]
                # reach the end of current record block
                if record_start - offset >= len(record_block):
                    break
                # record end index
                if i < len(self._key_list)-1:
                    record_end = self._key_list[i+1][0]
                else:
                    record_end = len(record_block) + offset
                i += 1
                record = record_block[record_start-offset:record_end-offset]
                # convert to utf-8
                #record = record.decode(self._encoding, errors='ignore').strip(u'\x00').encode('utf-8')
                record = record.decode(self._encoding, errors='ignore').strip(u'\x00')
                # substitute styles
                if self._substyle and self._stylesheet:
                    record = self._substitute_stylesheet(record)

                yield key_text, record
            offset += len(record_block)
            size_counter += compressed_size
        assert(size_counter == record_block_size)

        f.close()
Example #50
 def decompressInputStream(self, data):
     return DataInputBuffer(lzo.decompress(data))
Example #51
 def _get(self, acc):
     flat = np.frombuffer(comp.decompress(self.db[acc]), 'I')
     return flat.reshape([self.NROW, len(flat) // self.NROW])
Example #52
File: readmdict.py Project: ltf/lab
    def _decode_record_block(self):
        f = open(self._fname, 'rb')
        f.seek(self._record_block_offset)

        num_record_blocks = self._read_number(f)
        num_entries = self._read_number(f)
        assert(num_entries == self._num_entries)
        record_block_info_size = self._read_number(f)
        record_block_size = self._read_number(f)

        # record block info section
        record_block_info_list = []
        size_counter = 0
        for i in range(num_record_blocks):
            compressed_size = self._read_number(f)
            decompressed_size = self._read_number(f)
            record_block_info_list += [(compressed_size, decompressed_size)]
            size_counter += self._number_width * 2
        assert(size_counter == record_block_info_size)

        # actual record block
        offset = 0
        i = 0
        size_counter = 0
        for compressed_size, decompressed_size in record_block_info_list:
            record_block_compressed = f.read(compressed_size)
            # 4 bytes: compression type
            record_block_type = record_block_compressed[:4]
            # 4 bytes: adler32 checksum of decompressed record block
            adler32 = unpack('>I', record_block_compressed[4:8])[0]
            if record_block_type == b'\x00\x00\x00\x00':
                record_block = record_block_compressed[8:]
            elif record_block_type == b'\x01\x00\x00\x00':
                if lzo is None:
                    print("LZO compression is not supported")
                    break
                # decompress
                header = b'\xf0' + pack('>I', decompressed_size)
                record_block = lzo.decompress(header + record_block_compressed[8:])
            elif record_block_type == b'\x02\x00\x00\x00':
                # decompress
                record_block = zlib.decompress(record_block_compressed[8:])

            # notice that adler32 return signed value
            assert(adler32 == zlib.adler32(record_block) & 0xffffffff)

            assert(len(record_block) == decompressed_size)
            # split record block according to the offset info from key block
            while i < len(self._key_list):
                record_start, key_text = self._key_list[i]
                # reach the end of current record block
                if record_start - offset >= len(record_block):
                    break
                # record end index
                if i < len(self._key_list)-1:
                    record_end = self._key_list[i+1][0]
                else:
                    record_end = len(record_block) + offset
                i += 1
                data = record_block[record_start-offset:record_end-offset]
                yield key_text, data
            offset += len(record_block)
            size_counter += compressed_size
        assert(size_counter == record_block_size)

        f.close()