Example #1
    def process(self, data: Any) -> Any:
        """Implements a Processor for decompressing a numpy array."""
        if isinstance(data, list):
            # A list payload carries a shared (shape, dtype) header entry
            # followed by per-sample compressed feature/target tuples.
            ret = []
            datashape_feature = data[0][0]
            datatype_feature = data[0][1]
            for datacom_feature, datacom_target, datashape_target, datatype_target in data[1:]:
                datacom_feature = zstd.decompress(datacom_feature)
                datacom_feature = np.frombuffer(
                    datacom_feature,
                    datatype_feature).reshape(datashape_feature)
                if len(datashape_target) > 0 and datashape_target[0] == 0:
                    # An empty target was stored as a bare shape; recreate it.
                    datacom_target = np.zeros(datashape_target)
                else:
                    datacom_target = zstd.decompress(datacom_target)
                    datacom_target = np.frombuffer(
                        datacom_target,
                        datatype_target).reshape(datashape_target)
                ret.append((datacom_feature, datacom_target))
        else:
            # A bare tuple carries a single compressed array (e.g. model weights).
            shape, dtype, modelcom = data
            modelcom = zstd.decompress(modelcom)
            ret = np.frombuffer(modelcom, dtype).reshape(shape)

        return ret
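For context, a minimal sketch of the compression side that the single-array branch above assumes, packing an array as (shape, dtype, zstd-compressed bytes); the function name is hypothetical:

import numpy as np
import zstd

def compress_array(arr: np.ndarray):
    # Inverse of the else-branch above: the processor restores the array
    # with np.frombuffer(...).reshape(shape).
    return arr.shape, arr.dtype, zstd.compress(arr.tobytes())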
Example #2
def dump_event_csv(event_payload, idx, timestamp=None, verbose=False):
    evt = Event.GetRootAsEvent(event_payload, 0)
    m = evt.Meta()
    message = {
        'id': m.Id().decode("utf-8"),
        'payload-size': evt.PayloadLength(),
        'encoding': event_encoding_name(m.Encoding()),
        't': timestamp_to_datetime(timestamp)
    }

    payload = evt.PayloadAsNumpy()
    if m.Encoding() == EventEncoding.Zstd:
        payload = zstd.decompress(payload)

    if m.PayloadType() == PayloadType.CB:
        fill_cb(payload, message)
    elif m.PayloadType() in (PayloadType.CCB, PayloadType.Slates):
        ...
    elif m.PayloadType() == PayloadType.Outcome:
        fill_outcome(payload, message)
    elif m.PayloadType() == PayloadType.CA:
        ...
    elif m.PayloadType() == PayloadType.DedupInfo:
        ...
    elif m.PayloadType() == PayloadType.MultiStep:
        fill_multistep(payload, message)
    else:
        ...
    return {'type': payload_name(m.PayloadType()), 'message': message}
Example #3
def fetch_file(filename, path, patchline='live', platform='mac', region='NA'):
    """Fetches a file from the game client."""
    rman = PatcherManifest(download(patchline, platform, region))
    file = rman.files[filename]
    # Collect the unique bundle ids referenced by the file's chunks,
    # preserving first-seen order.
    bundle_ids = list(dict.fromkeys(chunk.bundle.bundle_id
                                    for chunk in file.chunks))

    # bundle_dir is assumed to be a module-level download directory.
    for bundle_id in bundle_ids:
        name = f'{bundle_id:016X}.bundle'
        url = f'{constants.riotcdn_url}/channels/public/bundles/{name}'
        util.download(url, os.path.join(bundle_dir, name))

    with open(path, 'wb') as f:
        for chunk in file.chunks:
            bundle_id = chunk.bundle.bundle_id
            with open(os.path.join(bundle_dir, f'{bundle_id:016X}.bundle'),
                      'rb') as bundle:
                bundle.seek(chunk.offset)
                f.write(zstd.decompress(bundle.read(chunk.size)))
Example #4
    def _extract(self, file, file_path, raw=False):
        self._data.seek(file.offset, 0)
        this = self._data.bytes(file.compressed_file_size)
        # https://github.com/Pupix/lol-wad-parser/blob/2de5a9dafb77b7165b568316d5c1b1f8b5e898f2/lib/extract.js#L11
        # https://github.com/CommunityDragon/CDTB/blob/2663610ed10a2f5fdeeadc5860abca275bcd6af6/cdragontoolbox/wad.py#L82
        if file.type == 0:      # stored uncompressed
            data = this
        elif file.type == 1:    # gzip
            data = gzip.decompress(this)
        elif file.type == 2:    # file redirection entry
            data = BinaryReader(this)
            n = data.customize('<L')
            data.skip(4)
            re = data.bytes(4 + n).rstrip(b'\0').decode('utf-8')
            log.debug(f'File redirection: {re}')
            return
        elif file.type == 3:    # zstd
            data = zstd.decompress(this)
        else:
            raise ValueError(f"Unsupported file type: {file.type}")

        if raw:
            return data
        else:
            file_dir = os.path.dirname(file_path)
            if not os.path.exists(file_dir):
                os.makedirs(file_dir)

            log.debug(f'Extracting file: {file_path}')
            with open(file_path, 'wb+') as f:
                f.write(data)
            return file_path
Example #5
def _decompress(binary: bytes) -> bytes:
    """
    This function decompresses a binary using the scheme defined in the first byte of the input

    Args:
        binary (bytes): a compressed binary

    Returns:
        bytes: decompressed binary

    """

    # check the 1-byte header for the compression scheme used
    compress_scheme = binary[0]

    # remove the 1-byte header from the input stream
    binary = binary[1:]
    # decompress or return the original stream
    if compress_scheme == LZ4:
        return lz4.frame.decompress(binary)
    elif compress_scheme == ZSTD:
        return zstd.decompress(binary)
    elif compress_scheme == NO_COMPRESSION:
        return binary
    else:
        raise CompressionNotFoundException(
            "compression scheme not found for compression code: " +
            str(compress_scheme))
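A minimal sketch of the matching compression side, assuming the scheme constants are small one-byte integer codes (the values below are hypothetical):

import lz4.frame
import zstd

LZ4, ZSTD, NO_COMPRESSION = 0, 1, 2  # assumed one-byte scheme codes

def _compress(binary: bytes, scheme: int = LZ4) -> bytes:
    # Prepend the 1-byte header that _decompress strips off.
    if scheme == LZ4:
        body = lz4.frame.compress(binary)
    elif scheme == ZSTD:
        body = zstd.compress(binary)
    else:
        body = binary
    return bytes([scheme]) + body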
Example #6
def handle_chunk_data(data: bytes) -> bytes:
    packet_chunk_data = PacketChunkData(data)
    packet_chunk_data.unpack_custom_packet_data()
    cached_ys = []
    for y, section in enumerate(packet_chunk_data.sections):
        # if section exists, put the data into database
        if section:
            coords = packet_chunk_data.get_coords_bytes(y)
            get_session_info('chunk_section_db').put(coords,
                                                     zstd.compress(section))
            cached_ys.append(y)

    # iterate over the cached section mask and restore cached sections
    for y in range(16):
        if packet_chunk_data.cached_section_mask.get(y):
            coords = packet_chunk_data.get_coords_bytes(y)
            packet_chunk_data.sections[y] = zstd.decompress(
                get_session_info('chunk_section_db').get(coords))

    # send ack packet
    if cached_ys:
        packet_chunk_data_ack = PacketChunkDataAck()

        packet_chunk_data_ack.dimension = get_session_info('dimension')
        packet_chunk_data_ack.chunk_x = packet_chunk_data.x
        packet_chunk_data_ack.chunk_z = packet_chunk_data.z
        for y in cached_ys:
            packet_chunk_data_ack.section_ys.append(y)

        try:
            send_packet(CHUNK_DATA_ACK, packet_chunk_data_ack.pack_packet())
        except OSError:
            pass

    return packet_chunk_data.pack_vanilla_packet_data()
Example #7
def idenLibProcessSignatures():
    global func_sigs
    global mainSigs
    for file in getFiles(symEx_dir):
        if not file.endswith(SIG_EXT):
            continue
        with open(file, 'rb') as ifile:
            sig = ifile.read()
            sig = zstd.decompress(sig).strip()
            sig = sig.split(b"\n")
            for line in sig:
                # Signature data is bytes, so split on bytes literals.
                sig_opcodes, name = line.split(b" ")
                if b'_' in sig_opcodes:  # "main" signatures
                    opcodeMain, mainIndexes = sig_opcodes.split(b'_')
                    fromFunc, fromBase = mainIndexes.split(b"!")
                    mainSigs[opcodeMain] = (name.strip(), int(fromFunc),
                                            int(fromBase))
                elif b'+' in sig_opcodes:
                    opcodes, strBranches = sig_opcodes.split(b'+')
                    nBranches = int(strBranches)
                    func_sigs[opcodes.strip()] = (name.strip(), nBranches)
                else:
                    func_sigs[sig_opcodes.strip()] = (name.strip(), 0)
    if not os.path.isdir(idenLib_appdata):
        os.mkdir(idenLib_appdata)
    with open(idenLibCache, "wb") as f:
        pickle.dump(func_sigs, f)
    with open(idenLibCacheMain, "wb") as f:
        pickle.dump(mainSigs, f)
    print("[idenLib] Signatures refreshed...\n")
Example #8
def dump_event(event_payload, idx, timestamp=None):
    evt = Event.GetRootAsEvent(event_payload, 0)
    m = evt.Meta()

    print(
        f'\t[{idx}] id:{m.Id().decode("utf-8")} type:{payload_name(m.PayloadType())} payload-size:{evt.PayloadLength()} encoding:{event_encoding_name(m.Encoding())} ts:{timestamp_to_datetime(timestamp)}'
    )

    payload = evt.PayloadAsNumpy()
    if m.Encoding() == EventEncoding.Zstd:
        payload = zstd.decompress(payload)

    if m.PayloadType() == PayloadType.CB:
        parse_cb(payload)
    elif m.PayloadType() in (PayloadType.CCB, PayloadType.Slates):
        parse_multislot(payload)
    elif m.PayloadType() == PayloadType.Outcome:
        parse_outcome(payload)
    elif m.PayloadType() == PayloadType.CA:
        parse_continuous_action(payload)
    elif m.PayloadType() == PayloadType.DedupInfo:
        parse_dedup_info(payload)
    else:
        print('unknown payload type')
Example #9
    def helper_compression_level20(self):
        if sys.hexversion < 0x03000000:
            DATA = "This is must be very very long string to be compressed by zstd. AAAAAAAAAAAAARGGHHH!!! Just hope its enough length. И немного юникода."
        else:
            DATA = b"This is must be very very long string to be compressed by zstd. AAAAAAAAAAAAARGGHHH!!! Just hope its enough length." + " И немного юникода.".encode()
        self.assertEqual(DATA, zstd.decompress(zstd.compress(DATA, 20)))
Example #10
def dump_event_csv(event_payload, idx, timestamp=None, verbose=False, batch_idx=0):
    evt = Event.GetRootAsEvent(event_payload, 0)
    m = evt.Meta()
    message = {
        "batch": batch_idx,
        "id": m.Id().decode("utf-8"),
        "payload-size": evt.PayloadLength(),
        "encoding": event_encoding_name(m.Encoding()),
        "t": timestamp_to_datetime(timestamp),
    }

    payload = evt.PayloadAsNumpy()
    if m.Encoding() == EventEncoding.Zstd:
        payload = zstd.decompress(payload)

    if m.PayloadType() == PayloadType.CB:
        fill_cb(payload, message)
    elif m.PayloadType() == PayloadType.CCB or m.PayloadType() == PayloadType.Slates:
        ...
    elif m.PayloadType() == PayloadType.Outcome:
        fill_outcome(payload, message)
    elif m.PayloadType() == PayloadType.CA:
        ...
    elif m.PayloadType() == PayloadType.DedupInfo:
        ...
    elif m.PayloadType() == PayloadType.MultiStep:
        fill_multistep(payload, message)
    else:
        ...
    return {"type": payload_name(m.PayloadType()), "message": message}
Example #11
    def extractPartition(self, index):
        """
        Extracts a partition from a compressed DZ file.

        This function can be memory-intensive when used with large
        segments, as the entire compressed segment is loaded into RAM
        and decompressed at once.

        A better way to do this would be to read the compressed data in
        chunks and feed them through a streaming decompressor in a loop.

        I'm lazy though, and y'all have fast computers, so this is good
        enough.
        """

        currentPartition = self.partitions[index]

        # Seek to the beginning of the compressed data in the specified partition
        self.infile.seek(currentPartition['offset'])

        # Ensure that the output directory exists
        if not os.path.exists(self.outdir):
            os.makedirs(self.outdir)

        # Read the whole compressed segment into RAM
        zdata = self.infile.read(currentPartition['length'])

        # Decompress the data and write it to disk
        with open(os.path.join(self.outdir, currentPartition['name']),
                  'wb') as outfile:
            outfile.write(zstd.decompress(zdata))
Example #12
    def helper_compression_negative_level(self):
        if zstd.ZSTD_version_number() < 10304:
            return raise_skip(
                "PyZstd was built with an old version of the ZSTD library (%s) without support for negative compression levels."
                % zstd.ZSTD_version())

        CDATA = zstd.compress(tDATA, -1)
        self.assertEqual(tDATA, zstd.decompress(CDATA))
Example #13
def receive_events(dataset):
    data = flask.request.data
    if flask.request.headers.get("Content-Encoding") == "zstd":
        data = zstd.decompress(data)
    data = json.loads(data)
    events[dataset].extend(data)
    resp = len(data) * [{"status": 202}]
    return flask.jsonify(resp)
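A hedged sketch of a matching client for this endpoint, assuming it is routed as /events/<dataset> (the host and route below are assumptions):

import json
import requests
import zstd

events_batch = [{"event": "demo"}]
requests.post(
    "http://localhost:5000/events/demo",  # hypothetical host and route
    data=zstd.compress(json.dumps(events_batch).encode()),
    headers={"Content-Encoding": "zstd"},
)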
Example #14
    def load_hlt(self):
        """
        Takes an HLT file and returns it in JSON format.
        """
        # Decompress the HLT file (zstd-compressed JSON) and parse it.
        with open(self.replay_filename, "rb") as f:
            data = zstd.decompress(f.read())
        return json.loads(data.decode())
Example #15
    def process(self, data: Any) -> Any:
        """ Implements a Processor for decompressing model parameters. """

        output = pickle.loads(zstd.decompress(data))

        logging.info("[Server #%d] Decompressed received model parameters.",
                     self.server_id)

        return output
Example #16
def unpack_data(dump, w=None, h=None):
    if dump is None:
        return None
    decompressed = zs.decompress(dump)
    fast_decompressed = lz.decompress(decompressed)
    if fast_decompressed[:5] == b'Chain':
        return pickle.loads(fast_decompressed[5:])
    else:
        return np.reshape(np.frombuffer(fast_decompressed), newshape=(h, w))
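A hypothetical inverse of unpack_data, mirroring the layering above (lz4 inside, zstd outside). The aliases are assumed to be zstd and lz4.block, and the function name is an assumption:

import pickle

import lz4.block as lz
import numpy as np
import zstd as zs

def pack_data(obj) -> bytes:
    if isinstance(obj, np.ndarray):
        raw = obj.tobytes()                 # restored via np.frombuffer above
    else:
        raw = b'Chain' + pickle.dumps(obj)  # tagged pickle payload
    return zs.compress(lz.compress(raw))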
Example #17
def decompress(compressed_value):
    """Zstandard-decompress the compressed value

    Args:
        compressed_value: bytes that were compressed using the 'compress' function

    Returns:
        The decompressed bytes

    """
    return zstd.decompress(compressed_value)
Example #18
def load_compressed_data(file_name):
    with open(file_name, 'rb') as f:
        compressed = f.read()

    # Alternatively, with the zstandard package:
    # data = pickle.loads(zstd.ZstdDecompressor().decompress(compressed))
    data = pickle.loads(zstd.decompress(compressed))
    return data
Example #19
def get_parsed(filename):
    with open(filename, 'rb') as handle:
        raw = handle.read()
    if filename.endswith('pickle'):
        return pickle.loads(zstd.decompress(raw))
    header, body = read_header_and_body(filename)
    return {
        'json': header,
        'binary': body.pop('header'),
        'commands': get_command_timeseries(body.pop('body')),
        'remaining': body,
    }
Example #20
def decode_replay(replay_file):
    """
    Decode a replay file to a python dict
    :param replay_file: path to the zstd-compressed replay file
    :type replay_file: str
    :return: the parsed replay data
    :rtype: dict
    """

    with open(replay_file, "rb") as rpfile:
        decoded_data = zstd.decompress(rpfile.read()).decode('utf-8')
        replay_data = json.loads(decoded_data.strip())
    return replay_data
Example #21
def get_replay_bytes(path):
    with open(path, "rb") as f:
        header = json.loads(f.readline().decode())
        buf = f.read()
        version = header.get("version", 1)

        if version == 1:
            # Version 1: base64-encoded, zlib-compressed body
            decoded = base64.decodebytes(buf)
            decoded = decoded[4:]  # skip the decoded size
            return zlib.decompress(decoded)
        elif version == 2:
            # Version 2: zstd-compressed body
            return zstd.decompress(buf)
        else:
            raise ValueError(f"unsupported replay version: {version}")
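A hedged sketch of the corresponding version-2 writer, matching the layout read above (one JSON header line, then a zstd-compressed body); the function name is hypothetical:

import json
import zstd

def write_replay_v2(path, body: bytes):
    with open(path, "wb") as f:
        f.write(json.dumps({"version": 2}).encode() + b"\n")
        f.write(zstd.compress(body))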
Example #22
    def poll_result_from_cache(self, task_id, timeout=10):
        """
        poll_result_from_cache after receiving a signal from waiting
        Args:
            task_id: identifier of the task whose result is polled
            timeout: total polling budget in seconds

        Returns:
            Tuple of (parsed response, flight_time_response)
        """
        loop_times = int(timeout / self.max_polling_timeout)
        task_id = task_id.encode()
        if self.use_redis:
            for _ in range(loop_times):
                res_exists = self.cache.hexists(task_id, 'result')

                # if still no response yet, wait and continue polling
                if not res_exists:
                    time.sleep(self.max_polling_timeout)
                    continue

                res = self.cache.hget(task_id, b'result')
                flight_time_response = self.cache.hget(task_id, b'flight_time_response')

                break
            else:
                raise TimeoutError
        else:
            for _ in range(loop_times):
                try:
                    res = self.cache[task_id]
                    flight_time_response = res[1]
                    res = res[0]
                    break
                except KeyError:
                    time.sleep(self.max_polling_timeout)
            else:
                raise TimeoutError

        if self.encrypt:
            res = self.encrypt.decrypt(res)

        if self.use_compression:
            res = zstd.decompress(res)

        res = self.parse_response(res)

        return res, flight_time_response
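A hedged sketch of the producer side this poller implies, assuming the worker writes into the same redis hash fields read above (the helper itself is hypothetical; encryption is omitted for brevity):

import time
import zstd

def store_result(cache, task_id: bytes, res: bytes, use_compression=True):
    if use_compression:
        res = zstd.compress(res)
    cache.hset(task_id, b'result', res)
    cache.hset(task_id, b'flight_time_response', str(time.time()))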
Example #23
    async def get_full_block_bytes(self, header_hash: bytes32) -> Optional[bytes]:
        cached = self.block_cache.get(header_hash)
        if cached is not None:
            log.debug(f"cache hit for block {header_hash.hex()}")
            return bytes(cached)
        log.debug(f"cache miss for block {header_hash.hex()}")
        async with self.db.execute(
            "SELECT block from full_blocks WHERE header_hash=?", (self.maybe_to_hex(header_hash),)
        ) as cursor:
            row = await cursor.fetchone()
        if row is not None:
            if self.db_wrapper.db_version == 2:
                return zstd.decompress(row[0])
            else:
                return row[0]

        return None
Example #24
    def get_decompressed_data(self, method_byte, compressed_hash,
                              extra_header_size):
        size_with_header = read_binary_uint32(self.stream)
        compressed_size = size_with_header - extra_header_size - 4

        compressed = BytesIO(self.stream.read(compressed_size))

        block_check = BytesIO()
        write_binary_uint8(method_byte, block_check)
        write_binary_uint32(size_with_header, block_check)
        block_check.write(compressed.getvalue())

        self.check_hash(block_check.getvalue(), compressed_hash)

        compressed = compressed.read(compressed_size - 4)

        return zstd.decompress(compressed)
Example #25
    def deCryptoStart(self, password=""):
        finalCryptoFile = self.loadSecureFile()
        compressedCryptoFiles, cryptoRamdomKey = self.openPacketGenerator(
            finalCryptoFile)  # correct up to this point
        files = self.openCryptoFiles(compressedCryptoFiles, cryptoRamdomKey,
                                     password)
        decompressCryptoFiles = zstd.decompress(files)
        self.deleteCryptoFiles()
        self.writeUnlockFiles(serializerFiles=decompressCryptoFiles)


# import time
# start = time.time()
# temp = EcryptoAdvanced()
# temp.start('RonaldLopes')
# # temp.deCryptoStart('RonaldLopes')
# end = time.time()
# print(end - start)
Example #26
def _decode_tile_layer_data(data: str, compression: str,
                            layer_width: int) -> List[List[int]]:
    """Decode Base64-encoded tile data. Optionally supports gzip, zlib, and zstd compression.

    Args:
        data: The base64 encoded data
        compression: Either zlib, gzip, zstd, or empty. If empty, no decompression is done.
        layer_width: Width of the layer in tiles, used to shape the result.

    Returns:
        List[List[int]]: A nested list containing the decoded data

    Raises:
        ValueError: For an unsupported compression type.
    """
    unencoded_data = base64.b64decode(data)
    if compression == "zlib":
        unzipped_data = zlib.decompress(unencoded_data)
    elif compression == "gzip":
        unzipped_data = gzip.decompress(unencoded_data)
    elif compression == "zstd" and zstd is None:
        raise ValueError("zstd compression support is not installed. "
                         "To install use 'pip install pytiled-parser[zstd]'")
    elif compression == "zstd":
        unzipped_data = zstd.decompress(unencoded_data)
    else:
        unzipped_data = unencoded_data

    tile_grid: List[int] = []

    byte_count = 0
    int_count = 0
    int_value = 0
    for byte in unzipped_data:
        int_value += byte << (byte_count * 8)
        byte_count += 1
        if not byte_count % 4:
            byte_count = 0
            int_count += 1
            tile_grid.append(int_value)
            int_value = 0

    return _convert_raw_tile_layer_data(tile_grid, layer_width)
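The byte-accumulation loop above is decoding little-endian uint32 tile ids; an equivalent sketch using only the standard library:

import struct

def decode_u32_le(data: bytes) -> list:
    # Each tile id is a 4-byte little-endian unsigned integer.
    return list(struct.unpack(f"<{len(data) // 4}I", data))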
Example #27
def save_files(directory, data, file_headers, ignore=None):
    parser = Parser(data)
    if ignore is None:
        ignore = {}
    else:
        # convert list to dict for fast lookup
        ignore = {hash: True for hash in ignore}

    for header in file_headers:
        parser.seek(header['offset'])
        if header['compressed']:
            file_data = parser.raw(header['compressed_file_size'])
            try:
                file_data = gzip.decompress(file_data)
            except OSError:  # if gzip won't work, it must be zstd
                try:
                    file_data = zstd.decompress(file_data)
                except (OSError, ValueError):
                    # Neither gzip nor zstd: assume raw text of some sort
                    # and drop the first 4 (useless) bytes.
                    file_data = file_data[4:]

        # assert header['sha256'] == hashlib.sha256(file_data).hexdigest()  # Make sure we have the correct data!

        filename = header['filename']
        ext = header['extension']
        if ext is not None and not filename.endswith(ext):
            filename = filename + '.' + ext
        filename = filename.split('/')
        filename = os.path.join(directory, *filename)

        if not ignore.get(header['file_hash'], False):
            # file_data is never assigned for uncompressed headers, hence
            # the UnboundLocalError guard below.
            try:
                extract_file(filename, file_data)
            except UnboundLocalError:
                pass
Example #28
def _decompress(compressed_input_bin: bytes, compress_scheme=LZ4) -> bytes:
    """
    This function decompresses a binary using LZ4 or ZSTD

    Args:
        compressed_input_bin (bytes): a compressed binary
        compress_scheme: the compression method to use

    Returns:
        bytes: decompressed binary

    """
    if compress_scheme == LZ4:
        return lz4.frame.decompress(compressed_input_bin)
    elif compress_scheme == ZSTD:
        return zstd.decompress(compressed_input_bin)
    else:
        raise CompressionNotFoundException(
            "compression scheme not found for compression code: " +
            str(compress_scheme))
Example #29
    def extract(self, paths: List[str], out_dir):
        """
        Extract files from the WAD archive.
        :param paths: list of file paths, e.g. ['assets/characters/aatrox/skins/base/aatrox.skn']
        :param out_dir: output directory
        :return:
        """
        for path in paths:
            path_hash = self.get_hash(path)
            for file in self.files:
                if path_hash == file.path_hash:

                    self._data.seek(file.offset, 0)
                    this = self._data.bytes(file.compressed_file_size)
                    # https://github.com/Pupix/lol-wad-parser/blob/2de5a9dafb77b7165b568316d5c1b1f8b5e898f2/lib/extract.js#L11
                    # https://github.com/CommunityDragon/CDTB/blob/2663610ed10a2f5fdeeadc5860abca275bcd6af6/cdragontoolbox/wad.py#L82
                    if file.type == 0:      # stored uncompressed
                        data = this
                    elif file.type == 1:    # gzip
                        data = gzip.decompress(this)
                    elif file.type == 2:    # file redirection entry
                        data = BinaryReader(this)
                        n = data.customize('<L')
                        data.skip(4)
                        re = data.bytes(4 + n).rstrip(b'\0').decode('utf-8')
                        log.debug(f'File redirection: {re}')
                        continue
                    elif file.type == 3:    # zstd
                        data = zstd.decompress(this)
                    else:
                        raise ValueError(f"Unsupported file type: {file.type}")

                    file_path = os.path.join(out_dir, os.path.normpath(path))
                    file_dir = os.path.dirname(file_path)
                    if not os.path.exists(file_dir):
                        os.makedirs(file_dir)

                    log.debug(f'Extracting file: {file_path}')
                    with open(file_path, 'wb+') as f:
                        f.write(data)
Example #30
def idenLib():
    # function sigs from the current binary
    func_bytes_addr = {}
    for addr, size in get_func_ranges():
        f_bytes = getOpcodes(addr, size)
        func_bytes_addr[f_bytes] = addr

    # load sigs
    func_sigs = {}
    ida_dir = ida_diskio.idadir("")
    symEx_dir = ida_dir + os.sep + "SymEx"
    if not os.path.isdir(symEx_dir):
        print("[!] There is no {} directory".format(symEx_dir))
    else:
        for file in files(symEx_dir):
            with open(file, 'rb') as ifile:
                sig = ifile.read()
                sig = zstd.decompress(sig).strip()
                sig = sig.split(b"\r\n")
                for line in sig:
                    # signature data is bytes, so split on a bytes literal
                    sig_opcodes, name = line.split(b" ")
                    func_sigs[sig_opcodes.strip()] = name.strip().decode()
    # apply sigs
    counter = 0
    for sig_opcodes, addr in func_bytes_addr.items():
        if sig_opcodes in func_sigs:
            func_name = func_sigs[sig_opcodes]
            current_name = ida_funcs.get_func_name(addr)
            if current_name == func_name:
                continue
            # make the name unique by appending a digit if needed
            digit = 1
            while func_name in get_names():
                func_name = func_name + str(digit)
                digit = digit + 1
            ida_name.set_name(addr, func_name, SN_NOCHECK)
            print("{}: {}".format(hex(addr), func_name))
            counter = counter + 1

    print("Applied to {} function(s)".format(counter))
Example #31
    def test_raw(self):
        DATA = b"abc def"
        self.assertEqual(DATA, zstd.decompress(zstd.compress(DATA)))
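As a quick extension of this round-trip (explicit levels appear in Examples #9 and #12), a hedged sketch exercising several compression levels:

import zstd

data = b"hello zstd " * 64
for level in (-1, 1, 20):  # negative levels require ZSTD >= 1.3.4
    assert data == zstd.decompress(zstd.compress(data, level))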