Example #1
    def parse(cls, data_in: BufferedIOBase, conf: Configuration, *args,
              **kwargs):
        super().parse(data_in, conf)
        data_in.seek(72, SEEK_CUR)
        n_vertices = int.from_bytes(data_in.read(4), 'little')
        data_in.seek(8, SEEK_CUR)
        n_faces = int.from_bytes(data_in.read(4), 'little')

        if n_vertices > 1000 or n_faces > 1000:
            if conf.ignore_warnings:
                warnings.warn(
                    f"Too much vertices or faces ({n_vertices} vertices, {n_faces} faces)."
                    f"It is most probably caused by an inaccuracy in my reverse engineering of the models format."
                )
            else:
                raise Models3DWarning(data_in.tell(), n_vertices, n_faces)

        data_in.seek(4, SEEK_CUR)
        n_bounding_box_info = int.from_bytes(
            data_in.read(2), 'little') + int.from_bytes(
                data_in.read(2), 'little') + int.from_bytes(
                    data_in.read(2), 'little')

        if conf.game in (G.CROC_2_PS1, G.CROC_2_DEMO_PS1,
                         G.CROC_2_DEMO_PS1_DUMMY):
            data_in.seek(2, SEEK_CUR)
        elif conf.game in (G.HARRY_POTTER_1_PS1, G.HARRY_POTTER_2_PS1):
            data_in.seek(6, SEEK_CUR)

        return cls(n_vertices, n_faces, n_bounding_box_info)
Example #2
    def parse(cls, data_in: BufferedIOBase, conf: Configuration, *args,
              **kwargs):
        fallback_data = cls.fallback_parse_data(data_in)
        size, start = super().parse(data_in, conf)
        if conf.game == G.CROC_2_DEMO_PS1_DUMMY:
            has_legacy_textures = False
            titles = None
        else:
            tpsx_flags = int.from_bytes(data_in.read(4), 'little')
            has_translated_titles = tpsx_flags & 16 != 0
            has_legacy_textures = tpsx_flags & 8 != 0
            has_title_and_demo_mode_data = tpsx_flags & 4 != 0
            if has_title_and_demo_mode_data:
                if has_translated_titles:
                    n_titles = int.from_bytes(data_in.read(4), 'little')
                    titles = [
                        data_in.read(48).strip(b'\0').decode('latin1')
                        for _ in range(n_titles)
                    ]
                else:
                    titles = [data_in.read(32).strip(b'\0').decode('latin1')]
                data_in.seek(2052, SEEK_CUR)
            else:
                titles = None
        texture_file = TextureFile.parse(
            data_in,
            conf,
            has_legacy_textures=has_legacy_textures,
            end=start + size)

        cls.check_size(size, start, data_in.tell())
        return cls(texture_file, titles, fallback_data)
Example #3
    def __init__(self, file_obj: BufferedIOBase,
                 key_map: KeyMap,
                 init=False, *,
                 portion=3/4):
        self.file = file_obj
        self.maps = [key_map]
        self._portion = portion

        # set the first and last pointers
        file_obj.seek(0)
        if init:
            # empty pointers
            self._first = None
            self._last = None
            # start fill tracking
            self._filled = 0
        else:
            # read pointers
            self._first = int.from_bytes(file_obj.read(32), 'little')
            self._last = int.from_bytes(file_obj.read(32), 'little')
            # get current fill
            self._filled = int.from_bytes(file_obj.read(32), 'little')
        # add offset for pointers
        self._pos = 64

        # initialize key cache
        self._keys = {}

        # track if currently expanding db
        self._expansion = None

        # set up iterable variable
        self._current = None
Example #4
def readtimes(f: io.BufferedIOBase) -> List[float]:
    b = f.read(4)
    numof_times = struct.unpack_from("<L", b, 0)[0]
    times = [0.0 for _ in range(numof_times)]
    bs = f.read(8 * numof_times)
    for i, _ in enumerate(times):
        times[i] = struct.unpack_from("<d", bs, 8 * i)[0]
    return times
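For clarity, a minimal round-trip sketch of the layout `readtimes` expects: a little-endian u32 count followed by that many little-endian f64 values (the sample values are arbitrary):

import io
import struct

# Pack three doubles behind a u32 count, then parse them back with readtimes.
values = [0.5, 1.25, 2.0]
payload = struct.pack("<L", len(values)) + struct.pack(f"<{len(values)}d", *values)
assert readtimes(io.BytesIO(payload)) == values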
Example #5
def parse_message_pairs(stream: BufferedIOBase):
    parser = intialize_parser(parse_message_pair)

    data = stream.read(1024)
    while data:
        for rr in parse(parser, data):
            yield rr
        data = stream.read(1024)
Example #6
def _read_data_frame(rfile: BufferedIOBase):
    frame = {}
    net_bytes = ord(rfile.read(1))
    frame["FIN"] = net_bytes >> 7
    frame["RSV1"] = (net_bytes & 0x40) >> 6
    frame["RSV2"] = (net_bytes & 0x20) >> 5
    frame["RSV3"] = (net_bytes & 0x10) >> 4
    frame["opcode"] = net_bytes & 0x0F

    if frame["RSV1"] != 0 or frame["RSV2"] != 0 or frame["RSV3"] != 0:
        raise FrameError(
            "Unsupported feature. RSV1, RSV2 or RSV3 has a non-zero value.",
            frame,
        )

    if not frame["opcode"] in OPCODE.values():
        raise FrameError("Unsupported opcode value.", frame)

    if frame["FIN"] == 0 and frame["opcode"] != OPCODE["continueation"]:
        raise FrameError(
            "FIN bit not set for a non-continueation frame.", frame
        )

    if frame["opcode"] in CONTROL_OPCODES and frame["FIN"] == 0:
        raise FrameError("FIN bit not set for a control frame.", frame)

    net_bytes = ord(rfile.read(1))
    mask_bit = net_bytes >> 7

    if mask_bit == 0:
        raise FrameError("Unmasked frame from client.", frame)

    length1 = net_bytes & 0x7F

    if frame["opcode"] in CONTROL_OPCODES and length1 > 125:
        raise FrameError("Control frame with invalid payload length.", frame)

    try:
        length = _read_payload_length(length1, rfile)
    except InvalidLengthError as error:
        raise FrameError(
            f"Invalid payload length of {error.length} bytes.", frame
        )

    masking_key = rfile.read(4)
    encoded_payload = rfile.read(length)
    frame["payload"] = _decode_payload(masking_key, encoded_payload)

    if frame["opcode"] == OPCODE["close"] and frame["payload"]:
        frame["status_code"] = int.from_bytes(
            frame["payload"][0:2], byteorder="big"
        )
        if length > 2:
            # /!\ may raise UnicodeError /!\
            frame["close_reason"] = frame["payload"][2:].decode()

    return frame
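`_decode_payload` is not shown in this excerpt; per RFC 6455 §5.3, client payloads are unmasked by XOR-ing each byte with the 4-byte masking key. A minimal sketch of such a helper (name and signature taken from the call above, body assumed):

def _decode_payload(masking_key: bytes, encoded_payload: bytes) -> bytes:
    # Octet i of the payload is XOR-ed with octet (i mod 4) of the masking key.
    return bytes(b ^ masking_key[i % 4] for i, b in enumerate(encoded_payload))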
Example #7
def _read_payload_length(payload_length1: int, rfile: BufferedIOBase):
    final_length = payload_length1
    if payload_length1 == 126:
        final_length = int.from_bytes(rfile.read(2), byteorder="big")
    elif payload_length1 == 127:
        final_length = int.from_bytes(rfile.read(8), byteorder="big")
        if final_length >> 63 == 1:
            raise InvalidLengthError(final_length)
    return final_length
Example #8
    def read_object(cls, file: BufferedIOBase):
        new_key = file.read(cls.key_size)

        obj_size = file.read(cls.key_size)
        obj_size = int.from_bytes(obj_size, 'little')

        new_data = file.read(obj_size)

        return cls(new_key, new_data)
Example #9
 def parse(cls, data_in: BufferedIOBase, conf: Configuration, *args,
           **kwargs):
     data_in.seek(4, SEEK_CUR)  # END section offset
     sampling_rate = int(
         round(
             ((int.from_bytes(data_in.read(2), 'little') * 44100) / 4096)))
     flags = DialoguesBGMsSoundFlags(
         int.from_bytes(data_in.read(2), 'little'))
     uk1 = data_in.read(4)
     size = int.from_bytes(data_in.read(4), 'little')
     return cls(sampling_rate, flags, uk1, size)
Example #10
def _extract_next_file(
        archive_file: io.BufferedIOBase) -> Iterator[Tuple[str, bytes]]:
    """Extracts the next available file from the archive.

  Reads the next available file header section and yields its filename and
  content in bytes as a tuple. Stops when there are no more available files in
  the provided archive_file.

  Args:
    archive_file: The archive file object, whose cursor is pointing to the
      next available file header section.

  Yields:
    The name and content of the next available file in the given archive file.

  Raises:
    RuntimeError: The archive_file is in an unknown format.
  """
    while True:
        header = archive_file.read(60)
        if not header:
            return
        elif len(header) < 60:
            raise RuntimeError('Invalid file header format.')

        # For the details of the file header format, see:
        # https://en.wikipedia.org/wiki/Ar_(Unix)#File_header
        # We only need the file name and the size values.
        name, _, _, _, _, size, end = struct.unpack('=16s12s6s6s8s10s2s',
                                                    header)
        if end != b'`\n':
            raise RuntimeError('Invalid file header format.')

        # Convert the bytes into more natural types.
        name = name.decode('ascii').strip()
        size = int(size, base=10)
        odd_size = size % 2 == 1

        # Handle the extended filename scheme.
        if name.startswith('#1/'):
            filename_size = int(name[3:])
            name = archive_file.read(filename_size).decode('utf-8').strip(
                ' \x00')
            size -= filename_size

        file_content = archive_file.read(size)
        # The file contents are always 2 byte aligned, and 1 byte is padded at the
        # end in case the size is odd.
        if odd_size:
            archive_file.read(1)

        yield (name, file_content)
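A hedged usage sketch, combining this generator with the `_check_archive_signature` helper from Example #31 below (the archive path is hypothetical):

with open('libexample.a', 'rb') as archive_file:  # hypothetical .a archive
    _check_archive_signature(archive_file)
    for name, content in _extract_next_file(archive_file):
        print(name, len(content))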
Example #11
 def parse(cls, data_in: BufferedIOBase, conf: Configuration, *args,
           **kwargs):
     sampling_rate = int.from_bytes(data_in.read(4), 'little')
     data_in.seek(
         2,
         SEEK_CUR)  # "Compressed" sampling rate, see SPSX's documentation
     volume_level = int.from_bytes(data_in.read(2), 'little')
     flags = SoundEffectsAmbientFlags(
         int.from_bytes(data_in.read(4), 'little'))
     uk1 = data_in.read(2)
     uk2 = data_in.read(2)
     size = int.from_bytes(data_in.read(4), 'little')
     return cls(sampling_rate, volume_level, flags, uk1, uk2, size)
Example #12
def extract(meta: io.BufferedIOBase, binFile: io.BufferedIOBase,
            rmdp: io.BufferedIOBase, filter, target):
    pc_bin = 0
    fmt = '>'
    array = binFile.read(1)
    if array[0] == 0:
        pc_bin = 1
        fmt = '<'
    file_version, num_dirs, num_files = struct.unpack(fmt + 'iii',
                                                      binFile.read(4 * 3))

    binFile.seek(0x9D)

    dirs = []
    full_names = []
    for i in range(num_dirs):
        _, parent, _, dirname, _, _, _ = _read(binFile, fmt + 'qqiqiqq')
        dirs.append((parent, dirname))

    r = []
    for i in range(num_files):
        _, dir_index, _, filename_offset, content_offset, content_length = _read(
            binFile, fmt + 'qqiqqq')
        _skip(binFile, 16)

        r.append((filename_offset, content_offset, content_length, dir_index))

    _skip(binFile, 44)

    data_start = binFile.tell()

    for parent, dirname_offset in dirs:
        if dirname_offset == -1:
            full_names.append(target)
        else:
            dirname = read_text(binFile, data_start + dirname_offset)
            parent = full_names[parent]
            full_names.append(os.path.join(parent, dirname))

    for i, (filename_offset, content_offset, content_length,
            dir_index) in enumerate(r):
        filename = read_text(binFile, data_start + filename_offset)
        print(i, '/', len(r), end='\r')
        dirname = full_names[dir_index]
        os.makedirs(dirname, exist_ok=True)
        full_name = os.path.join(dirname, filename)
        with open(full_name, 'wb') as outf:
            outf.write(copy_data(rmdp, content_offset, content_length))
Example #13
    def parse(cls, data_in: BufferedIOBase, conf: Configuration, *args,
              **kwargs):
        c1x, c1y, palette_info, c2x, c2y, flags, c3x, c3y, c4x, c4y = cls.struct.unpack(
            data_in.read(12))
        flags: TextureFlags = TextureFlags(flags)
        # Raw coordinates are contained in a 1024x1024, 512x1024 or 256x1024 space
        # (16-colors paletted, 256-colors paletted and non-paletted high color respectively)
        raw_coords = [(c1x, c1y), (c2x, c2y), (c3x, c3y), (c4x, c4y)]

        # Coordinates Mapping, needed to put the coordinates in the right order
        # (top-left, top-right, bottom-left then bottom-right)
        if raw_coords[0][0] > raw_coords[1][0]:
            if raw_coords[0][1] > raw_coords[2][1]:
                cm = 3, 2, 1, 0
            else:
                cm = 1, 0, 3, 2
        else:
            if raw_coords[0][1] > raw_coords[2][1]:
                cm = 2, 3, 0, 1
            else:
                cm = 0, 1, 2, 3

        palette_start = None
        if TextureFlags.IS_NOT_PALETTED not in flags:
            palette_start = ((palette_info & 0xFFC0) << 3) + (
                (palette_info & 0xF) << 5)

        # The top-left x coordinate of the 256-colors or high color textures needs to be corrected
        # 1024x1024 space -> 512x1024 or 256x1024 space respectively
        return cls(flags, raw_coords, cm, palette_start)
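The `cm` tuple is an index permutation over `raw_coords`; how it is applied is not shown in this excerpt, but a plausible sketch is simply indexing with it to recover the corners in (top-left, top-right, bottom-left, bottom-right) order:

# Hypothetical use of the mapping computed above:
top_left, top_right, bottom_left, bottom_right = (raw_coords[i] for i in cm)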
Example #14
    def parse(cls, data_in: BufferedIOBase, conf: Configuration, *args,
              **kwargs):
        n_unique_level_sfx: int = kwargs['n_unique_level_sfx']

        mapping = np.array([[ord(data_in.read(1)) for _ in range(16)]
                            for _ in range(n_unique_level_sfx)], np.uint8)
        return cls(mapping=mapping)
Example #15
 def _iter_file_chunk(cls, fobj: io.BufferedIOBase,
                      chunk_size: int) -> typing.Iterable[bytes]:
     while True:
         data = fobj.read(chunk_size)
         if len(data) == 0:
             break
         yield data
Example #16
 def read(self, io_in: BufferedIOBase, otherfields: Dict[str, Any]) -> Optional[Sig]:
     val = io_in.read(64)
     if len(val) == 0:
         return None
     elif len(val) != 64:
         raise ValueError('{}: not enough remaining'.format(self))
     return Sig(val)
Example #17
 def parse(cls, data_in: BufferedIOBase, conf: Configuration, *args,
           **kwargs):
     data_in.seek(4, SEEK_CUR)  # Group header offset
     n_sound_effects = int.from_bytes(data_in.read(4), 'little')
     data_in.seek(
         8, SEEK_CUR
     )  # End offset (4 bytes) | Sum of group VAGs' sizes (4 bytes)
     return cls(n_sound_effects=n_sound_effects)
Example #18
def extract(file: io.BufferedIOBase, target: io.TextIOBase):
    mn = file.read(2)

    if mn == b'\x95\x30':
        file.read(2)
    else:
        raise NotImplementedError(mn)

    while True:
        keylenb = file.read(4)
        if not keylenb:
            break
        keylen = struct.unpack('I', keylenb)[0]
        key = file.read(keylen).decode('ascii')
        vallen = struct.unpack('I', file.read(4))[0]
        val = file.read(vallen * 2).decode('utf-16le')
        # print(key, val)
        RES[key] = val
        # if key.startswith('opcon_doc_') and key.endswith('DESCRIPTION1'):
        #     if key in IMGS:
        #         target.write(
        #             f'<img src="./result/data_pc/uiresources/p7/images/4k/images/narrative/{IMGS[key]}.tex.png">\n')
        #         IMGS.pop(key)
        #     target.write(val)
        #     target.write('<div class="page-break"></div>')

    json.dump(RES, target, indent=4)
Example #19
 def read(self, io_in: BufferedIOBase, otherfields: Dict[str, Any]) -> int:
     binval = io_in.read()
     if len(binval) > self.maxbytes:
         raise ValueError('{} is too long for {}'.format(binval.hex(), self.name))
     if len(binval) > 0 and binval[0] == 0:
         raise ValueError('{} encoding is not minimal: {}'
                          .format(self.name, binval.hex()))
     # Pad with zeroes and convert as u64
     return struct.unpack_from('>Q', bytes(8 - len(binval)) + binval)[0]
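The matching writer is not part of this excerpt; a sketch of the inverse operation under the same assumptions (big-endian, leading zero bytes stripped so the reader's minimal-encoding check passes):

import struct

def encode_truncated_u64(value: int) -> bytes:
    # Shortest big-endian form: zero encodes as zero bytes.
    return struct.pack('>Q', value).lstrip(b'\x00')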
Example #20
def reencrypt(ctx: click.Context, keyfile: BufferedIOBase) -> None:
    """
    Re-encrypt share with receiver key.

    Decrypt a user's share with their private key KEYFILE and re-encrypt it with the receiver's
    public key.  The re-encrypted share is written to {datadir}/reencrypted/{username}.
    """
    pvss = pvss_from_datadir(ctx.obj)
    reencrypted_share = pvss.reencrypt_share(keyfile.read())
    write_public_random(ctx.obj / "reencrypted", reencrypted_share)
Example #21
    def parse(cls, data_in: BufferedIOBase, conf: Configuration, *args,
              **kwargs):
        super().parse(data_in, conf)
        has_legacy_textures: bool = kwargs['has_legacy_textures']
        end: int = kwargs['end']
        rle = conf.game in (G.CROC_2_PS1, G.CROC_2_DEMO_PS1,
                            G.HARRY_POTTER_1_PS1, G.HARRY_POTTER_2_PS1)

        textures: List[TextureData] = []
        n_textures: int = int.from_bytes(data_in.read(4), 'little')
        n_rows: int = int.from_bytes(data_in.read(4), 'little')

        if n_textures > 4000 or not 0 <= n_rows <= 4:
            if conf.ignore_warnings:
                warnings.warn(
                    f"Too much textures ({n_textures}, or incorrect row count {n_rows}."
                    f"It is most probably caused by an inaccuracy in my reverse engineering of the textures format."
                )
            else:
                raise TexturesWarning(data_in.tell(), n_textures, n_rows)

        # In Harry Potter, the last 16 textures are empty (full of 00 bytes)
        n_stored_textures = n_textures - 16 if conf.game in (
            G.HARRY_POTTER_1_PS1, G.HARRY_POTTER_2_PS1) else n_textures
        for texture_id in range(n_stored_textures):
            textures.append(TextureData.parse(data_in, conf))
        if conf.game in (G.HARRY_POTTER_1_PS1, G.HARRY_POTTER_2_PS1):
            data_in.seek(192, SEEK_CUR)  # 16 textures x 12 bytes
        n_idk_yet_1 = int.from_bytes(data_in.read(4), 'little')
        n_idk_yet_2 = int.from_bytes(data_in.read(4), 'little')
        data_in.seek(n_idk_yet_1 * cls.image_header_size, SEEK_CUR)

        if has_legacy_textures:  # Patch for legacy textures, see Textures documentation
            data_in.seek(15360, SEEK_CUR)
        if rle:
            raw_textures = BytesIO(cls.image_bytes_size * b'\x00')
            while data_in.tell() < end:
                run = int.from_bytes(data_in.read(cls.rle_size),
                                     'little',
                                     signed=True)
                if run < 0:
                    raw_textures.write(abs(run) * data_in.read(cls.rle_size))
                elif run > 0:
                    raw_textures.write(data_in.read(cls.rle_size * run))
                else:
                    raise ZeroRunLengthError(data_in.tell())
            raw_textures.seek(0)
            textures_data = raw_textures.read()
            raw_textures.close()
            if conf.game == G.CROC_2_DEMO_PS1:  # Patch for Croc 2 Demo (non-dummy) last end offset error
                data_in.seek(-2, SEEK_CUR)
        else:
            image_size = n_rows * (cls.image_bytes_size // 4)
            padding_size = cls.image_bytes_size - image_size
            textures_data = data_in.read(image_size) + padding_size * b'\x00'
        legacy_alpha = conf.game in (G.CROC_2_DEMO_PS1,
                                     G.CROC_2_DEMO_PS1_DUMMY)
        return cls(n_rows, textures_data, legacy_alpha, textures)
Example #22
def reconstruct(ctx: click.Context, keyfile: BufferedIOBase,
                secretfile: Path) -> None:
    """
    Reconstruct the secret.

    Decrypt re-encrypted shares with the receiver's private key KEYFILE and
    join the shares to reconstruct the secret.  It is written into SECRETFILE.
    """
    pvss = pvss_from_datadir(ctx.obj)
    secret = pvss.reconstruct_secret(keyfile.read())
    write_private(secretfile, secret)
Example #23
    def __fill_buf(self, f_in: BufferedIOBase) -> bool:
        next_bytes = f_in.read(self.read_len)
        if len(next_bytes) == 0:
            return False

        cur_process = f_in.tell() * 100 // self.length
        if cur_process != self.process:
            self.process = cur_process
            print(str(cur_process) + "%")
        self.buf.extend(next_bytes)
        return True
Example #24
    def parse(cls, data_in: BufferedIOBase, conf: Configuration, *args,
              **kwargs):
        fallback_data = cls.fallback_parse_data(data_in)
        size, start = super().parse(data_in, conf)
        n_zones = int.from_bytes(data_in.read(4), 'little')
        n_idk1 = int.from_bytes(data_in.read(4), 'little')
        idk1 = [data_in.read(32) for _ in range(n_idk1)]
        n_chunks_per_zone = []
        for _ in range(n_zones):
            data_in.seek(2, SEEK_CUR)
            n_chunks_per_zone.append(data_in.read(1)[0])
            data_in.seek(9, SEEK_CUR)
        chunks_zones = []
        for n_chunks in n_chunks_per_zone:
            chunks_zones.append([
                int.from_bytes(data_in.read(2), 'little')
                for _ in range(n_chunks)
            ])

        cls.check_size(size, start, data_in.tell())
        return cls(idk1, chunks_zones, fallback_data)
Example #25
def _read_to_memory(stream: io.BufferedIOBase,
                    bytes_to_read: int) -> Optional[bytes]:
    data = io.BytesIO()
    while bytes_to_read > 0:
        chunk = stream.read(bytes_to_read)
        if len(chunk) == 0:
            return None

        data.write(chunk)
        bytes_to_read = bytes_to_read - len(chunk)

    return data.getvalue()
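Unlike a bare `stream.read(n)`, this helper loops until the full count is buffered and returns None on a short read; a quick illustration:

import io

assert _read_to_memory(io.BytesIO(b'abcdef'), 4) == b'abcd'
assert _read_to_memory(io.BytesIO(b'ab'), 4) is None  # EOF before 4 bytes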
Example #26
    def parse(cls, data_in: BufferedIOBase, conf: Configuration, *args, **kwargs):
        fallback_data = super().fallback_parse_data(data_in)
        size, start = super().parse(data_in, conf)
        idk1 = data_in.read(4)
        n_idk_unique_textures = int.from_bytes(data_in.read(4), 'little')

        if conf.game != G.CROC_2_DEMO_PS1_DUMMY:
            data_in.seek(2048, SEEK_CUR)
        else:
            data_in.seek(2052, SEEK_CUR)

        n_models_3d = int.from_bytes(data_in.read(4), 'little')
        models_3d = [Model3DData.parse(data_in, conf) for _ in range(n_models_3d)]

        n_animations = int.from_bytes(data_in.read(4), 'little')
        animations = [AnimationData.parse(data_in, conf) for _ in range(n_animations)]

        if conf.game in (G.CROC_2_PS1, G.CROC_2_DEMO_PS1):
            n_dpsx_legacy_textures = int.from_bytes(data_in.read(4), 'little')
            data_in.seek(n_dpsx_legacy_textures * 3072, SEEK_CUR)

        n_scripts = int.from_bytes(data_in.read(4), 'little')
        scripts = [ScriptData.parse(data_in, conf) for _ in range(n_scripts)]

        level_file = LevelFile.parse(data_in, conf)

        # FIXME End of Croc 2 & Croc 2 Demo Dummies' level files aren't reversed yet
        if conf.game not in (G.CROC_2_PS1, G.CROC_2_DEMO_PS1_DUMMY):
            cls.check_size(size, start, data_in.tell())
        return cls(models_3d, animations, scripts, level_file, fallback_data)
Example #27
 def read(self, buffer: io.BufferedIOBase, bit_count: int, fmt: str):
     max_bit_count = 8 * struct.calcsize(fmt)
     if self._field == "" or fmt != self._fmt or self._offset + bit_count > max_bit_count:
         # Consume (and reverse) new bit field. Any previous bit field is discarded.
         (integer, ) = struct.unpack(fmt, buffer.read(struct.calcsize(fmt)))
         self._field = format(integer, f"0{max_bit_count}b")[::-1]
         self._fmt = fmt
     binary_str = self._field[self._offset:self._offset + bit_count][::-1]
     self._offset += bit_count
     if self._offset % max_bit_count == 0:  # read new field next time
         self._field = ""
         self._offset = 0
     return int(binary_str, 2)
Example #28
 def read(io_in: BufferedIOBase, otherfields: Dict[str, Any] = {}) -> Optional[int]:
     "Returns value, or None on EOF"
     b = io_in.read(1)
     if len(b) == 0:
         return None
     if b[0] < 253:
         return int(b[0])
     elif b[0] == 253:
         return try_unpack('BigSize', io_in, '>H', empty_ok=False)
     elif b[0] == 254:
         return try_unpack('BigSize', io_in, '>I', empty_ok=False)
     else:
         return try_unpack('BigSize', io_in, '>Q', empty_ok=False)
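The corresponding encoder is not shown here; a minimal sketch mirroring the thresholds above (values below 253 as one byte, then 0xFD/0xFE/0xFF prefixes for 2-, 4- and 8-byte big-endian forms):

import struct

def write_bigsize(value: int) -> bytes:
    # Shortest form that the reader above will round-trip.
    if value < 253:
        return bytes([value])
    elif value < 0x1_0000:
        return b'\xfd' + struct.pack('>H', value)
    elif value < 0x1_0000_0000:
        return b'\xfe' + struct.pack('>I', value)
    else:
        return b'\xff' + struct.pack('>Q', value)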
Example #29
    def parse(cls, data_in: BufferedIOBase, conf: Configuration, *args,
              **kwargs):
        super().parse(data_in, conf)
        n_flags: int = int.from_bytes(data_in.read(4), 'little')
        data_in.seek(4, SEEK_CUR)
        n_total_frames: int = int.from_bytes(data_in.read(4), 'little')
        has_additional_frame_data_value = int.from_bytes(
            data_in.read(4), 'little')
        has_additional_data: bool = has_additional_frame_data_value == 0
        n_stored_frames = 0
        if conf.game in (G.CROC_2_PS1, G.CROC_2_DEMO_PS1,
                         G.CROC_2_DEMO_PS1_DUMMY):
            n_inter_frames = int.from_bytes(data_in.read(4), 'little')
            if n_inter_frames != 0:
                n_stored_frames = n_total_frames
            data_in.seek(4, SEEK_CUR)
        else:  # Harry Potter 1 & 2
            data_in.seek(8, SEEK_CUR)
            n_inter_frames = 0
        n_vertex_groups: int = int.from_bytes(data_in.read(4), 'little')
        data_in.seek(4, SEEK_CUR)

        if conf.game in (G.HARRY_POTTER_1_PS1, G.HARRY_POTTER_2_PS1):
            n_stored_frames = int.from_bytes(data_in.read(4), 'little')
            data_in.seek(12, SEEK_CUR)

        flags = [data_in.read(4) for _ in range(n_flags)]
        if has_additional_data:
            data_in.seek(8 * n_total_frames, SEEK_CUR)
        data_in.seek(4 * n_total_frames, SEEK_CUR)  # Total frames info
        data_in.seek(n_inter_frames * cls.inter_frames_header_size,
                     SEEK_CUR)  # Inter-frames header
        if conf.game in (G.HARRY_POTTER_1_PS1,
                         G.HARRY_POTTER_2_PS1) or n_inter_frames != 0:
            data_in.seek(4 * n_stored_frames, SEEK_CUR)  # Stored frames info

        if n_stored_frames == 0 or n_inter_frames != 0:  # Rotation matrices
            old_animation_format = True
            n_stored_frames = n_total_frames
        else:  # Unit quaternions
            old_animation_format = False

        if n_total_frames > 500 or n_total_frames == 0:
            if conf.ignore_warnings:
                warnings.warn(
                    f"Too much frames in animation (or no frame): {n_total_frames} frames."
                    f"It is most probably caused by an inaccuracy in my reverse engineering of the textures format."
                )
            else:
                raise AnimationsWarning(data_in.tell(), n_total_frames)
        return cls(n_total_frames, n_stored_frames, n_vertex_groups, n_flags,
                   has_additional_data, flags, old_animation_format,
                   n_inter_frames)
Example #30
def try_unpack(name: str,
               io_out: BufferedIOBase,
               structfmt: str,
               empty_ok: bool) -> Optional[int]:
    """Unpack a single value using struct.unpack.

If empty_ok, returns None on EOF; otherwise a short read raises ValueError."""
    b = io_out.read(struct.calcsize(structfmt))
    if len(b) == 0 and empty_ok:
        return None
    elif len(b) < struct.calcsize(structfmt):
        raise ValueError("{}: not enough bytes", name)

    return struct.unpack(structfmt, b)[0]
Example #31
def _check_archive_signature(archive_file: io.BufferedIOBase) -> None:
    """Checks if the file has the correct archive header signature.

  The cursor is moved to the first available file header section after
  successfully checking the signature.

  Args:
    archive_file: The archive file object pointing at its beginning.

  Raises:
    RuntimeError: The archive signature is invalid.
  """
    signature = archive_file.read(8)
    if signature != b'!<arch>\n':
        raise RuntimeError('Invalid archive file format.')
Example #32
 def _pipe_writer(self, source: io.BufferedIOBase) -> None:
     while self._process:
         # arbitrarily large read size
         data = source.read(8192)
         if not data:
             self._process.terminate()
             return
         try:
             if self._stdin is not None:
                 self._stdin.write(data)
         except Exception:
             _log.debug('Write error for %s, this is probably not a problem', self, exc_info=True)
             # at this point the source data is either exhausted or the process is fubar
             self._process.terminate()
             return
Example #33
def read_content(f: io.BufferedIOBase, count=None, batch_size=_DEFAULT_BATCH_SIZE):
    while True:
        values_to_read = batch_size if count is None else min(count, batch_size)
        b = f.read(values_to_read * _VALUE_SIZE)
        if not b:
            return

        arr = _make_array()
        arr.frombytes(b)
        yield from arr
        if count is not None:
            count -= len(arr)

        if count == 0:
            return
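`_make_array` and `_VALUE_SIZE` are not included in this excerpt; one possible pairing, assumed purely for illustration, is 8-byte doubles read into an `array.array`:

import array

_VALUE_SIZE = 8  # assumed: width of one stored value in bytes

def _make_array() -> array.array:
    # Assumed helper: an empty array whose item size matches _VALUE_SIZE.
    return array.array('d')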
Example #34
	def __init__(self, stream: io.BufferedIOBase, new: Version = None):
		"""Parse a MIX from `stream`, which must be a buffered file object.
		
		If `new` is not None, initialize an empty MIX of this version instead.
		MixParseError is raised on parsing errors.
		"""
		
		# Initialize mandatory attributes
		self._dirty = False
		self._stream = None
		self._open = []
		
		# If stream is, for example, a raw I/O object, files could be destroyed
		# without ever raising an error, so check this.
		if not isinstance(stream, io.BufferedIOBase):
			raise TypeError("`stream` must be an instance of io.BufferedIOBase")
		
		if not stream.readable():
			raise ValueError("`stream` must be readable")
		
		if not stream.seekable():
			raise ValueError("`stream` must be seekable")
		
		if new is not None:
			# Start empty (new file)
			if type(new) is not Version:
				raise TypeError("`new` must be a Version enumeration member or None")
			if new is Version.RG:
				raise NotImplementedError("RG MIX files are not yet supported")
			self._stream = stream
			self._index = {}
			self._contents = []
			self._version = new
			self._flags = 0
			return
		
		# Parse an existing file
		filesize = stream.seek(0, io.SEEK_END)
		if filesize <= 6:
			raise MixParseError("File too small")
		stream.seek(0)
		
		first4 = stream.read(4)
		if first4 == b"MIX1":
			raise NotImplementedError("RG MIX files are not yet supported")
		elif first4[:2] == b"\x00\x00":
			# It seems we have a RA or TS MIX so check the flags
			flags = int.from_bytes(first4[2:], "little")
			if flags > 3:
				raise MixParseError("Unsupported properties")
			if flags & 2:
				raise NotImplementedError("Encrypted MIX files are not yet supported")
			
			# FIXME HERE: 80 bytes of westwood key_source if encrypted,
			#             to create a 56 byte long blowfish key from it.
			#
			#             They are followed by a number of 8 byte blocks,
			#             the first of them decrypting to filecount and bodysize.
			
			# Encrypted TS MIXes have a key.ini we can check for later,
			# so at this point assume Version.TS only if unencrypted.
			# Stock RA MIXes seem to be always encrypted.
			version = Version.TS
			
			# RA/TS MIXes hold their filecount after the flags,
			# whilst for TD MIXes their first two bytes are the filecount.
			filecount = int.from_bytes(stream.read(2), "little")
		else:
			version = Version.TD
			flags = 0
			filecount = int.from_bytes(first4[:2], "little")
			stream.seek(2)
			
		# From here it's the same for every unencrypted MIX
		bodysize    = int.from_bytes(stream.read(4), "little")
		indexoffset = stream.tell()
		indexsize   = filecount * 12
		bodyoffset  = indexoffset + indexsize

		# Check if data is sane
		# FIXME: Checksummed MIXes have 20 additional bytes after the body.
		if filesize - bodyoffset != bodysize:
			raise MixParseError("Incorrect filesize or invalid header")

		# OK, time to read the index
		index = {}
		for key, offset, size in struct.iter_unpack("<LLL", stream.read(indexsize)):
			offset += bodyoffset
			
			if offset + size > filesize:
				raise MixParseError("Content extends beyond end of file")

			index[key] = _MixNode(key, offset, size, size, None)

		if len(index) != filecount:
			raise MixParseError("Duplicate key")

		# Now read the names
		# TD/RA: 1422054725; TS: 913179935
		for dbkey in (1422054725, 913179935):
			if dbkey in index:
				stream.seek(index[dbkey].offset)
				header = stream.read(32)

				if header != b"XCC by Olaf van der Spek\x1a\x04\x17'\x10\x19\x80\x00":
					continue

				dbsize  = int.from_bytes(stream.read(4), "little")  # Total filesize

				if dbsize != index[dbkey].size or dbsize > 16777216:
					raise MixParseError("Invalid name table")

				# Skip four bytes for XCC type; 0 for LMD, 2 for XIF
				# Skip four bytes for DB version; Always zero
				stream.seek(8, io.SEEK_CUR)
				gameid = int.from_bytes(stream.read(4), "little")  # XCC Game ID
				
				# XCC saves alias numbers, so converting them
				# to `Version` is not straightforward.
				# FIXME: Check if Dune games and Nox also use MIX files
				if gameid == 0:
					if version is not Version.TD:
						continue
				elif gameid == 1:
					version = Version.RA
				elif 2 <= gameid <= 6 or gameid == 15:
					version = Version.TS
				else:
					continue
				
				namecount = int.from_bytes(stream.read(4), "little")
				bodysize  = dbsize - 53  # Size - Header - Last byte
				namelist  = stream.read(bodysize).split(b"\x00") if bodysize else []
				
				if len(namelist) != namecount:
					raise MixParseError("Invalid name table")
				
				# Remove Database from index
				del index[dbkey]
				
				# Add names to index
				names = False
				for name in namelist:
					name = name.decode(ENCODING, "ignore")
					key = genkey(name, version)
					if key in index:
						index[key].name = name
						names = True
				
				# XCC sometimes puts two Databases in a file by mistake,
				# so if no names were found, give it another try
				if names: break

		# Create a sorted list of all contents
		contents = sorted(index.values(), key=lambda node: node.offset)

		# Calculate alloc values
		# This is the size up to which a file may grow without needing a move
		for i in range(len(contents) - 1):
			contents[i].alloc = contents[i+1].offset - contents[i].offset

			if contents[i].alloc < contents[i].size:
				raise MixParseError("Overlapping file boundaries")

		# Populate the object
		self._stream = stream
		self._version = version
		self._index = index
		self._contents = contents
		self._flags = flags