Exemple #1
0
    def __init__(self, file_obj: BufferedIOBase,
                 key_map: KeyMap,
                 init=False, *,
                 portion=3/4):
        """Bind this object to an open binary file and load or create its header.

        Args:
            file_obj: open binary stream holding the database contents.
            key_map: initial key map, stored as the first entry of ``self.maps``.
            init: if True, start empty (no first/last pointers, zero fill)
                instead of reading the existing header from ``file_obj``.
            portion: keyword-only fill-ratio threshold kept in ``self._portion``
                (presumably used to trigger expansion — not visible in this block).
        """
        self.file = file_obj
        self.maps = [key_map]
        self._portion = portion

        # set the first and last pointers
        file_obj.seek(0)
        if init:
            # empty pointers
            self._first = None
            self._last = None
            # start fill tracking
            self._filled = 0
        else:
            # read pointers: three little-endian integers of 32 BYTES each
            self._first = int.from_bytes(file_obj.read(32), 'little')
            self._last = int.from_bytes(file_obj.read(32), 'little')
            # get current fill
            self._filled = int.from_bytes(file_obj.read(32), 'little')
        # add offset for pointers
        # NOTE(review): the header above spans 3 * 32 = 96 bytes but _pos is
        # set to 64 — confirm this offset is intentional.
        self._pos = 64

        # initialize key cache
        self._keys = {}

        # track if currently expanding db
        self._expansion = None

        # set up iterable variable
        self._current = None
Exemple #2
0
    def unpack(self, buffer: io.BufferedIOBase, bounding_box_has_unknown: bool = None):
        """Unpack one mesh record from `buffer`.

        Reads the fixed header via `self.STRUCT`, then follows the offsets it
        contains to read the bounding box, bone indices, face-set indices and
        vertex-buffer indices. The stream is left positioned right after the
        header so the caller can unpack the next record.

        Args:
            buffer: binary stream positioned at the start of the mesh struct.
            bounding_box_has_unknown: if truthy, read the bounding box as
                `BoundingBoxWithUnknown` instead of plain `BoundingBox`.
        """
        data = self.STRUCT.unpack(buffer)
        # Remember where the header ended before chasing offsets.
        mesh_offset = buffer.tell()

        bounding_box_offset = data.pop("__bounding_box_offset")
        if bounding_box_offset != 0:
            buffer.seek(bounding_box_offset)
            self.bounding_box = BoundingBoxWithUnknown(buffer) if bounding_box_has_unknown else BoundingBox(buffer)
        else:
            # An offset of zero means this mesh has no bounding box.
            self.bounding_box = None

        buffer.seek(data.pop("__bone_offset"))
        bone_count = data.pop("__bone_count")
        self.bone_indices = list(unpack_from_buffer(buffer, f"<{bone_count}i",))

        buffer.seek(data.pop("__face_set_offset"))
        face_set_count = data.pop("__face_set_count")
        self._face_set_indices = list(unpack_from_buffer(buffer, f"<{face_set_count}i"))

        buffer.seek(data.pop("__vertex_buffer_offset"))
        vertex_count = data.pop("__vertex_buffer_count")
        self._vertex_buffer_indices = list(unpack_from_buffer(buffer, f"<{vertex_count}i"))

        # Restore the position right after the mesh header.
        buffer.seek(mesh_offset)
        self.set(**data)
Exemple #3
0
    def unpack(
        self,
        buffer: io.BufferedIOBase,
        unicode: bool = None,
        version: Version = None,
        gx_lists: list[GXList] = None,
        gx_list_indices: dict[int, int] = None,
    ):
        """Unpack one material record from `buffer`.

        Reads the material struct, resolves the name and MTD path strings from
        their offsets, and resolves (or lazily parses) the material's GX list.

        Args:
            buffer: binary stream positioned at the material struct.
            unicode: True for UTF-16-LE strings, False for Shift-JIS.
            version: format version forwarded to `GXList`.
            gx_lists: shared list of parsed `GXList` instances (appended to).
            gx_list_indices: shared map of GX file offset -> index in
                `gx_lists`, used to deduplicate shared GX lists.

        Raises:
            ValueError: if any of the keyword arguments is missing.
        """
        if any(var is None
               for var in (unicode, version, gx_lists, gx_list_indices)):
            raise ValueError(
                "Not all required keywords were passed to `Material.unpack()`."
            )

        data = self.STRUCT.unpack(buffer)
        encoding = "utf-16-le" if unicode else "shift_jis_2004"
        self.name = read_chars_from_buffer(buffer,
                                           offset=data.pop("__name_offset"),
                                           encoding=encoding)
        self.mtd_path = read_chars_from_buffer(
            buffer, offset=data.pop("__mtd_path_offset"), encoding=encoding)
        gx_offset = data.pop("__gx_offset")
        if gx_offset == 0:
            # No GX list attached to this material.
            self.gx_index = -1
        elif gx_offset in gx_list_indices:
            # GX list already parsed at this offset: reuse its index.
            self.gx_index = gx_list_indices[gx_offset]
        else:
            # First time we see this offset: parse the GX list in place,
            # then restore the stream position.
            gx_list_indices[gx_offset] = len(gx_lists)
            material_offset = buffer.tell()
            buffer.seek(gx_offset)
            gx_lists.append(GXList(buffer, version))
            buffer.seek(material_offset)
        self.set(**data)
Exemple #4
0
    def parse(cls, data_in: BufferedIOBase, conf: Configuration, *args,
              **kwargs):
        """Parse a 3D model header: vertex/face counts and bounding box info.

        Implausibly large counts are treated as a reverse-engineering error:
        warned about when `conf.ignore_warnings` is set, raised otherwise.
        """
        super().parse(data_in, conf)
        data_in.seek(72, SEEK_CUR)  # skip 72 header bytes of unknown content
        n_vertices = int.from_bytes(data_in.read(4), 'little')
        data_in.seek(8, SEEK_CUR)
        n_faces = int.from_bytes(data_in.read(4), 'little')

        if n_vertices > 1000 or n_faces > 1000:
            if conf.ignore_warnings:
                warnings.warn(
                    f"Too much vertices or faces ({n_vertices} vertices, {n_faces} faces)."
                    f"It is most probably caused by an inaccuracy in my reverse engineering of the models format."
                )
            else:
                raise Models3DWarning(data_in.tell(), n_vertices, n_faces)

        data_in.seek(4, SEEK_CUR)
        # Bounding box info count is the sum of three 16-bit counters.
        n_bounding_box_info = int.from_bytes(
            data_in.read(2), 'little') + int.from_bytes(
                data_in.read(2), 'little') + int.from_bytes(
                    data_in.read(2), 'little')

        # Game-specific trailing padding.
        if conf.game in (G.CROC_2_PS1, G.CROC_2_DEMO_PS1,
                         G.CROC_2_DEMO_PS1_DUMMY):
            data_in.seek(2, SEEK_CUR)
        elif conf.game in (G.HARRY_POTTER_1_PS1, G.HARRY_POTTER_2_PS1):
            data_in.seek(6, SEEK_CUR)

        return cls(n_vertices, n_faces, n_bounding_box_info)
    def parse(cls, data_in: BufferedIOBase, conf: Configuration, *args,
              **kwargs):
        """Parse a TPSX (texture) file section.

        Reads the TPSX flag word (absent in the Croc 2 demo dummy build),
        optional level titles, then delegates to `TextureFile.parse`.
        """
        fallback_data = cls.fallback_parse_data(data_in)
        size, start = super().parse(data_in, conf)
        if conf.game == G.CROC_2_DEMO_PS1_DUMMY:
            # This build has no flags word, no titles and no legacy textures.
            has_legacy_textures = False
            titles = None
        else:
            tpsx_flags = int.from_bytes(data_in.read(4), 'little')
            has_translated_titles = tpsx_flags & 16 != 0
            has_legacy_textures = tpsx_flags & 8 != 0
            has_title_and_demo_mode_data = tpsx_flags & 4 != 0
            if has_title_and_demo_mode_data:
                if has_translated_titles:
                    # One 48-byte null-padded title per language.
                    n_titles = int.from_bytes(data_in.read(4), 'little')
                    titles = [
                        data_in.read(48).strip(b'\0').decode('latin1')
                        for _ in range(n_titles)
                    ]
                else:
                    # Single 32-byte null-padded title.
                    titles = [data_in.read(32).strip(b'\0').decode('latin1')]
                # Skip demo-mode data.
                data_in.seek(2052, SEEK_CUR)
            else:
                titles = None
        texture_file = TextureFile.parse(
            data_in,
            conf,
            has_legacy_textures=has_legacy_textures,
            end=start + size)

        cls.check_size(size, start, data_in.tell())
        return cls(texture_file, titles, fallback_data)
 def parse(cls, data_in: BufferedIOBase, conf: Configuration, *args,
           **kwargs):
     """Read a sound-effects group header and return a new instance.

     Only the sound-effect count is kept; surrounding header fields
     (group header offset, end offset, sum of group VAG sizes) are skipped.
     """
     # Group header offset (4 bytes).
     data_in.seek(4, SEEK_CUR)
     count = int.from_bytes(data_in.read(4), 'little')
     # End offset (4 bytes) | Sum of group VAGs' sizes (4 bytes).
     data_in.seek(8, SEEK_CUR)
     return cls(n_sound_effects=count)
Exemple #7
0
 def parse(cls, data_in: BufferedIOBase, conf: Configuration, *args,
           **kwargs):
     """Parse a dialogue/BGM sound descriptor.

     The stored sampling rate is scaled by 4096/44100; it is converted
     back to Hz here.
     """
     data_in.seek(4, SEEK_CUR)  # END section offset
     raw_rate = int.from_bytes(data_in.read(2), 'little')
     sampling_rate = int(round((raw_rate * 44100) / 4096))
     flags = DialoguesBGMsSoundFlags(
         int.from_bytes(data_in.read(2), 'little'))
     uk1 = data_in.read(4)  # unknown 4 bytes, kept verbatim
     size = int.from_bytes(data_in.read(4), 'little')
     return cls(sampling_rate, flags, uk1, size)
def content_length(f: io.BufferedIOBase, preserve_pos=True):
    """Return the number of `_VALUE_SIZE`-byte values stored in `f`.

    When `preserve_pos` is true, the stream position is restored before
    returning; otherwise the stream is left at EOF.
    """
    saved = f.tell() if preserve_pos else None

    f.seek(0, io.SEEK_END)
    count = f.tell() // _VALUE_SIZE

    if saved is not None:
        f.seek(saved)

    return count
Exemple #9
0
    def unpack(self, buffer: io.BufferedIOBase):
        """Unpack a vertex buffer layout and its members.

        Follows the member-array offset from the header, builds one
        `LayoutMember` per entry while accumulating each member's byte offset
        within the vertex struct, then restores the stream position to just
        after the header.
        """
        data = self.STRUCT.unpack(buffer)
        # Position right after the layout header, restored before returning.
        layout_offset = buffer.tell()

        buffer.seek(data.pop("__member_offset"))
        struct_offset = 0
        self.members = []
        for _ in range(data.pop("__member_count")):
            # Each member is told its running byte offset within the vertex.
            member = LayoutMember(buffer, dict(struct_offset=struct_offset))
            self.members.append(member)
            struct_offset += member.layout_type.size()
        buffer.seek(layout_offset)
Exemple #10
0
 def parse(cls, data_in: BufferedIOBase, conf: Configuration, *args,
           **kwargs):
     """Parse an ambient sound-effect descriptor."""
     sampling_rate = int.from_bytes(data_in.read(4), 'little')
     # "Compressed" sampling rate, see SPSX's documentation.
     data_in.seek(2, SEEK_CUR)
     volume_level = int.from_bytes(data_in.read(2), 'little')
     flags = SoundEffectsAmbientFlags(
         int.from_bytes(data_in.read(4), 'little'))
     # Two unknown 2-byte fields, kept verbatim.
     uk1 = data_in.read(2)
     uk2 = data_in.read(2)
     size = int.from_bytes(data_in.read(4), 'little')
     return cls(sampling_rate, volume_level, flags, uk1, uk2, size)
Exemple #11
0
def extract(meta: io.BufferedIOBase, binFile: io.BufferedIOBase,
            rmdp: io.BufferedIOBase, filter, target):
    """Extract every file described in `binFile` from the `rmdp` archive
    into the `target` directory, recreating the directory tree.

    Args:
        meta: metadata stream (unused in this block — TODO confirm).
        binFile: table-of-contents stream (directory and file records).
        rmdp: archive stream holding the actual file contents.
        filter: unused in this block (shadows the builtin) — TODO confirm.
        target: root output directory.
    """
    pc_bin = 0
    fmt = '>'
    # First byte selects endianness: 0 means PC (little-endian) layout.
    array = binFile.read(1)
    if array[0] == 0:
        pc_bin = 1
        fmt = '<'
    file_version, num_dirs, num_files = struct.unpack(fmt + 'iii',
                                                      binFile.read(4 * 3))

    # Directory records start at fixed offset 0x9D.
    binFile.seek(0x9D)

    dirs = []
    full_names = []
    for i in range(num_dirs):
        # Record layout: _, parent index, _, name offset, _, _, _
        _, parent, _, dirname, _, _, _ = _read(binFile, fmt + 'qqiqiqq')
        dirs.append((parent, dirname))

    r = []
    for i in range(num_files):
        _, dir_index, _, filename_offset, content_offset, content_length = _read(
            binFile, fmt + 'qqiqqq')
        _skip(binFile, 16)

        r.append((filename_offset, content_offset, content_length, dir_index))

    _skip(binFile, 44)

    # Name strings are stored relative to this position.
    data_start = binFile.tell()

    # Resolve directory names; parents always precede children, so the
    # parent's full name is already in full_names when a child is seen.
    for parent, dirname_offset in dirs:
        if dirname_offset == -1:
            # Root directory maps to the extraction target.
            full_names.append(target)
        else:
            dirname = read_text(binFile, data_start + dirname_offset)
            parent = full_names[parent]
            full_names.append(os.path.join(parent, dirname))

    for i, (filename_offset, content_offset, content_length,
            dir_index) in enumerate(r):
        filename = read_text(binFile, data_start + filename_offset)
        print(i, '/', len(r), end='\r')
        dirname = full_names[dir_index]
        os.makedirs(dirname, exist_ok=True)
        full_name = os.path.join(dirname, filename)
        with open(full_name, 'wb') as outf:
            outf.write(copy_data(rmdp, content_offset, content_length))
    def parse(cls, data_in: BufferedIOBase, conf: Configuration, *args,
              **kwargs):
        """Parse a texture file: texture descriptors plus raw image data.

        Keyword args (required):
            has_legacy_textures: whether a 15360-byte legacy-texture patch
                block precedes the image data.
            end: absolute offset where this section ends (RLE data is read
                until this offset).

        Raises:
            TexturesWarning: when counts look implausible and
                `conf.ignore_warnings` is not set.
            ZeroRunLengthError: on a zero run length in RLE data.
        """
        super().parse(data_in, conf)
        has_legacy_textures: bool = kwargs['has_legacy_textures']
        end: int = kwargs['end']
        # These games store the image data run-length encoded.
        rle = conf.game in (G.CROC_2_PS1, G.CROC_2_DEMO_PS1,
                            G.HARRY_POTTER_1_PS1, G.HARRY_POTTER_2_PS1)

        textures: List[TextureData] = []
        n_textures: int = int.from_bytes(data_in.read(4), 'little')
        n_rows: int = int.from_bytes(data_in.read(4), 'little')

        # BUGFIX: the original condition `0 > n_rows > 4` is a chained
        # comparison (n_rows < 0 AND n_rows > 4) and can never be true;
        # the intent was to flag any row count outside [0, 4].
        if n_textures > 4000 or n_rows < 0 or n_rows > 4:
            if conf.ignore_warnings:
                warnings.warn(
                    f"Too much textures ({n_textures}), or incorrect row count ({n_rows})."
                    f"It is most probably caused by an inaccuracy in my reverse engineering of the textures format."
                )
            else:
                raise TexturesWarning(data_in.tell(), n_textures, n_rows)

        # In Harry Potter, the last 16 textures are empty (full of 00 bytes)
        n_stored_textures = n_textures - 16 if conf.game in (
            G.HARRY_POTTER_1_PS1, G.HARRY_POTTER_2_PS1) else n_textures
        for texture_id in range(n_stored_textures):
            textures.append(TextureData.parse(data_in, conf))
        if conf.game in (G.HARRY_POTTER_1_PS1, G.HARRY_POTTER_2_PS1):
            data_in.seek(192, SEEK_CUR)  # 16 textures x 12 bytes
        n_idk_yet_1 = int.from_bytes(data_in.read(4), 'little')
        n_idk_yet_2 = int.from_bytes(data_in.read(4), 'little')
        data_in.seek(n_idk_yet_1 * cls.image_header_size, SEEK_CUR)

        if has_legacy_textures:  # Patch for legacy textures, see Textures documentation
            data_in.seek(15360, SEEK_CUR)
        if rle:
            # Decode RLE: negative run = repeat next word |run| times,
            # positive run = copy `run` literal words.
            raw_textures = BytesIO(cls.image_bytes_size * b'\x00')
            while data_in.tell() < end:
                run = int.from_bytes(data_in.read(cls.rle_size),
                                     'little',
                                     signed=True)
                if run < 0:
                    raw_textures.write(abs(run) * data_in.read(cls.rle_size))
                elif run > 0:
                    raw_textures.write(data_in.read(cls.rle_size * run))
                else:
                    raise ZeroRunLengthError(data_in.tell())
            raw_textures.seek(0)
            textures_data = raw_textures.read()
            raw_textures.close()
            if conf.game == G.CROC_2_DEMO_PS1:  # Patch for Croc 2 Demo (non-dummy) last end offset error
                data_in.seek(-2, SEEK_CUR)
        else:
            # Uncompressed: read n_rows quarters of the image, pad the rest.
            image_size = n_rows * (cls.image_bytes_size // 4)
            padding_size = cls.image_bytes_size - image_size
            textures_data = data_in.read(image_size) + padding_size * b'\x00'
        legacy_alpha = conf.game in (G.CROC_2_DEMO_PS1,
                                     G.CROC_2_DEMO_PS1_DUMMY)
        return cls(n_rows, textures_data, legacy_alpha, textures)
Exemple #13
0
def uncompress_num(f_in: BufferedIOBase) -> int:
    """Decode one variable-length integer from `f_in`.

    Bytes contribute 7 bits each, least-significant group first; a byte
    with the high bit set terminates the number. The stream is left
    positioned immediately after the consumed bytes. Returns None at EOF.

    Raises:
        RuntimeError: if no terminating byte appears within the 10-byte
            read-ahead window.
    """
    start = f_in.tell()
    window = f_in.read(10)
    if len(window) == 0:
        return None
    value = 0
    shift = 0
    for index, byte in enumerate(window):
        if byte >= 128:
            # High bit set: final byte of the number.
            value += (byte - 128) << shift
            # Rewind to just past the bytes actually consumed.
            f_in.seek(start + index + 1)
            return value
        value += byte << shift
        shift += 7
    raise RuntimeError("compressed_num is cut, should enlarge it")
Exemple #14
0
def merge_sort_stupid(fin: io.BufferedIOBase, fout: io.BufferedIOBase, memory_size: int, left=0, count=None):
    """External merge sort: sort `count` values of `fin` (starting at value
    index `left`) into `fout`, never holding more than roughly `memory_size`
    values in memory at once.

    Chunks that fit in memory are sorted in-process; larger ranges are split
    in half into temporary files, sorted recursively, and lazily merged with
    `heapq.merge`.
    """
    fout.seek(0)
    if count is None:
        count = content_length(fin, preserve_pos=False)

    # Base case: the range fits in memory, sort it directly.
    if count <= memory_size:
        go_to_pos(fin, left)
        write_content(fout, sorted(read_content(fin, count=count)), batch_size=memory_size)
        return

    # Recursive case: sort each half into a temp file, then stream-merge.
    with tmp_file() as left_f, tmp_file() as right_f:
        merge_sort_stupid(fin, left_f, memory_size, left, count=count // 2)
        merge_sort_stupid(fin, right_f, memory_size, left + count // 2, count=count - count // 2)
        left_f.seek(0)
        right_f.seek(0)
        write_content(fout, heapq.merge(read_content(left_f, batch_size=memory_size // 2),
                                        read_content(right_f, batch_size=memory_size // 2)),
                      batch_size=memory_size)
Exemple #15
0
    def parse(cls, data_in: BufferedIOBase, conf: Configuration, *args,
              **kwargs):
        """Parse the sound-data body of an SPSX section.

        Requires the already-parsed `spsx_section` in kwargs; fills in its
        level sound effects (when flagged) and dialogue/BGM VAGs. Audio
        payloads are aligned to 2048-byte sectors.
        """
        size, start = super().parse(data_in, conf)
        spsx_section: SPSXSection = kwargs['spsx_section']

        if size != 0:
            if SPSXFlags.HAS_LEVEL_SFX in spsx_section.spsx_flags:
                spsx_section.level_sfx_groups.parse_vags(data_in, conf)
                spsx_section.level_sfx_mapping.parse_mapping(
                    spsx_section.level_sfx_groups)

            # Align to the next 2048-byte sector before the dialogue/BGM VAGs.
            data_in.seek(2048 * math.ceil(data_in.tell() / 2048))
            spsx_section.dialogues_bgms.parse_vags(data_in, conf)

            if conf.game == G.HARRY_POTTER_2_PS1:
                # This game pads the section to a sector boundary.
                data_in.seek(2048 * math.ceil(data_in.tell() / 2048))

            cls.check_size(size, start, data_in.tell())
        return cls(spsx_section)
Exemple #16
0
def copy_byte_range(
    infile: io.BufferedIOBase,
    outfile: io.BufferedIOBase,
    start: int = None,
    stop: int = None,
    bufsize: int = 16 * 1024,
):
    """Like shutil.copyfileobj, but only copy a range of the streams.

    Both start and stop are inclusive. When `start` is None, copying begins
    at the current position; when `stop` is None, copying continues to EOF.
    """
    if start is not None:
        infile.seek(start)
    while True:
        # BUGFIX: the original tested `if stop`, so stop == 0 (a valid
        # inclusive end offset) was treated like "no stop" and the whole
        # stream was copied. Test against None instead.
        if stop is not None:
            to_read = min(bufsize, stop + 1 - infile.tell())
            # Guard against a negative read size (position already past
            # `stop`), which `read()` would interpret as "read to EOF".
            if to_read <= 0:
                break
        else:
            to_read = bufsize
        buf = infile.read(to_read)
        if not buf:
            break
        outfile.write(buf)
Exemple #17
0
    def _check_sorted(self, source: io.BufferedIOBase, res: io.BufferedIOBase):
        """Assert that `res` is sorted and is a permutation of `source`.

        Instead of materializing both files, values are folded into
        fixed-size hash-bucket histograms; equal histograms are taken as
        "same multiset of values" (collisions make this probabilistic).
        """
        hashes_size = 2**20

        def h(value):
            # Bucket index for the histogram comparison.
            return hash(value) % hashes_size

        source.seek(0)
        source_content = list(itertools.repeat(0, hashes_size))
        for v in read_content(source):
            source_content[h(v)] += 1

        res.seek(0)
        res_content = list(itertools.repeat(0, hashes_size))
        prev = None
        for cur in read_content(res):
            res_content[h(cur)] += 1
            # Every element must be >= its predecessor (non-decreasing order).
            self.assertTrue(prev is None or prev <= cur)
            prev = cur

        self.assertTrue(source_content == res_content, 'Content differs')
Exemple #18
0
    def parse(cls, data_in: BufferedIOBase, conf: Configuration, *args,
              **kwargs):
        """Parse the zone table of a section: per-zone chunk-id lists.

        Reads `n_zones` fixed-size zone records (keeping only each zone's
        chunk count), then one 16-bit chunk id per chunk, grouped by zone.
        """
        fallback_data = cls.fallback_parse_data(data_in)
        size, start = super().parse(data_in, conf)
        n_zones = int.from_bytes(data_in.read(4), 'little')
        n_idk1 = int.from_bytes(data_in.read(4), 'little')
        # 32-byte records of unknown purpose, kept verbatim.
        idk1 = [data_in.read(32) for _ in range(n_idk1)]
        n_chunks_per_zone = []
        for _ in range(n_zones):
            # Zone record: 2 unknown bytes, 1-byte chunk count, 9 unknown bytes.
            data_in.seek(2, SEEK_CUR)
            n_chunks_per_zone.append(data_in.read(1)[0])
            data_in.seek(9, SEEK_CUR)
        chunks_zones = []
        for n_chunks in n_chunks_per_zone:
            chunks_zones.append([
                int.from_bytes(data_in.read(2), 'little')
                for _ in range(n_chunks)
            ])

        cls.check_size(size, start, data_in.tell())
        return cls(idk1, chunks_zones, fallback_data)
    def parse(cls, data_in: BufferedIOBase, conf: Configuration, *args, **kwargs):
        """Parse a DPSX section: 3D models, animations, scripts and level data.

        Game-specific padding and legacy-texture blocks are skipped along the
        way. Size checking is disabled for builds whose file tails are not
        fully reverse-engineered yet.
        """
        fallback_data = super().fallback_parse_data(data_in)
        size, start = super().parse(data_in, conf)
        idk1 = data_in.read(4)
        n_idk_unique_textures = int.from_bytes(data_in.read(4), 'little')

        # Skip a fixed-size block whose length differs in the demo dummy build.
        if conf.game != G.CROC_2_DEMO_PS1_DUMMY:
            data_in.seek(2048, SEEK_CUR)
        else:
            data_in.seek(2052, SEEK_CUR)

        n_models_3d = int.from_bytes(data_in.read(4), 'little')
        models_3d = [Model3DData.parse(data_in, conf) for _ in range(n_models_3d)]

        n_animations = int.from_bytes(data_in.read(4), 'little')
        animations = [AnimationData.parse(data_in, conf) for _ in range(n_animations)]

        # Croc 2 builds embed legacy textures here (3072 bytes each); skip them.
        if conf.game in (G.CROC_2_PS1, G.CROC_2_DEMO_PS1):
            n_dpsx_legacy_textures = int.from_bytes(data_in.read(4), 'little')
            data_in.seek(n_dpsx_legacy_textures * 3072, SEEK_CUR)

        n_scripts = int.from_bytes(data_in.read(4), 'little')
        scripts = [ScriptData.parse(data_in, conf) for _ in range(n_scripts)]

        level_file = LevelFile.parse(data_in, conf)

        # FIXME End of Croc 2 & Croc 2 Demo Dummies' level files aren't reversed yet
        if conf.game not in (G.CROC_2_PS1, G.CROC_2_DEMO_PS1_DUMMY):
            cls.check_size(size, start, data_in.tell())
        return cls(models_3d, animations, scripts, level_file, fallback_data)
    def parse(cls, data_in: BufferedIOBase, conf: Configuration, *args,
              **kwargs):
        """Parse animation data: per-frame, per-vertex-group transforms.

        Each stored frame holds one sub-frame per vertex group. Depending on
        the header's format flag, a sub-frame is either a raw 3x3 rotation
        matrix (fixed-point, /4096) plus translation, or a quaternion plus
        translation (with the frame index appended when frames are sparse).
        """
        super().parse(data_in, conf)
        header = AnimationHeader.parse(data_in, conf)

        if header.n_total_frames == header.n_stored_frames:
            # Dense animation: every frame is stored, indexes are implicit.
            frame_indexes = list(range(header.n_total_frames))
        else:
            # Sparse animation: indexes are read from the sub-frames below.
            frame_indexes = []

        # Inter-frame block is padded to a 4-byte boundary.
        inter_frames_size = 4 * math.ceil((header.n_inter_frames * 2) / 4)
        frames = []

        for frame_id in range(header.n_stored_frames):
            last: List[np.ndarray] = []
            for group_id in range(header.n_vertices_groups):
                # Sub-frame payload: signed 16-bit little-endian values.
                sub_frame = [
                    int.from_bytes(data_in.read(2), 'little', signed=True)
                    for _ in range(header.sub_frame_size // 2)
                ]
                if header.old_animation_format:
                    matrix = np.divide(
                        (sub_frame[:3], sub_frame[3:6], sub_frame[6:9]),
                        4096).T  # Need to be reversed
                    translation = (sub_frame[9:10], sub_frame[10:11],
                                   sub_frame[11:12])
                else:
                    matrix = Quaternion(sub_frame[0], sub_frame[1],
                                        sub_frame[2],
                                        sub_frame[3]).rotation_matrix
                    translation = (sub_frame[4:5], sub_frame[5:6],
                                   sub_frame[6:7])
                    if header.n_total_frames != header.n_stored_frames:
                        frame_indexes.append(sub_frame[7])
                # 3x4 matrix: rotation with translation as the last column.
                last.append(np.append(matrix, translation, axis=1))
            # Inter-frames follow every stored frame except the last one.
            if header.n_inter_frames != 0 and frame_id != header.n_stored_frames - 1:
                data_in.seek(inter_frames_size, SEEK_CUR)
            frames.append(last)
        return cls(header, frames)
Exemple #21
0
 def parse_u8(data: BufferedIOBase):
     """Parse a Nintendo U8 archive from `data` and return a `U8File`.

     Validates the magic header and root directory node, then reads all
     file/directory nodes (12 bytes each) and resolves their names from the
     string pool that follows the node table.

     Raises:
         InvalidU8File: on a bad magic, bad first-node offset, malformed
             root node, or unknown node type.
     """
     nodes = []
     data.seek(0)
     if data.read(4) != MAGIC_HEADER:
         raise InvalidU8File('Invalid magic header')
     first_node_offset = struct.unpack(">I", data.read(4))[0]
     if first_node_offset != U8File.FIRST_NODE_OFFSET:
         raise InvalidU8File('Invalid first node offset')
     _all_node_size = struct.unpack(">I", data.read(4))[0]
     _start_data_offset = struct.unpack(">I", data.read(4))[0]
     # read the first node, to figure out where the filenames start
     # should be a directory
     data.seek(first_node_offset)
     if data.read(1) != b'\x01':
         raise InvalidU8File
     # the root node always starts at string offset 0
     if read_u24(data) != 0:
         raise InvalidU8File
     # it has no parent directory
     if read_u32(data) != 0:
         raise InvalidU8File
     # total count of nodes with 12 bytes each, after that the string
     # section starts
     total_node_count = read_u32(data)
     node = DirNode(0, 0, total_node_count)
     node.set_name('')
     nodes.append(node)
     string_pool_base_offset = first_node_offset + total_node_count * 12
     # Node 0 is the root handled above; read the remaining nodes.
     for i in range(1, total_node_count):
         data.seek(first_node_offset + i * 12)
         nodetype = data.read(1)
         string_offset = read_u24(data)
         if nodetype == b'\x00':
             # File node: data offset + data length.
             data_offset = read_u32(data)
             data_length = read_u32(data)
             node = FileNode(string_offset, data_offset, data_length)
             node.set_name(
                 read_null_term_string(
                     data, string_pool_base_offset + string_offset))
             nodes.append(node)
         elif nodetype == b'\x01':
             # Directory node: parent index + index of next non-child node.
             parent_index = read_u32(data)
             next_parent_index = read_u32(data)
             node = DirNode(string_offset, parent_index, next_parent_index)
             node.set_name(
                 read_null_term_string(
                     data, string_pool_base_offset + string_offset))
             nodes.append(node)
         else:
             raise InvalidU8File(f'Unknown nodetype {nodetype}')
     return U8File(data, nodes)
Exemple #22
0
def _filesize(f: io.BufferedIOBase):
    """Return the total size of `f` in bytes, preserving its read position."""
    saved = f.tell()
    f.seek(0, io.SEEK_END)
    size = f.tell()
    f.seek(saved)
    return size
Exemple #23
0
 def _is_dcx(buffer: io.BufferedIOBase):
     """Return True if `buffer` holds DCX-compressed data at its current
     position; the read position is restored before returning."""
     start = buffer.tell()
     magic = buffer.read(4)
     buffer.seek(start)
     return magic == b"DCX\0"
Exemple #24
0
def read_chars_from_buffer(
    buffer: io.BufferedIOBase,
    offset: int = None,
    length: int = None,
    reset_old_offset=True,
    encoding: str = None,
    strip=True,
) -> tp.Union[str, bytes]:
    """Read characters from a buffer (type IOBase). Use 'read_chars_from_bytes' if your data is already in bytes format.

    Args:
        buffer (io.BufferedIOBase): byte-format data stream to read from.
        offset: offset to `seek()` in buffer before starting to read characters. Defaults to current offset (None).
        reset_old_offset: if True, and 'offset' is not None, the buffer offset will be restored to its original position
            (at function call time) before returning. (Default: True)
        length: number of characters to read (i.e. the length of the returned string). If None (default), characters
            will be read until a null termination is encountered. Otherwise, if a length is specified, any spaces at
            the end of the string will be stripped, then any nulls at the end will be stripped.
        encoding: attempt to decode characters in this encoding before returning. If 'utf-16-*' is specified, this
            function will infer that characters are two bytes long (and null terminations will be two bytes). Otherwise,
            it assumes they are one byte long. You can decode the characters yourself if you want to use another
            multiple-bytes-per-character encoding.
        strip: remove trailing spaces and nulls (default: True).
    """
    if length == 0:
        # BUGFIX: guard against `buffer.seek(None)` when no offset was given.
        if not reset_old_offset and not isinstance(buffer,
                                                   bytes) and offset is not None:
            buffer.seek(offset)
        return "" if encoding is not None else b""

    if isinstance(buffer, bytes):
        buffer = io.BytesIO(buffer)
    chars = []
    old_offset = None
    # BUGFIX: match the documented 'utf-16-*' contract. The old check
    # required "utf16le", so big-endian spellings were read one byte per
    # character.
    bytes_per_char = 2 if encoding is not None and encoding.replace(
        "-", "").startswith("utf16") else 1

    if offset is not None:
        old_offset = buffer.tell()
        buffer.seek(offset)

    while 1:
        c = buffer.read(bytes_per_char)
        if not c and length is None:
            raise ValueError(
                "Ran out of bytes to read before null termination was found.")
        if length is None and c == b"\x00" * bytes_per_char:
            # Null termination.
            array = b"".join(chars)
            if reset_old_offset and old_offset is not None:
                buffer.seek(old_offset)
            if encoding is not None:
                return array.decode(encoding)
            return array
        else:
            chars.append(c)
            if len(chars) == length:
                if reset_old_offset and old_offset is not None:
                    buffer.seek(old_offset)
                stripped_array = b"".join(chars)
                if strip:
                    # BUGFIX: bytes.rstrip() returns a new object; the old
                    # code discarded the result, so trailing spaces were
                    # never actually removed.
                    stripped_array = stripped_array.rstrip()
                    while stripped_array.endswith(
                            b"\0\0" if bytes_per_char == 2 else b"\0"):
                        # remove (pairs of) null characters
                        stripped_array = stripped_array[:-bytes_per_char]
                if encoding is not None:
                    return stripped_array.decode(encoding)
                return stripped_array
Exemple #25
0
    def parse(cls, data_in: BufferedIOBase, conf: Configuration, *args,
              **kwargs):
        """Parse a level's chunk matrix: chunk models, the grid of chunk
        holders, sub-chunk placements (height/rotation), zone ids, lighting
        and actor-instance records.

        The layout varies heavily per game build (Croc 2 retail, Croc 2
        demo, demo dummy, Harry Potter); branches below follow `conf.game`.
        Returns a `cls` instance wrapping a `ChunksMatrix`.
        """
        super().parse(data_in, conf)
        # Chunk model headers, then their geometry (one per header).
        n_chunk_models = int.from_bytes(data_in.read(4), 'little')
        _chunk_model_headers = [
            Model3DHeader.parse(data_in, conf) for _ in range(n_chunk_models)
        ]
        chunk_models = [
            LevelGeom3DData.parse(data_in,
                                  conf,
                                  header=_chunk_model_headers[i])
            for i in range(n_chunk_models)
        ]
        if conf.game != G.CROC_2_DEMO_PS1_DUMMY:
            data_in.seek(8, SEEK_CUR)
        n_sub_chunks = int.from_bytes(data_in.read(4), 'little')

        n_idk1 = int.from_bytes(data_in.read(4), 'little')
        data_in.seek(4 * n_idk1, SEEK_CUR)
        # The sub-chunk count is stored twice; both copies must agree.
        assert n_sub_chunks == int.from_bytes(data_in.read(4), 'little')
        n_actors_instances = int.from_bytes(data_in.read(2), 'little')
        data_in.seek(6 if conf.game != G.CROC_2_DEMO_PS1_DUMMY else 2,
                     SEEK_CUR)
        n_total_chunks = int.from_bytes(data_in.read(4), 'little')
        n_chunk_columns = int.from_bytes(data_in.read(4), 'little')
        n_chunk_rows = int.from_bytes(data_in.read(4), 'little')
        if conf.game != G.CROC_2_DEMO_PS1_DUMMY:
            n_lighting_headers = int.from_bytes(data_in.read(2), 'little')
            n_add_sub_chunks_lighting = int.from_bytes(data_in.read(2),
                                                       'little')
            idk3 = int.from_bytes(data_in.read(4), 'little')
        else:
            n_lighting_headers, n_add_sub_chunks_lighting = None, None
        n_idk4 = int.from_bytes(data_in.read(4), 'little')
        data_in.seek(116 if conf.game != G.CROC_2_DEMO_PS1_DUMMY else 80,
                     SEEK_CUR)

        # Grid of offsets into the chunks-info table; 0xFFFFFFFF = empty cell.
        _chunks_matrix: List[Union[List[int], Optional[int]]] = \
            [int.from_bytes(data_in.read(4), 'little') for _ in range(n_total_chunks)]
        _sub_chunks_height = {}
        chunks_info_start_offset = data_in.tell()

        def parse_chunks_info(offset: int, chunks_ids_list: List[int]):
            # Follow the linked list of (chunk id, next offset) pairs for one
            # grid cell; 0xFFFFFFFF terminates the list.
            data_in.seek(chunks_info_start_offset + offset)
            chunks_ids_list.append(int.from_bytes(data_in.read(4), 'little'))
            linked_chunk_offset = int.from_bytes(data_in.read(4), 'little')
            if linked_chunk_offset != 0xFFFFFFFF:
                return parse_chunks_info(linked_chunk_offset, chunks_ids_list)
            else:
                return chunks_ids_list

        # Resolve each grid cell into its list of sub-chunk ids, and record
        # each sub-chunk's (row, column) for the placement checks below.
        for i in range(n_chunk_rows):
            for j in range(n_chunk_columns):
                index = i * n_chunk_columns + j
                chunk_info_offset = _chunks_matrix[index]
                if chunk_info_offset != 0xFFFFFFFF:
                    sub_chunk_ids = parse_chunks_info(chunk_info_offset, [])
                    _chunks_matrix[index] = sub_chunk_ids
                    for sub_chunk_id in sub_chunk_ids:
                        _sub_chunks_height[sub_chunk_id] = (i, j)
                else:
                    _chunks_matrix[index] = None

        # Skip to the end of the chunks-info table (8 bytes per sub-chunk).
        data_in.seek(chunks_info_start_offset + 8 * n_sub_chunks)
        if conf.game != G.CROC_2_DEMO_PS1_DUMMY:
            header256bytes = data_in.read(256)
            n_zone_ids = int.from_bytes(data_in.read(4), 'little')
            zone_ids = [
                int.from_bytes(data_in.read(4), 'little')
                for _ in range(n_zone_ids)
            ]
            assert n_zone_ids == n_total_chunks

            # Optional 'fvw' block: 2 bytes per chunk when the tag is present.
            if data_in.read(4) == b'fvw\x00':
                fvw_data = [data_in.read(2) for _ in range(n_total_chunks)]
            else:
                fvw_data = None
                data_in.seek(-4, SEEK_CUR)
        else:
            zone_ids = None
            fvw_data = None

        # Per-sub-chunk placement: rotation, then world position; the x/z
        # coordinates must match the grid cell recorded above.
        _sub_chunks_rotation = {}
        for i in range(n_sub_chunks):
            rotation = int.from_bytes(data_in.read(4), 'big')
            assert rotation in (0, 4, 8, 12)
            _sub_chunks_rotation[i] = ChunkRotation(rotation)
            assert data_in.read(4) == b'\x00\x00\x00\x00'
            x = int.from_bytes(data_in.read(4), 'little')
            y = int.from_bytes(data_in.read(4), 'little')
            z = int.from_bytes(data_in.read(4), 'little')
            assert data_in.read(4) == b'\x00\x00\x00\x00'
            assert x == 2048 + 4096 * _sub_chunks_height[i][
                1]  # Chunks are 4096-large, so +2048 for the chunk's center
            assert z == 2048 + 4096 * _sub_chunks_height[i][0]
            # Only the height remains unknown, so keep it.
            _sub_chunks_height[i] = y
        chunks_models_mapping = [
            int.from_bytes(data_in.read(4), 'little')
            for _ in range(n_sub_chunks)
        ]

        if conf.game != G.CROC_2_DEMO_PS1_DUMMY:
            lighting_headers = [
                data_in.read(84) for _ in range(n_lighting_headers)
            ]

        idk_4 = [data_in.read(36) for _ in range(n_idk4)]

        # Actor instances: only the offsets/levels are read, values unused here.
        for i in range(n_actors_instances):
            data_in.seek(24, SEEK_CUR)
            actor_offset = int.from_bytes(data_in.read(4), 'little')
            data_in.seek(32, SEEK_CUR)
            actor_sound_level = int.from_bytes(data_in.read(4), 'little')

        if conf.game not in (G.CROC_2_DEMO_PS1, G.CROC_2_DEMO_PS1_DUMMY):
            add_models_mapping = []
            for i in range(n_add_sub_chunks_lighting):
                data_in.seek(16, SEEK_CUR)
                add_models_mapping.append(
                    int.from_bytes(data_in.read(4), 'little'))
                data_in.seek(4, SEEK_CUR)

            n_idk2 = int.from_bytes(data_in.read(4), 'little')
            data_in.seek(32 * n_idk2, SEEK_CUR)  # TODO Reverse this
        else:
            add_models_mapping = None
            data_in.seek(32 * n_sub_chunks,
                         SEEK_CUR)  # Two different 32-bytes long structures
            data_in.seek(32 * n_sub_chunks, SEEK_CUR)
            data_in.seek(32 if conf.game == G.CROC_2_DEMO_PS1 else 92,
                         SEEK_CUR)

        # Per-vertex lighting data: skipped, sizes derived from the models.
        if conf.game == G.CROC_2_PS1:
            data_in.seek(30732, SEEK_CUR)
        elif conf.game != G.CROC_2_DEMO_PS1_DUMMY and n_sub_chunks != 0:
            sub_chunks_n_lighting = [
                int.from_bytes(data_in.read(4), 'little')
                for _ in range(n_sub_chunks)
            ]
            sub_chunks_n_add_lighting = [
                int.from_bytes(data_in.read(4), 'little')
                for _ in range(n_add_sub_chunks_lighting)
            ]
            for model_id in range(n_sub_chunks):
                for i in range(sub_chunks_n_lighting[model_id]):
                    size = 4 * chunk_models[
                        chunks_models_mapping[model_id]].n_vertices
                    data_in.seek(size, SEEK_CUR)
            for model_id in range(n_add_sub_chunks_lighting):
                for i in range(sub_chunks_n_add_lighting[model_id]):
                    size = 4 * chunk_models[
                        add_models_mapping[model_id]].n_vertices
                    data_in.seek(size, SEEK_CUR)
            if conf.game != G.CROC_2_DEMO_PS1:  # Not present in Croc 2 Demo Dummy
                idk_size = int.from_bytes(data_in.read(4), 'little')
                if idk_size != 0:
                    data_in.seek(4 + idk_size, SEEK_CUR)
                else:
                    data_in.seek(-4, SEEK_CUR)
                n_idk3 = int.from_bytes(data_in.read(4), 'little')
                if n_idk3 == 0:
                    data_in.seek(-4, SEEK_CUR)
                idk3 = [
                    int.from_bytes(data_in.read(40), 'little')
                    for _ in range(n_idk3)
                ]
            data_in.seek(12, SEEK_CUR)

        # Assemble the final grid: one ChunkHolder per cell, populated from
        # the per-sub-chunk model/height/rotation data gathered above.
        chunks_holders = []
        for i in range(n_total_chunks):
            if _chunks_matrix[i] is not None:
                sub_chunks = [
                    SubChunk(chunk_models[chunks_models_mapping[sub_chunk_id]],
                             _sub_chunks_height[sub_chunk_id],
                             _sub_chunks_rotation[sub_chunk_id])
                    for sub_chunk_id in _chunks_matrix[i]
                ]
                if conf.game != G.CROC_2_DEMO_PS1_DUMMY:
                    chunks_holders.append(
                        ChunkHolder(sub_chunks, zone_ids[i],
                                    fvw_data[i] if fvw_data else None))
                else:
                    chunks_holders.append(ChunkHolder(sub_chunks))
            else:
                chunks_holders.append(
                    ChunkHolder(zone_id=zone_ids[i],
                                fvw_data=fvw_data[i] if fvw_data else None)
                    if conf.game != G.CROC_2_DEMO_PS1_DUMMY else ChunkHolder())

        return cls(
            ChunksMatrix(chunks_holders, chunk_models, n_chunk_rows,
                         n_chunk_columns, zone_ids is not None))
 def parse_vags(self, data_in: BufferedIOBase, conf: Configuration):
     """Parse the VAG body of every sound in this collection.

     Each VAG payload starts on a 2048-byte sector boundary, so the stream
     is aligned forward before each sound is parsed.
     """
     for sound_entry in self:
         aligned = 2048 * math.ceil(data_in.tell() / 2048)
         data_in.seek(aligned)
         sound_entry.parse_vag(data_in, conf)
Exemple #27
0
def get_file_length(f_in: BufferedIOBase) -> int:
    """Return the total size of `f_in` in bytes without moving its cursor."""
    saved_position = f_in.tell()
    try:
        return f_in.seek(0, io.SEEK_END)
    finally:
        # Always restore the caller's position, even if a seek fails.
        f_in.seek(saved_position, io.SEEK_SET)
Exemple #28
0
    def writeto(self, buffer: BufferedIOBase):
        """Serialize this archive to `buffer`.

        Layout written: fixed header at offset 0, node header table at
        0x20 (12 bytes per node), NUL-terminated ASCII string pool, then
        each node's data padded to 32-byte boundaries, with final padding
        to a 16-byte boundary.
        """
        self.first_node_offset = 0x20
        # do strings — the pool starts right after the node header table.
        string_pool_base_offset = self.first_node_offset + len(self.nodes) * 12
        buffer.seek(string_pool_base_offset)
        for node in self.nodes:
            # String offsets are stored relative to the pool's base.
            node.string_offset = buffer.tell() - string_pool_base_offset
            buffer.write(node.name.encode('ASCII'))
            buffer.write(b'\x00')  # NUL terminator
        self.all_node_size = buffer.tell() - self.first_node_offset
        # padding before data section to a 32-byte boundary
        pad = (32 - (buffer.tell() % 32))
        if pad == 32:
            pad = 0
        buffer.write(b'\x00' * pad)
        self.data_offset = buffer.tell()

        # Rewind and emit the fixed header now that the offsets are known.
        buffer.seek(0)
        buffer.write(MAGIC_HEADER)
        buffer.write(struct.pack('>I', self.first_node_offset))
        buffer.write(struct.pack('>I', self.all_node_size))
        buffer.write(struct.pack('>I', self.data_offset))
        buffer.seek(self.first_node_offset)
        cur_data_offset = self.data_offset

        for i, node in enumerate(self.nodes):
            if node.node_type == b'\x00':  # data-bearing node
                # todo modified/unmodified
                node.new_data_offset = cur_data_offset
                buffer.seek(cur_data_offset)
                node.write_data_to(self, buffer)
                cur_data_offset += node.get_length()
                # pad so the next node's data starts 32-byte aligned
                pad = (32 - (buffer.tell() % 32))
                if pad == 32:
                    pad = 0
                cur_data_offset += pad
            # Each node header is written at its fixed 12-byte slot.
            buffer.seek(self.first_node_offset + i * 12)
            node.write_header_to(buffer)

        buffer.seek(cur_data_offset)
        # final padding to a 16-byte boundary at end of file
        pad = (16 - (cur_data_offset % 16))
        if pad == 16:
            pad = 0
        buffer.write(b'\x00' * pad)
    def parse(cls, data_in: BufferedIOBase, conf: Configuration, *args,
              **kwargs):
        """Parse an animation header from `data_in`.

        The header layout differs between the Croc 2 variants and the
        Harry Potter games; after reading the counts, the per-frame
        tables are skipped (not parsed) and a new instance describing
        the animation is returned.

        Raises:
            AnimationsWarning: when the frame count is implausible and
                `conf.ignore_warnings` is False.
        """
        super().parse(data_in, conf)
        n_flags: int = int.from_bytes(data_in.read(4), 'little')
        data_in.seek(4, SEEK_CUR)  # unknown/unused 4-byte field
        n_total_frames: int = int.from_bytes(data_in.read(4), 'little')
        # A zero here signals the presence of additional per-frame data.
        has_additional_frame_data_value = int.from_bytes(
            data_in.read(4), 'little')
        has_additional_data: bool = has_additional_frame_data_value == 0
        n_stored_frames = 0
        if conf.game in (G.CROC_2_PS1, G.CROC_2_DEMO_PS1,
                         G.CROC_2_DEMO_PS1_DUMMY):
            n_inter_frames = int.from_bytes(data_in.read(4), 'little')
            if n_inter_frames != 0:
                # With inter-frames, every frame is stored.
                n_stored_frames = n_total_frames
            data_in.seek(4, SEEK_CUR)  # unknown/unused 4-byte field
        else:  # Harry Potter 1 & 2
            data_in.seek(8, SEEK_CUR)
            n_inter_frames = 0
        n_vertex_groups: int = int.from_bytes(data_in.read(4), 'little')
        data_in.seek(4, SEEK_CUR)

        if conf.game in (G.HARRY_POTTER_1_PS1, G.HARRY_POTTER_2_PS1):
            # HP variants store the stored-frame count later in the header.
            n_stored_frames = int.from_bytes(data_in.read(4), 'little')
            data_in.seek(12, SEEK_CUR)

        flags = [data_in.read(4) for _ in range(n_flags)]
        if has_additional_data:
            data_in.seek(8 * n_total_frames, SEEK_CUR)
        data_in.seek(4 * n_total_frames, SEEK_CUR)  # Total frames info
        data_in.seek(n_inter_frames * cls.inter_frames_header_size,
                     SEEK_CUR)  # Inter-frames header
        if conf.game in (G.HARRY_POTTER_1_PS1,
                         G.HARRY_POTTER_2_PS1) or n_inter_frames != 0:
            data_in.seek(4 * n_stored_frames, SEEK_CUR)  # Stored frames info

        if n_stored_frames == 0 or n_inter_frames != 0:  # Rotation matrices
            old_animation_format = True
            n_stored_frames = n_total_frames
        else:  # Unit quaternions
            old_animation_format = False

        # Sanity check: > 500 frames (or none) most likely means the data
        # was misinterpreted upstream rather than a genuine animation.
        if n_total_frames > 500 or n_total_frames == 0:
            if conf.ignore_warnings:
                warnings.warn(
                    f"Too much frames in animation (or no frame): {n_total_frames} frames."
                    f"It is most probably caused by an inaccuracy in my reverse engineering of the textures format."
                )
            else:
                raise AnimationsWarning(data_in.tell(), n_total_frames)
        return cls(n_total_frames, n_stored_frames, n_vertex_groups, n_flags,
                   has_additional_data, flags, old_animation_format,
                   n_inter_frames)
def go_to_pos(f: io.BufferedIOBase, i: int):
    """Seek `f` to the absolute offset of the `i`-th fixed-size value."""
    target_offset = i * _VALUE_SIZE
    f.seek(target_offset)
Exemple #31
0
	def __init__(self, stream: io.BufferedIOBase, new: Version = None):
		"""Parse a MIX from `stream`, which must be a buffered file object.
		
		If `new` is not None, initialize an empty MIX of this version instead.
		MixParseError is raised on parsing errors.
		"""
		
		# Initialize mandatory attributes
		self._dirty = False
		self._stream = None
		self._open = []
		
		# If stream is, for example, a raw I/O object, files could be destroyed
		# without ever raising an error, so check this.
		if not isinstance(stream, io.BufferedIOBase):
			raise TypeError("`stream` must be an instance of io.BufferedIOBase")
		
		if not stream.readable():
			raise ValueError("`stream` must be readable")
		
		if not stream.seekable():
			raise ValueError("`stream` must be seekable")
		
		if new is not None:
			# Start empty (new file)
			if type(new) is not Version:
				raise TypeError("`new` must be a Version enumeration member or None")
			# BUGFIX: this branch previously referenced the undefined local
			# `version` instead of the `new` parameter (NameError at runtime).
			if new is Version.RG:
				raise NotImplementedError("RG MIX files are not yet supported")
			self._stream = stream
			self._index = {}
			self._contents = []
			self._version = new
			self._flags = 0
			return
		
		# Parse an existing file
		filesize = stream.seek(0, io.SEEK_END)
		if filesize <= 6:
			raise MixParseError("File too small")
		stream.seek(0)
		
		first4 = stream.read(4)
		if first4 == b"MIX1":
			raise NotImplementedError("RG MIX files are not yet supported")
		elif first4[:2] == b"\x00\x00":
			# It seems we have a RA or TS MIX so check the flags
			flags = int.from_bytes(first4[2:], "little")
			if flags > 3:
				raise MixParseError("Unsupported properties")
			if flags & 2:
				raise NotImplementedError("Encrypted MIX files are not yet supported")
			
			# FIXME HERE: 80 bytes of westwood key_source if encrypted,
			#             to create a 56 byte long blowfish key from it.
			#
			#             They are followed by a number of 8 byte blocks,
			#             the first of them decrypting to filecount and bodysize.
			
			# Encrypted TS MIXes have a key.ini we can check for later,
			# so at this point assume Version.TS only if unencrypted.
			# Stock RA MIXes seem to be always encrypted.
			version = Version.TS
			
			# RA/TS MIXes hold their filecount after the flags,
			# whilst for TD MIXes their first two bytes are the filecount.
			filecount = int.from_bytes(stream.read(2), "little")
		else:
			version = Version.TD
			flags = 0
			filecount = int.from_bytes(first4[:2], "little")
			stream.seek(2)
			
		# From here it's the same for every unencrypted MIX
		bodysize    = int.from_bytes(stream.read(4), "little")
		indexoffset = stream.tell()
		indexsize   = filecount * 12
		bodyoffset  = indexoffset + indexsize

		# Check if data is sane
		# FIXME: Checksummed MIXes have 20 additional bytes after the body.
		if filesize - bodyoffset != bodysize:
			raise MixParseError("Incorrect filesize or invalid header")

		# OK, time to read the index
		index = {}
		for key, offset, size in struct.iter_unpack("<LLL", stream.read(indexsize)):
			offset += bodyoffset
			
			if offset + size > filesize:
				raise MixParseError("Content extends beyond end of file")

			index[key] = _MixNode(key, offset, size, size, None)

		if len(index) != filecount:
			raise MixParseError("Duplicate key")

		# Now read the names
		# TD/RA: 1422054725; TS: 913179935
		for dbkey in (1422054725, 913179935):
			if dbkey in index:
				stream.seek(index[dbkey].offset)
				header = stream.read(32)

				if header != b"XCC by Olaf van der Spek\x1a\x04\x17'\x10\x19\x80\x00":
					continue

				dbsize  = int.from_bytes(stream.read(4), "little")  # Total filesize

				if dbsize != index[dbkey].size or dbsize > 16777216:
					raise MixParseError("Invalid name table")

				# Skip four bytes for XCC type; 0 for LMD, 2 for XIF
				# Skip four bytes for DB version; Always zero
				stream.seek(8, io.SEEK_CUR)
				gameid = int.from_bytes(stream.read(4), "little")  # XCC Game ID
				
				# XCC saves alias numbers, so converting them
				# to `Version` is not straight forward.
				# FIXME: Check if Dune games and Nox also use MIX files
				if gameid == 0:
					if version is not Version.TD:
						continue
				elif gameid == 1:
					version = Version.RA
				elif 2 <= gameid <= 6 or gameid == 15:
					version = Version.TS
				else:
					continue
				
				namecount = int.from_bytes(stream.read(4), "little")
				bodysize  = dbsize - 53  # Size - Header - Last byte
				namelist  = stream.read(bodysize).split(b"\x00") if bodysize else []
				
				if len(namelist) != namecount:
					raise MixParseError("Invalid name table")
				
				# Remove Database from index
				del index[dbkey]
				
				# Add names to index
				names = False
				for name in namelist:
					name = name.decode(ENCODING, "ignore")
					key = genkey(name, version)
					if key in index:
						index[key].name = name
						names = True
				
				# XCC sometimes puts two Databases in a file by mistake,
				# so if no names were found, give it another try
				if names: break

		# Create a sorted list of all contents
		contents = sorted(index.values(), key=lambda node: node.offset)

		# Calculate alloc values
		# This is the size up to which a file may grow without needing a move
		for i in range(len(contents) - 1):
			contents[i].alloc = contents[i+1].offset - contents[i].offset

			if contents[i].alloc < contents[i].size:
				raise MixParseError("Overlapping file boundaries")

		# Populate the object
		self._stream = stream
		self._version = version
		self._index = index
		self._contents = contents
		self._flags = flags
    def parse(cls, data_in: BufferedIOBase, conf: Configuration, *args,
              **kwargs):
        """Parse a 3D model's vertices, normals and faces from `data_in`.

        Required kwargs:
            header (Model3DHeader): element counts driving the parse.
            is_world_model_3d (bool): selects the face record layout
                (large actor-style records vs small sub-chunk records).

        Raises:
            NegativeIndexError: on an invalid vertex/normal/face marker.
            VerticesNormalsGroupsMismatch: when vertex and normal group
                counts disagree.
        """
        super().parse(data_in, conf)
        header: Model3DHeader = kwargs['header']
        is_world_model_3d: bool = kwargs['is_world_model_3d']

        def parse_vertices_normals(mode: int):
            # Reads `header.n_vertices` records of four little-endian
            # 16-bit values: signed X/Y/Z plus an unsigned trailing value
            # that delimits groups (1 closes the current group; < 1 is an
            # error). `mode` only selects the reported error cause
            # (0 = vertex, 1 = vertex normal).
            res = []
            group = []
            for i in range(header.n_vertices):
                # Vertices
                xyzi = (int.from_bytes(data_in.read(2), 'little', signed=True),
                        int.from_bytes(data_in.read(2), 'little', signed=True),
                        int.from_bytes(data_in.read(2), 'little', signed=True),
                        int.from_bytes(data_in.read(2), 'little'))
                group.append(xyzi[:3])
                if xyzi[3] < 1:
                    error_cause = \
                        NegativeIndexError.CAUSE_VERTEX if mode == 0 else NegativeIndexError.CAUSE_VERTEX_NORMAL
                    raise NegativeIndexError(data_in.tell(), error_cause,
                                             xyzi[3], xyzi)
                elif xyzi[3] == 1:
                    res.append(np.array(group, dtype=np.int16))
                    group = []
            # Flush a trailing group that had no explicit terminator.
            if group:
                res.append(np.array(group, dtype=np.int16))
            return res, len(res)

        vertices, n_vertices_groups = parse_vertices_normals(0)
        normals: List[np.ndarray] = []

        # Harry Potter world models carry no normals; every other case
        # reads them and expects one normals group per vertices group.
        if not (is_world_model_3d
                and conf.game in (G.HARRY_POTTER_1_PS1, G.HARRY_POTTER_2_PS1)):
            normals, n_normals_groups = parse_vertices_normals(1)
            if n_vertices_groups != n_normals_groups:
                raise VerticesNormalsGroupsMismatch(n_vertices_groups,
                                                    n_normals_groups,
                                                    data_in.tell())

        # Faces
        quads = []
        tris = []
        faces_normals = []
        faces_texture_ids: List[int] = []
        if conf.game == G.CROC_2_DEMO_PS1_DUMMY or not is_world_model_3d:  # Large face headers (Actors' models)
            for face_id in range(header.n_faces):
                # Ten 16-bit values: signed normal X/Y/Z, a marker value
                # (validated below, < 1 is an error), four vertex
                # indices, a texture id, then a flags word whose 0x0800
                # bit distinguishes quads from triangles.
                raw_face_data = (int.from_bytes(data_in.read(2),
                                                'little',
                                                signed=True),
                                 int.from_bytes(data_in.read(2),
                                                'little',
                                                signed=True),
                                 int.from_bytes(data_in.read(2),
                                                'little',
                                                signed=True),
                                 int.from_bytes(data_in.read(2), 'little'),
                                 int.from_bytes(data_in.read(2), 'little'),
                                 int.from_bytes(data_in.read(2), 'little'),
                                 int.from_bytes(data_in.read(2), 'little'),
                                 int.from_bytes(data_in.read(2), 'little'),
                                 int.from_bytes(data_in.read(2), 'little'),
                                 int.from_bytes(data_in.read(2), 'little'))
                if raw_face_data[
                        9] & 0x0800:  # 1st vertex, then 2nd, 4th and 3rd, except in Croc 2 Demo Dummy WADs
                    # NOTE(review): both branches currently emit the same
                    # ordering; the commented line is the intended swap.
                    if conf.game != G.CROC_2_DEMO_PS1_DUMMY:
                        # FIXME
                        #  quads.append((raw_face_data[4], raw_face_data[5], raw_face_data[7], raw_face_data[6]))
                        quads.append((raw_face_data[4], raw_face_data[5],
                                      raw_face_data[6], raw_face_data[7]))
                    else:
                        quads.append((raw_face_data[4], raw_face_data[5],
                                      raw_face_data[6], raw_face_data[7]))
                else:  # 1st vertex, then 2nd and 3rd
                    tris.append(
                        (raw_face_data[4], raw_face_data[5], raw_face_data[6]))
                faces_normals.append(raw_face_data[:3])
                faces_texture_ids.append(raw_face_data[8])
                if raw_face_data[3] < 1:
                    raise NegativeIndexError(data_in.tell(),
                                             NegativeIndexError.CAUSE_FACE,
                                             raw_face_data[3], raw_face_data)
        else:  # Small face headers (Subchunks' models)
            for face_id in range(header.n_faces):
                # Six 16-bit values: four vertex indices, a texture id,
                # then a flags word (0x0800 bit: quad vs triangle).
                raw_face_data = (int.from_bytes(data_in.read(2), 'little'),
                                 int.from_bytes(data_in.read(2), 'little'),
                                 int.from_bytes(data_in.read(2), 'little'),
                                 int.from_bytes(data_in.read(2), 'little'),
                                 int.from_bytes(data_in.read(2), 'little'),
                                 int.from_bytes(data_in.read(2), 'little'))
                if raw_face_data[5] & 0x0800:
                    quads.append((raw_face_data[0], raw_face_data[1],
                                  raw_face_data[2], raw_face_data[3]))
                else:
                    tris.append(
                        (raw_face_data[0], raw_face_data[1], raw_face_data[2]))
                faces_texture_ids.append(raw_face_data[4])
        quads = np.array(quads, dtype=np.uint16)
        tris = np.array(tris, dtype=np.uint16)
        faces_normals = np.array(faces_normals, dtype=np.int16)

        # Skip the bounding box info records (size differs per game).
        if conf.game in (G.CROC_2_PS1, G.CROC_2_DEMO_PS1,
                         G.CROC_2_DEMO_PS1_DUMMY):
            bounding_box_info_size = 44
        else:  # Harry Potter 1 & 2
            bounding_box_info_size = 32
        data_in.seek(header.n_bounding_box_info * bounding_box_info_size,
                     SEEK_CUR)
        return cls(header, is_world_model_3d, vertices, normals, quads, tris,
                   faces_normals, faces_texture_ids, n_vertices_groups)
 def serialize_vags(self, data_out: BufferedIOBase, conf: Configuration):
     """Serialize each group's VAGs to `data_out`.

     Output is aligned on 2048-byte (CD sector) boundaries before each
     group. Integer arithmetic replaces ``math.ceil`` on a float
     quotient, which is inexact for offsets beyond 2**53.
     """
     for group in self:
         # Round the current position up to the next 2048-byte boundary.
         pos = data_out.tell()
         data_out.seek((pos + 2047) // 2048 * 2048)
         group.serialize_vags(data_out, conf)