def pack(self) -> tp.Tuple[bytes, bytes]:
    """Pack BND3-style split binder to `(header bytes, data bytes)` (BHD/BDT pair).

    Entries are written in ascending ID order. Entry headers are written first
    (with reserved path/data offsets), then packed paths (if the binder flags
    include names), then entry data into the separate data writer, filling the
    reserved offsets as positions become known.

    Returns:
        Tuple of `(header_bytes, data_bytes)`.
    """
    # Both writers share the same byte order; hoisted so it is computed once.
    byte_order_big = self.big_endian or self.flags.is_big_endian
    header_writer = BinaryWriter(big_endian=byte_order_big)
    data_writer = BinaryWriter(big_endian=byte_order_big)

    self.pack_header(header_writer)

    # `sorted` already returns a list; the old `list(sorted(...))` was redundant.
    entries = sorted(self._entries, key=lambda e: e.id)
    entry_headers = [entry.get_header(self.flags) for entry in entries]
    for entry_header in entry_headers:
        entry_header.pack_bnd3(header_writer, self.flags, self.bit_big_endian)

    if self.flags.has_names:
        for entry, entry_header in zip(entries, entry_headers):
            header_writer.fill("path_offset", header_writer.position, obj=entry_header)
            # NOTE: BND paths are *not* encoded in `shift_jis_2004`, unlike most other
            # strings, but are `shift-jis`. The relevant difference is that escaped
            # backslashes are encoded as the yen symbol in `shift_jis_2004`.
            header_writer.append(entry.get_packed_path(encoding="shift-jis"))

    # Useless BDT3 header.
    data_writer.append(b"BDF3")
    data_writer.pack("8s", self.signature.encode("ascii"))
    data_writer.pad(4)

    for entry, entry_header in zip(entries, entry_headers):
        header_writer.fill("data_offset", data_writer.position, obj=entry_header)
        data_writer.append(entry.data)

    return header_writer.finish(), data_writer.finish()
def pack_bone_indices(self, writer: BinaryWriter, bone_indices_start: int):
    """Fill this object's reserved `__bone_offset` and write its bone indices.

    With no bone indices, the reserved offset is still filled with
    `bone_indices_start` (weird case, kept for byte-perfect writing).
    """
    if self.bone_indices:
        writer.fill("__bone_offset", writer.position, obj=self)
        writer.pack(f"{len(self.bone_indices)}i", *self.bone_indices)
    else:
        # Weird case for byte-perfect writing.
        writer.fill("__bone_offset", bone_indices_start, obj=self)
def pack_vertex_buffer_indices(self, writer: BinaryWriter, first_vertex_buffer_index: int):
    """Fill the reserved `__vertex_buffer_offset` and write consecutive buffer indices.

    Indices run from `first_vertex_buffer_index` for as many vertex buffers as
    this object holds.
    """
    writer.fill("__vertex_buffer_offset", writer.position, obj=self)
    buffer_count = len(self.vertex_buffers)
    index_range = range(first_vertex_buffer_index, first_vertex_buffer_index + buffer_count)
    writer.pack(f"{buffer_count}i", *index_range)
def pack_vertex_indices(self, writer: BinaryWriter, vertex_index_size: int, vertex_indices_offset: int):
    """Fill the reserved `__vertex_indices_offset` and pack this face set's vertex indices.

    `vertex_index_size` selects the struct element: 16 -> unsigned short ("H"),
    32 -> signed int ("i"). Any other size raises `NotImplementedError`.
    """
    self.fill(writer, __vertex_indices_offset=vertex_indices_offset)
    fmt_char = {16: "H", 32: "i"}.get(vertex_index_size)
    if fmt_char is None:
        raise NotImplementedError(
            f"Unsupported vertex index size for `pack()`: {vertex_index_size}"
        )
    writer.pack(f"{len(self.vertex_indices)}{fmt_char}", *self.vertex_indices)
def pack(self) -> bytes:
    """Pack TPF file to bytes."""
    # Console TPFs (Xbox 360, PS3) are big-endian; everything else little-endian.
    writer = BinaryWriter(big_endian=self.platform in {TPFPlatform.Xbox360, TPFPlatform.PS3})
    writer.append(b"TPF\0")
    writer.reserve("data_size", "i")
    writer.pack("i", len(self.textures))
    writer.pack("3b", self.platform, self.tpf_flags, self.encoding)
    writer.pad(1)

    # All headers, then all names, then all data blobs.
    for index, texture in enumerate(self.textures):
        texture.pack_header(writer, index, self.platform, self.tpf_flags)
    for index, texture in enumerate(self.textures):
        texture.pack_name(writer, index, self.encoding)

    data_start = writer.position
    for index, texture in enumerate(self.textures):
        # TKGP notes: padding varies wildly across games, so don't worry about it too much.
        if texture.data:
            writer.pad_align(4)
        texture.pack_data(writer, index)
    writer.fill("data_size", writer.position - data_start)
    return writer.finish()
def pack_header(self, writer: BinaryWriter):
    """Write the BHF3 header: magic, signature, flags, endianness bools, entry count."""
    writer.append(b"BHF3")
    writer.pack("8s", self.signature.encode("ascii"))
    self.flags.pack(writer, self.bit_big_endian)
    writer.pack("??", self.big_endian, self.bit_big_endian)
    writer.pad(1)
    writer.pack("i", len(self._entries))
    writer.pad(12)
def pack_bnd4(self, writer: BinaryWriter, binder_flags: BinderFlags, bit_big_endian: bool):
    """Write this entry's BND4 header; data/path offsets are reserved for later filling.

    Which fields appear depends on `binder_flags` (compression, long offsets,
    IDs, names).
    """
    self.flags.pack(writer, bit_big_endian)
    writer.pad(3)
    writer.pack("i", -1)
    writer.pack("q", self.compressed_size)
    if binder_flags.has_compression:
        writer.pack("q", self.uncompressed_size)
    offset_fmt = "q" if binder_flags.has_long_offsets else "I"
    writer.reserve("data_offset", offset_fmt, obj=self)
    if binder_flags.has_ids:
        writer.pack("i", self.id)
    if binder_flags.has_names:
        writer.reserve("path_offset", "i", obj=self)
def pack_face_set_indices(self, writer: BinaryWriter, first_face_set_index: int):
    """Fill the reserved `__face_set_offset` and write consecutive face set indices.

    Indices run from `first_face_set_index` for as many face sets as this mesh
    holds.
    """
    writer.fill("__face_set_offset", writer.position, obj=self)
    count = len(self.face_sets)
    writer.pack(f"{count}i", *range(first_face_set_index, first_face_set_index + count))
def pack(self) -> tp.Tuple[bytes, bytes]:
    """Pack BND4-style split binder to `(header bytes, data bytes)` (BHD/BDT pair).

    Entries are written in ascending ID order. The hash table (hash table type 4)
    is only rebuilt when the entry count or any entry path has changed since the
    last pack/unpack; otherwise the cached `_most_recent_hash_table` is re-used,
    which keeps unmodified binders byte-perfect.

    Returns:
        Tuple of `(header_bytes, data_bytes)`.
    """
    # Both writers share the same byte order; hoisted so it is computed once.
    byte_order_big = self.big_endian or self.flags.is_big_endian
    header_writer = BinaryWriter(big_endian=byte_order_big)
    data_writer = BinaryWriter(big_endian=byte_order_big)

    self.pack_header(header_writer)

    path_encoding = ("utf-16-be" if self.big_endian else "utf-16-le") if self.unicode else "shift-jis"
    entries = sorted(self._entries, key=lambda e: e.id)

    # FIX: the old code pointlessly initialized `rebuild_hash_table` and then
    # immediately overwrote it in every branch below.
    if not self._most_recent_hash_table or len(entries) != self._most_recent_entry_count:
        rebuild_hash_table = True
    else:
        # Rebuild only if any entry path has changed since the last pack/unpack.
        rebuild_hash_table = any(
            entry.path != old_path
            for entry, old_path in zip(entries, self._most_recent_paths)
        )
    self._most_recent_entry_count = len(entries)
    self._most_recent_paths = [entry.path for entry in entries]

    entry_headers = [entry.get_header(self.flags) for entry in entries]
    for entry_header in entry_headers:
        entry_header.pack_bnd4(header_writer, self.flags, self.bit_big_endian)

    if self.flags.has_names:
        for entry, entry_header in zip(entries, entry_headers):
            header_writer.fill("path_offset", header_writer.position, obj=entry_header)
            # NOTE: BND paths are *not* encoded in `shift_jis_2004`, unlike most other
            # strings, but are `shift-jis`. The relevant difference is that escaped
            # backslashes are encoded as the yen symbol in `shift_jis_2004`.
            header_writer.append(entry.get_packed_path(encoding=path_encoding))

    if self.hash_table_type == 4:
        header_writer.fill("hash_table_offset", header_writer.position)
        if rebuild_hash_table:
            header_writer.append(BinderHashTable.build_hash_table(self._entries))
        else:
            header_writer.append(self._most_recent_hash_table)

    header_writer.fill("data_offset", header_writer.position)

    # Useless BDT4 header.
    data_writer.append(b"BDF4")
    data_writer.pack("?", self.unknown1)
    data_writer.pack("?", self.unknown2)
    data_writer.pad(3)
    data_writer.pack("?", self.big_endian)
    data_writer.pack("?", not self.bit_big_endian)  # note reversal
    data_writer.pad(5)
    data_writer.pack("q", 0x30)  # header size
    data_writer.pack("8s", self.signature.encode("ascii"))
    data_writer.pad(16)

    for entry, entry_header in zip(entries, entry_headers):
        header_writer.fill("data_offset", data_writer.position, obj=entry_header)
        # Ten pad bytes between each entry (for byte-perfect writes).
        data_writer.append(entry.data + b"\0" * 10)

    return header_writer.finish(), data_writer.finish()
def pack_header(self, writer: BinaryWriter):
    """Write the BHF4 header; `data_offset` (and `hash_table_offset` for hash
    table type 4) are reserved for later filling."""
    writer.append(b"BHF4")
    writer.pack("??", self.unknown1, self.unknown2)
    writer.pad(3)
    writer.pack("?", self.big_endian)
    writer.pack("?", not self.bit_big_endian)  # note reversal
    writer.pad(1)
    writer.pack("i", len(self._entries))
    writer.pack("q", 0x40)  # header size
    writer.pack("8s", self.signature.encode("ascii"))
    writer.pack("q", self.flags.get_bnd_entry_header_size())
    writer.reserve("data_offset", "q")
    writer.pack("?", self.unicode)
    self.flags.pack(writer, self.bit_big_endian)
    writer.pack("B", self.hash_table_type)
    writer.pad(5)
    if self.hash_table_type != 4:
        writer.pad(8)
    else:
        writer.reserve("hash_table_offset", "q")
def pack(self, writer: BinaryWriter):
    """Write this float struct: `unk0`, byte size of the values, then the floats."""
    value_count = len(self.values)
    writer.pack("i", self.unk0)
    writer.pack("i", value_count * 4)  # size in bytes (4 bytes per float)
    writer.pack(f"{value_count}f", *self.values)
def pack_header(self, writer: BinaryWriter, index: int, platform: TPFPlatform, tpf_flags: int):
    """Write this texture's TPF header; file data/size/name fields are reserved
    for later filling (keyed by `index`).

    On PC, texture type and mipmap count are taken from the DDS header itself;
    other platforms use the values stored on this texture.
    """
    if platform == TPFPlatform.PC:
        dds = self.get_dds()
        caps_2 = dds.header.caps_2
        if caps_2 & DDSCAPS2.CUBEMAP:
            tex_type = TextureType.Cubemap
        elif caps_2 & DDSCAPS2.VOLUME:
            tex_type = TextureType.Volume
        else:
            tex_type = TextureType.Texture
        mipmap_count = dds.header.mipmap_count
    else:
        tex_type = self.texture_type
        mipmap_count = self.mipmaps

    writer.reserve(f"file_data_{index}", "I")
    writer.reserve(f"file_size_{index}", "i")
    writer.pack("b", self.format)
    writer.pack("b", tex_type)
    writer.pack("b", mipmap_count)
    writer.pack("b", self.texture_flags)

    if platform != TPFPlatform.PC:
        # Non-PC platforms store explicit dimensions plus platform-specific fields.
        writer.pack("h", self.header.width)
        writer.pack("h", self.header.height)
        if platform == TPFPlatform.Xbox360:
            writer.pad(4)
        elif platform == TPFPlatform.PS3:
            writer.pack("i", self.header.unk1)
            if tpf_flags != 0:
                writer.pack("i", self.header.unk2)
        elif platform in {TPFPlatform.PS4, TPFPlatform.XboxOne}:
            writer.pack("i", self.header.texture_count)
            writer.pack("i", self.header.unk2)

    writer.reserve(f"file_name_{index}", "I")
    writer.pack("i", int(self.float_struct is not None))  # presence flag
    if platform in {TPFPlatform.PS4, TPFPlatform.XboxOne}:
        writer.pack("i", self.header.dxgi_format)
    if self.float_struct:
        self.float_struct.pack(writer)
def pack(self, writer: BinaryWriter, bit_big_endian: bool):
    """Write this flags byte, reversing its bit order unless the format stores
    flag bits most-significant-first.

    Bit order is kept as-is when `bit_big_endian` is set, or when the flags are
    big-endian without flag 7 (De Morgan of the original reversal condition).
    """
    keep_bit_order = bit_big_endian or (self.is_big_endian and not self.has_flag_7)
    if keep_bit_order:
        writer.pack("B", self)
    else:
        # Reverse the eight bits of the byte before writing.
        writer.pack("B", int(f"{self:08b}"[::-1], 2))
def pack(self, writer: BinaryWriter, layout: BufferLayout, uv_factor: float):
    """Pack this vertex into `writer`, one layout member at a time, per `layout`.

    UVs, tangents, and colors are consumed by popping from `self.uv_queue`,
    `self.tangent_queue`, and `self.color_queue`, so those queues must be filled
    before packing (presumably by the caller — confirm against the buffer-packing
    code). `uv_factor` scales each popped UV before writing.

    Raises:
        NotImplementedError: for edge-compressed positions, or any
            semantic/layout-type combination with no branch below.
        IndexError: if a queue runs out of elements (diagnostics are printed
            first).
    """
    for member in layout:
        # Set by any semantic branch that cannot handle `member.layout_type`;
        # checked once at the bottom of the loop body.
        not_implemented = False
        if member.semantic == LayoutSemantic.Position:
            if member.layout_type == LayoutType.Float3:
                writer.pack("3f", *self.position)
            elif member.layout_type == LayoutType.Float4:
                # Fourth component is always written as zero.
                writer.pack("4f", *self.position, 0.0)
            elif member.layout_type == LayoutType.EdgeCompressed:
                raise NotImplementedError(
                    "Soulstruct cannot load FLVERs with edge-compressed vertex positions."
                )
            else:
                not_implemented = True
        elif member.semantic == LayoutSemantic.BoneWeights:
            # Weights are scaled from float to the target integer range.
            if member.layout_type == LayoutType.Byte4A:
                writer.pack("4b", *[int(w * 127) for w in self.bone_weights])
            elif member.layout_type == LayoutType.Byte4C:
                writer.pack("4B", *[int(w * 255) for w in self.bone_weights])
            elif member.layout_type in {
                LayoutType.UVPair, LayoutType.Short4ToFloat4A
            }:
                writer.pack("4h", *[int(w * 32767) for w in self.bone_weights])
            else:
                not_implemented = True
        elif member.semantic == LayoutSemantic.BoneIndices:
            if member.layout_type in {
                LayoutType.Byte4B, LayoutType.Byte4E
            }:
                writer.pack("4B", *self.bone_indices)
            elif member.layout_type == LayoutType.ShortBoneIndices:
                writer.pack("4h", *self.bone_indices)
            else:
                not_implemented = True
        elif member.semantic == LayoutSemantic.Normal:
            if member.layout_type == LayoutType.Float3:
                writer.pack("3f", *self.normal)
            elif member.layout_type == LayoutType.Float4:
                writer.pack("4f", *self.normal, self.normal_w)
            elif member.layout_type in {
                LayoutType.Byte4A, LayoutType.Byte4B,
                LayoutType.Byte4C, LayoutType.Byte4E
            }:
                # Scale each component into the unsigned byte range, then append `normal_w`.
                writer.pack("4B", *[int(x * 127 + 127) for x in self.normal], self.normal_w)
            elif member.layout_type == LayoutType.Short2toFloat2:
                # `normal_w` leads here (unsigned byte), followed by three signed bytes.
                writer.pack("B3b", self.normal_w, *[int(x * 127) for x in self.normal])
            elif member.layout_type == LayoutType.Short4ToFloat4A:
                writer.pack("4h", *[int(x * 32767) for x in self.normal], self.normal_w)
            elif member.layout_type == LayoutType.Short4ToFloat4B:
                writer.pack("4H", *[int(x * 32767 + 32767) for x in self.normal], self.normal_w)
            else:
                not_implemented = True
        elif member.semantic == LayoutSemantic.UV:
            try:
                uv = self.uv_queue.pop() * uv_factor
            except IndexError:
                # Diagnostic dump before re-raising with a clearer message.
                print(layout)
                print(member)
                print(
                    f"{len(self.uvs)} UVS, {len(self.tangents)} tangents, {len(self.colors)} colors"
                )
                raise IndexError("Ran out of vertex UVs to buffer.")
            if member.layout_type == LayoutType.Float2:
                writer.pack("2f", uv.x, uv.y)
            elif member.layout_type == LayoutType.Float3:
                writer.pack("3f", uv.x, uv.y, uv.z)
            elif member.layout_type == LayoutType.Float4:
                # Two UVs packed as two float pairs; a second UV is popped here.
                writer.pack("2f", uv.x, uv.y)
                try:
                    uv = self.uv_queue.pop() * uv_factor
                except IndexError:
                    print(layout)
                    print(member)
                    print(
                        f"{len(self.uvs)} UVS, {len(self.tangents)} tangents, {len(self.colors)} colors"
                    )
                    raise IndexError("Ran out of vertex UVs to buffer.")
                writer.pack("2f", uv.x, uv.y)
            elif member.layout_type in {
                LayoutType.Byte4A, LayoutType.Byte4B, LayoutType.Short2toFloat2,
                LayoutType.Byte4C, LayoutType.UV
            }:
                # UV already scaled by `uv_factor`; truncated to signed shorts.
                writer.pack("2h", int(uv.x), int(uv.y))
            elif member.layout_type == LayoutType.UVPair:
                # Two UVs packed as two signed-short pairs; a second UV is popped here.
                writer.pack("2h", int(uv.x), int(uv.y))
                try:
                    uv = self.uv_queue.pop() * uv_factor
                except IndexError:
                    print(layout)
                    print(member)
                    print(
                        f"{len(self.uvs)} UVS, {len(self.tangents)} tangents, {len(self.colors)} colors"
                    )
                    raise IndexError("Ran out of vertex UVs to buffer.")
                writer.pack("2h", int(uv.x), int(uv.y))
            elif member.layout_type == LayoutType.Short4ToFloat4B:
                writer.pack("4h", int(uv.x), int(uv.y), int(uv.z), 0)
            else:
                not_implemented = True
        elif member.semantic == LayoutSemantic.Tangent:
            try:
                tangent = self.tangent_queue.pop()
            except IndexError:
                print(layout)
                print(member)
                print(
                    f"{len(self.uvs)} UVS, {len(self.tangents)} tangents, {len(self.colors)} colors"
                )
                raise IndexError("Ran out of vertex tangents to buffer.")
            if member.layout_type == LayoutType.Float4:
                writer.pack("4f", *tangent)
            elif member.layout_type in {
                LayoutType.Byte4A,
                LayoutType.Byte4B,
                LayoutType.Byte4C,
                LayoutType.Short4ToFloat4A,
                LayoutType.Byte4E,
            }:
                writer.pack("4B", *[int(x * 127 + 127) for x in tangent])
            else:
                not_implemented = True
        elif member.semantic == LayoutSemantic.Bitangent:
            if member.layout_type in {
                LayoutType.Byte4A, LayoutType.Byte4B,
                LayoutType.Byte4C, LayoutType.Byte4E
            }:
                writer.pack("4B", *[int(x * 127 + 127) for x in self.bitangent])
            else:
                not_implemented = True
        elif member.semantic == LayoutSemantic.VertexColor:
            try:
                color = self.color_queue.pop()
            except IndexError:
                print(layout)
                print(member)
                print(
                    f"{len(self.uvs)} UVS, {len(self.tangents)} tangents, {len(self.colors)} colors"
                )
                raise IndexError("Ran out of vertex colors to buffer.")
            if member.layout_type == LayoutType.Float4:
                writer.pack("4f", *color)
            elif member.layout_type in {
                LayoutType.Byte4A, LayoutType.Byte4C
            }:
                writer.pack("4B", *[int(c * 255) for c in color])
            else:
                not_implemented = True
        else:
            not_implemented = True
        if not_implemented:
            raise NotImplementedError(
                f"Unsupported vertex member semantic/type combination: "
                f"{member.semantic.name} | {member.layout_type.name}")
def pack(self, writer: BinaryWriter):
    """Write all GX items, then the list terminator and its null padding.

    The terminator row is `(terminator_id, 100, terminator_null_count + 12)` —
    the fixed 100 and the +12 offset match the on-disk format.
    """
    for item in self.gx_items:
        item.pack(writer)
    writer.pack("iii", self.terminator_id, 100, self.terminator_null_count + 12)
    writer.pad(self.terminator_null_count)
def pack(self, writer: BinaryWriter, bit_big_endian: bool):
    """Write this flags byte, reversing its bit order unless `bit_big_endian`."""
    value = self if bit_big_endian else int(f"{self:08b}"[::-1], 2)
    writer.pack("B", value)