def pack(self) -> tp.Tuple[bytes, bytes]:
    """Pack this split BND3 binder and return `(header_bytes, data_bytes)` for the BHD/BDT file pair.

    Entries are written in ascending entry ID order. Per-entry `path_offset` and `data_offset` reserved
    fields in the header writer are filled as the corresponding data is appended.
    """
    use_big_endian = self.big_endian or self.flags.is_big_endian
    header_writer = BinaryWriter(big_endian=use_big_endian)
    data_writer = BinaryWriter(big_endian=use_big_endian)

    self.pack_header(header_writer)

    sorted_entries = sorted(self._entries, key=lambda entry: entry.id)
    headers = [entry.get_header(self.flags) for entry in sorted_entries]
    for header in headers:
        header.pack_bnd3(header_writer, self.flags, self.bit_big_endian)

    if self.flags.has_names:
        for entry, header in zip(sorted_entries, headers):
            header_writer.fill("path_offset", header_writer.position, obj=header)
            # NOTE: BND paths are *not* encoded in `shift_jis_2004`, unlike most other strings, but are `shift-jis`.
            # The relevant difference is that escaped backslashes are encoded as the yen symbol in `shift_jis_2004`.
            header_writer.append(entry.get_packed_path(encoding="shift-jis"))

    # Useless BDT3 header.
    data_writer.append(b"BDF3")
    data_writer.pack("8s", self.signature.encode("ascii"))
    data_writer.pad(4)

    for entry, header in zip(sorted_entries, headers):
        # Data offsets written into the HEADER file point into the DATA file.
        header_writer.fill("data_offset", data_writer.position, obj=header)
        data_writer.append(entry.data)

    return header_writer.finish(), data_writer.finish()
def pack(self) -> bytes:
    """Pack TPF file to bytes."""
    # Console platforms (Xbox 360, PS3) use big-endian byte order.
    is_console = self.platform in {TPFPlatform.Xbox360, TPFPlatform.PS3}
    writer = BinaryWriter(big_endian=is_console)

    writer.append(b"TPF\0")
    writer.reserve("data_size", "i")  # filled once all texture data is written
    writer.pack("i", len(self.textures))
    writer.pack("b", self.platform)
    writer.pack("b", self.tpf_flags)
    writer.pack("b", self.encoding)
    writer.pad(1)

    for index, texture in enumerate(self.textures):
        texture.pack_header(writer, index, self.platform, self.tpf_flags)
    for index, texture in enumerate(self.textures):
        texture.pack_name(writer, index, self.encoding)

    data_start = writer.position
    for index, texture in enumerate(self.textures):
        # TKGP notes: padding varies wildly across games, so don't worry about it too much.
        if texture.data:
            writer.pad_align(4)
        texture.pack_data(writer, index)
    writer.fill("data_size", writer.position - data_start)

    return writer.finish()
def pack(self):
    """Pack the goal table and its string data to bytes.

    Writes the header, then all packed goal structs, then the packed name strings.
    Name offsets are absolute file offsets computed from the known header + struct
    table size, so strings can be accumulated while the structs are packed.
    """
    writer = BinaryWriter(big_endian=self.big_endian)
    writer.pack_struct(self.HEADER_STRUCT, goal_count=len(self.goals))
    goal_struct = self.GOAL_STRUCT_64 if self.use_struct_64 else self.GOAL_STRUCT_32
    # Strings start immediately after the fixed-size goal struct table.
    packed_strings_offset = writer.position + len(self.goals) * goal_struct.size
    # `bytearray` accumulators avoid the quadratic cost of repeated `bytes` concatenation.
    packed_goals = bytearray()
    packed_strings = bytearray()
    encoding = self.encoding
    # 64-bit struct variant uses UTF-16-style double-null terminators.
    z_term = b"\0\0" if self.use_struct_64 else b"\0"
    for goal in self.goals:
        name_offset = packed_strings_offset + len(packed_strings)
        packed_strings += goal.goal_name.encode(encoding=encoding) + z_term
        goal_kwargs = goal.get_interrupt_details()
        logic_interrupt_name = goal_kwargs.pop("logic_interrupt_name")
        if logic_interrupt_name:
            logic_interrupt_name_offset = packed_strings_offset + len(packed_strings)
            packed_strings += logic_interrupt_name.encode(encoding=encoding) + z_term
        else:
            # Offset 0 marks "no logic interrupt name".
            logic_interrupt_name_offset = 0
        packed_goals += goal_struct.pack(
            goal_id=goal.goal_id,
            name_offset=name_offset,
            logic_interrupt_name_offset=logic_interrupt_name_offset,
            **goal_kwargs,
        )
    writer.append(bytes(packed_goals))
    writer.append(bytes(packed_strings))
    return writer.finish()
def pack(self) -> bytes:
    """Pack this single-file BND4 binder to bytes.

    Entries are written in ascending ID order. A cached hash table is reused when the
    entry count and all entry paths are unchanged since the last unpack/pack; otherwise
    it is rebuilt.
    """
    writer = BinaryWriter(big_endian=self.big_endian)
    self.pack_header(writer)
    # BND4 paths are UTF-16 (endianness follows the binder) when the unicode flag is set.
    path_encoding = ("utf-16-be" if self.big_endian else "utf-16-le") if self.unicode else "shift-jis"
    rebuild_hash_table = not self._most_recent_hash_table
    if not self._most_recent_hash_table or len(self._entries) != self._most_recent_entry_count:
        rebuild_hash_table = True
    else:
        # Check if any entry paths have changed.
        for i, entry in enumerate(self._entries):
            if entry.path != self._most_recent_paths[i]:
                rebuild_hash_table = True
                break
    # Record current state for the next pack's cache check.
    self._most_recent_entry_count = len(self._entries)
    self._most_recent_paths = [entry.path for entry in self._entries]
    entries = list(sorted(self._entries, key=lambda e: e.id))
    entry_headers = [entry.get_header(self.flags) for entry in entries]
    for entry_header in entry_headers:
        entry_header.pack_bnd4(writer, self.flags, self.bit_big_endian)
    if self.flags.has_names:
        for entry, entry_header in zip(entries, entry_headers):
            writer.fill("path_offset", writer.position, obj=entry_header)
            # NOTE: BND paths are *not* encoded in `shift_jis_2004`, unlike most other strings, but are `shift-jis`.
            # The relevant difference is that escaped backslashes are encoded as the yen symbol in `shift_jis_2004`.
            writer.append(entry.get_packed_path(encoding=path_encoding))
    if self.hash_table_type == 4:
        writer.fill("hash_table_offset", writer.position)
        if rebuild_hash_table:
            writer.append(BinderHashTable.build_hash_table(self._entries))
        else:
            writer.append(self._most_recent_hash_table)
    writer.fill("data_offset", writer.position)
    for entry, entry_header in zip(entries, entry_headers):
        writer.fill("data_offset", writer.position, obj=entry_header)
        writer.append(
            entry.data + b"\0" * 10
        )  # ten pad bytes between each entry (for byte-perfect writes)
    return writer.finish()
def pack(self) -> bytes:
    """Pack this single-file BND3 binder to bytes.

    Entries are written in ascending entry ID order; reserved `path_offset`,
    `data_offset`, and `file_size` header fields are filled as writing proceeds.
    """
    writer = BinaryWriter(big_endian=self.big_endian or self.flags.is_big_endian)
    self.pack_header(writer)

    sorted_entries = sorted(self._entries, key=lambda entry: entry.id)
    headers = [entry.get_header(self.flags) for entry in sorted_entries]
    for header in headers:
        header.pack_bnd3(writer, self.flags, self.bit_big_endian)

    if self.flags.has_names:
        for entry, header in zip(sorted_entries, headers):
            writer.fill("path_offset", writer.position, obj=header)
            # NOTE: BND paths are *not* encoded in `shift_jis_2004`, unlike most other strings, but are `shift-jis`.
            # The relevant difference is that escaped backslashes are encoded as the yen symbol in `shift_jis_2004`.
            writer.append(entry.get_packed_path(encoding="shift-jis"))

    for entry, header in zip(sorted_entries, headers):
        writer.fill("data_offset", writer.position, obj=header)
        writer.append(entry.data)

    writer.fill("file_size", writer.position)
    return writer.finish()
def pack(self) -> tp.Tuple[bytes, bytes]:
    """Pack this split BND4 binder and return `(header_bytes, data_bytes)` for the BHD/BDT file pair.

    Entries are written in ascending ID order. A cached hash table is reused when the
    entry count and all entry paths are unchanged since the last unpack/pack; otherwise
    it is rebuilt. Data offsets written into the header file point into the data file.
    """
    header_writer = BinaryWriter(big_endian=self.big_endian or self.flags.is_big_endian)
    data_writer = BinaryWriter(big_endian=self.big_endian or self.flags.is_big_endian)
    self.pack_header(header_writer)
    # BND4 paths are UTF-16 (endianness follows the binder) when the unicode flag is set.
    path_encoding = ("utf-16-be" if self.big_endian else "utf-16-le") if self.unicode else "shift-jis"
    entries = list(sorted(self._entries, key=lambda e: e.id))
    rebuild_hash_table = not self._most_recent_hash_table
    if not self._most_recent_hash_table or len(entries) != self._most_recent_entry_count:
        rebuild_hash_table = True
    else:
        # Check if any entry paths have changed.
        for i, entry in enumerate(entries):
            if entry.path != self._most_recent_paths[i]:
                rebuild_hash_table = True
                break
    # Record current state for the next pack's cache check.
    self._most_recent_entry_count = len(entries)
    self._most_recent_paths = [entry.path for entry in entries]
    entry_headers = [entry.get_header(self.flags) for entry in entries]
    for entry_header in entry_headers:
        entry_header.pack_bnd4(header_writer, self.flags, self.bit_big_endian)
    if self.flags.has_names:
        for entry, entry_header in zip(entries, entry_headers):
            header_writer.fill("path_offset", header_writer.position, obj=entry_header)
            # NOTE: BND paths are *not* encoded in `shift_jis_2004`, unlike most other strings, but are `shift-jis`.
            # The relevant difference is that escaped backslashes are encoded as the yen symbol in `shift_jis_2004`.
            header_writer.append(entry.get_packed_path(encoding=path_encoding))
    if self.hash_table_type == 4:
        header_writer.fill("hash_table_offset", header_writer.position)
        if rebuild_hash_table:
            header_writer.append(BinderHashTable.build_hash_table(self._entries))
        else:
            header_writer.append(self._most_recent_hash_table)
    header_writer.fill("data_offset", header_writer.position)
    # Useless BDT4 header.
    data_writer.append(b"BDF4")
    data_writer.pack("?", self.unknown1)
    data_writer.pack("?", self.unknown2)
    data_writer.pad(3)
    data_writer.pack("?", self.big_endian)
    data_writer.pack("?", not self.bit_big_endian)
    data_writer.pad(5)
    data_writer.pack("q", 0x30)  # header size
    data_writer.pack("8s", self.signature.encode("ascii"))
    data_writer.pad(16)
    for entry, entry_header in zip(entries, entry_headers):
        header_writer.fill("data_offset", data_writer.position, obj=entry_header)
        data_writer.append(
            entry.data + b"\0" * 10
        )  # ten pad bytes between each entry (for byte-perfect writes)
    return header_writer.finish(), data_writer.finish()
def pack_header(self) -> bytes:
    """Pack the DDS header, plus the optional DXT10 extension header if present, and return the bytes."""
    header_writer = BinaryWriter()
    self.header.pack(header_writer)
    if self.dxt10_header:
        self.dxt10_header.pack(header_writer)
    return header_writer.finish()
def pack(self):
    """Pack this FLVER model to bytes.

    Writes, in order: header (with counts and reserved offset/size fields), dummies,
    materials, bones, meshes, face sets, vertex buffer headers, buffer layouts, material
    textures, then the 16-aligned indexed data sections (layout members, bounding boxes,
    bone/face-set/vertex-buffer indices, GX lists, strings), and finally the aligned
    vertex data region (face-set indices and vertex buffers). Reserved header fields
    `vertex_data_offset` and `vertex_data_size` are filled once positions are known.
    """
    writer = BinaryWriter(big_endian=self.header.endian == b"B\0")
    encoding = ("utf-16-be" if writer.big_endian else "utf-16-le") if self.header.unicode else "shift_jis_2004"

    # Count faces across all face sets for the header.
    true_face_count = 0
    total_face_count = 0
    for mesh in self.meshes:
        allow_primitive_restarts = len(mesh.vertices) < 2**16 - 1  # max unsigned short value
        for face_set in mesh.face_sets:
            face_set_true_count, face_set_total_count = face_set.get_face_counts(allow_primitive_restarts)
            true_face_count += face_set_true_count
            total_face_count += face_set_total_count

    if self.header.version < Version.Bloodborne_DS3_A:
        # Set header's `vertex_index_size` to the largest size detected across all `FaceSet`s (16 or 32).
        header_vertex_indices_size = 16
        for mesh in self.meshes:
            for face_set in mesh.face_sets:
                face_set_vertex_index_size = face_set.get_vertex_index_size()
                header_vertex_indices_size = max(header_vertex_indices_size, face_set_vertex_index_size)
    else:
        # Vertex size is stored per `VertexBuffer`.
        header_vertex_indices_size = 0

    self.header.pack(
        writer,
        dummy_count=len(self.dummies),
        material_count=len(self.materials),
        bone_count=len(self.bones),
        mesh_count=len(self.meshes),
        vertex_buffer_count=sum(len(mesh.vertex_buffers) for mesh in self.meshes),
        face_set_count=sum(len(mesh.face_sets) for mesh in self.meshes),
        buffer_layout_count=len(self.buffer_layouts),
        texture_count=sum(len(material.textures) for material in self.materials),
        true_face_count=true_face_count,
        total_face_count=total_face_count,
        vertex_indices_size=header_vertex_indices_size,
    )

    for dummy in self.dummies:
        # Dark Souls 2 stores dummy colors as ARGB rather than the usual order.
        dummy.pack(writer, color_is_argb=self.header.version == Version.DarkSouls2)
    for material in self.materials:
        material.pack(writer)
    for bone in self.bones:
        bone.pack(writer)
    for mesh in self.meshes:
        mesh.pack(writer)

    for mesh in self.meshes:
        for face_set in mesh.face_sets:
            # Size 0 in the header means "per face set"; otherwise the header-wide size applies.
            if header_vertex_indices_size == 0:
                face_set_vertex_index_size = face_set.get_vertex_index_size()
            else:
                face_set_vertex_index_size = header_vertex_indices_size
            face_set.pack(writer, face_set_vertex_index_size)

    for mesh in self.meshes:
        for i, vertex_buffer in enumerate(mesh.vertex_buffers):
            vertex_buffer.pack(
                writer,
                self.header.version,
                mesh_vertex_buffer_index=i,
                buffer_layouts=self.buffer_layouts,
                mesh_vertex_count=len(mesh.vertices),
            )

    for i, buffer_layout in enumerate(self.buffer_layouts):
        buffer_layout.pack(writer)

    first_texture_index = 0
    for i, material in enumerate(self.materials):
        material.pack_textures(writer, first_texture_index=first_texture_index)
        first_texture_index += len(material.textures)

    # TODO: Write unknown Sekiro struct here.

    # Indexed data only after this point, with 16 pad bytes between each data type.
    writer.pad_align(16)
    for i, buffer_layout in enumerate(self.buffer_layouts):
        buffer_layout.pack_members(writer)
    writer.pad_align(16)
    for i, mesh in enumerate(self.meshes):
        mesh.pack_bounding_box(writer)
    writer.pad_align(16)
    bone_indices_start = writer.position
    for i, mesh in enumerate(self.meshes):
        mesh.pack_bone_indices(writer, bone_indices_start=bone_indices_start)
    writer.pad_align(16)
    first_face_set_index = 0
    for i, mesh in enumerate(self.meshes):
        mesh.pack_face_set_indices(writer, first_face_set_index)
        first_face_set_index += len(mesh.face_sets)
    writer.pad_align(16)
    first_vertex_buffer_index = 0
    for mesh in self.meshes:
        mesh.pack_vertex_buffer_indices(writer, first_vertex_buffer_index)
        first_vertex_buffer_index += len(mesh.vertex_buffers)
    writer.pad_align(16)

    # Pack GX lists and record each one's offset so materials can reference them.
    gx_offsets = []
    for gx_list in self.gx_lists:
        gx_offsets.append(writer.position)
        gx_list.pack(writer)
    for material in self.materials:
        material.fill_gx_offset(writer, gx_offsets)
    writer.pad_align(16)

    for material in self.materials:
        material.pack_strings(writer, encoding)
        for texture in material.textures:
            texture.pack_zstring(writer, "path", encoding=encoding)
            texture.pack_zstring(writer, "texture_type", encoding=encoding)
    writer.pad_align(16)
    for bone in self.bones:
        bone.pack_zstring(writer, "name", encoding=encoding)

    # Older versions align the vertex data region to 32 bytes rather than 16.
    alignment = 32 if self.header.version <= 0x2000E else 16
    writer.pad_align(alignment)
    if self.header.version in {Version.DarkSouls2_NT, Version.DarkSouls2}:
        writer.pad(32)

    vertex_data_start = writer.position
    self.header.fill(writer, vertex_data_offset=vertex_data_start)
    for mesh in self.meshes:
        for face_set in mesh.face_sets:
            if header_vertex_indices_size == 0:
                face_set_vertex_index_size = face_set.get_vertex_index_size()
            else:
                face_set_vertex_index_size = header_vertex_indices_size
            writer.pad_align(16)
            # Offsets stored in face sets are relative to the vertex data region start.
            face_set.pack_vertex_indices(
                writer,
                vertex_index_size=face_set_vertex_index_size,
                vertex_indices_offset=writer.position - vertex_data_start,
            )
        for vertex in mesh.vertices:
            vertex.prepare_pack()
        for vertex_buffer in mesh.vertex_buffers:
            writer.pad_align(16)
            # Newer versions quantize UVs with a larger factor.
            uv_factor = 2048 if self.header.version >= Version.DarkSouls2_NT else 1024
            vertex_buffer.pack_buffer(
                writer,
                buffer_layouts=self.buffer_layouts,
                vertices=mesh.vertices,
                buffer_offset=writer.position - vertex_data_start,
                uv_factor=uv_factor,
            )
        for vertex in mesh.vertices:
            vertex.finish_pack()
    writer.pad_align(16)
    self.header.fill(writer, vertex_data_size=writer.position - vertex_data_start)
    if self.header.version in {Version.DarkSouls2_NT, Version.DarkSouls2}:
        writer.pad(32)
    return writer.finish()