def pad(self, alignment=4):
    """Align the end of this pool's data buffer by writing padding bytes."""
    size = self.get_size()
    padding_bytes = get_padding(size, alignment)
    msg = f"Padded pool of ({size} bytes) with {len(padding_bytes)}, alignment = {alignment}"
    logging.debug(msg)
    self.data.write(padding_bytes)
def update_data(self, data, update_copies=False, pad_to=None, include_old_pad=False):
    """Update data and size of this pointer.

    :param data: new payload bytes for this pointer
    :param update_copies: also push the new data to all other pointers in self.copies
    :param pad_to: if given, recompute self.padding for this byte alignment
    :param include_old_pad: keep the existing padding and only append what is
        needed on top of it, instead of replacing it
    """
    self.data = data
    # only change padding if a new alignment is given
    if pad_to:
        len_d = len(data)
        # consider the old padding for alignment?
        if include_old_pad:
            len_d += len(self.padding)
        new_pad = get_padding(len_d, pad_to)
        # append new to the old padding
        if include_old_pad:
            self.padding = self.padding + new_pad
        # overwrite the old padding
        else:
            self.padding = new_pad
    # fix: sum the lengths instead of concatenating just to measure the result
    self.data_size = len(self.data) + len(self.padding)
    # update other pointers if asked to by the injector
    if update_copies:
        for other_pointer in self.copies:
            if other_pointer is not self:
                other_pointer.update_data(data, pad_to=pad_to, include_old_pad=include_old_pad)
def update_with(self, list_of_arrays):
    """Rebuild this name buffer from a list of (array, attrib) pairs.

    Each element of every array gets item.offset set to the byte offset of its
    string (read via attrib) relative to the start of the name block; duplicate
    strings are written only once.
    """
    print("Updating name buffer")
    self.strings = []
    offset_dic = {}
    with BinaryStream() as stream:
        for array, attrib in list_of_arrays:
            sort_key = lambda element: getattr(element, attrib)
            for item in sorted(array, key=sort_key):
                name = sort_key(item)
                if name in offset_dic:
                    # seen before - reuse the stored offset
                    address = offset_dic[name]
                else:
                    # new string: record its offset and write it zero-terminated
                    address = stream.tell()
                    self.strings.append(name)
                    offset_dic[name] = address
                    stream.write_zstring(name)
                # store offset on item
                item.offset = address
        # grab the finished buffer and pad it to 8 bytes
        raw = stream.getvalue()
        self.data = raw + get_padding(len(raw), alignment=8)
def update_matcol_pointers(self, pointers, new_names):
    """Rewrite matcol name pointers with new zero-terminated names.

    Pointers sharing a data_offset are collapsed into one union; all copies get
    the new data with no padding, and only the last union (in offset order)
    receives the 64-byte alignment padding for the whole run.
    """
    # it looks like fragments are not reused here, and not even pointers are
    # but as they point to the same address the writer treats them as same
    # so the pointer map has to be updated for the involved pools
    # also the copies list has to be adjusted
    # so this is a hack that only considers one entry for each union of pointers
    # map doffset to tuple of pointer and new zero-terminated name bytes
    dic = {p.data_offset: (p, n.encode() + b"\x00") for p, n in zip(pointers, new_names)}
    sorted_keys = sorted(dic)
    print("Names in ovl order:", [dic[k][1] for k in sorted_keys])
    total_len = 0
    for key in sorted_keys:
        p, new_data = dic[key]
        total_len += len(new_data)
        for copy in p.copies:
            copy.data = new_data
            copy.padding = b""
    # apply padding to the last element only
    tail_padding = get_padding(total_len, alignment=64)
    for copy in p.copies:
        copy.padding = tail_padding
def _get_data(self, file_path):
    """Load the source file and build the two fragment payloads: the first
    non-empty line (name) and the second (path), zero-terminated, with the
    second padded to 64 bytes over their combined length."""
    raw_bytes = self.get_content(file_path)
    lines = [line.strip() for line in raw_bytes.split(b'\n') if line.strip()]
    icname, icpath = lines
    first = zstr(icname)
    second = zstr(icpath)
    return first, second + get_padding(len(first) + len(second), 64)
def write(self, stream):
    """Update representation, then write the container from the internal representation.

    For non-HIRC containers, recomputes every DIDX pointer's offset, size and
    16-byte padding, writes all chunks, then appends the DATA section.
    HIRC containers are simply zero-padded back to their original size.
    """
    offset = 0
    if not self.hirc:
        # recompute offset, size and padding for every wem pointer
        for pointer in self.didx.data_pointers:
            pointer.data_section_offset = offset
            pointer.wem_filesize = len(pointer.data)
            pointer.pad = get_padding(len(pointer.data), alignment=16)
            # sum the lengths instead of concatenating just to measure them
            offset += len(pointer.data) + len(pointer.pad)
    for chunk_id, chunk in self.chunks:
        stream.write(chunk_id)
        stream.write_type(chunk)
    if self.hirc:
        # keep the original file size by padding with zeroes
        stream.write(bytearray(self.old_size - stream.tell()))
        # fix: print the stream position, not the bound method object
        print(stream.tell())
        return
    if not self.didx.data_pointers:
        return
    data = b"".join(pointer.data + pointer.pad for pointer in self.didx.data_pointers)
    # name the last pointer's pad explicitly instead of relying on a leaked loop variable
    last_pad = self.didx.data_pointers[-1].pad
    stream.write(b"DATA")
    stream.write_uint(len(data) - len(last_pad))
    stream.write(data)
    # ovl ignores the padding of the last wem
    self.size_for_ovl = stream.tell() - len(last_pad)
    print("AUX size for OVL", self.size_for_ovl)
def create(self, ovs, file_entry):
    """userinterfaceicondata: write the zero-terminated name + path strings,
    a padded 8-byte gap and a 16-byte block, then wire up 2 fragments and
    the sized str entry."""
    self.ovs = ovs
    dbuffer = self.getContent(file_entry.path)
    pool_index, pool = self.get_pool(2)
    offset = pool.data.tell()
    # userinterfaceicondata, 2 frags
    stripped = [line.strip() for line in dbuffer.split(b'\n') if line.strip()]
    icname, icpath = stripped
    outb = zstr(icname) + zstr(icpath)
    pool.data.write(outb + get_padding(len(outb), 64) + struct.pack('8s', b''))
    newoffset = pool.data.tell()
    pool.data.write(struct.pack('16s', b''))
    # each frag: (ptr0 target, ptr1 target); ptr1 of the second frag hits the path string
    frag_targets = (
        (newoffset, offset),
        (newoffset + 8, offset + len(icname) + 1),
    )
    for ptr0_offset, ptr1_offset in frag_targets:
        frag = self.create_fragment()
        frag.pointers[0].pool_index = pool_index
        frag.pointers[0].data_offset = ptr0_offset
        frag.pointers[1].pool_index = pool_index
        frag.pointers[1].data_offset = ptr1_offset
    new_ss = self.create_ss_entry(file_entry)
    new_ss.pointers[0].pool_index = pool_index
    new_ss.pointers[0].data_offset = newoffset
def update(self):
    """Re-pad MS2 buffer 0 for PZ16 ovls so it stays 4-byte aligned."""
    if not ovl_versions.is_pz16(self.ovl):
        return
    logging.info(f"Updating MS2 buffer 0 with padding for {self.sized_str_entry.name}")
    name_buffer, bone_infos, verts = self.get_ms2_buffer_datas()
    # make sure buffer 0 is padded to 4 bytes
    padding = get_padding(len(name_buffer), 4)
    if padding:
        self.sized_str_entry.data_entry.update_data([name_buffer + padding, bone_infos, verts])
def create(self, ovs, file_entry):
    """Write a size-prefixed, zero-terminated payload into pool 2 and hook up
    its sized str entry."""
    self.ovs = ovs
    dbuffer = self.getContent(file_entry.path)
    pool_index, pool = self.get_pool(2)
    start_offset = pool.data.tell()
    new_ss = self.create_ss_entry(file_entry)
    new_ss.pointers[0].pool_index = pool_index
    new_ss.pointers[0].data_offset = start_offset
    # uint32 length prefix, then the zero-terminated content, padded to 8 bytes
    payload = struct.pack("I", len(dbuffer)) + zstr(dbuffer)
    pool.data.write(payload + get_padding(len(payload), alignment=8))
    pool.num_files += 1
def _get_data(self, file_path):
    """Loads and returns the data for a CURVE"""
    curvedata = self.load_xml(file_path)
    # pack each curve point as two little-endian floats (key, value)
    packed_points = bytearray(b"".join(
        struct.pack("<ff", float(item.attrib['key']), float(item.text))
        for item in curvedata))
    # fragment pointer 0 data: null ptr + point count
    header = struct.pack('<QQ', 0x00, len(curvedata))
    # there is not a reason to add this padding but just to be safe
    return header, packed_points + get_padding(len(packed_points), alignment=8)
def pack_mips(self, num_mips):
    """From a standard DDS stream, pack the lower mip levels into one image and
    pad with empty bytes.

    :param num_mips: stop after this many entries of self.mips
    :return: the merged mip bytes; small levels are packed line-by-line into
        one 256-byte-aligned image appended at the end
    """
    logging.info("Packing mip maps")
    out_mips = []
    packed_levels = []
    # start packing when one line of the mip == 128 bytes
    for i, (h, w, b) in enumerate(self.mips):
        if i == num_mips:
            break
        if self.get_bytes_size(w) > 32:
            out_mips.append(b)
        else:
            packed_levels.append((h, w, b))
    # no packing at all, just grab desired mips and done
    if not packed_levels:
        # fix: no placeholder, so no f-string needed
        logging.info("Mip packing is not needed")
        return b"".join(out_mips)
    with io.BytesIO() as packed_writer:
        # pack the last mips into one image
        # fix: dropped unused enumerate index; width is unused on this path
        for height, _width, level_bytes in packed_levels:
            # no matter what pixel size the mips represent, they must be at least one 4x4 chunk
            height = max(self.block_len_pixels_1d, height)
            # get count of h slices, 1 block is 4x4 px
            num_slices_y = height // self.block_len_pixels_1d
            bytes_per_line = len(level_bytes) // num_slices_y
            # write the bytes for this line from the mip bytes
            for slice_i in range(num_slices_y):
                # get the bytes that represent the blocks of this line
                sl = level_bytes[slice_i * bytes_per_line:(slice_i + 1) * bytes_per_line]
                packed_writer.write(sl)
                # fill the line with padding blocks
                packed_writer.write(get_padding(len(sl), alignment=256))
            # add one fully blank line for those cases
            if num_slices_y == 1:
                packed_writer.write(b"\x00" * 256)
        packed_mip_bytes = packed_writer.getvalue()
    out_mips.append(packed_mip_bytes)
    # get final merged output bytes
    return b"".join(out_mips)
def write_all_bone_infos(self, stream):
    """Write every bone info block plus its hitcheck verts, padding between
    blocks but not after the last one; records self.bone_info_size."""
    # functional for JWE detailobjects.ms2, if joint_data is read
    bone_infos_start = stream.tell()
    count = len(self.bone_infos)
    for index, bone_info in enumerate(self.bone_infos):
        logging.debug(f"BONE INFO {index} starts at {stream.tell()}")
        bone_info.write(stream)
        self.write_hitcheck_verts(bone_info, stream)
        # pad between entries, but never after the final one
        if index + 1 < count:
            relative_offset = stream.tell() - bone_infos_start
            padding = get_padding(relative_offset)
            logging.debug(f"Writing padding {padding}")
            stream.write(padding)
    self.bone_info_size = stream.tell() - bone_infos_start
def create(self):
    """Write a prepared statement collection: the array header, an optional
    args frag per statement, and name/sql string frags for every statement."""
    self.sized_str_entry = self.create_ss_entry(self.file_entry)
    pscollection = self.load_xml(self.file_entry.path)
    # pscollection needs 8 bytes for the ptr and the array count
    # then also needs more per each entry and each arg
    f_0 = self.create_fragments(self.sized_str_entry, 1)[0]
    self.write_to_pool(f_0.pointers[0], 2, b"")
    self.write_to_pool(self.sized_str_entry.pointers[0], 2, struct.pack("<QQ", 0, len(pscollection)))
    # point the first frag to the array of data now
    # each array member: ptr, count, ptr, ptr
    array_data = b"".join(struct.pack("<QQQQ", 0, len(ps), 0, 0) for ps in pscollection)
    self.write_to_pool(f_0.pointers[1], 2, array_data)
    rel_offset = 0
    for statement in pscollection:
        # statements with args get an extra frag pointing at the packed args
        if len(statement):
            args_frag = self.create_fragments(self.sized_str_entry, 1)[0]
            args_data = b"".join(
                struct.pack('<BBBBIQQ', 0, int(arg.text), 1 + arg_i, 0, 0, 0, 0)
                for arg_i, arg in enumerate(statement))
            self.ptr_relative(args_frag.pointers[0], f_0.pointers[1], rel_offset=rel_offset)
            self.write_to_pool(args_frag.pointers[1], 2, args_data + get_padding(len(args_data), alignment=16))
        # write name and sql as a name ptr each
        f_name, f_sql = self.create_fragments(self.sized_str_entry, 2)
        self.ptr_relative(f_name.pointers[0], f_0.pointers[1], rel_offset=rel_offset + 0x10)
        self.write_to_pool(f_name.pointers[1], 2, f"{statement.attrib['name']}\00".encode('utf-8'))
        self.ptr_relative(f_sql.pointers[0], f_0.pointers[1], rel_offset=rel_offset + 0x18)
        self.write_to_pool(f_sql.pointers[1], 2, f"{statement.attrib['sql']}\00".encode('utf-8'))
        # advance to the next 0x20-byte array member
        rel_offset += 0x20
def create(self, ovs, file_entry):
    """assetpkg: copy content, pad to 64b, then assign 1 fragment and 1 empty
    sized str.

    Writes the zero-terminated content plus padding (frag ptr 1 data) and a
    16-byte null block (frag ptr 0 data); frag ptr 0 and the ss entry both
    point just past the padded content.
    """
    self.ovs = ovs
    pool_index, pool = self.get_pool(2)
    offset = pool.data.tell()
    # fix: call zstr() once instead of twice on the same content
    zstr_content = zstr(self.getContent(file_entry.path))
    dbuffer = zstr_content + get_padding(len(zstr_content), 64)
    pool.data.write(dbuffer)  # fragment pointer 1 data
    pool.data.write(struct.pack('16s', b''))  # fragment pointer 0 data
    new_frag = self.create_fragment()
    new_frag.pointers[0].pool_index = pool_index
    new_frag.pointers[0].data_offset = offset + len(dbuffer)
    new_frag.pointers[1].pool_index = pool_index
    new_frag.pointers[1].data_offset = offset
    new_ss = self.create_ss_entry(file_entry)
    new_ss.pointers[0].pool_index = pool_index
    new_ss.pointers[0].data_offset = offset + len(dbuffer)
def create(self):
    """Write the file name and an empty zstring, align the pool, then the
    sized str data; frag ptrs 0 point into the ss data at +16 and +24."""
    ss, buffer_0 = self._get_data(self.file_entry.path)
    file_name_bytes = self.file_entry.basename.encode(encoding='utf8')
    self.sized_str_entry = self.create_ss_entry(self.file_entry)
    self.create_data_entry(self.sized_str_entry, (buffer_0,))
    f_0, f_1 = self.create_fragments(self.sized_str_entry, 2)
    # the name and an empty string go in first
    self.write_to_pool(f_0.pointers[1], 2, zstr(file_name_bytes))
    self.write_to_pool(f_1.pointers[1], 2, b'\x00')
    # then align the pool to 4 bytes
    pool_data = f_1.pointers[1].pool.data
    pool_data.write(get_padding(pool_data.tell(), 4))
    # finally the sized str data, which is now aligned
    ss_ptr = self.sized_str_entry.pointers[0]
    self.write_to_pool(ss_ptr, 2, ss)
    self.ptr_relative(f_0.pointers[0], ss_ptr, rel_offset=16)
    self.ptr_relative(f_1.pointers[0], ss_ptr, rel_offset=24)
def write(self, stream):
    """Write all models, then a bone info block (plus hitcheck verts and
    inter-block padding) for every model_info whose increment_flag is set.

    Records io_start/io_size and bone_info_start/bone_info_size as stream
    offsets for later bookkeeping.
    """
    self.io_start = stream.tell()
    # counts bone info blocks actually written (only flagged model_infos)
    i = 0
    if self.context.version < 47:
        raise NotImplementedError("Can't write old style mesh and bone info blocks")
    else:
        # first all mesh blocks, back to back
        for model_info in self.arg:
            model_info.model.write(stream)
        self.bone_info_start = stream.tell()
        for model_info in self.arg:
            if model_info.increment_flag:
                logging.debug(f"BONE INFO {i} starts at {stream.tell()}")
                model_info.bone_info.write(stream)
                self.write_hitcheck_verts(model_info.bone_info, stream)
                # pad between bone info blocks, but not after the last one
                # NOTE(review): the guard compares the running counter against
                # len(self.bone_infos); if fewer model_infos carry increment_flag
                # than there are bone_infos, the final written block would still
                # get padding - confirm this is intended
                if i + 1 < len(self.bone_infos):
                    relative_offset = stream.tell() - self.bone_info_start
                    padding = get_padding(relative_offset)
                    logging.debug(f"Writing padding {padding}")
                    stream.write(padding)
                i += 1
        self.bone_info_size = stream.tell() - self.bone_info_start
    self.io_size = stream.tell() - self.io_start
def _get_data(self, file_path):
    """Loads and returns the data for a CURVE"""
    # copy content, pad to 64b, then assign 1 fragment and 1 empty sized str.
    ptr0_data = struct.pack('16s', b'')  # fragment pointer 0 data
    ptr1_data = zstr(self.get_content(file_path))  # fragment pointer 1 data
    return ptr0_data, ptr1_data + get_padding(len(ptr1_data), alignment=64)
def _get_data(self, file_path):
    """Loads and returns the data for a TXT"""
    content = self.get_content(file_path)
    # sized str data: little-endian length prefix, raw text, zero terminator
    sized = b"".join((struct.pack("<I", len(content)), content, b"\x00"))
    return sized + get_padding(len(sized), alignment=8)
def _get_data(self, file_path):
    """Loads and returns the data for a TXT"""
    # copy content, pad to 64b, then assign 1 fragment and 1 empty sized str.
    sized_str = b"\x00" * 16  # 1 ptr, 8 unused bytes
    content = zstr(self.get_content(file_path))  # fragment pointer 1 data
    return sized_str, content + get_padding(len(content), alignment=64)
def save(self, filepath):
    """Serialize this file to filepath: header via self.write(), then the
    buffer section (images, data/size/position/material arrays, name block)
    built in a temporary stream."""
    start_time = time.time()
    # NOTE(review): reads self.filepath (set elsewhere), not the filepath
    # argument - confirm the two are always the same when save() is called
    self.basename = os.path.basename(self.filepath)
    print(f"Saving {self.basename}...")
    # update data
    self.update_names((self.datas, self.sizes, self.positions, self.materials))
    if is_pc(self):
        self.info.height_array_size_pc = self.info.x * self.info.y * 4
    # write the buffer data to a temporary stream
    with BinaryStream() as stream:
        # write the images
        if is_pc(self):
            stream.write_floats(self.heightmap)
            stream.write_ubytes(self.weights)
        else:
            # PC and JWE store the images attached to data infos
            # NOTE(review): this is the non-PC branch, so the comment above is
            # suspect - presumably this path is JWE/PZ; confirm
            for data in self.datas:
                data.offset = stream.tell()
                if data.type == 0:
                    stream.write_ubytes(data.im)
                elif data.type == 2:
                    stream.write_floats(data.im)
        # record where each array starts so the header can point at it
        self.info.data_offset = stream.tell()
        self.info.data_count = len(self.datas)
        stream.write_types(self.datas)
        self.info.size_offset = stream.tell()
        self.info.size_count = len(self.sizes)
        # todo - need to update this??
        stream.write_types(self.sizes)
        # write object positions
        for pos in self.positions:
            pos.offset = stream.tell()
            stream.write_floats(pos.locs)
        self.info.position_offset = stream.tell()
        self.info.position_count = len(self.positions)
        stream.write_types(self.positions)
        # write 'materials' / bbox / whatever
        for mat in self.materials:
            mat.offset = stream.tell()
            stream.write_floats(mat.locs)
        self.info.material_offset = stream.tell()
        self.info.material_count = len(self.materials)
        stream.write_types(self.materials)
        # write names, remembering each string's start offset
        name_addresses = []
        name_start = stream.tell()
        for name in self.names:
            name_addresses.append(stream.tell())
            stream.write_zstring(name)
        # pad name section to 8 bytes before the address table
        stream.write(get_padding(stream.tell() - name_start, alignment=8))
        stream.write_uint64s(name_addresses)
        # get the actual result buffer
        buffer_bytes = stream.getvalue()
    # write the actual file: header first, then the buffer built above
    with self.writer(filepath) as stream:
        self.write(stream)
        stream.write(buffer_bytes)
    print(f"Saved {self.basename} in {time.time()-start_time:.2f} seconds!")
def create(self):
    """Create ovl entries for an ms2: one sized str entry per mdl2 child plus
    the ms2's own sized str, fragments and data entry, with all frag pointers
    wired into pool 2."""
    ms2_file = Ms2File()
    ms2_file.load(self.file_entry.path, read_bytes=True)
    ms2_dir = os.path.dirname(self.file_entry.path)
    ms2_entry = self.create_ss_entry(self.file_entry)
    ms2_entry.children = []
    versions = get_versions(self.ovl)
    # 1 for the ms2, 2 for each mdl2
    # pool.num_files += 1
    # create sized str entries and mesh data fragments
    for model_info, mdl2_name in zip(ms2_file.model_infos, ms2_file.mdl_2_names):
        # pool.num_files += 2
        mdl2_path = os.path.join(ms2_dir, mdl2_name+".mdl2")
        mdl2_file_entry = self.get_file_entry(mdl2_path)
        mdl2_entry = self.create_ss_entry(mdl2_file_entry)
        # mdl2 entries carry no pool data of their own
        mdl2_entry.pointers[0].pool_index = -1
        ms2_entry.children.append(mdl2_entry)
        # first, create all MeshData structs as fragments
        mdl2_entry.model_data_frags = [self.create_fragment() for _ in range(model_info.num_meshes)]
    first_materials_ptr = None
    # create the 5 fixed frags per MDL2 and write their data
    for model_info, mdl2_entry in zip(ms2_file.model_infos, ms2_entry.children):
        mdl2_entry.fragments = [self.create_fragment() for _ in range(5)]
        materials, lods, objects, meshes, model_info_ptr = mdl2_entry.fragments
        # every model_info_ptr targets the first mdl2's materials data
        if first_materials_ptr is None:
            first_materials_ptr = materials.pointers[1]
        self.write_to_pool(materials.pointers[1], 2, as_bytes(model_info.model.materials, version_info=versions))
        self.write_to_pool(lods.pointers[1], 2, as_bytes(model_info.model.lods, version_info=versions))
        objects_bytes = as_bytes(model_info.model.objects, version_info=versions)
        # todo - padding like this is likely wrong, probably relative to start of materials
        self.write_to_pool(objects.pointers[1], 2, objects_bytes + get_padding(len(objects_bytes), alignment=8))
        self.write_to_pool(meshes.pointers[1], 2, as_bytes(model_info.model.meshes, version_info=versions))
        self.ptr_relative(model_info_ptr.pointers[1], first_materials_ptr)
        # point to start of each modeldata
        # NOTE(review): assumes each serialized MeshData struct is 64 bytes - confirm
        offset = 0
        for frag in mdl2_entry.model_data_frags:
            self.ptr_relative(frag.pointers[0], meshes.pointers[1], rel_offset=offset)
            offset += 64
    # create fragments for ms2
    # NOTE(review): relies on create_fragments also storing these on
    # ms2_entry.fragments, which is iterated further down
    buffer_info_frag, model_info_frag, end_frag = self.create_fragments(ms2_entry, 3)
    # write mesh info
    self.write_to_pool(model_info_frag.pointers[1], 2, as_bytes(ms2_file.model_infos, version_info=versions))
    offset = 0
    for mdl2_entry in ms2_entry.children:
        # byte size of modelinfo varies - JWE1 (176 bytes total)
        if ovl_versions.is_jwe(self.ovl):
            offset += 104
        # 16 additional bytes for PZ/PZ16/JWE2 (192 bytes total)
        else:
            offset += 120
        # the 5 fixed frag ptrs sit 8 bytes apart inside each modelinfo
        for frag in mdl2_entry.fragments:
            self.ptr_relative(frag.pointers[0], model_info_frag.pointers[1], rel_offset=offset)
            offset += 8
        offset += 32
    # buffer info data
    buffer_info_bytes = as_bytes(ms2_file.buffer_info, version_info=versions)
    self.write_to_pool(buffer_info_frag.pointers[1], 2, buffer_info_bytes)
    # set ptr to buffer info for each MeshData frag
    for mdl2_entry in ms2_entry.children:
        for frag in mdl2_entry.model_data_frags:
            self.ptr_relative(frag.pointers[1], buffer_info_frag.pointers[1])
    # ms2 ss data
    ms2_ss_bytes = as_bytes(ms2_file.info, version_info=versions)
    self.write_to_pool(ms2_entry.pointers[0], 2, ms2_ss_bytes)
    # set frag ptr 0 - the three ms2 frags point into the ss data at fixed offsets
    for frag, offset in zip(ms2_entry.fragments, (24, 32, 40)):
        self.ptr_relative(frag.pointers[0], ms2_entry.pointers[0], rel_offset=offset)
    # the last ms2 fragment
    self.write_to_pool(end_frag.pointers[1], 2, struct.pack("<ii", -1, 0))
    # create ms2 data
    self.create_data_entry(ms2_entry, ms2_file.buffers)
def create(self, ovs, file_entry):
    """Create ovl entries for an ms2 (old-style): discovers sibling mdl2 files
    on disk, creates child sized str entries and fragments for each, and writes
    all their data plus the ms2's own data into pool 2."""
    self.ovs = ovs
    self.ovl = ovs.ovl
    ms2_file = Ms2File()
    ms2_file.load(file_entry.path, read_bytes=True)
    ms2_entry = self.create_ss_entry(file_entry)
    ms2_entry.children = []
    versions = get_versions(ovs.ovl)
    pool_index, pool = self.get_pool(2)
    offset = pool.data.tell()
    # find the mdl2s on disk that belong to this ms2
    ms2_dir, ms2_basename = os.path.split(file_entry.path)
    mdl2_names = [f for f in os.listdir(ms2_dir) if f.lower().endswith(".mdl2")]
    mdl2s = []
    for mdl2_name in mdl2_names:
        mdl2_path = os.path.join(ms2_dir, mdl2_name)
        mdl2 = Mdl2File()
        mdl2.load(mdl2_path)
        # keep only mdl2s that reference this ms2 by name
        if mdl2.ms_2_name == ms2_basename:
            mdl2s.append((mdl2_name, mdl2))
    # sort them by model index
    mdl2s.sort(key=lambda tup: tup[1].index)
    # create sized str entries and model data fragments
    for mdl2_name, mdl2 in mdl2s:
        mdl2_file_entry = self.get_file_entry(mdl2_name)
        mdl2_entry = self.create_ss_entry(mdl2_file_entry)
        # mdl2 entries carry no pool data of their own
        mdl2_entry.pointers[0].pool_index = -1
        mdl2_entry.pointers[0].data_offset = 0
        ms2_entry.children.append(mdl2_entry)
        # first, create all ModelData structs as fragments
        mdl2_entry.model_data_frags = [self.create_fragment() for _ in range(mdl2.model_info.num_models)]
    # create the 5 fixed frags per MDL2 and write their data
    for (mdl2_name, mdl2), mdl2_entry in zip(mdl2s, ms2_entry.children):
        mdl2_entry.fragments = [self.create_fragment() for _ in range(5)]
        materials, lods, objects, model_data_ptr, model_info = mdl2_entry.fragments
        materials_offset = pool.data.tell()
        materials.pointers[1].pool_index = pool_index
        materials.pointers[1].data_offset = materials_offset
        pool.data.write(as_bytes(mdl2.materials, version_info=versions))
        lods.pointers[1].pool_index = pool_index
        lods.pointers[1].data_offset = pool.data.tell()
        pool.data.write(as_bytes(mdl2.lods, version_info=versions))
        objects.pointers[1].pool_index = pool_index
        objects.pointers[1].data_offset = pool.data.tell()
        objects_bytes = as_bytes(mdl2.objects, version_info=versions)
        pool.data.write(objects_bytes + get_padding(len(objects_bytes), alignment=8))
        # modeldatas start here
        # NOTE(review): model_info ptr 1 is aimed back at the materials offset - confirm
        model_info.pointers[1].pool_index = pool_index
        model_info.pointers[1].data_offset = materials_offset
    # write modeldata
    for (mdl2_name, mdl2), mdl2_entry in zip(mdl2s, ms2_entry.children):
        materials, lods, objects, model_data_ptr, model_info = mdl2_entry.fragments
        model_data_ptr.pointers[1].pool_index = pool_index
        model_data_ptr.pointers[1].data_offset = pool.data.tell()
        # write mdl2 modeldata frags
        for frag, modeldata in zip(mdl2_entry.model_data_frags, mdl2.models):
            frag.pointers[0].pool_index = pool_index
            frag.pointers[0].data_offset = pool.data.tell()
            pool.data.write(as_bytes(modeldata, version_info=versions))
    # create fragments for ms2
    ms2_entry.fragments = [self.create_fragment() for _ in range(3)]
    # write model info
    for mdl2_name, mdl2 in mdl2s:
        model_info_bytes = as_bytes(mdl2.model_info, version_info=versions)
        if mdl2.index == 0:
            # the first model info hangs off the ms2's second fragment
            f_0, f_1, f_2 = ms2_entry.fragments
            f_1.pointers[1].pool_index = pool_index
            f_1.pointers[1].data_offset = pool.data.tell()
            # only write core model info
            pool.data.write(model_info_bytes)
        else:
            # grab the preceding mdl2 entry since it points ahead
            prev_mdl2_entry = ms2_entry.children[mdl2.index - 1]
            # get its model info fragment
            materials, lods, objects, model_data_ptr, model_info = prev_mdl2_entry.fragments
            model_info.pointers[1].pool_index = pool_index
            model_info.pointers[1].data_offset = pool.data.tell()
            # we write this anyway
            # todo - get the actual data
            pool.data.write(b"\x00" * 40)
            # we should only pool.data.write(model_info_bytes)
        # each of the 4 frag ptrs of the current mdl2 claims an 8-byte slot here
        this_mdl2_entry = ms2_entry.children[mdl2.index]
        materials, lods, objects, model_data_ptr, model_info = this_mdl2_entry.fragments
        for frag in (materials, lods, objects, model_data_ptr):
            frag.pointers[0].pool_index = pool_index
            frag.pointers[0].data_offset = pool.data.tell()
            pool.data.write(b"\x00" * 8)
    # write last 40 bytes to model_info
    # NOTE(review): uses model_info as left over from the loop above, i.e. the
    # last mdl2's model info fragment - confirm this is intended
    if mdl2s:
        model_info.pointers[0].pool_index = pool_index
        model_info.pointers[0].data_offset = pool.data.tell()
        pool.data.write(b"\x00" * 40)
    # write the ms2 itself
    ms2_entry.pointers[0].pool_index = pool_index
    ms2_entry.pointers[0].data_offset = pool.data.tell()
    # load ms2 ss data
    ms2_ss_bytes = as_bytes(ms2_file.general_info, version_info=versions)  # + ms2_entry.pointers[0].data[24:]
    pool.data.write(ms2_ss_bytes)
    # first, 3 * 8 bytes of 00
    for frag in ms2_entry.fragments:
        frag.pointers[0].pool_index = pool_index
        frag.pointers[1].pool_index = pool_index
        frag.pointers[0].data_offset = pool.data.tell()
        pool.data.write(b"\x00" * 8)
    # now the actual data
    buffer_info_frag, model_info_frag, end_frag = ms2_entry.fragments
    buffer_info_offset = pool.data.tell()
    # set ptr to buffer info for each ModelData frag
    for mdl2_entry in ms2_entry.children:
        for frag in mdl2_entry.model_data_frags:
            frag.pointers[1].pool_index = pool_index
            frag.pointers[1].data_offset = buffer_info_offset
    # todo - from the frag log, buffer_info_bytes should be 48 bytes but is 32
    buffer_info_frag.pointers[1].data_offset = buffer_info_offset
    buffer_info_bytes = as_bytes(ms2_file.buffer_info, version_info=versions)
    logging.debug(f"len(buffer_info_bytes) {len(buffer_info_bytes)}")
    pool.data.write(buffer_info_bytes)
    # the last ms2 fragment
    end_frag.pointers[1].data_offset = pool.data.tell()
    pool.data.write(struct.pack("<ii", -1, 0))
    # create ms2 data
    self.create_data_entry(file_entry, (ms2_file.buffer_0_bytes, ms2_file.buffer_1_bytes, ms2_file.buffer_2_bytes))