def update_with(self, list_of_arrays):
    """Update this name buffer from a list of (array, attrib) tuples.
    For each item in array, the string stored in its [attrib] attribute is
    written to the name block, and item.offset is set to that string's offset
    in bytes relative to the start of the name block."""
    print("Updating name buffer")
    self.strings = []
    offset_dic = {}
    with BinaryStream() as stream:
        # for name in self.names:
        for array, attrib in list_of_arrays:
            for item in sorted(array, key=lambda i: getattr(i, attrib)):
                name = getattr(item, attrib)
                if name in offset_dic:
                    # known string, just get its offset
                    address = offset_dic[name]
                else:
                    # new string, store its offset and write the zstring
                    address = stream.tell()
                    self.strings.append(name)
                    offset_dic[name] = address
                    stream.write_zstring(name)
                # store offset on item
                item.offset = address
        # get the actual result buffer
        buffer_bytes = stream.getvalue()
    self.data = buffer_bytes + get_padding(len(buffer_bytes), alignment=8)
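# A usage sketch for update_with (the Item class and its `name` attribute are
# hypothetical, not part of this codebase): every item gets an `offset` into
# the name block, and duplicate strings are written only once.
#
#   items = [Item("lod0"), Item("lod1"), Item("lod0")]
#   name_buffer.update_with([(items, "name")])
#   assert items[0].offset == items[2].offset  # duplicates share one zstring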
def update_buffer_0_bytes(self):
    # update self.bone_names_size
    with BinaryStream() as temp_writer:
        assign_versions(temp_writer, get_versions(self))
        temp_writer.ms_2_version = self.general_info.ms_2_version
        self.buffer_0.write(temp_writer)
        self.buffer_0_bytes = temp_writer.getvalue()
        self.bone_names_size = len(self.buffer_0_bytes)
def load_as(self, cls, num=1, version_info={}):
    """Return self.data as codegen cls.
    version_info must be a dict with version & user_version entries."""
    with BinaryStream(self.data) as stream:
        assign_versions(stream, version_info)
        insts = []
        for i in range(num):
            inst = cls()
            inst.read(stream)
            insts.append(inst)
        return insts
def as_bytes(inst, version_info={}):
    """Helper that returns the bytes representation of a pyffi struct"""
    # make sure that pyffi arrays are not treated as plain lists, although they are instances of 'list'
    if isinstance(inst, list) and not isinstance(inst, Array):
        return b"".join(as_bytes(c, version_info) for c in inst)
    # zero-terminated strings show up as str
    if isinstance(inst, str):
        return inst.encode() + b"\x00"
    with BinaryStream() as stream:
        for k, v in version_info.items():
            setattr(stream, k, v)
        inst.write(stream)
        return stream.getvalue()
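# Expected behavior of as_bytes on the cases it special-cases (a sketch with
# placeholder instances; Array and BinaryStream come from this codebase):
#
#   as_bytes("LOD0")                   # b"LOD0\x00" - zero-terminated string
#   as_bytes([inst_a, inst_b])         # plain lists concatenate member bytes
#   as_bytes(inst, {"version": 7})     # version attribs are set on the stream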
def load_as(self, cls, num=1, version_info={}, args=(), context=None):
    """Return self.data as codegen cls"""
    insts = []
    con = context if context else self.context
    with BinaryStream(self.data) as stream:
        try:
            for i in range(num):
                inst = cls(con, *args)
                inst.read(stream)
                insts.append(inst)
        except Exception:
            traceback.print_exc()
            print(insts)
    return insts
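# A usage sketch for this load_as variant (SomeCodegenStruct is a hypothetical
# class name): reads `num` consecutive instances from self.data, constructing
# each with the given context and extra constructor args.
#
#   insts = ptr.load_as(SomeCodegenStruct, num=2, context=ovl.context)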
def get_pool(self, pool_type_key, ovs="STATIC"):
    ovs_file = self.ovl.create_archive(ovs)
    # get one directly editable pool, if it exists
    # todo - remove pool index throughout all formats
    for pool_index, pool in enumerate(ovs_file.pools):
        # todo - reasonable add size condition
        if pool.type == pool_type_key and pool.new:
            return pool
    # nope, means we gotta create a pool
    pool = MemPool(self.ovl.context)
    pool.data = BinaryStream()
    # the real address isn't known until it is written, but declare it anyway
    pool.address = 0
    # assign_versions(pool.data, get_versions(self.ovl))
    pool.type = pool_type_key
    # we write to the pool IO directly, so do not reconstruct its data from the pointers' data
    pool.new = True
    ovs_file.pools.append(pool)
    return pool
def get_pool(self, pool_type):
    # get one if it exists
    for pool_index, pool in enumerate(self.ovs.pools):
        if pool.type == pool_type:
            return pool_index, pool
    # nope, means we gotta create pool type and pool
    header_type = PoolType()
    header_type.type = pool_type
    header_type.num_pools = 1
    pool = MemPool()
    # pool.data = io.BytesIO(self.pool_data)
    # pool.size = len(self.pool_data)
    pool.data = BinaryStream()
    # assign_versions(pool.data, get_versions(self.ovl))
    # pool.offset = 0
    # pool.num_files = file_entry_count
    pool.type = header_type.type
    self.ovs.pool_types.append(header_type)
    self.ovs.pools.append(pool)
    return len(self.ovs.pools) - 1, pool
def save(self, filepath):
    start_time = time.time()
    self.basename = os.path.basename(self.filepath)
    print(f"Saving {self.basename}...")
    # update data
    self.update_names((self.datas, self.sizes, self.positions, self.materials))
    if is_pc(self):
        self.info.height_array_size_pc = self.info.x * self.info.y * 4
    # write the buffer data to a temporary stream
    with BinaryStream() as stream:
        # write the images
        if is_pc(self):
            stream.write_floats(self.heightmap)
            stream.write_ubytes(self.weights)
        else:
            # PZ and JWE store the images attached to data infos
            for data in self.datas:
                data.offset = stream.tell()
                if data.type == 0:
                    stream.write_ubytes(data.im)
                elif data.type == 2:
                    stream.write_floats(data.im)
        self.info.data_offset = stream.tell()
        self.info.data_count = len(self.datas)
        stream.write_types(self.datas)
        self.info.size_offset = stream.tell()
        self.info.size_count = len(self.sizes)
        # todo - need to update this??
        stream.write_types(self.sizes)
        # write object positions
        for pos in self.positions:
            pos.offset = stream.tell()
            stream.write_floats(pos.locs)
        self.info.position_offset = stream.tell()
        self.info.position_count = len(self.positions)
        stream.write_types(self.positions)
        # write 'materials' / bbox / whatever
        for mat in self.materials:
            mat.offset = stream.tell()
            stream.write_floats(mat.locs)
        self.info.material_offset = stream.tell()
        self.info.material_count = len(self.materials)
        stream.write_types(self.materials)
        # write names
        name_addresses = []
        name_start = stream.tell()
        for name in self.names:
            name_addresses.append(stream.tell())
            stream.write_zstring(name)
        # pad the name section
        stream.write(get_padding(stream.tell() - name_start, alignment=8))
        stream.write_uint64s(name_addresses)
        # get the actual result buffer
        buffer_bytes = stream.getvalue()
    # write the actual file
    with self.writer(filepath) as stream:
        self.write(stream)
        stream.write(buffer_bytes)
    print(f"Saved {self.basename} in {time.time() - start_time:.2f} seconds!")
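# The name section above is aligned via get_padding; a minimal sketch of such a
# helper, assuming it returns the null bytes needed to reach the next multiple
# of `alignment` (the real implementation lives elsewhere in this codebase):
def _get_padding_sketch(size, alignment=16):
    """Return null bytes that pad `size` up to the next multiple of `alignment`."""
    return b"\x00" * ((alignment - size % alignment) % alignment)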
def save(self, filepath, mdl2):
    print("Writing verts and tris to temporary buffer")
    # write each model's vert & tri block to a temporary buffer
    temp_vert_writer = io.BytesIO()
    temp_tris_writer = io.BytesIO()
    vert_offset = 0
    tris_offset = 0
    with BinaryStream() as temp_bone_writer:
        temp_bone_writer.version = self.version
        temp_bone_writer.user_version = self.user_version
        temp_bone_writer.ms_2_version = self.general_info.ms_2_version
        self.bone_info.write(temp_bone_writer)
        bone_bytes = temp_bone_writer.getvalue()
    print("new bone info length: ", len(bone_bytes))
    for i, model in enumerate(mdl2.models):
        model.write_verts(temp_vert_writer)
        model.write_tris(temp_tris_writer)
        print("vert_offset", vert_offset)
        print("tris_offset", tris_offset)
        # update ModelData struct
        model.vertex_offset = vert_offset
        model.tri_offset = tris_offset
        model.vertex_count = len(model.verts)
        model.tri_index_count = len(model.tri_indices)
        # offsets for the next model
        vert_offset = temp_vert_writer.tell()
        tris_offset = temp_tris_writer.tell()
    # update lod fragment
    print("update lod fragment")
    for lod in mdl2.lods:
        # print(lod)
        lod_models = tuple(mdl2.models[lod.first_model_index:lod.last_model_index])
        # print(lod_models)
        lod.vertex_count = sum(model.vertex_count for model in lod_models)
        lod.tri_index_count = sum(model.tri_index_count for model in lod_models)
        print("lod.vertex_count", lod.vertex_count)
        print("lod.tri_index_count", lod.tri_index_count)
    print("Writing final output")
    # get original header and buffers 0 & 1
    # first get all bytes of the whole bone infos block
    print("old bone info length: ", len(self.bone_info_bytes))
    cut = len(bone_bytes) - len(self.bone_info_bytes)
    # get bytes from IO objects
    vert_bytes = temp_vert_writer.getvalue()
    tris_bytes = temp_tris_writer.getvalue()
    # modify buffer sizes
    self.buffer_info.vertexdatasize = len(vert_bytes)
    self.buffer_info.facesdatasize = len(tris_bytes)
    # write output ms2
    with self.writer(filepath) as f:
        self.write(f)
        f.write(bone_bytes)
        if cut != 0:
            f.write(self.bone_info_bytes[cut:])
        f.write(vert_bytes)
        f.write(tris_bytes)
def flush_pointers(self, ignore_unaccounted_bytes=False):
    """Pre-writing step to convert all edits that were done on individual
    pointers back into the consolidated header data IO block"""
    logging.debug("Flushing pointers")
    # first, get all ptrs that have data to write
    sorted_ptrs_map = sorted(self.pointer_map.items())
    stack = []
    last_offset = -1
    for i, (offset, pointers) in enumerate(sorted_ptrs_map):
        for ptr in pointers:
            if ptr._data is not None:
                if last_offset == offset:
                    logging.warning(f"last offset is same as offset {offset}, skipping ptr for update")
                    continue
                stack.append((ptr, i, offset))
                last_offset = offset
    # check if rewriting is needed
    if not stack:
        return
    # create a new data writer
    data = BinaryStream()
    last_offset = 0
    logging.debug(f"Stack size = {len(stack)}")
    # now go sequentially over all ptrs in the stack
    for ptr, i, offset in stack:
        from_end_of_last_to_start_of_this = self.get_at(last_offset, size=offset - last_offset)
        # write old data before this chunk, then the new data
        data.write(from_end_of_last_to_start_of_this)
        logging.debug(f"Flushing stack member {i} at original offset {offset} to {data.tell()}")
        data.write(ptr._data)
        # update offset to the end of the original ptr
        last_offset = offset + ptr.data_size
        # check delta
        # todo - padding
        ptr._padding_size = ptr.padding_size
        delta = (len(ptr._data) + ptr._padding_size) - (ptr.data_size + ptr.padding_size)
        # update new data size on ptr
        ptr.data_size = len(ptr._data)
        if delta:
            # logging.debug(f"data size of stack [len: {len(sorted_ptrs_map)}] member {i} has changed")
            # get all ptrs that point into this pool, but after this ptr
            if i < len(sorted_ptrs_map):
                for offset_later, pointers in sorted_ptrs_map[i + 1:]:
                    # logging.debug(f"Moving {offset_later} to {offset_later + delta}")
                    # update their data offsets
                    for p in pointers:
                        p.data_offset += delta
        # remove from ptr map, so the pool can be deleted if it's empty
        if not ptr._data:
            if offset in self.pointer_map:
                logging.debug(f"Removed offset {offset} from pool")
                self.pointer_map.pop(offset)
    # write the rest of the data
    data.write(self.get_at(last_offset))
    # clear ptr data and stack
    for ptr, i, offset in stack:
        ptr._data = None
    # overwrite internal data
    self.data = data
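# A worked example of the delta shift above (offsets are illustrative): if the
# ptr flushed at offset 0x40 grows from 16 to 24 bytes, delta = +8, so pointers
# recorded at offsets 0x50 and 0x60 have their data_offset rebased to 0x58 and
# 0x68 before their data is written in a later pass.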
def save(self, filepath):
    names_writer = BinaryStream()
    data_writer = BinaryStream()
    # shader name is at 0
    names_writer.write_zstring(self.shader_name)
    # attribs are written first
    for attrib in self.attributes:
        attrib.offset = names_writer.tell()
        names_writer.write_zstring(attrib.name)
        attrib.first_value_offset = data_writer.tell()
        fmt = dtypes[attrib.dtype]
        b = struct.pack("<" + fmt, *attrib.value)
        data_writer.write(b)
    for texture in self.textures:
        if texture.textured:
            for i in range(len(texture.indices)):
                # uint - hashes
                texture.indices[i] = max(0, texture.value[i])
            tex_ind = texture.indices[0]
            self.texture_names[tex_ind] = texture.name
        texture.offset = names_writer.tell()
        names_writer.write_zstring(texture.type)
    # write the output stream
    with self.writer(filepath) as stream:
        self.write(stream)
        stream.write(b"\x00" * self.zeros_size)
        stream.write(data_writer.getvalue())
        stream.write(names_writer.getvalue())
def save(self, filepath):
    names_writer = BinaryStream()
    data_writer = BinaryStream()
    # shader name is at 0
    names_writer.write_zstring(self.shader_name)
    names_writer.write(b"\x00")
    # attribs are written first
    for attrib in self.attributes:
        attrib.offset = names_writer.tell()
        names_writer.write_zstring(attrib.name)
        attrib.value_offset = data_writer.tell()
        b = struct.pack(f"<{dtypes[attrib.dtype]}", *attrib.value)
        data_writer.write(b)
    self.texture_files.clear()
    for texture in self.textures:
        # if the texture has a file, store its index
        if texture.textured:
            texture.indices[0] = len(self.texture_files)
            self.texture_files.append(texture.file)
        texture.offset = names_writer.tell()
        names_writer.write_zstring(texture.name)
    # update counts
    data_bytes = data_writer.getvalue()
    self.data_lib_size = len(data_bytes)
    self.dependency_count = len(self.texture_files)
    self.fgm_info.texture_count = len(self.textures)
    self.fgm_info.attribute_count = len(self.attributes)
    # write the output stream
    with self.writer(filepath) as stream:
        self.write(stream)
        stream.write(data_bytes)
        stream.write(names_writer.getvalue())
def update_buffer_1_bytes(self):
    with BinaryStream() as temp_bone_writer:
        self.models_reader.write(temp_bone_writer)
        self.buffer_1_bytes = temp_bone_writer.getvalue()[self.models_reader.bone_info_start:]
        self.bone_info_size = self.models_reader.bone_info_size
def update_buffer_0_bytes(self):
    with BinaryStream() as temp_writer:
        self.buffer_0.write(temp_writer)
        self.buffer_0_bytes = temp_writer.getvalue()
def update_buffer_1_bytes(self):
    with BinaryStream() as temp_bone_writer:
        assign_versions(temp_bone_writer, get_versions(self))
        temp_bone_writer.ms_2_version = self.general_info.ms_2_version
        self.write_all_bone_infos(temp_bone_writer)
        self.buffer_1_bytes = temp_bone_writer.getvalue()