def collect_mono_scripts(serializer, stream: FileStream):
    """Deserialize every MonoScript object found in *serializer* and cache
    its (class name, namespace, assembly) triple keyed by the object's
    local file identifier.

    Newly seen scripts are also appended to the module-level
    ``mono_scripts_stream`` as binary records: an int64 identifier
    followed by three length-prefixed byte strings.
    """
    # Locate the type tree describing MonoScript; nothing to do if absent.
    script_type_index = next(
        (i for i, t in enumerate(serializer.type_trees)
         if t.persistent_type_id == serialize.MONO_SCRIPT_PERSISTENT_ID),
        -1)
    if script_type_index == -1:
        return
    type_tree = serializer.type_trees[script_type_index]
    for o in serializer.objects:
        if o.type_id != script_type_index:
            continue
        # Object payloads live at data_offset relative to the node start.
        stream.seek(serializer.node.offset + serializer.header.data_offset
                    + o.byte_start)
        script = serializer.deserialize(fs=stream,
                                        meta_type=type_tree.type_dict.get(0))
        type_name = script.get('m_ClassName')
        namespace = script.get('m_Namespace')
        assembly = script.get('m_AssemblyName')
        # Encode mono scripts to cache storage only on first sight.
        if o.local_identifier_in_file not in mono_scripts:
            mono_scripts_stream.write(
                struct.pack('q', o.local_identifier_in_file))
            mono_scripts_stream.write(struct.pack('i', len(type_name)))
            mono_scripts_stream.write(type_name)
            mono_scripts_stream.write(struct.pack('i', len(namespace)))
            mono_scripts_stream.write(namespace)
            mono_scripts_stream.write(struct.pack('i', len(assembly)))
            mono_scripts_stream.write(assembly)
            mono_scripts[o.local_identifier_in_file] = (
                type_name, namespace, assembly)
def dump(self, fs: FileStream):
    """Deserialize and pretty-print every object in the serialized file.

    Best-effort: a malformed object is reported (with a traceback and the
    current stream position) and skipped, so a single bad entry does not
    abort the whole dump.
    """
    for o in self.objects:
        fs.seek(self.node.offset + self.header.data_offset + o.byte_start)
        type_tree = self.type_trees[o.type_id]
        if not type_tree.type_dict:
            continue
        try:
            self.print(vars(type_tree.type_dict.get(0)))
        except Exception:
            # was a bare `except:`; narrowed so Ctrl-C still interrupts
            continue
        offset = fs.position
        try:
            data = self.deserialize(fs=fs,
                                    meta_type=type_tree.type_dict.get(0))
            # Sanity check: deserialization must consume exactly the
            # declared object size.
            assert fs.position - offset == o.byte_size
            self.print(data)
            self.print()
        except Exception:
            # was a bare `except:`; keep best-effort semantics but report
            traceback.print_exc()
            self.print('position={} remain={}'.format(
                fs.position, fs.bytes_available))
def decode(self, file_path: str):
    """Decode a bundle file: read its header and blocks-info table, then
    decompress every data block into one contiguous payload.

    Returns a new ``FileStream`` over the decompressed payload. Also
    writes the payload to ``data.bin`` in the working directory as a
    debugging aid (original behavior, kept intentionally).
    """
    import io

    fs = FileStream()
    fs.open(file_path)
    self.header.decode(fs)
    blocks_info_offset = self.header.get_blocks_info_offset()
    self.print(vars(self.header), blocks_info_offset, fs.position,
               self.header.compression_type)
    fs.seek(blocks_info_offset)
    # The blocks-info table may itself be LZ4-compressed.
    if self.header.compression_type != CompressionType.NONE:
        compressed_data = fs.read(self.header.compressed_blocks_info_size)
        assert len(compressed_data) == self.header.compressed_blocks_info_size
        uncompressed_data = lz4.block.decompress(
            compressed_data, self.header.uncompressed_blocks_info_size)
        self.read_blocks_and_directory(FileStream(data=uncompressed_data))
    else:
        assert (self.header.compressed_blocks_info_size
                == self.header.uncompressed_blocks_info_size)
        self.read_blocks_and_directory(fs)
    # Decompress each data block in order into a single buffer.
    buffer = io.BytesIO()
    for block in self.blocks_info.blocks:
        if block.compression_type != CompressionType.NONE:
            compressed_data = fs.read(block.compressed_size)
            uncompressed_data = lz4.block.decompress(
                compressed_data, block.uncompressed_size)
            assert len(uncompressed_data) == block.uncompressed_size, \
                uncompressed_data
        else:
            uncompressed_data = fs.read(block.uncompressed_size)
        buffer.write(uncompressed_data)
    # All bytes of the bundle must have been consumed.
    assert fs.position == fs.length
    payload = buffer.getvalue()
    with open('data.bin', 'wb') as fp:
        fp.write(payload)
    return FileStream(data=payload)
def decode(self, fs: FileStream):
    """Read one metadata type-tree entry from *fs*.

    Node data is decoded inline when type trees are embedded in the file;
    otherwise it is loaded from a previously written on-disk cache file
    (if one exists).
    """
    start = fs.position
    self.persistent_type_id = fs.read_sint32()
    self.is_stripped = fs.read_boolean()
    self.script_index = fs.read_sint16()
    # MonoBehaviour entries carry an extra 16-byte script hash.
    if self.persistent_type_id == MONO_BEHAVIOUR_PERSISTENT_ID:
        self.mono_hash = fs.read(16)
    self.type_hash = fs.read(16)
    self.nodes = []
    self.strings = {}
    if self.type_tree_enabled:
        self.decode_type_tree(fs)
        return
    # Type trees are stripped from this file: fall back to the cache.
    cache_path = self.get_cache_path()
    if not p.exists(cache_path):
        return
    cache = FileStream(file_path=cache_path)
    cache.endian = '<'
    cached_id = cache.read_sint32()
    assert cached_id == self.persistent_type_id, '{} != {}'.format(
        cached_id, self.persistent_type_id)
    # Skip the same number of header bytes we just consumed from *fs*,
    # so the cache stream is positioned at the node data.
    cache.seek(fs.position - start)
    self.decode_type_tree(fs=cache)
def decode(self, fs: FileStream):
    """Decode a serialized file: header, metadata type trees, object
    table, script type infos and external references, in that order.
    """
    self._decode_header(fs)
    self._decode_type_trees(fs)
    self._decode_objects(fs)
    self._decode_script_types(fs)
    self._decode_externals(fs)
    # Trailing string after the externals table — presumably the
    # user-information field; read once to keep the stream aligned.
    fs.read_string()

def _decode_header(self, fs: FileStream):
    """Read the fixed header plus the version/platform preamble; sets the
    stream endianness from the header flag."""
    fs.seek(self.node.offset)
    header = self.header
    header.metadata_size = fs.read_sint32()
    header.file_size = fs.read_sint32()
    assert self.node.size == header.file_size, '{} != {}'.format(
        self.node.size, header.file_size)
    header.version = fs.read_sint32()
    header.data_offset = fs.read_sint32()
    header.endianess = fs.read_boolean()
    fs.read(3)  # reserved bytes
    # All subsequent multi-byte reads honor the declared endianness.
    fs.endian = '>' if header.endianess else '<'
    self.print(vars(header))
    self.version = fs.read_string()
    self.platform = fs.read_uint32()
    self.type_tree_enabled = fs.read_boolean()
    self.print('version={} platform={} type_tree_enabled={}'.format(
        self.version, self.platform, self.type_tree_enabled))

def _decode_type_trees(self, fs: FileStream):
    """Read all metadata type trees; when trees are embedded, cache each
    tree's raw bytes to its on-disk cache file."""
    self.type_trees = []
    type_count = fs.read_uint32()
    self.print('type', type_count)
    for _ in range(type_count):
        offset = fs.position
        type_tree = MetadataTypeTree(
            type_tree_enabled=self.type_tree_enabled)
        type_tree.decode(fs)
        if self.type_tree_enabled:
            # Re-read the exact bytes just decoded and persist them.
            position = fs.position
            fs.seek(offset)
            type_data = fs.read(position - offset)
            with open(type_tree.get_cache_path(auto_create=True),
                      'wb') as fp:
                fp.write(type_data)
        self.type_trees.append(type_tree)
        self.register_type_tree(type_tree=type_tree)
        self.print(type_tree)

def _decode_objects(self, fs: FileStream):
    """Read the object table; each entry is 4-byte aligned."""
    object_count = fs.read_sint32()
    self.print('object', object_count)
    for _ in range(object_count):
        fs.align(4)
        obj = ObjectInfo()
        obj.decode(fs)
        # Borrow a display name from the object's type tree.
        obj.name = self.type_trees[obj.type_id].name
        self.objects.append(obj)
        self.print(vars(obj))

def _decode_script_types(self, fs: FileStream):
    """Read script type-info records."""
    script_type_count = fs.read_sint32()
    self.print('typeinfo', script_type_count)
    for _ in range(script_type_count):
        st = ScriptTypeInfo()
        st.decode(fs)
        self.typeinfos.append(st)
        self.print(vars(st))

def _decode_externals(self, fs: FileStream):
    """Read external file references."""
    external_count = fs.read_sint32()
    self.print('external', external_count)
    for _ in range(external_count):
        ext = ExternalInfo()
        ext.decode(fs)
        self.externals.append(ext)
        self.print(ext)