def deserialize_v2(self, f: ArchiveFile):
    """Read one v2 archive-table entry from *f*, recording the stream
    offset of each field as it is consumed so the entry can be located
    again later."""
    # Where this entry begins in the archive table.
    self.META_entry_ptr = f.tell()

    # Path string; raw stored length is in multiples of 4 bytes
    # (based on theHunter:COTW), so trailing NUL padding is stripped.
    raw_path = f.read_strl_u32()
    self.v_path = raw_path.strip(b'\00')

    # Offset field, and where it lives in the table.
    self.META_entry_offset_ptr = f.tell()
    self.offset = f.read_u32()

    # Size field, and where it lives in the table.
    self.META_entry_size_ptr = f.tell()
    self.length = f.read_u32()

    self.v_hash = hash32_func(self.v_path)

    # A zero offset marks a symlink-style entry (no stored data).
    self.is_symlink = (self.offset == 0)
def rtpc_from_binary(f_raw, rtpc: Optional[Rtpc] = None):
    """Parse an RTPC container from the raw stream *f_raw*.

    Fills in *rtpc* (a fresh Rtpc when None), validates the 4-byte magic,
    reads the version, then recursively parses the root node tree.
    Raises Exception on a bad magic value; returns the populated Rtpc.
    """
    result = Rtpc() if rtpc is None else rtpc

    reader = ArchiveFile(f_raw)
    result.magic = reader.read_strl(4)
    if result.magic != b'RTPC':
        raise Exception('Bad MAGIC {}'.format(result.magic))

    result.version = reader.read_u32()

    # Node records form a tree; parsing starts at the root.
    root = RtpcNode()
    result.root_node = root
    rtpc_node_from_binary(reader, root)

    return result
0xa0000: [[71, 256 * 2, 256 * 2], [71, 256 * 4, 256 * 4]], 0x80000: [[71, 256 * 4, 256 * 4]], 0x40000: [[71, 256 * 4, 128 * 4]], 0x20000: [[71, 128 * 4, 128 * 4]], # 0x8000: [[83, 128 * 2, 128 * 1]], } with ArchiveFile(open(in_file, 'rb')) as f0: no_header = file_sz in file_data_layout_db if not no_header: file_sz = file_sz - 128 header = f0.read(128) fh = ArchiveFile(io.BytesIO(header)) p = 0 magic = fh.read_u32() version = fh.read_u16() d = fh.read_u8() dim = fh.read_u8() pixel_format = fh.read_u32() nx0 = fh.read_u16() ny0 = fh.read_u16() depth = fh.read_u16() flags = fh.read_u16() full_mip_count = fh.read_u8() mip_count = fh.read_u8() d = fh.read_u16() while fh.tell() < 128: d = fh.read_u32() nx = nx0 >> (full_mip_count - mip_count)
def deserialize(self, fp, map_typedef=None, process_instances=True):
    """Parse an ADF (' FDA') container from *fp*.

    Reads the fixed 0x40-byte header, then the name table, string-hash
    table, typedef table, and instance table the header points at.  When
    *process_instances* is true each instance payload is decoded into
    ``table_instance_full_values`` / ``table_instance_values`` as well.

    :param fp: seekable ArchiveFile-like reader positioned at the start
        of the ADF data
    :param map_typedef: optional {type_hash: TypeDef} of externally known
        typedefs merged into ``extended_map_typedef``
    :param process_instances: decode instance payloads when true
    :raises EDecaErrorParse: file shorter than 0x40 bytes, or bad magic
    """
    if map_typedef is None:
        map_typedef = {}

    # Parse the fixed-size header out of an in-memory copy so that fp
    # stays positioned immediately after it for the comment string read.
    header = fp.read(0x40)
    fh = ArchiveFile(io.BytesIO(header))

    if len(header) < 0x40:
        raise EDecaErrorParse('File Too Short')

    magic = fh.read_strl(4)

    if magic != b' FDA':
        raise EDecaErrorParse('Magic does not match')

    self.version = fh.read_u32()

    # Each section is described by a (count, file offset) pair.
    self.instance_count = fh.read_u32()
    self.instance_offset = fh.read_u32()
    self.typedef_count = fh.read_u32()
    self.typedef_offset = fh.read_u32()
    self.stringhash_count = fh.read_u32()
    self.stringhash_offset = fh.read_u32()
    self.nametable_count = fh.read_u32()
    self.nametable_offset = fh.read_u32()
    self.total_size = fh.read_u32()

    # Five trailing u32s of unknown meaning pad the header to 0x40.
    self.unknown = fh.read_u32(5)

    # NUL-terminated comment string directly after the header.
    self.comment = fp.read_strz()

    # name table: all one-byte lengths first, then each string stored
    # with one extra trailing byte that is dropped (presumably a NUL
    # terminator — TODO confirm against the format spec).
    self.table_name = [[0, b''] for i in range(self.nametable_count)]
    fp.seek(self.nametable_offset)
    for i in range(self.nametable_count):
        self.table_name[i][0] = fp.read_u8()
    for i in range(self.nametable_count):
        self.table_name[i][1] = fp.read(self.table_name[i][0] + 1)[0:-1]

    # string hash: build both the ordered table and a hash -> entry map.
    self.table_stringhash = [
        StringHash() for i in range(self.stringhash_count)
    ]
    self.map_stringhash = {}
    fp.seek(self.stringhash_offset)
    for i in range(self.stringhash_count):
        self.table_stringhash[i].deserialize(fp, self.table_name)
        self.map_stringhash[
            self.table_stringhash[i].value_hash] = self.table_stringhash[i]

    # typedef: extended map starts as a copy of the caller-supplied
    # typedefs; locally-read typedefs are then added on top, so a local
    # definition wins on a type_hash collision.
    self.table_typedef = [TypeDef() for i in range(self.typedef_count)]

    self.extended_map_typedef = {}
    for k, v in map_typedef.items():
        self.extended_map_typedef[k] = v

    self.map_typedef = {}

    fp.seek(self.typedef_offset)
    for i in range(self.typedef_count):
        self.table_typedef[i].deserialize(fp, self.table_name)
        self.map_typedef[
            self.table_typedef[i].type_hash] = self.table_typedef[i]
        self.extended_map_typedef[
            self.table_typedef[i].type_hash] = self.table_typedef[i]

    # instance: ordered table plus a name_hash -> entry map.
    self.table_instance = [
        InstanceEntry() for i in range(self.instance_count)
    ]
    self.map_instance = {}
    fp.seek(self.instance_offset)
    for i in range(self.instance_count):
        self.table_instance[i].deserialize(fp, self.table_name)
        self.map_instance[
            self.table_instance[i].name_hash] = self.table_instance[i]

    self.found_strings = set()
    self.table_instance_values = [None] * len(self.table_instance)
    self.table_instance_full_values = [None] * len(self.table_instance)
    if process_instances:
        # Decode each instance payload in place; read_instance collects
        # any strings it encounters into self.found_strings.
        for i in range(len(self.table_instance)):
            ins = self.table_instance[i]
            fp.seek(ins.offset)
            buffer = fp.read(ins.size)
            n_buffer = len(buffer)
            buffer_pos = 0
            v, buffer_pos = read_instance(
                buffer, n_buffer, buffer_pos, ins.type_hash,
                self.extended_map_typedef, self.map_stringhash, ins.offset,
                found_strings=self.found_strings)
            self.table_instance_full_values[i] = v
            # Keep both the full decoded form and the extracted value.
            self.table_instance_values[i] = adf_value_extract(v)
def determine_file_type_and_size(f, file_size0):
    """Identify a file's type by sniffing magic bytes at the current position.

    Reads up to 256 bytes from *f* and walks a prefix-match chain; falls
    back to structural checks (OBC layout, then all-printable text) when
    no magic matches.  Returns a tuple
    ``(file_type, file_size, magic_int, file_sub_type)`` where file_size
    differs from *file_size0* only for AAF (uncompressed size is used).
    """
    file_type = None
    file_sub_type = None
    file_size = file_size0
    start_pos = f.tell()
    head = f.read(256)

    # First dword as a little integer, when there is enough data to make
    # all of the slice checks below meaningful.
    magic_int = None
    if len(head) >= 20:
        magic_int = struct.unpack('I', head[0:4])[0]

    if head.startswith(b' FDA'):
        file_type = FTYPE_ADF
    elif head.startswith(b'\x00FDA'):
        file_type = FTYPE_ADF0
    elif head.startswith(b'AVTX'):
        # Avalanche texture: sub-type is the DXGI pixel format.
        file_type = FTYPE_AVTX
        hdr = DdImageHeader()
        hdr.deserialize_ddsc(head)
        file_sub_type = hdr.dds_header_dxt10.dxgiFormat
    elif head.startswith(b'DDS '):
        file_type = FTYPE_DDS
        hdr = DdImageHeader()
        hdr.deserialize_dds(head)
        file_sub_type = hdr.dds_header_dxt10.dxgiFormat
    elif head[0:3].upper() == b'AAF':
        # Compressed archive: report the uncompressed size instead.
        file_type = FTYPE_AAF
        f.seek(start_pos)
        aafh = load_aaf_header(f)
        file_size = aafh.size_u
    elif head.startswith(b'RTPC'):
        file_type = FTYPE_RTPC
    elif head.startswith((b'CFX', b'GFX')):
        file_type = FTYPE_GFX
    elif head.startswith(b'RIFF'):
        file_type = FTYPE_RIFF
    elif head.startswith(b'OggS'):
        file_type = FTYPE_OGG
    elif head.startswith((b'BM6', b'BM8')):
        file_type = FTYPE_BMP
    elif head.startswith(b'MDI\x00'):
        file_type = FTYPE_MDI
    elif head.startswith(b'PFX\x00'):
        file_type = FTYPE_PFX
    elif head[4:8] == b'SARC':
        file_type = FTYPE_SARC
    elif head[4:8] == b'TAG0':
        file_type = FTYPE_TAG0
    elif head[16:20] == b'FSB5':
        file_type = FTYPE_FSB5C
    elif head.startswith(b'\x57\xE0\xE0\x57\x10\xC0\xC0\x10'):
        file_type = FTYPE_H2014
    elif head.startswith(b'\x05\x00\x00\x00RBMDL'):
        file_type = FTYPE_RBMDL
    elif head.startswith(b'KB2'):
        file_type = FTYPE_BINK_KB2
    elif head.startswith(b'BIK'):
        file_type = FTYPE_BINK_BIK
    elif head.startswith(b'GT0C'):
        file_type = FTYPE_GT0C

    # No magic matched: inspect the file structure instead.
    fm = ArchiveFile(f)

    if file_type is None:
        # OBC files: (u32)4, (u32)count, then count * 80 bytes.
        # Something to do with areas on the map? object placement?
        fm.seek(start_pos)
        ver = fm.read_u32()
        cnt = fm.read_u32()
        if ver == 4 and cnt * 80 + 8 == file_size0:
            file_type = FTYPE_OBC

    if file_type is None:
        # A file made up entirely of text bytes (tab/LF/CR + printable
        # ASCII) is treated as text: json, xml, ...
        fm.seek(start_pos)
        counts = file_stats(fm, file_size0)
        all_sum = np.sum(counts)
        pri_sum = np.sum(counts[[9, 10, 13] + list(range(20, 128))])
        if all_sum == pri_sum:
            file_type = FTYPE_TXT

    return file_type, file_size, magic_int, file_sub_type