Example #1
    def process_adf_in_exe(self, exepath, node_uid):
        self.type_map_def = {}

        adf_sub_files = []

        exe_stat = os.stat(exepath)

        with open(exepath, 'rb') as f:
            exe = f.read(exe_stat.st_size)

        poss = 0
        while True:
            poss = exe.find(b' FDA\x04\x00\x00\x00', poss + 1)
            if poss < 0:
                break

            exe_short = exe[poss:]
            with ArchiveFile(BytesIO(exe_short)) as f:
                try:
                    adf = Adf()
                    adf.deserialize(f, map_typedef=self.type_map_def)

                except AdfTypeMissing as ae:
                    self.type_missing.add((ae.type_id, node_uid))
                    self._type_map_updated = True

            # total_size is parsed from the ADF header, before AdfTypeMissing can be raised (see Example #13)
            adf_sub_files.append((poss, adf.total_size))

            self.typedefs_add(adf.map_typedef)
            # TODO also add field strings to adf_db/vfs, combine adf_db and vfs into GAME DATABASE?

        return adf_sub_files
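The loop above finds every embedded ADF by scanning the raw executable image for the magic b' FDA' followed by the little-endian u32 version 4. Note that it starts each search at poss + 1 with poss initialized to 0, so a match at offset 0 would be skipped (harmless for an executable, whose first bytes are the PE header). A minimal standalone sketch of the same scan pattern, independent of deca:

def find_all_magic(buffer: bytes, magic: bytes):
    # yield the offset of every occurrence of magic in buffer
    pos = buffer.find(magic)
    while pos >= 0:
        yield pos
        pos = buffer.find(magic, pos + 1)

# hypothetical usage: offsets of embedded ADF blobs in an exe image
# offsets = list(find_all_magic(exe, b' FDA\x04\x00\x00\x00'))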
Example #2
def rtpc_from_binary(f_raw, rtpc: Optional[Rtpc] = None):
    if rtpc is None:
        rtpc = Rtpc()

    f = ArchiveFile(f_raw)

    rtpc.magic = f.read_strl(4)
    if rtpc.magic != b'RTPC':
        raise Exception('Bad MAGIC {}'.format(rtpc.magic))

    rtpc.version = f.read_u32()

    rtpc.root_node = RtpcNode()
    rtpc_node_from_binary(f, rtpc.root_node)

    return rtpc
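A minimal usage sketch; the path below is hypothetical, and any readable binary file object works, since rtpc_from_binary wraps it in ArchiveFile itself:

# hypothetical path to a file containing an RTPC property container
with open('example_container.bin', 'rb') as f_raw:
    rtpc = rtpc_from_binary(f_raw)
print(rtpc.version)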
Example #3
    def serialize_v2(self, f: ArchiveFile):
        # prepare the data to write; v_path is NUL-padded so its length is a multiple of 4
        v_path = self.v_path + b'\00' * (((len(self.v_path) + 3) // 4 * 4) - len(self.v_path))
        f.write_u32(len(v_path))
        f.write(v_path)

        f.write_u32(self.offset)

        f.write_u32(self.length)
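The padding expression rounds len(v_path) up to the next multiple of 4 and appends NUL bytes to cover the difference. The same arithmetic in isolation, with a hypothetical path:

v_path = b'editor/entities/example.ee'   # hypothetical, 26 bytes
padded_len = (len(v_path) + 3) // 4 * 4  # rounds 26 up to 28
v_path_padded = v_path + b'\x00' * (padded_len - len(v_path))
assert len(v_path_padded) % 4 == 0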
Example #4
def load_aaf_header(fin):
    with ArchiveFile(fin) as f:
        aafh = AafHeader()
        aafh.magic = f.read(4)
        aafh.version = f.read_u32()
        aafh.aic = f.read(8 + 16 + 4)
        aafh.size_u = f.read_u32()  # uncompressed length, whole file
        aafh.section_size = f.read_u32()  # uncompressed length, max of any section?
        aafh.section_count = f.read_u32()  # section count? normally 1 (2-5 found), number of 32 MiB blocks?
    return aafh
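The header is a fixed 48-byte prefix: a 4-byte magic, a u32 version, the 28-byte block read as aic, then three u32 sizes. An equivalent one-shot parse with plain struct, assuming the little-endian layout that ArchiveFile's read_u32 implies:

import struct

def load_aaf_header_fields(fin):
    # same 48-byte layout as load_aaf_header above
    raw = fin.read(48)
    return struct.unpack('<4sI28sIII', raw)  # magic, version, aic, size_u, section_size, section_count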
Example #5
    def _load_adf(self, buffer):
        with ArchiveFile(io.BytesIO(buffer)) as fp:
            obj = Adf()
            try:
                # import time
                # t0 = time.time()
                obj.deserialize(fp, self.type_map_def)
                # t1 = time.time()
                # print(f'Time ADF = {t1 - t0}')

                # get typedefs from regular load, to handle the case where types are in ADF, and ADFB but not EXE
                self.typedefs_add(obj.map_typedef)
                return obj
            except EDecaErrorParse:
                return None
Example #6
    def deserialize_v2(self, f: ArchiveFile):
        self.META_entry_ptr = f.tell()
        self.v_path = f.read_strl_u32()  # string raw length in multiples of 4 bytes (based on theHunter:COTW)
        self.v_path = self.v_path.strip(b'\00')
        self.META_entry_offset_ptr = f.tell()
        self.offset = f.read_u32()
        self.META_entry_size_ptr = f.tell()
        self.length = f.read_u32()

        self.v_hash = hash32_func(self.v_path)
        self.is_symlink = self.offset == 0
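deserialize_v2 is the inverse of serialize_v2 from Example #3: read_strl_u32 reads a u32 length followed by that many raw bytes, and strip(b'\x00') removes the padding serialize_v2 added. A round-trip sketch over an in-memory buffer, assuming EntrySarc's constructor accepts the keyword forms seen in Examples #7 and #8 (the v_path is hypothetical):

import io

from deca.file import ArchiveFile

entry_out = EntrySarc(v_path=b'textures/example.ddsc')  # hypothetical v_path
entry_out.offset = 0x1000
entry_out.length = 0x200
with ArchiveFile(io.BytesIO()) as f:
    entry_out.serialize_v2(f)
    f.seek(0)
    entry_in = EntrySarc(index=0)
    entry_in.deserialize_v2(f)
assert entry_in.v_path == entry_out.v_path and not entry_in.is_symlink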
Example #7
    def header_deserialize(self, fin):
        with ArchiveFile(fin) as f:
            self.version = f.read_u32()
            self.magic = f.read(4)
            self.ver2 = f.read_u32()
            # assuming a 16-byte boundary based on some examples from theHunter:COTW
            self.dir_block_len = f.read_u32()

            assert(self.version == 4)
            assert(self.magic == b'SARC')
            assert(self.ver2 in {2, 3})

            self.entries = []

            if self.ver2 == 2:
                self.entries_begin = f.tell()
                end_pos = f.tell() + self.dir_block_len
                idx = 0
                while f.tell() + 12 <= end_pos:  # 12 is minimum length of v2 sarc entry and they pad with some zeros
                    entry = EntrySarc(idx)
                    entry.deserialize_v2(f)
                    self.entries.append(entry)
                    idx += 1

            elif self.ver2 == 3:
                string_len = f.read_u32()
                self.strings0 = f.read(string_len)
                self.strings = self.strings0.split(b'\00')
                self.strings = [s for s in self.strings if len(s) > 0]

                self.entries_begin = f.tell()
                self.entries = [EntrySarc(index=i, v_path=s) for i, s in enumerate(self.strings)]
                for ent in self.entries:
                    ent.deserialize_v3(f)

            else:
                raise NotImplementedError('FileSarc.header_deserialize: self.ver2 == {}'.format(self.ver2))

            self.entries_end = f.tell()
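The fixed part of the header is 16 bytes: u32 version (always 4), the magic b'SARC', u32 ver2 (2 or 3), and the u32 directory block length. A struct-based peek at just that prefix, assuming the little-endian layout the reads above imply:

import struct

def peek_sarc_header(buf: bytes):
    # u32 version, 4-byte magic, u32 ver2, u32 dir_block_len
    return struct.unpack_from('<I4sII', buf, 0)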
Example #8
    def build_node_sarc(
        self,
        dst_path: str,
        src_path: Union[None, str],
        vnode: VfsNode,
        vfs: VfsDatabase,
        vpath_complete_map,
        symlink_changed_file,
    ):
        assert (vnode.file_type == FTYPE_SARC)

        v_path = vnode.v_path
        print('BUILD SARC {}'.format(vnode.v_path))

        # parse existing file
        sarc_file = FileSarc()
        with vfs.file_obj_from(vnode) as f:
            sarc_file.header_deserialize(f)

        if src_path is not None:
            if src_path.find('DECA.FILE_LIST') >= 0:
                with open(src_path, 'r') as f:
                    src_lines = f.readlines()
                callex = re.compile(r'^([A-Za-z]*[.A-Za-z]*)\(([^)]*)\);$')
                for src_idx, src_line in enumerate(src_lines):
                    src_context = f'{src_path}:{src_idx + 1}'
                    mr = callex.match(src_line)
                    if mr is not None:
                        cmd = mr.group(1)
                        param = mr.group(2)
                    else:
                        raise EDecaBuildError(
                            'BUILD ERROR: {}: Parser error in command "{}"'.
                            format(src_context, src_line))

                    mr = re.match(r'^"([^"]*)"$', param)
                    v_path = None
                    if mr is not None:
                        v_path = mr.group(1).encode('ascii')

                    if cmd == 'sarc.clear':
                        sarc_file.entries.clear()
                    elif cmd in {'sarc.add', 'sarc.symlink'}:
                        # Check to make sure entry does not exist
                        for entry in sarc_file.entries:
                            if entry.v_path == v_path:
                                # raise EDecaBuildError(
                                #     'BUILD ERROR: {}: Tried to re-add v_path: {}'.format(
                                #         src_context, v_path.decode('UTF-8')))
                                print(
                                    'BUILD WARNING: Do not do this unless you are Ciprianno: {}: Tried to re-add v_path: {}'
                                    .format(src_context,
                                            v_path.decode('UTF-8')))

                        # Add to end
                        entry = EntrySarc(v_path=v_path)
                        entry.is_symlink = cmd == 'sarc.symlink'
                        src_node = vfs.nodes_where_match(v_path=v_path)
                        if not src_node:
                            raise EDecaBuildError(
                                'BUILD ERROR: {}: v_path does not exist in database: {}'
                                .format(src_context, v_path.decode('UTF-8')))
                        src_node = src_node[0]
                        entry.length = src_node.size_u
                        sarc_file.entries.append(entry)
                    # elif cmd == 'sarc.remove':
                    #     pass
                    else:
                        raise EDecaBuildError(
                            'BUILD ERROR: {}: Unhandled command: {}'.format(
                                src_context, cmd))

            else:
                raise EDecaBuildError(
                    'BUILD ERROR: Unhandled src file for SARC file: {}'.format(
                        src_path))

        src_files: List[Union[None, str]] = [None] * len(sarc_file.entries)
        entry: EntrySarc
        for i, entry in enumerate(sarc_file.entries):
            if entry.v_path in vpath_complete_map:
                src_file = vpath_complete_map[entry.v_path]
                src_files[i] = src_file
                entry.length = os.stat(src_file).st_size
                if symlink_changed_file:
                    entry.offset = 0
                    entry.is_symlink = True

        # write out the new file
        fn_dst = os.path.join(dst_path, vnode.v_path.decode('utf-8'))
        pt, fn = os.path.split(fn_dst)
        os.makedirs(pt, exist_ok=True)

        with ArchiveFile(open(fn_dst, 'wb')) as fso:
            sarc_file.header_serialize(fso)

            for i, entry in enumerate(sarc_file.entries):
                buf = None
                src_file = src_files[i]
                if entry.is_symlink:
                    print('  SYMLINK {}'.format(entry.v_path))
                elif src_file is not None:
                    print('  INSERTING {} src file to new file'.format(
                        entry.v_path))
                    with open(src_file, 'rb') as f:
                        buf = f.read(entry.length)
                else:
                    print('  COPYING {} from old file to new file'.format(
                        entry.v_path))
                    vn = vfs.nodes_where_match(v_path=entry.v_path)[0]
                    with vfs.file_obj_from(vn) as fsi:
                        buf = fsi.read(entry.length)

                if buf is not None:
                    fso.seek(entry.offset)
                    fso.write(buf)

        vpath_complete_map[v_path] = fn_dst
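The FILE_LIST parser above accepts one command per line in the form name("arg"); and understands sarc.clear, sarc.add, and sarc.symlink. A hypothetical DECA.FILE_LIST driving this builder could look like (paths invented for illustration):

sarc.clear();
sarc.add("editor/entities/example_a.ee");
sarc.symlink("textures/example_b.ddsc");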
Example #9
from deca.file import ArchiveFile

infile = '/home/krys/prj/deca/doc/debug_port/gz_debug_port_9200.dat'

with ArchiveFile(open(infile, 'rb')) as f:
    count = f.read_u32()
    while count is not None:
        if count > 4:
            flag = f.read_u8()
            if flag == 0:
                objid = f.read_u64()
                param = objid & 0xFFFF
                objid = objid >> 16
                cnt = f.read_u32()
                scripts = []
                for i in range(cnt):
                    scnt1 = f.read_u32()
                    id1 = f.read(scnt1)
                    scripts.append(id1)
                loca_size = f.read_u32()
                loca = f.read(loca_size)
                print('msg{:02x}: {} 0x{:012X} {} {} {} {}'.format(
                    flag, count, objid, param, cnt, scripts, loca))
            elif flag == 1:
                objid = f.read_u64()
                param = objid & 0xFFFF
                objid = objid >> 16
                print('msg{:02x} {} 0x{:012X} {}'.format(
                    flag, count, objid, param))
            else:
                buffer = f.read(count - 5)

        count = f.read_u32()  # next message length; read_u32 returns None at EOF, ending the loop
Example #10
from deca.file import ArchiveFile
import socket

ip = '10.0.0.16'
port = 9200

s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((ip, port))

with ArchiveFile(s.makefile(mode='rwb')) as f:
    count = f.read_u32()
    while count is not None:
        if count > 4:
            flag = f.read_u8()
            if flag == 0:
                objid = f.read_u64()
                param = objid & 0xFFFF
                objid = objid >> 16
                cnt = f.read_u32()
                scripts = []
                for i in range(cnt):
                    scnt1 = f.read_u32()
                    id1 = f.read(scnt1)
                    scripts.append(id1)
                loca_size = f.read_u32()
                loca = f.read(loca_size)
                print('msg{:02x}: {} 0x{:012X} {} {} {} {}'.format(
                    flag, count, objid, param, cnt, scripts, loca))
            elif flag == 1:
                objid = f.read_u64()
                param = objid & 0xFFFF
                objid = objid >> 16
                print('msg{:02x} {} 0x{:012X} {}'.format(
                    flag, count, objid, param))
            else:
                buffer = f.read(count - 5)

        count = f.read_u32()  # next message length; read_u32 returns None at EOF, ending the loop
Example #11
#
# sz = os.stat(fn).st_size
#
# with open(fn, 'rb') as f:
#     buf = f.read(sz)
#     print('{}'.format(buf[0:16]))
#     h = hash_little(buf, 0xdeadbeef)
#     print('{} {:0x}'.format(h, h))
#     h = hash_little(buf, 0x0)
#     print('{} {:0x}'.format(h, h))
#
#
# exit(0)

import numpy as np

from deca.file import ArchiveFile

fn = '/home/krys/prj/work/gzb/mod/global/global.blo'
with ArchiveFile(open(fn, 'r+b')) as f:
    f.seek(0x00021020)
    mat4 = list(f.read_f32(16))
    mat4 = np.transpose(np.array(mat4).reshape((4, 4)))

    f.seek(0x00026a58)
    sz = f.read_u32()
    vals = list(f.read_f32(sz))
    print(vals)

    vals = np.asarray(np.array(vals).reshape(sz // 4, 4))
    for i in range(sz // 4):
        vals[i, 0] += mat4[0, 3]
        vals[i, 1] += mat4[1, 3]
        vals[i, 2] += mat4[2, 3]
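After the transpose, the translation of the 4x4 sits in its last column, which is why the loop adds mat4[0, 3], mat4[1, 3], mat4[2, 3] to each point's x, y, z. The per-row loop is equivalent to one vectorized numpy add, shown here on hypothetical data:

import numpy as np

vals = np.zeros((8, 4), dtype=np.float32)  # hypothetical points
mat4 = np.eye(4)
mat4[0:3, 3] = [10.0, 0.0, -5.0]           # hypothetical transform
vals[:, 0:3] += mat4[0:3, 3]               # same effect as the loop above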
Example #12
    if os.path.isdir(fcat):
        fcat = fcat + '/'
        files = os.listdir(fcat)
        for file in files:
            if 'tab' == file[-3:]:
                input_files.append((cat, file[0:-4]))

for ta_file in input_files:
    outpath = prefix_out + ta_file[0] + '/' + ta_file[1] + '/'
    os.makedirs(outpath, exist_ok=True)
    inpath = prefix_in + ta_file[0] + '/' + ta_file[1]

    file_tab = inpath + '.tab'
    file_arc = inpath + '.arc'

    with ArchiveFile(open(file_tab, 'rb'), debug=debug) as f:
        if 3 == ver:
            tab_file = TabFileV3()
        elif 4 == ver:
            tab_file = TabFileV4()
        else:
            raise NotImplementedError('unsupported tab file version: {}'.format(ver))

        tab_file.deserialize(f)

    k_buffer_size = 1024 * 1024
    with open(file_arc, 'rb') as fi:
        for i in range(len(tab_file.file_table)):
            frec = tab_file.file_table[i]
            print('Processing {} of {}: {}'.format(i + 1,
                                                   len(tab_file.file_table),
                                                   frec.debug()))
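k_buffer_size is declared but the extraction loop is cut off in this excerpt. A chunked copy of one record out of the .arc would look roughly like the sketch below; the offset and size parameters stand in for fields of frec whose names are not visible here (only frec.debug() appears above):

def copy_record(fi, fo, offset, size, chunk=1024 * 1024):
    # stream `size` bytes starting at `offset` from fi to fo in chunk-sized pieces
    fi.seek(offset)
    remaining = size
    while remaining > 0:
        buf = fi.read(min(chunk, remaining))
        if not buf:
            break  # truncated archive
        fo.write(buf)
        remaining -= len(buf)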
Example #13
    def deserialize(self, fp, map_typedef=None, process_instances=True):
        if map_typedef is None:
            map_typedef = {}

        header = fp.read(0x40)

        if len(header) < 0x40:
            raise EDecaErrorParse('File Too Short')

        fh = ArchiveFile(io.BytesIO(header))

        magic = fh.read_strl(4)

        if magic != b' FDA':
            raise EDecaErrorParse('Magic does not match')

        self.version = fh.read_u32()

        self.instance_count = fh.read_u32()
        self.instance_offset = fh.read_u32()

        self.typedef_count = fh.read_u32()
        self.typedef_offset = fh.read_u32()

        self.stringhash_count = fh.read_u32()
        self.stringhash_offset = fh.read_u32()

        self.nametable_count = fh.read_u32()
        self.nametable_offset = fh.read_u32()

        self.total_size = fh.read_u32()

        self.unknown = fh.read_u32(5)

        self.comment = fp.read_strz()

        # name table
        self.table_name = [[0, b''] for i in range(self.nametable_count)]
        fp.seek(self.nametable_offset)
        for i in range(self.nametable_count):
            self.table_name[i][0] = fp.read_u8()
        for i in range(self.nametable_count):
            self.table_name[i][1] = fp.read(self.table_name[i][0] + 1)[0:-1]

        # string hash
        self.table_stringhash = [
            StringHash() for i in range(self.stringhash_count)
        ]
        self.map_stringhash = {}
        fp.seek(self.stringhash_offset)
        for i in range(self.stringhash_count):
            self.table_stringhash[i].deserialize(fp, self.table_name)
            self.map_stringhash[
                self.table_stringhash[i].value_hash] = self.table_stringhash[i]

        # typedef
        self.table_typedef = [TypeDef() for i in range(self.typedef_count)]

        self.extended_map_typedef = {}
        for k, v in map_typedef.items():
            self.extended_map_typedef[k] = v

        self.map_typedef = {}
        fp.seek(self.typedef_offset)
        for i in range(self.typedef_count):
            self.table_typedef[i].deserialize(fp, self.table_name)
            self.map_typedef[
                self.table_typedef[i].type_hash] = self.table_typedef[i]
            self.extended_map_typedef[
                self.table_typedef[i].type_hash] = self.table_typedef[i]

        # print(typedef_map)

        # instance
        self.table_instance = [
            InstanceEntry() for i in range(self.instance_count)
        ]
        self.map_instance = {}
        fp.seek(self.instance_offset)
        for i in range(self.instance_count):
            self.table_instance[i].deserialize(fp, self.table_name)
            self.map_instance[
                self.table_instance[i].name_hash] = self.table_instance[i]

        self.found_strings = set()
        self.table_instance_values = [None] * len(self.table_instance)
        self.table_instance_full_values = [None] * len(self.table_instance)
        if process_instances:
            for i in range(len(self.table_instance)):
                ins = self.table_instance[i]
                fp.seek(ins.offset)
                buffer = fp.read(ins.size)
                n_buffer = len(buffer)
                buffer_pos = 0
                v, buffer_pos = read_instance(buffer,
                                              n_buffer,
                                              buffer_pos,
                                              ins.type_hash,
                                              self.extended_map_typedef,
                                              self.map_stringhash,
                                              ins.offset,
                                              found_strings=self.found_strings)
                self.table_instance_full_values[i] = v
                self.table_instance_values[i] = adf_value_extract(v)
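The 0x40-byte header parsed field by field above is, equivalently, a 4-byte magic followed by fifteen little-endian u32s: version, four count/offset pairs, total_size, and five unknowns. A one-shot unpack, assuming read_u32 is little-endian:

import struct

def unpack_adf_header(header: bytes):
    fields = struct.unpack('<4s15I', header)  # magic + 15 u32s = 0x40 bytes
    magic, version = fields[0], fields[1]
    instance_count, instance_offset = fields[2], fields[3]
    typedef_count, typedef_offset = fields[4], fields[5]
    stringhash_count, stringhash_offset = fields[6], fields[7]
    nametable_count, nametable_offset = fields[8], fields[9]
    total_size, unknown = fields[10], fields[11:16]
    return magic, version, total_size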
Example #14
from deca.file import ArchiveFile
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D

# fn = './test/gz/__TEST__/locations/world/ostervik_neighborhood_04_housing.obc.B00C511A'
fn = './test/gz/__TEST__/locations/world/archipelago_iboholmen_yttervik_house.obc.16F90A59'

rs = []
with ArchiveFile(open(fn, 'rb'), debug=True) as f:
    ver = f.read_u32()
    count = f.read_u32()
    for i in range(count):
        r = f.read_f32(20)
        rs.append(r)

d = np.array(rs)

np.savetxt('./test/gz/test.tsv', d, delimiter='\t')
# fig = plt.figure()
# ax = Axes3D(fig)
# ax.scatter(d[:,4],d[:,5],d[:,6])
# plt.show()
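Each record is 20 f32s, i.e. 80 bytes, which is exactly the OBC signature Example #17 tests for (ver == 4 and cnt * 80 + 8 == file size). A quick consistency check that could be appended to the script above:

import os

# 8-byte (ver, count) prefix plus 80 bytes per record
assert os.stat(fn).st_size == 8 + 80 * count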
Example #15
line = '{}\t{}\t{}\t{}\th{}'.format('path', 'ftype', 'file_size',
                                    'file_size_hex', 'magic_hex')
print(line)

while len(paths) > 0:
    path = paths.pop(-1)
    # print(path)
    if os.path.isdir(path):
        path = path + '/'
        files = os.listdir(path)
        for file in files:
            ffn = path + file
            paths.append(ffn)
    else:
        file_size = os.stat(path).st_size
        with ArchiveFile(open(path, 'rb')) as f:
            magic = f.read(32)

        # guess type
        raw_image_size = {
            0x1000000: '',
            0x800000: '',
            0x400000: '',
            0x280000: '',
            0x200000: '',
            0x140000: '',
            0x100000: '',
            0xa0000: '',
            0x80000: '',
            0x40000: '',
            0x20000: '',
Example #16
        0x1000000: [[71, 8192, 4096]],
        0x800000: [[71, 4096, 4096]],
        0x400000: [[71, 4096, 2048]],
        0x280000: [[71, 512 * 4, 640 * 4]],
        0x200000: [[71, 512 * 4, 512 * 4]],
        0x150000: [[83, 512 * 1, 512 * 1 + 128], [83, 512 * 2, 512 * 2 + 256]],
        0x140000: [[71, 512 * 4, 320 * 4]],
        0x100000: [[71, 512 * 4, 256 * 4]],
        0xa0000: [[71, 256 * 2, 256 * 2], [71, 256 * 4, 256 * 4]],
        0x80000: [[71, 256 * 4, 256 * 4]],
        0x40000: [[71, 256 * 4, 128 * 4]],
        0x20000: [[71, 128 * 4, 128 * 4]],
        # 0x8000: [[83, 128 * 2, 128 * 1]],
    }

    with ArchiveFile(open(in_file, 'rb')) as f0:
        no_header = file_sz in file_data_layout_db
        if not no_header:
            file_sz = file_sz - 128
            header = f0.read(128)
            fh = ArchiveFile(io.BytesIO(header))

            p = 0
            magic = fh.read_u32()
            version = fh.read_u16()
            d = fh.read_u8()
            dim = fh.read_u8()
            pixel_format = fh.read_u32()
            nx0 = fh.read_u16()
            ny0 = fh.read_u16()
            depth = fh.read_u16()
Example #17
def determine_file_type_and_size(f, file_size0):
    file_type = None
    file_sub_type = None
    file_size = file_size0

    start_pos = f.tell()
    magic = f.read(256)
    magic_int = None
    if len(magic) >= 20:
        magic_int = struct.unpack('I', magic[0:4])[0]

        if b' FDA' == magic[0:4]:
            file_type = FTYPE_ADF
        elif b'\x00FDA' == magic[0:4]:
            file_type = FTYPE_ADF0
        elif b'AVTX' == magic[0:4]:
            file_type = FTYPE_AVTX
            header = DdImageHeader()
            header.deserialize_ddsc(magic)
            file_sub_type = header.dds_header_dxt10.dxgiFormat
        elif b'DDS ' == magic[0:4]:
            file_type = FTYPE_DDS
            header = DdImageHeader()
            header.deserialize_dds(magic)
            file_sub_type = header.dds_header_dxt10.dxgiFormat
        elif b'AAF' == magic[0:3].upper():
            file_type = FTYPE_AAF
            f.seek(start_pos)
            aafh = load_aaf_header(f)
            file_size = aafh.size_u
        elif b'RTPC' == magic[0:4]:
            file_type = FTYPE_RTPC
        elif b'CFX' == magic[0:3]:
            file_type = FTYPE_GFX
        elif b'GFX' == magic[0:3]:
            file_type = FTYPE_GFX
        elif b'RIFF' == magic[0:4]:
            file_type = FTYPE_RIFF
        elif b'OggS' == magic[0:4]:
            file_type = FTYPE_OGG
        elif b'BM6' == magic[0:3]:
            file_type = FTYPE_BMP
        elif b'BM8' == magic[0:3]:
            file_type = FTYPE_BMP
        elif b'MDI\x00' == magic[0:4]:
            file_type = FTYPE_MDI
        elif b'PFX\x00' == magic[0:4]:
            file_type = FTYPE_PFX
        elif b'SARC' == magic[4:8]:
            file_type = FTYPE_SARC
        elif b'TAG0' == magic[4:8]:
            file_type = FTYPE_TAG0
        elif b'FSB5' == magic[16:20]:
            file_type = FTYPE_FSB5C
        elif b'\x57\xE0\xE0\x57\x10\xC0\xC0\x10' == magic[0:8]:
            file_type = FTYPE_H2014
        elif b'\x05\x00\x00\x00RBMDL' == magic[0:9]:
            file_type = FTYPE_RBMDL
        elif b'KB2' == magic[0:3]:
            file_type = FTYPE_BINK_KB2
        elif b'BIK' == magic[0:3]:
            file_type = FTYPE_BINK_BIK
        elif b'GT0C' == magic[0:4]:
            file_type = FTYPE_GT0C

    # need to inspect file structure

    fm = ArchiveFile(f)

    if file_type is None:
        # OBC files with (u32)4, (u32)count , 80 * count bytes, something to do with areas on the map? object placement?
        fm.seek(start_pos)
        ver = fm.read_u32()
        cnt = fm.read_u32()
        if ver == 4 and cnt * 80 + 8 == file_size0:
            file_type = FTYPE_OBC

    if file_type is None:  # text file only contains text bytes, json, xml, ...
        fm.seek(start_pos)
        counts = file_stats(fm, file_size0)
        all_sum = np.sum(counts)
        pri_sum = np.sum(counts[[9, 10, 13] + list(range(20, 128))])
        if all_sum == pri_sum:
            file_type = FTYPE_TXT

    return file_type, file_size, magic_int, file_sub_type
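The final text heuristic accepts a file only when every byte is tab (9), LF (10), CR (13), or in the 20..127 range; that is what summing those rows of counts and comparing against the total checks. The same rule as a standalone predicate over a raw buffer, without the numpy histogram:

def is_probably_text(buf: bytes) -> bool:
    # mirror the counts-based check above: only tab, LF, CR, and bytes 20..127
    allowed = {9, 10, 13} | set(range(20, 128))
    return all(b in allowed for b in buf)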