Ejemplo n.º 1
0
    def __init__(self, key, io):
        """Parse a BIF archive header and index its variable resource table.

        :param key: The encapsulating Key object this Bif belongs to.
        :param io: Path to the .bif file on disk.
        """
        self.key = key
        self.io = io

        # Maps resource id -> (offset, size, restype); usually not needed
        # directly, accessed by the encapsulating Key object.
        self.contained = {}

        with open(self.io, 'rb') as f:
            hs = struct.unpack("<4s 4s L L L", f.read(4 + 4 + 3 * 4))

            (self.file_type, self.file_version, self.var_res_count,
             self.fix_res_count, self.var_table_offset) = hs

            f.seek(self.var_table_offset)
            table = f.read(self.var_res_count * 16)

            for entry in chunks(table, 16):
                if len(entry) != 16:
                    break
                rid, offset, size, restype = struct.unpack("<L L L L", entry)
                # Only the low 20 bits of the id identify the resource.
                self.contained[rid & 0xfffff] = (offset, size, restype)
Ejemplo n.º 2
0
Archivo: key.py Proyecto: rmilne/pynwn
    def __init__(self, key, io):
        """Read a BIF file's header and record its variable resources.

        :param key: Owning Key object.
        :param io: Filesystem path to the .bif file.
        """
        # The Key object this Bif belongs to.
        self.key = key
        # Path to .bif file.
        self.io = io
        # res_id -> (offset, size, restype); consumed mainly by the Key.
        self.contained = {}

        with open(self.io, 'rb') as f:
            header = struct.unpack("<4s 4s L L L", f.read(4 + 4 + 3 * 4))
            self.file_type = header[0]
            self.file_version = header[1]
            self.var_res_count = header[2]
            self.fix_res_count = header[3]
            self.var_table_offset = header[4]

            f.seek(self.var_table_offset)
            raw = f.read(16 * self.var_res_count)

            for rec in chunks(raw, 16):
                # Only the final chunk can be short; ignore it.
                if len(rec) == 16:
                    rid, ofs, sz, rtype = struct.unpack("<L L L L", rec)
                    # Low 20 bits of the id identify the resource.
                    self.contained[rid & 0xfffff] = (ofs, sz, rtype)
Ejemplo n.º 3
0
                # Load each tile's minimap TGA once per filename, caching the
                # decoded image so repeated tiles are not re-opened.
                tga_fname = tga + '.tga'
                if not tga_fname in tga_dict:
                    tga_dict[tga_fname] = Image.open(mgr[tga_fname].to_io())

                tga = tga_dict[tga_fname]

                # I chose here to scale all the minimap images to the
                # smallest size so if one is 8x8 they will all be scaled
                # to 8x8.
                tga_size = min(tga_size, tga.size[0])
                tile_tgas.append((tga, tile.orientation))

            # Note: The tile list begins in the bottom left corner
            # so I'm going to reverse so that it starts in the top
            # left and draw down rather than up.
            tile_tgas = chunks(tile_tgas, area.width)[::-1]

            # minimum minimap tile size 16x16, just so some of the
            # smaller 8x8s are a little larger.
            tga_size = max(minimum, tga_size * scale)

            # Compose the full minimap as a single RGBA image, one tile
            # per (width x height) grid cell.
            new_im = Image.new('RGBA',
                               (area.width * tga_size, area.height * tga_size))

            for h in xrange(area.height):
                for w in xrange(area.width):

                    im, rot = tile_tgas[h][w]
                    # Top-left pixel position of this tile in the composite.
                    new_loc = (w * tga_size, h * tga_size)

                    # NOTE(review): snippet is truncated here; presumably the
                    # tile is resized to tga_size below -- confirm in source.
                    if im.size[0] != tga_size:
Ejemplo n.º 4
0
                # Load each tile's minimap TGA once per filename, caching the
                # decoded image so repeated tiles are not re-opened.
                tga_fname = tga+'.tga'
                if not tga_fname in tga_dict:
                    tga_dict[tga_fname] = Image.open(mgr[tga_fname].to_io())

                tga = tga_dict[tga_fname]

                # I chose here to scale all the minimap images to the
                # smallest size so if one is 8x8 they will all be scaled
                # to 8x8.
                tga_size = min(tga_size, tga.size[0])
                tile_tgas.append((tga, tile.orientation))

            # Note: The tile list begins in the bottom left corner
            # so I'm going to reverse so that it starts in the top
            # left and draw down rather than up.
            tile_tgas = chunks(tile_tgas, area.width)[::-1]

            # minimum minimap tile size 16x16, just so some of the
            # smaller 8x8s are a little larger.
            tga_size = max(minimum, tga_size * scale)

            # Compose the full minimap as a single RGBA image, one tile
            # per (width x height) grid cell.
            new_im = Image.new('RGBA', (area.width * tga_size,
                                        area.height * tga_size))

            for h in xrange(area.height):
                for w in xrange(area.width):

                    im, rot = tile_tgas[h][w]
                    # Top-left pixel position of this tile in the composite.
                    new_loc = (w * tga_size, h * tga_size)

                    # NOTE(review): snippet is truncated here; presumably the
                    # tile is resized to tga_size below -- confirm in source.
                    if im.size[0] != tga_size:
Ejemplo n.º 5
0
Archivo: key.py Proyecto: rmilne/pynwn
    def __init__(self, fname, data_path):
        """Load a .key index file and the BIF archives it references.

        :param fname: Path to the .key file.
        :param data_path: Root directory that BIF paths are relative to.
        :raises ValueError: if a keyed resource is missing from its BIF.
        """
        super(Key, self).__init__()

        self.root = data_path
        self.bif = []

        with open(fname, 'rb') as io:
            header = io.read(8 + (4 * 6) + 32)
            hs = struct.unpack("<4s 4s LLLLLL 32s", header)

            self.ftype = hs[0]
            self.fvers = hs[1]

            bif_count = hs[2]
            key_count = hs[3]
            offset_to_file_table = hs[4]
            offset_to_key_table = hs[5]

            self.year = hs[6]
            self.day_of_year = hs[7]
            reserved = hs[8]  # unused per the file format

            # --- file table: one 12-byte record per referenced BIF ---
            io.seek(offset_to_file_table)
            data = io.read(12 * bif_count)

            self.file_table = []
            for c in chunks(data, 12):
                if len(c) != 12: break

                # BUGFIX: explicit '<' forces standard sizes (L=4, h=2).
                # Native-sized "LLhh" makes L 8 bytes on LP64 platforms and
                # fails to unpack the 12-byte record.
                size, name_offset, name_size, drives = struct.unpack("<LLhh", c)
                io.seek(name_offset)
                name = io.read(name_size)
                name = struct.unpack("%ds" % name_size, name)[0]
                name = name.decode(get_encoding())
                name = name.rstrip(' \t\r\n\0')
                # BIF paths are stored Windows-style; map to the host OS.
                name = os.path.join(self.root, name.replace('\\', os.sep))
                name = os.path.abspath(name)
                self.bif.append(Bif(self, name))
                self.file_table.append((size, name, drives))

            # --- key table: resref/type keyed by 32-bit resource id ---
            self.key_table = {}
            io.seek(offset_to_key_table)
            data = io.read(22 * key_count)

            for c in chunks(data, 22):
                if len(c) != 22: break
                resref, res_type, res_id = struct.unpack("<16s hL", c)
                resref = resref.decode(get_encoding())
                self.key_table[res_id] = (resref.rstrip(' \t\r\n\0'), res_type)

            self.fn_to_co = {}
            for res_id, (resref, res_type) in self.key_table.items():
                # High bits select the BIF; low 20 bits the resource in it.
                bif_idx = res_id >> 20
                bif = self.bif[bif_idx]
                res_id = res_id & 0xfffff

                if not res_id in bif.contained:
                    # BUGFIX: bif.io is a path string, not a file object;
                    # it has no .name attribute.
                    msg = "%s does not have %d" % (bif.io, res_id)
                    raise ValueError(msg)

                ofs, sz, _rt = bif.contained[res_id]
                o = res.ContentObject(resref, res_type, bif.io, ofs, sz)

                # Later BIFs shadow earlier ones when filenames collide.
                fn = o.get_filename()
                if fn in self.fn_to_co and self.fn_to_co[fn][2] < bif_idx:
                    oo, biff, unused = self.fn_to_co[fn]
                    # BUGFIX: report the shadowing BIF (bif.io), not the
                    # shadowed one twice.
                    print("%s, in %s shadowed by file in %s" % (fn, biff.io, bif.io))
                    self.content.remove(oo)

                self.fn_to_co[fn] = (o, bif, bif_idx)
                self.add(o)
Ejemplo n.º 6
0
Archivo: erf.py Proyecto: rmilne/pynwn
    def from_file(fname):
        """Create an Erf by parsing an ERF-format file.

        :param fname: Path to the ERF file on disk.
        :returns: A populated Erf instance.
        :raises ValueError: if the file type signature is not recognized.
        """
        with open(fname, 'rb') as io:
            header = io.read(160)
            hs = struct.unpack("< 4s 4s LL LL LL LL L 116s", header)

            ftype = hs[0].decode(get_encoding()).strip()
            if not ftype in Erf.TYPES: raise ValueError("Invalid file type!")

            fvers = hs[1].decode(get_encoding())
            # Resref length depends on the ERF version.
            fname_len = Erf.filename_length(fvers)

            new_erf = Erf(ftype, fvers)
            new_erf.io = fname

            lstr_count = hs[2]
            lstr_size = hs[3]
            entry_count = hs[4]
            offset_to_lstr = hs[5]
            offset_to_keys = hs[6]
            offset_to_res = hs[7]
            new_erf.year = hs[8]
            new_erf.day_of_year = hs[9]
            new_erf.desc_strref = hs[10]

            # --- localized string table ---
            io.seek(offset_to_lstr)
            lstr = io.read(lstr_size)

            for ls in range(lstr_count):
                if len(lstr) == 0:
                    print(
                        "locstr table: not enough entries (expected: %d, got: %d)"
                        % (lstr_count, ls))
                    break

                if len(lstr) < 8:
                    # BUGFIX: lstr is bytes; the original concatenated it to a
                    # str, raising TypeError on Python 3. Format with %r.
                    print(
                        "locstr table: not enough entries (expected: %d, got: %d)"
                        % (lstr_count, ls) + " partial data: %r" % (lstr,))
                    break

                lid, strsz = struct.unpack("<L L", lstr[:8])
                if strsz > len(lstr) - 8:
                    strsz = len(lstr) - 8

                # Necessary for hacking around the fact that erf.exe adds an
                # extra null to the end of the description string.
                # (local renamed from 'str' to avoid shadowing the builtin)
                try:
                    text = struct.unpack("8x %ds" % strsz,
                                         lstr)[0].decode(get_encoding())
                except struct.error:
                    text = struct.unpack("8x %ds" % (strsz + 1, ),
                                         lstr)[0].decode(get_encoding())

                new_erf.localized_strings[lid] = text.rstrip(' \t\r\n\0')
                lstr = lstr[8 + len(text):]

            # --- key list: resref + type for every resource entry ---
            keylist_entry_size = fname_len + 4 + 2 + 2
            io.seek(offset_to_keys)
            keylist = io.read(keylist_entry_size * entry_count)

            fmt = "%ds I h h" % fname_len
            fmt = fmt * entry_count
            fmt = '<' + fmt

            keylist = struct.unpack(fmt, keylist)

            for resref, res_id, res_type, unused in chunks(keylist, 4):
                co = res.ContentObject(
                    resref.decode(get_encoding()).rstrip(' \t\r\n\0'),
                    res_type, fname)
                new_erf.add(co)

            # --- resource list: offset/size for each content object ---
            resourcelist_entry_size = 4 + 4
            io.seek(offset_to_res)
            resourcelist = io.read(resourcelist_entry_size * entry_count)
            resourcelist = struct.unpack("I I" * entry_count, resourcelist)
            _index = -1
            for offset, size in chunks(resourcelist, 2):
                _index += 1
                try:
                    co = new_erf.content[_index]
                    co.offset = offset
                    co.size = size
                except IndexError:
                    print(
                        "WARNING: Attempt to index invalid content object in '%s' at offset %X"
                        % (fname, offset))

        return new_erf
Ejemplo n.º 7
0
Archivo: gff.py Proyecto: rmilne/pynwn
    def load(self):
        """Load and parse the source of the associated gff file.

        Reads the header, struct array, label array, and field array from
        the content object's data, then builds ``self._structure``.

        :returns: True on success.
        :raises ValueError: if the file type or version does not match.
        """
        # Parse from an in-memory copy of the content object's data.
        self.source = io.BytesIO(self.co.get())

        header = struct.unpack(
            self.HeaderPattern,
            self.source.read(struct.calcsize(self.HeaderPattern)))

        if (header[0].decode(get_encoding()).rstrip() == self.filetype
                and header[1].decode(get_encoding()) == self.Version):
            self.structoffset, self.structcount = header[2:4]
            self.fieldoffset, self.fieldcount = header[4:6]
            self.labeloffset, self.labelcount = header[6:8]
            self.dataoffset, self.datasize = header[8:10]
            self.indiceoffset, self.indicesize = header[10:12]
            self.listoffset, self.listsize = header[12:14]
        else:
            if header[1].decode(get_encoding()) != self.Version:
                raise ValueError(
                    "File: %s: gff file version '%s' does not match current valid version '%s'"
                    % (self.co.get_filename(), header[1], self.Version))
            else:
                raise ValueError(
                    "File: %s: gff file type '%s' does not match specified file type '%s'"
                    % (self.co.get_filename(), header[0].rstrip(),
                       self.filetype))

        # --- struct array ---
        self.source.seek(self.structoffset)
        self.structs = []

        size = struct.calcsize(self.StructPattern)
        rd = self.source.read(self.structcount * size)

        for chunk in chunks(rd, size):
            # 'stype' renamed from 'type' to avoid shadowing the builtin.
            stype, offset, count = struct.unpack(self.StructPattern, chunk)
            if offset == 0xffffffff:
                # Sentinel: struct has no fields.
                self.structs.append([stype, -1])
            elif count == 1:
                # Single field: offset indexes the field array directly.
                self.structs.append([stype, offset])
            else:
                # Multiple fields: offset points into the field-indices
                # block; read 'count' dword indexes from there.
                pattern = "%dI" % count
                position = self.source.tell()
                self.source.seek(self.indiceoffset + offset)
                data = self.source.read(struct.calcsize(pattern))
                self.source.seek(position)

                indexes = struct.unpack(pattern, data)
                self.structs.append([stype, list(indexes)])

        # --- label array ---
        self.source.seek(self.labeloffset)
        self.labels = []

        size = struct.calcsize(self.LabelPattern)
        rd = self.source.read(size * self.labelcount)
        for chunk in chunks(rd, size):
            label = struct.unpack(self.LabelPattern,
                                  chunk)[0].decode(get_encoding())
            self.labels.append(label.rstrip('\x00'))

        # --- field array ---
        self.source.seek(self.fieldoffset)
        self.fields = []

        # (removed unused 'dwordsize' local from the original)
        size = struct.calcsize(self.FieldPattern)
        for index in range(self.fieldcount):
            ftype, label = struct.unpack(self.FieldPattern,
                                         self.source.read(size))
            Type = self.Classes[ftype]

            position = None
            # at_offset is False when the field's data is stored inline.
            if Type.at_offset is not False:
                offset = struct.unpack('I', self.source.read(4))[0]
                position = self.source.tell()

            if Type.at_offset == 'data':
                offset += self.dataoffset
            elif Type.at_offset == 'list':
                offset += self.listoffset

            if position:
                data = Type.unpack(self.source, offset)
                self.source.seek(position)
            else:
                data = Type.unpack(self.source)

            self.fields.append([Type.type, self.labels[label], data])

        # Close the in-memory source and build the gff structure.
        self.source.close()
        self._structure = self.build_struct(0)
        return True
Ejemplo n.º 8
0
    def __init__(self, fname, data_path):
        """Load a .key index file and the BIF archives it references.

        :param fname: Path to the .key file.
        :param data_path: Root directory that BIF paths are relative to.
        :raises ValueError: if a keyed resource is missing from its BIF.
        """
        super(Key, self).__init__()

        self.root = data_path
        self.bif = []

        with open(fname, 'rb') as io:
            header = io.read(8 + (4 * 6) + 32)
            hs = struct.unpack("<4s 4s LLLLLL 32s", header)

            self.ftype = hs[0]
            self.fvers = hs[1]

            bif_count = hs[2]
            key_count = hs[3]
            offset_to_file_table = hs[4]
            offset_to_key_table = hs[5]

            self.year = hs[6]
            self.day_of_year = hs[7]
            reserved = hs[8]  # unused per the file format

            # --- file table: one 12-byte record per referenced BIF ---
            io.seek(offset_to_file_table)
            data = io.read(12 * bif_count)

            self.file_table = []
            for c in chunks(data, 12):
                if len(c) != 12: break

                # BUGFIX: explicit '<' forces standard sizes (L=4, h=2).
                # Native-sized "LLhh" makes L 8 bytes on LP64 platforms and
                # fails to unpack the 12-byte record.
                size, name_offset, name_size, drives = struct.unpack("<LLhh", c)
                io.seek(name_offset)
                name = io.read(name_size)
                name = struct.unpack("%ds" % name_size, name)[0]
                name = name.decode(get_encoding())
                name = name.rstrip(' \t\r\n\0')
                # BIF paths are stored Windows-style; map to the host OS.
                name = os.path.join(self.root, name.replace('\\', os.sep))
                name = os.path.abspath(name)
                self.bif.append(Bif(self, name))
                self.file_table.append((size, name, drives))

            # --- key table: resref/type keyed by 32-bit resource id ---
            self.key_table = {}
            io.seek(offset_to_key_table)
            data = io.read(22 * key_count)

            for c in chunks(data, 22):
                if len(c) != 22: break
                resref, res_type, res_id = struct.unpack("<16s hL", c)
                resref = resref.decode(get_encoding())
                self.key_table[res_id] = (resref.rstrip(' \t\r\n\0'), res_type)

            self.fn_to_co = {}
            for res_id, (resref, res_type) in self.key_table.items():
                # High bits select the BIF; low 20 bits the resource in it.
                bif_idx = res_id >> 20
                bif = self.bif[bif_idx]
                res_id = res_id & 0xfffff

                if not res_id in bif.contained:
                    # BUGFIX: bif.io is a path string, not a file object;
                    # it has no .name attribute.
                    msg = "%s does not have %d" % (bif.io, res_id)
                    raise ValueError(msg)

                ofs, sz, _rt = bif.contained[res_id]
                o = res.ContentObject(resref, res_type, bif.io, ofs, sz)

                # Later BIFs shadow earlier ones when filenames collide.
                fn = o.get_filename()
                if fn in self.fn_to_co and self.fn_to_co[fn][2] < bif_idx:
                    oo, biff, unused = self.fn_to_co[fn]
                    # BUGFIX: report the shadowing BIF (bif.io), not the
                    # shadowed one twice.
                    print("%s, in %s shadowed by file in %s" %
                          (fn, biff.io, bif.io))
                    self.content.remove(oo)

                self.fn_to_co[fn] = (o, bif, bif_idx)
                self.add(o)
Ejemplo n.º 9
0
Archivo: gff.py Proyecto: rmilne/pynwn
    def load(self):
        """Load and parse the source of the associated gff file.

        Reads the header, struct array, label array, and field array from
        the content object's data, then builds ``self._structure``.

        :returns: True on success.
        :raises ValueError: if the file type or version does not match.
        """
        # Parse from an in-memory copy of the content object's data.
        self.source = io.BytesIO(self.co.get())

        header = struct.unpack(self.HeaderPattern,
                               self.source.read(struct.calcsize(self.HeaderPattern)))

        if (header[0].decode(get_encoding()).rstrip() == self.filetype
            and header[1].decode(get_encoding()) == self.Version):
            self.structoffset, self.structcount = header[2:4]
            self.fieldoffset, self.fieldcount = header[4:6]
            self.labeloffset, self.labelcount = header[6:8]
            self.dataoffset, self.datasize = header[8:10]
            self.indiceoffset, self.indicesize = header[10:12]
            self.listoffset, self.listsize = header[12:14]
        else:
            if header[1].decode(get_encoding()) != self.Version:
                raise ValueError("File: %s: gff file version '%s' does not match current valid version '%s'" % (self.co.get_filename(), header[1], self.Version))
            else:
                raise ValueError("File: %s: gff file type '%s' does not match specified file type '%s'" % (self.co.get_filename(), header[0].rstrip(), self.filetype))

        # --- struct array ---
        self.source.seek(self.structoffset)
        self.structs = []

        size = struct.calcsize(self.StructPattern)
        rd = self.source.read(self.structcount * size)

        for chunk in chunks(rd, size):
            # 'stype' renamed from 'type' to avoid shadowing the builtin.
            stype, offset, count = struct.unpack(self.StructPattern, chunk)
            if offset == 0xffffffff:
                # Sentinel: struct has no fields.
                self.structs.append([stype, -1])
            elif count == 1:
                # Single field: offset indexes the field array directly.
                self.structs.append([stype, offset])
            else:
                # Multiple fields: offset points into the field-indices
                # block; read 'count' dword indexes from there.
                pattern = "%dI" % count
                position = self.source.tell()
                self.source.seek(self.indiceoffset + offset)
                data = self.source.read(struct.calcsize(pattern))
                self.source.seek(position)

                indexes = struct.unpack(pattern, data)
                self.structs.append([stype, list(indexes)])

        # --- label array ---
        self.source.seek(self.labeloffset)
        self.labels = []

        size = struct.calcsize(self.LabelPattern)
        rd = self.source.read(size * self.labelcount)
        for chunk in chunks(rd, size):
            label = struct.unpack(self.LabelPattern, chunk)[0].decode(get_encoding())
            self.labels.append(label.rstrip('\x00'))

        # --- field array ---
        self.source.seek(self.fieldoffset)
        self.fields = []

        # (removed unused 'dwordsize' local from the original)
        size = struct.calcsize(self.FieldPattern)
        for index in range(self.fieldcount):
            ftype, label = struct.unpack(self.FieldPattern,
                                         self.source.read(size))
            Type = self.Classes[ftype]

            position = None
            # at_offset is False when the field's data is stored inline.
            if Type.at_offset is not False:
                offset = struct.unpack('I', self.source.read(4))[0]
                position = self.source.tell()

            if Type.at_offset == 'data':
                offset += self.dataoffset
            elif Type.at_offset == 'list':
                offset += self.listoffset

            if position:
                data = Type.unpack(self.source, offset)
                self.source.seek(position)
            else:
                data = Type.unpack(self.source)

            self.fields.append([Type.type, self.labels[label], data])

        # Close the in-memory source and build the gff structure.
        self.source.close()
        self._structure = self.build_struct(0)
        return True
Ejemplo n.º 10
0
Archivo: erf.py Proyecto: rmilne/pynwn
    def from_file(fname):
        """Create an Erf by parsing an ERF-format file.

        :param fname: Path to the ERF file on disk.
        :returns: A populated Erf instance.
        :raises ValueError: if the file type signature is not recognized.
        """
        with open(fname, 'rb') as io:
            header = io.read(160)
            hs = struct.unpack("< 4s 4s LL LL LL LL L 116s", header)

            ftype = hs[0].decode(get_encoding()).strip()
            if not ftype in Erf.TYPES: raise ValueError("Invalid file type!")

            fvers = hs[1].decode(get_encoding())
            # Resref length depends on the ERF version.
            fname_len = Erf.filename_length(fvers)

            new_erf = Erf(ftype, fvers)
            new_erf.io = fname

            lstr_count = hs[2]
            lstr_size = hs[3]
            entry_count = hs[4]
            offset_to_lstr = hs[5]
            offset_to_keys = hs[6]
            offset_to_res = hs[7]
            new_erf.year = hs[8]
            new_erf.day_of_year = hs[9]
            new_erf.desc_strref = hs[10]

            # --- localized string table ---
            io.seek(offset_to_lstr)
            lstr = io.read(lstr_size)

            for ls in range(lstr_count):
                if len(lstr) == 0:
                    print("locstr table: not enough entries (expected: %d, got: %d)" % (lstr_count, ls))
                    break

                if len(lstr) < 8:
                    # BUGFIX: lstr is bytes; the original concatenated it to a
                    # str, raising TypeError on Python 3. Format with %r.
                    print("locstr table: not enough entries (expected: %d, got: %d)" % (lstr_count, ls) + " partial data: %r" % (lstr,))
                    break

                lid, strsz = struct.unpack("<L L", lstr[:8])
                if strsz > len(lstr) - 8:
                    strsz = len(lstr) - 8

                # Necessary for hacking around the fact that erf.exe adds an
                # extra null to the end of the description string.
                # (local renamed from 'str' to avoid shadowing the builtin)
                try:
                    text = struct.unpack("8x %ds" % strsz, lstr)[0].decode(get_encoding())
                except struct.error:
                    text = struct.unpack("8x %ds" % (strsz + 1,), lstr)[0].decode(get_encoding())

                new_erf.localized_strings[lid] = text.rstrip(' \t\r\n\0')
                lstr = lstr[8 + len(text):]

            # --- key list: resref + type for every resource entry ---
            keylist_entry_size = fname_len + 4 + 2 + 2
            io.seek(offset_to_keys)
            keylist = io.read(keylist_entry_size * entry_count)

            fmt = "%ds I h h" % fname_len
            fmt = fmt * entry_count
            fmt = '<' + fmt

            keylist = struct.unpack(fmt, keylist)

            for resref, res_id, res_type, unused in chunks(keylist, 4):
                co = res.ContentObject(resref.decode(get_encoding()).rstrip(' \t\r\n\0'),
                                       res_type, fname)
                new_erf.add(co)

            # --- resource list: offset/size for each content object ---
            resourcelist_entry_size = 4 + 4
            io.seek(offset_to_res)
            resourcelist = io.read(resourcelist_entry_size * entry_count)
            resourcelist = struct.unpack("I I" * entry_count, resourcelist)
            _index = -1
            for offset, size in chunks(resourcelist, 2):
                _index += 1
                try:
                    co = new_erf.content[_index]
                    co.offset = offset
                    co.size = size
                except IndexError:
                    print("WARNING: Attempt to index invalid content object in '%s' at offset %X" % (fname, offset))

        return new_erf