Example #1
def winfnt_read(stream):
    start = stream.tell()
    fields = collections.OrderedDict((name, stream_unpack(fmt, stream)[0])
                                     for name, fmt in WINFNT_HEADER_FMT)
    fields['dfCopyright'] = fields['dfCopyright'].rstrip(b'\0')
    count = fields['dfLastChar'] - fields['dfFirstChar'] + 2  # glyph range plus the sentinel glyph
    fields['dfCharTable'] = [stream_unpack('<HH', stream) for _ in range(count)]
    height = fields['dfPixHeight']

    palette = (0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF)  # two RGB triplets: black background, white ink
    images = []

    for width, offset in fields['dfCharTable']:
        stream.seek(start + offset)
        lines = [[] for _ in range(height)]
        padded_width = (width + 7) & 0xFFF8  # round the glyph width up to whole bytes

        # Glyph bits are stored one byte-wide column at a time, top to bottom.
        for _ in range(padded_width >> 3):
            for y in range(height):
                mask = stream.read(1)
                lines[y].append(mask)

        bitmap = b''.join(b''.join(line) for line in lines)
        pixels = b''.join(BYTE_MASK_EXPANDED[mask] for mask in bitmap)  # expand each bit to one indexed pixel
        image = make_8bit_image((padded_width, height), pixels, palette)
        image = image.crop((0, 0, width, height))
        images.append(image)

    return fields, images
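Every excerpt on this page calls a small family of stream helpers (stream_unpack, stream_unpack_array, stream_read, stream_fit) that the excerpts themselves never define. The following is a minimal sketch of what they would have to look like, inferred only from the call sites on this page; the names match the excerpts, but the bodies are assumptions, not pywolf's actual implementation:

import io
import struct

def stream_read(stream, size):
    # Read exactly `size` bytes, failing loudly on a short read.
    data = stream.read(size)
    assert len(data) == size, 'unexpected end of stream'
    return data

def stream_unpack(fmt, stream):
    # Unpack a single struct record from the current stream position.
    return struct.unpack(fmt, stream_read(stream, struct.calcsize(fmt)))

def stream_unpack_array(fmt, stream, count, scalar=True):
    # Yield `count` records; scalar=True unwraps single-value records.
    for _ in range(count):
        record = stream_unpack(fmt, stream)
        yield record[0] if scalar else record

def stream_fit(stream, offset=None, size=None):
    # Default `offset` to the current position and `size` to whatever
    # remains, then position the stream at `offset`.
    if offset is None:
        offset = stream.tell()
    if size is None:
        stream.seek(0, io.SEEK_END)
        size = stream.tell() - offset
    stream.seek(offset)
    return offset, size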
Example #2
File: game.py Project: TexZK/pywolf
 def from_stream(cls, stream, planes_count=3):
     planes_count = int(planes_count)
     assert planes_count > 0
     plane_offsets = tuple(stream_unpack_array('<L', stream, planes_count))
     plane_sizes = tuple(stream_unpack_array('<H', stream, planes_count))
     dimensions = stream_unpack('<HH', stream)
     name = stream_unpack('<16s', stream)[0].decode('ascii')
     null_char_index = name.find('\0')
     if null_char_index >= 0:
         name = name[:null_char_index]
     name = name.rstrip(' \t\r\n\v\0')
     return cls(plane_offsets, plane_sizes, dimensions, name)
Example #3
 def from_stream(cls, stream, planes_count=3):
     planes_count = int(planes_count)
     assert planes_count > 0
     plane_offsets = tuple(stream_unpack_array('<L', stream, planes_count))
     plane_sizes = tuple(stream_unpack_array('<H', stream, planes_count))
     size = stream_unpack('<HH', stream)
     name = stream_unpack('<16s', stream)[0].decode('ascii')
     null_char_index = name.find('\0')
     if null_char_index >= 0:
         name = name[:null_char_index]
     name = name.rstrip(' \t\r\n\v\0')
     return cls(plane_offsets, plane_sizes, size, name)
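Examples #2 and #3 are two revisions of the same TileMapHeader parser (see Examples #13 and #14, which invoke it). To make the byte layout it expects concrete, here is a synthetic 3-plane header decoded by hand with plain struct calls; every field value is invented for illustration:

import io
import struct

# 3 plane offsets (<L), 3 plane sizes (<H), width/height (<HH),
# and a 16-byte NUL-padded name.
raw = struct.pack('<3L3H2H16s',
                  100, 200, 300,     # plane_offsets
                  50, 60, 70,        # plane_sizes
                  64, 64,            # dimensions
                  b'Wolf1 Map1')

stream = io.BytesIO(raw)
plane_offsets = struct.unpack('<3L', stream.read(12))
plane_sizes = struct.unpack('<3H', stream.read(6))
dimensions = struct.unpack('<HH', stream.read(4))
name = stream.read(16).decode('ascii').split('\0', 1)[0].rstrip()
print(plane_offsets, plane_sizes, dimensions, repr(name))
# (100, 200, 300) (50, 60, 70) (64, 64) 'Wolf1 Map1'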
Example #4
    def _read_sizes(self, index):
        logger = logging.getLogger()
        data_stream = self._data_stream
        partition_map = self._partition_map

        BLOCK_SIZE = 8 * 8 * struct.calcsize('<B')
        MASKBLOCK_SIZE = 8 * 8 * struct.calcsize('<H')
        compressed_size = self.sizeof(index)
        key, *value = self.find_partition(partition_map, index)

        if key == 'tile8':  # tile 8s are all in one chunk!
            expanded_size = BLOCK_SIZE * partition_map[key][1]
        elif key == 'tile8m':
            expanded_size = MASKBLOCK_SIZE * partition_map[key][1]
        elif key == 'tile16':  # all other tiles are one/chunk
            expanded_size = BLOCK_SIZE * 4
        elif key == 'tile16m':
            expanded_size = MASKBLOCK_SIZE * 4
        elif key == 'tile32':
            expanded_size = BLOCK_SIZE * 16
        elif key == 'tile32m':
            expanded_size = MASKBLOCK_SIZE * 16
        else:  # everything else has an explicit size longword
            expanded_size = stream_unpack('<L', data_stream)[0]
            compressed_size -= struct.calcsize('<L')

        logger.debug('%r._read_sizes(index=%d), partition_map[%r]=%r, compressed_size=0x%X, expanded_size=0x%X',
                     self, index, key, value, compressed_size, expanded_size)

        return compressed_size, expanded_size
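The partition_map consulted above evidently maps a partition name to a pair whose first element is a starting chunk index and whose second element is used as a count (for 'tile8' it is read as the number of 8x8 tiles sharing one chunk). A minimal sketch of the lookup under that assumption, with invented numbers; the real layout differs per game:

# Hypothetical layout: partition name -> (first chunk index, count).
PARTITION_MAP_EXAMPLE = {
    'struct': (0, 1),
    'font': (1, 2),
    'pics': (3, 132),
    'tile8': (135, 72),  # 72 tiles packed into the one tile8 chunk
}

def find_partition(partition_map, index):
    # Pick the partition whose start is the greatest one not above `index`.
    best = None
    for name, (start, count) in partition_map.items():
        if start <= index and (best is None or start > best[1]):
            best = (name, start, count)
    if best is None:
        raise KeyError(index)
    return best

key, *value = find_partition(PARTITION_MAP_EXAMPLE, 5)
print(key, value)  # pics [3, 132]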
Example #5
    def load(self, data_stream, data_base=None, data_size=None, dimensions=(64, 64), alpha_index=0xFF,
             data_size_guard=None):
        super().load(data_stream, data_base, data_size)
        data_stream = self._data_stream
        data_base = self._data_base
        data_size = self._data_size
        assert data_size % struct.calcsize('<LH') == 0
        alpha_index = int(alpha_index)
        assert 0x00 <= alpha_index <= 0xFF

        chunk_count, sprites_start, sounds_start = stream_unpack('<HHH', data_stream)
        chunk_offsets = list(stream_unpack_array('<L', data_stream, chunk_count))
        chunk_offsets.append(data_size)

        pages_offset = chunk_offsets[0]
        pages_size = data_size - pages_offset
        assert data_size_guard is None or data_size < data_size_guard  # e.g. a 100 MiB sanity cap
        for i in reversed(range(chunk_count)):
            if not chunk_offsets[i]:
                chunk_offsets[i] = chunk_offsets[i + 1]
        assert all(pages_offset <= chunk_offsets[i] <= data_size for i in range(chunk_count))
        assert all(chunk_offsets[i] <= chunk_offsets[i + 1] for i in range(chunk_count))

        self._chunk_count = chunk_count
        self._chunk_offsets = chunk_offsets
        self._pages_offset = pages_offset
        self._pages_size = pages_size
        self._dimensions = dimensions
        self._alpha_index = alpha_index
        self.sprites_start = sprites_start
        self.sounds_start = sounds_start
        self.sounds_infos = self._read_sounds_infos()
        return self
Example #6
    def _read_sizes(self, index):
        data_stream = self._data_stream
        partition_map = self._partition_map

        BLOCK_SIZE = (8 * 8) * 1
        MASKBLOCK_SIZE = (8 * 8) * 2
        compressed_size = self.sizeof(index)
        key = self.find_partition(partition_map, index)[0]

        if key == 'tile8':  # tile 8s are all in one chunk!
            expanded_size = BLOCK_SIZE * partition_map[key][1]
        elif key == 'tile8m':
            expanded_size = MASKBLOCK_SIZE * partition_map[key][1]
        elif key == 'tile16':  # all other tiles are one/chunk
            expanded_size = BLOCK_SIZE * 4
        elif key == 'tile16m':
            expanded_size = MASKBLOCK_SIZE * 4
        elif key == 'tile32':
            expanded_size = BLOCK_SIZE * 16
        elif key == 'tile32m':
            expanded_size = MASKBLOCK_SIZE * 16
        else:  # everything else has an explicit size longword
            expanded_size = stream_unpack('<L', data_stream)[0]
            compressed_size -= 4

        return compressed_size, expanded_size
Example #7
 def from_stream(cls, stream):
     length = stream_unpack('<H', stream)[0]
     assert length % 4 == 0
     length //= 4
     events = list(stream_unpack_array('<BBH', stream, length,
                                       scalar=False))
     return cls(events)
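Each event decoded here is four bytes, a '<BBH' record (presumably an AdLib register, a value, and a delay in ticks, as in id's IMF music format; the field meaning is an inference), which is why the leading byte length must be divisible by four. A hand-rolled round trip with two invented events:

import io
import struct

raw = struct.pack('<H', 8)                 # 8 bytes of event data follow
raw += struct.pack('<BBH', 0x20, 0x01, 0)  # register, value, delay
raw += struct.pack('<BBH', 0xA0, 0x57, 4)

stream = io.BytesIO(raw)
length = struct.unpack('<H', stream.read(2))[0]
assert length % 4 == 0
events = [struct.unpack('<BBH', stream.read(4)) for _ in range(length // 4)]
print(events)  # [(32, 1, 0), (160, 87, 4)]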
Example #8
    def load(self,
             data_stream,
             header_stream,
             huffman_stream,
             partition_map,
             pics_size_index=0,
             data_base=None,
             data_size=None,
             header_base=None,
             header_size=None,
             huffman_offset=None,
             huffman_size=None):

        super().load(data_stream, data_base, data_size)
        data_size = self._data_size
        pics_size_index = int(pics_size_index)
        assert pics_size_index >= 0
        header_base, header_size = stream_fit(header_stream, header_base,
                                              header_size)
        huffman_offset, huffman_size = stream_fit(huffman_stream,
                                                  huffman_offset, huffman_size)
        assert header_size % 3 == 0
        assert huffman_size >= 4 * HUFFMAN_NODE_COUNT

        chunk_count = header_size // 3
        chunk_offsets = [None] * chunk_count
        for i in range(chunk_count):
            byte0, byte1, byte2 = stream_unpack('<BBB', header_stream)
            offset = byte0 | (byte1 << 8) | (byte2 << 16)
            if offset < 0xFFFFFF:
                chunk_offsets[i] = offset
        chunk_offsets.append(data_size)
        for i in reversed(range(chunk_count)):
            if chunk_offsets[i] is None:
                chunk_offsets[i] = chunk_offsets[i + 1]
        assert all(0 <= chunk_offsets[i] <= data_size
                   for i in range(chunk_count))
        assert all(chunk_offsets[i] <= chunk_offsets[i + 1]
                   for i in range(chunk_count))

        huffman_nodes = list(
            stream_unpack_array('<HH',
                                huffman_stream,
                                HUFFMAN_NODE_COUNT,
                                scalar=False))
        self._chunk_count = chunk_count
        self._chunk_offsets = chunk_offsets
        self._header_stream = header_stream
        self._header_base = header_base
        self._header_size = header_size
        self._huffman_stream = huffman_stream
        self._huffman_offset = huffman_offset
        self._huffman_size = huffman_size
        self._partition_map = partition_map
        self._pics_size_index = pics_size_index
        self._huffman_nodes = huffman_nodes
        self.pics_size = self._build_pics_size()
        return self
Example #9
    def load(self,
             data_stream,
             header_stream,
             data_base=None,
             data_size=None,
             header_base=None,
             header_size=None,
             planes_count=3,
             carmacized=True):

        super().load(data_stream, data_base, data_size)
        data_size = self._data_size
        planes_count = int(planes_count)
        carmacized = bool(carmacized)
        assert planes_count > 0
        header_base, header_size = stream_fit(header_stream, header_base,
                                              header_size)

        rlew_tag = stream_unpack('<H', header_stream)[0]

        assert (header_size - 2) % 4 == 0
        chunk_count = (header_size - 2) // 4
        chunk_offsets = [None] * chunk_count
        for i in range(chunk_count):
            offset = stream_unpack('<L', header_stream)[0]
            if 0 < offset < 0xFFFFFFFF:
                chunk_offsets[i] = offset
        chunk_offsets.append(data_size)
        for i in reversed(range(chunk_count)):
            if chunk_offsets[i] is None:
                chunk_offsets[i] = chunk_offsets[i + 1]
        assert all(0 < chunk_offsets[i] <= data_size
                   for i in range(chunk_count))
        assert all(chunk_offsets[i] <= chunk_offsets[i + 1]
                   for i in range(chunk_count))

        self._chunk_count = chunk_count
        self._chunk_offsets = chunk_offsets
        self._header_stream = header_stream
        self._header_base = header_base
        self._header_size = header_size
        self._carmacized = carmacized
        self._rlew_tag = rlew_tag
        self.planes_count = planes_count
        return self
Example #10
    def load(self, data_stream, header_stream,
             data_base=None, data_size=None,
             header_base=None, header_size=None,
             planes_count=3, carmacized=True):

        super().load(data_stream, data_base, data_size)
        data_size = self._data_size
        planes_count = int(planes_count)
        carmacized = bool(carmacized)
        assert planes_count > 0
        header_base, header_size = stream_fit(header_stream, header_base, header_size)

        rlew_tag = stream_unpack('<H', header_stream)[0]

        assert (header_size - struct.calcsize('<H')) % struct.calcsize('<L') == 0
        chunk_count = (header_size - struct.calcsize('<H')) // struct.calcsize('<L')
        chunk_offsets = [None] * chunk_count
        for i in range(chunk_count):
            offset = stream_unpack('<L', header_stream)[0]
            if 0 < offset < 0xFFFFFFFF:
                chunk_offsets[i] = offset
        chunk_offsets.append(data_size)
        for i in reversed(range(chunk_count)):
            if chunk_offsets[i] is None:
                chunk_offsets[i] = chunk_offsets[i + 1]
        assert all(0 < chunk_offsets[i] <= data_size for i in range(chunk_count))
        assert all(chunk_offsets[i] <= chunk_offsets[i + 1] for i in range(chunk_count))

        self._chunk_count = chunk_count
        self._chunk_offsets = chunk_offsets
        self._header_stream = header_stream
        self._header_base = header_base
        self._header_size = header_size
        self._carmacized = carmacized
        self._rlew_tag = rlew_tag
        self.planes_count = planes_count
        return self
Example #11
    def load(self, data_stream, header_stream, huffman_stream,
             partition_map, pics_dimensions_index=0,
             data_base=None, data_size=None,
             header_base=None, header_size=None,
             huffman_offset=None, huffman_size=None):

        super().load(data_stream, data_base, data_size)
        data_size = self._data_size
        pics_dimensions_index = int(pics_dimensions_index)
        assert pics_dimensions_index >= 0
        header_base, header_size = stream_fit(header_stream, header_base, header_size)
        huffman_offset, huffman_size = stream_fit(huffman_stream, huffman_offset, huffman_size)
        assert header_size % struct.calcsize('<BBB') == 0
        assert huffman_size >= struct.calcsize('<HH') * HUFFMAN_NODES_COUNT

        chunk_count = header_size // struct.calcsize('<BBB')
        chunk_offsets = [None] * chunk_count
        for i in range(chunk_count):
            byte0, byte1, byte2 = stream_unpack('<BBB', header_stream)
            offset = byte0 | (byte1 << 8) | (byte2 << 16)
            if offset < 0xFFFFFF:
                chunk_offsets[i] = offset
        chunk_offsets.append(data_size)
        for i in reversed(range(chunk_count)):
            if chunk_offsets[i] is None:
                chunk_offsets[i] = chunk_offsets[i + 1]
        assert all(0 <= chunk_offsets[i] <= data_size for i in range(chunk_count))
        assert all(chunk_offsets[i] <= chunk_offsets[i + 1] for i in range(chunk_count))

        huffman_nodes = list(stream_unpack_array('<HH', huffman_stream, HUFFMAN_NODES_COUNT, scalar=False))
        self._chunk_count = chunk_count
        self._chunk_offsets = chunk_offsets
        self._header_stream = header_stream
        self._header_base = header_base
        self._header_size = header_size
        self._huffman_stream = huffman_stream
        self._huffman_offset = huffman_offset
        self._huffman_size = huffman_size
        self._partition_map = partition_map
        self._pics_dimensions_index = pics_dimensions_index
        self._huffman_nodes = huffman_nodes
        self.pics_dimensions = self._build_pics_dimensions()
        return self
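The header parsed above (presumably a VGAHEAD-style file) packs each chunk offset into three little-endian bytes, with 0xFFFFFF standing for "no chunk". Decoding a three-entry header by hand, with invented bytes:

raw = bytes([0x10, 0x02, 0x00,    # chunk 0 at 0x000210
             0xFF, 0xFF, 0xFF,    # chunk 1 absent
             0x00, 0x40, 0x01])   # chunk 2 at 0x014000
offsets = []
for i in range(0, len(raw), 3):
    byte0, byte1, byte2 = raw[i:i + 3]
    offset = byte0 | (byte1 << 8) | (byte2 << 16)
    offsets.append(None if offset == 0xFFFFFF else offset)
print(offsets)  # [528, None, 81920]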
Example #12
    def load(self,
             data_stream,
             data_base=None,
             data_size=None,
             image_size=(64, 64),
             alpha_index=0xFF,
             data_size_guard=None):
        super().load(data_stream, data_base, data_size)
        data_stream = self._data_stream
        data_base = self._data_base
        data_size = self._data_size
        assert data_size % 6 == 0
        alpha_index = int(alpha_index)
        assert 0x00 <= alpha_index <= 0xFF

        chunk_count, sprites_start, sounds_start = stream_unpack(
            '<HHH', data_stream)
        chunk_offsets = list(
            stream_unpack_array('<L', data_stream, chunk_count))
        chunk_offsets.append(data_size)

        pages_offset = chunk_offsets[0]
        pages_size = data_size - pages_offset
        assert data_size_guard is None or data_size < data_size_guard
        for i in reversed(range(chunk_count)):
            if not chunk_offsets[i]:
                chunk_offsets[i] = chunk_offsets[i + 1]
        assert all(pages_offset <= chunk_offsets[i] <= data_size
                   for i in range(chunk_count))
        assert all(chunk_offsets[i] <= chunk_offsets[i + 1]
                   for i in range(chunk_count))

        self._chunk_count = chunk_count
        self._chunk_offsets = chunk_offsets
        self._pages_offset = pages_offset
        self._pages_size = pages_size
        self._image_size = image_size
        self._alpha_index = alpha_index
        self.sprites_start = sprites_start
        self.sounds_start = sounds_start
        self.sounds_infos = self._read_sounds_infos()
        return self
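Examples #5 and #12 load a VSWAP-style page file (the name is an inference from the sprites/sounds page fields): three <H words (chunk_count, sprites_start, sounds_start) followed by chunk_count page offsets (<L each). A tiny synthetic directory parsed by hand, with invented sizes:

import io
import struct

body = b'\xAA' * 4 + b'\xBB' * 2
header = struct.pack('<HHH', 2, 1, 2)
offsets_pos = len(header) + 2 * 4          # pages begin right after the directory
directory = struct.pack('<2L', offsets_pos, offsets_pos + 4)
data = header + directory + body

stream = io.BytesIO(data)
chunk_count, sprites_start, sounds_start = struct.unpack('<HHH', stream.read(6))
chunk_offsets = list(struct.unpack('<%dL' % chunk_count, stream.read(4 * chunk_count)))
chunk_offsets.append(len(data))            # sentinel, as in the loader above
sizes = [chunk_offsets[i + 1] - chunk_offsets[i] for i in range(chunk_count)]
print(chunk_count, chunk_offsets, sizes)   # 2 [14, 18, 20] [4, 2]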
Example #13
    def extract_chunk(self, index):
        data_stream = self._data_stream
        carmacized = self._carmacized
        rlew_tag = self._rlew_tag
        planes_count = self.planes_count

        header = None
        planes = [None] * planes_count
        chunk_size = self.sizeof(index)
        if chunk_size:
            self._seek(index)
            header = TileMapHeader.from_stream(data_stream, planes_count)

            for i in range(planes_count):
                self._seek(i, header.plane_offsets)
                expanded_size = stream_unpack('<H', data_stream)[0]
                compressed_size = header.plane_sizes[i] - 2
                chunk = stream_read(data_stream, compressed_size)
                if carmacized:
                    chunk = carmack_expand(chunk, expanded_size)[2:]
                planes[i] = rlew_expand(chunk, rlew_tag)
        return (header, planes)
Example #14
    def extract_chunk(self, index):
        self._log_extract_chunk(index)
        data_stream = self._data_stream
        carmacized = self._carmacized
        rlew_tag = self._rlew_tag
        planes_count = self.planes_count

        header = None
        planes = [None] * planes_count
        chunk_size = self.sizeof(index)
        if chunk_size:
            self._seek(index)
            header = TileMapHeader.from_stream(data_stream, planes_count)

            for i in range(planes_count):
                self._seek(i, header.plane_offsets)
                expanded_size = stream_unpack('<H', data_stream)[0]
                compressed_size = header.plane_sizes[i] - struct.calcsize('<H')
                chunk = stream_read(data_stream, compressed_size)
                if carmacized:
                    chunk = carmack_expand(chunk, expanded_size)[struct.calcsize('<H'):]
                planes[i] = rlew_expand(chunk, rlew_tag)
        return (header, planes)
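Both revisions expand each plane with rlew_expand after an optional Carmack expansion. For reference, here is a minimal RLEW decoder in the classic id Software scheme, which is what the call shape suggests; this sketch is an assumption, not pywolf's code:

import struct

def rlew_expand(data, tag):
    # A word equal to `tag` introduces a (count, value) run;
    # every other word is copied through unchanged.
    words = struct.unpack('<%dH' % (len(data) // 2), data)
    out = []
    i = 0
    while i < len(words):
        word = words[i]
        i += 1
        if word == tag:
            count, value = words[i], words[i + 1]
            i += 2
            out.extend([value] * count)
        else:
            out.append(word)
    return struct.pack('<%dH' % len(out), *out)

# Example: a 4-word run of 0x0007 followed by two literal words.
packed = struct.pack('<5H', 0xABCD, 4, 7, 1, 2)
print(struct.unpack('<6H', rlew_expand(packed, 0xABCD)))  # (7, 7, 7, 7, 1, 2)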
Example #15
File: graphics.py Project: TexZK/pywolf
 def from_stream(cls, chunk_stream):
     height = stream_unpack('<H', chunk_stream)[0]
     offsets = list(stream_unpack_array('<H', chunk_stream, cls.CHARACTER_COUNT))
     widths = list(stream_unpack_array('<B', chunk_stream, cls.CHARACTER_COUNT))
     return cls(height, offsets, widths)
Example #16
File: graphics.py Project: TexZK/pywolf
 def from_stream(cls, chunk_stream):
     left, right = stream_unpack('<HH', chunk_stream)
     width = right - left + 1
     offsets = list(stream_unpack_array('<H', chunk_stream, width))
     return cls(left, right, offsets)
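This header gives the first and last non-empty columns of a sprite, followed by one data offset per column. Decoding a 3-column header by hand, with invented values:

import io
import struct

raw = struct.pack('<2H3H', 30, 32, 120, 140, 160)
stream = io.BytesIO(raw)
left, right = struct.unpack('<HH', stream.read(4))
width = right - left + 1                   # 3 columns: 30, 31, 32
offsets = list(struct.unpack('<%dH' % width, stream.read(2 * width)))
print(left, right, offsets)  # 30 32 [120, 140, 160]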
Example #17
 def from_bytes(cls, data, offset=0):
     args = struct.unpack_from('<LH13B', data, offset)
     offset += cls.SIZE - 1  # seek to the final byte of the record
     args += struct.unpack_from('<B', data, offset)  # data is a bytes buffer, not a stream
     return cls(*args)
Example #18
 def from_stream(cls, stream):
     args = list(stream_unpack('<LH13B', stream))
     stream_unpack('<3B', stream)  # unused
     args += stream_unpack('<B', stream)
     return cls(*args)