Example #1
def read_hd_language_file_old(fileobj: GuardedFile,
                              langcode: str,
                              enc: str = 'utf-8') -> dict[str, StringResource]:
    """
    Takes a file object and the file's language code.
    """
    dbg("parse HD Language file %s", langcode)
    strings = {}

    for line in fileobj.read().decode(enc).split('\n'):
        line = line.strip()

        # skip comments & empty lines
        if not line or line.startswith('//'):
            continue

        string_id, string = line.split(None, 1)

        # strings that were added in the HD edition release have
        # UPPERCASE_STRINGS as names, instead of the numeric ID stuff
        # of AoC.
        strings[string_id] = string

    fileobj.close()

    lang = LANGCODES_HD.get(langcode, langcode)

    return {lang: strings}
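For reference, the parsing loop above can be exercised in isolation. The following is a minimal stand-alone sketch of the same key-value pattern, with made-up sample data and no openage imports:

SAMPLE = b"// comment line\n42 Hello World\nLOBBY_TITLE Multiplayer Lobby\n"

def parse_language_bytes(data: bytes, enc: str = 'utf-8') -> dict[str, str]:
    strings = {}
    for line in data.decode(enc).split('\n'):
        line = line.strip()
        # skip comments & empty lines
        if not line or line.startswith('//'):
            continue
        # split on the first run of whitespace: ID first, text after
        string_id, string = line.split(None, 1)
        strings[string_id] = string
    return strings

print(parse_language_bytes(SAMPLE))
# {'42': 'Hello World', 'LOBBY_TITLE': 'Multiplayer Lobby'}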
Example #2
    def __init__(self, fileobj: GuardedFile, custom_mode_count: int = None):
        super().__init__()

        buf = fileobj.read(Blendomatic.blendomatic_header.size)
        self.header = Blendomatic.blendomatic_header.unpack_from(buf)

        blending_mode_count, tile_count = self.header

        dbg("%d blending modes, each %d tiles",
            blending_mode_count, tile_count)

        if custom_mode_count:
            blending_mode_count = custom_mode_count

            dbg("reading only the first %d blending modes",
                custom_mode_count)

        blending_mode = Struct(f"< I {tile_count:d}B")

        self.blending_modes = []

        for i in range(blending_mode_count):
            header_data = fileobj.read(blending_mode.size)
            bmode_header = blending_mode.unpack_from(header_data)

            new_mode = BlendingMode(i, fileobj, tile_count, bmode_header)

            self.blending_modes.append(new_mode)

        fileobj.close()
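The two-stage unpacking (fixed header first, then a per-mode Struct built from the header values) can be tried on synthetic bytes. The "< I I" header layout below is an assumption for illustration, not necessarily the real Blendomatic header format:

from struct import Struct

# hypothetical header: two little-endian uint32 counters
header_struct = Struct("< I I")
buf = header_struct.pack(2, 31)          # 2 blending modes, 31 tiles per mode

mode_count, tile_count = header_struct.unpack_from(buf)

# the per-mode struct is built dynamically, as in the example above:
# one uint32 pixel count followed by one byte per tile
blending_mode = Struct(f"< I {tile_count:d}B")
print(mode_count, tile_count, blending_mode.size)   # 2 31 35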
Example #3
def load_gamespec(
    fileobj: GuardedFile,
    game_version: GameVersion,
    cachefile_name: str = None,
    pickle_cache: bool = False,
    dynamic_load: bool = False
) -> ArrayMember:
    """
    Helper method that loads the contents of an 'empires.dat' gzipped wrapper
    file.

    If cachefile_name is given, this file is consulted before performing the
    load.
    """
    # try to use the cached result from a previous run
    if cachefile_name:
        try:
            with open(cachefile_name, "rb") as cachefile:
                # pickle.load() can fail in many ways, we need to catch all.
                # pylint: disable=broad-except
                try:
                    gamespec = pickle.load(cachefile)
                    info("using cached wrapper: %s", cachefile_name)
                    return gamespec
                except Exception:
                    warn("could not use cached wrapper:")
                    import traceback
                    traceback.print_exc()
                    warn("we will just skip the cache, no worries.")

        except FileNotFoundError:
            pass

    # read the file ourselves

    dbg("reading dat file")
    compressed_data = fileobj.read()
    fileobj.close()

    dbg("decompressing dat file")
    # -15: there's no header, window size is 15.
    file_data = decompress(compressed_data, -15)
    del compressed_data

    spam("length of decompressed data: %d", len(file_data))

    wrapper = EmpiresDatWrapper()
    _, gamespec = wrapper.read(file_data, 0, game_version, dynamic_load=dynamic_load)

    # Remove the list surrounding the converted data
    gamespec = gamespec[0]
    del wrapper

    if cachefile_name and pickle_cache:
        dbg("dumping dat file contents to cache file: %s", cachefile_name)
        with open(cachefile_name, "wb") as cachefile:
            pickle.dump(gamespec, cachefile)

    return gamespec
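The cache-then-parse flow and the raw-deflate decompression can be reproduced with the standard library alone. The cache path, payload, and stand-in result below are made up and replace the openage-specific wrapper parsing:

import pickle
import zlib

def load_with_cache(compressed: bytes, cachefile_name: str):
    # try to reuse a cached result from a previous run
    try:
        with open(cachefile_name, "rb") as cachefile:
            return pickle.load(cachefile)
    except (FileNotFoundError, pickle.UnpicklingError):
        pass

    # -15: raw deflate stream, no zlib/gzip header, 15-bit window
    data = zlib.decompress(compressed, -15)
    result = {"payload": data}            # stand-in for the parsed gamespec

    with open(cachefile_name, "wb") as cachefile:
        pickle.dump(result, cachefile)
    return result

# raw deflate data can be produced with a compressobj using wbits=-15
comp = zlib.compressobj(wbits=-15)
blob = comp.compress(b"hello empires") + comp.flush()
print(load_with_cache(blob, "/tmp/gamespec.cache"))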
Example #4
    def readall(cls, fileobj: GuardedFile) -> StringLiteral:
        """
        In addition to the static data, reads the string.
        """
        result = cls.read(fileobj)
        result.value = fileobj.read(result.length * 2).decode('utf-16-le')
        return result
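A stand-alone sketch of the length-prefixed UTF-16-LE read. The 16-bit character-count prefix is an assumption for illustration, since the static part of the real StringLiteral struct is not shown here:

import io
import struct

def read_utf16_string(fileobj) -> str:
    # hypothetical prefix: one little-endian uint16 giving the character count
    (length,) = struct.unpack("<H", fileobj.read(2))
    # UTF-16-LE uses two bytes per (BMP) character, hence length * 2
    return fileobj.read(length * 2).decode('utf-16-le')

payload = "openage"
buf = io.BytesIO(struct.pack("<H", len(payload)) + payload.encode('utf-16-le'))
print(read_utf16_string(buf))   # openage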
Example #5
    def __init__(self, fileobj: GuardedFile):
        # read DOS header
        doshdr = PEDOSHeader.read(fileobj)
        if doshdr.signature != b'MZ':
            raise Exception("not a PE file")

        # read COFF header
        fileobj.seek(doshdr.coffheaderpos)
        coffhdr = PECOFFHeader.read(fileobj)

        if coffhdr.signature != b'PE\0\0':
            raise Exception("not a Win32 PE file")

        if coffhdr.opt_header_size != 224:
            raise Exception("unknown optional header size")

        # read optional header
        opthdr = PEOptionalHeader.read(fileobj)

        if opthdr.signature not in {267, 523}:
            raise Exception("Not an x86{_64} file")

        # read data directories
        opthdr.data_directories = []
        for _ in range(opthdr.data_directory_count):
            opthdr.data_directories.append(PEDataDirectory.read(fileobj))

        # read section headers
        sections: dict[str, tuple] = {}

        for _ in range(coffhdr.number_of_sections):
            section = PESection.read(fileobj)

            section.name = section.name.decode('ascii').rstrip('\0')
            if not section.name.startswith('.'):
                raise Exception("Invalid section name: " + section.name)

            sections[section.name] = section

        # store all read header info
        self.fileobj = fileobj

        self.doshdr = doshdr
        self.coffhdr = coffhdr
        self.opthdr = opthdr

        self.sections = sections
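The initial DOS-header check can be reproduced with plain struct calls. The offsets used below (the 'MZ' magic at 0, e_lfanew at 0x3c) follow the standard PE layout; the sample buffer is synthetic:

import io
import struct

def read_coff_header_pos(fileobj) -> int:
    # DOS ("MZ") header: 2-byte magic at offset 0, e_lfanew at offset 0x3c
    magic = fileobj.read(2)
    if magic != b'MZ':
        raise Exception("not a PE file")

    fileobj.seek(0x3c)
    (coffheaderpos,) = struct.unpack("<I", fileobj.read(4))
    return coffheaderpos

# synthetic 64-byte DOS header pointing the COFF/PE header at offset 0x80
stub = bytearray(64)
stub[0:2] = b'MZ'
stub[0x3c:0x40] = struct.pack("<I", 0x80)
print(hex(read_coff_header_pos(io.BytesIO(bytes(stub)))))   # 0x80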
Example #6
def read_de1_language_file(
    srcdir: Directory,
    language_file: GuardedFile
) -> dict[str, StringResource]:
    """
    Definitive Edition stores language .txt files in the Localization folder.
    Specific language strings are in Data/Localization/$LANG/strings.txt.

    The data is stored in the `stringres` storage.
    """
    # Langcode is folder name
    langcode = language_file.split("/")[2]

    dbg("parse DE1 Language file %s", langcode)
    strings = {}

    fileobj = srcdir[language_file].open('rb')

    for line in fileobj.read().decode('utf-8').split('\n'):
        line = line.strip()

        # skip comments & empty lines
        if not line or line.startswith('//'):
            continue

        # Brilliant idea to split by comma AND space!!
        string_id, string = re.split(r",|\s", line, maxsplit=1)

        # strings that were added in the DE2 edition release have
        # UPPERCASE_STRINGS as names, instead of the numeric ID stuff
        # of AoC.
        strings[string_id] = string

    fileobj.close()

    lang = LANGCODES_DE1.get(langcode, langcode)

    return {lang: strings}
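The comma-or-whitespace split can be tried in isolation; the sample lines are illustrative:

import re

# DE1 lines may separate the ID from the text with a comma or with whitespace
for line in ('42,Hello World', '42 Hello World'):
    string_id, string = re.split(r",|\s", line, maxsplit=1)
    print(string_id, '->', string)
# 42 -> Hello World
# 42 -> Hello World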
Example #7
def read_hd_language_file(
    srcdir: Directory,
    language_file: GuardedFile,
    enc: str = 'utf-8'
) -> dict[str, StringResource]:
    """
    HD Edition stores language .txt files in the resources/ folder.
    Specific language strings are in resources/$LANG/strings/key-value/*.txt.

    The data is stored in the `stringres` storage.
    """
    # Langcode is folder name
    langcode = language_file.split("/")[1]

    dbg("parse HD Language file %s", langcode)
    strings = {}

    fileobj = srcdir[language_file].open('rb')

    for line in fileobj.read().decode(enc).split('\n'):
        line = line.strip()

        # skip comments & empty lines
        if not line or line.startswith('//'):
            continue

        string_id, string = line.split(None, 1)

        # strings that were added in the HD edition release have
        # UPPERCASE_STRINGS as names, instead of the numeric ID stuff
        # of AoC.
        strings[string_id] = string

    fileobj.close()

    lang = LANGCODES_HD.get(langcode, langcode)

    return {lang: strings}
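Deriving the language code from the path and mapping it with a fallback works with plain str/dict operations; the path and the LANGCODES_HD contents below are illustrative only:

# resources/$LANG/strings/key-value/*.txt  ->  index 1 is the language folder
language_file = "resources/br/strings/key-value/key-value-strings-utf8.txt"
langcode = language_file.split("/")[1]

# illustrative mapping; fall back to the raw folder name if unknown
LANGCODES_HD = {"br": "pt_BR", "de": "de_DE"}
lang = LANGCODES_HD.get(langcode, langcode)
print(langcode, '->', lang)    # br -> pt_BR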
Example #8
    def __init__(
        self,
        idx: int,
        data_file: GuardedFile,
        tile_count: int,
        header: tuple
    ):
        """
        Initialize one blending mode,
        consisting of multiple frames for all blending directions.

        The bitmasks are used to decide whether a pixel has
        to be used for calculations.

        The alphamask is used to determine the alpha amount for blending.
        """

        # should be 2353 -> number of pixels (single alpha byte values)
        self.pxcount = header[0]
        # tile_flags = header[1:]  # TODO what do they do?

        dbg("blending mode %d tiles have %d pixels", idx, self.pxcount)

        # as we draw in isometric tile format, this is the row count
        self.row_count = int(sqrt(self.pxcount)) + 1  # should be 49

        # alpha_masks_raw is an array of bytes that encodes 32 images,
        # which are bit masks.
        #
        # one of these masks also has 2353 pixels
        # the storage of the bit masks is 4*tilesize, here's why:
        #
        # 4 * 8bit * 2353 pixels = 75296 bitpixels
        # ==> 75296/(32 images) = 2353 bitpixel/image
        #
        # this means that if we interpret the 75296 bitpixels as 32 images,
        # each of these images gets 2353 bits as data.
        # TODO: why 32 images? doesn't that depend on tile_count?

        alpha_masks_raw = unpack_from(f"{self.pxcount * 4:d}B",
                                      data_file.read(self.pxcount * 4))

        # list of alpha-mask tiles
        self.alphamasks = []

        # draw mask tiles for this blending mode
        for _ in range(tile_count):
            pixels = unpack_from(f"{self.pxcount:d}B",
                                 data_file.read(self.pxcount))
            self.alphamasks.append(self.get_tile_from_data(pixels))

        bitvalues = []
        for i in alpha_masks_raw:
            for b_id in range(7, -1, -1):
                # bit mask from 0b10000000 down to 0b00000001 (MSB first)
                bit_mask = 2 ** b_id
                bitvalues.append(i & bit_mask)

        # list of bit-mask tiles
        self.bitmasks = []

        # TODO: is 32 really hardcoded?
        for i in range(32):
            pixels = bitvalues[i * self.pxcount:(i + 1) * self.pxcount]

            self.bitmasks.append(self.get_tile_from_data(pixels))
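The MSB-first bit expansion in the loop above can be sketched on a single synthetic byte:

# expand each byte into 8 masked bit values, most significant bit first
def expand_bits(raw: bytes) -> list[int]:
    bitvalues = []
    for byte in raw:
        for b_id in range(7, -1, -1):
            bit_mask = 2 ** b_id          # 0b10000000 down to 0b00000001
            bitvalues.append(byte & bit_mask)
    return bitvalues

print(expand_bits(b'\xA0'))
# [128, 0, 32, 0, 0, 0, 0, 0]  -> bits 7 and 5 of 0xA0 are set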