Code example #1
def export_texture_wrapped(file_data, drsname, file_id, file_extension, fname, palette, output_formats):
    try:
        export_texture(file_data, drsname, file_id, file_extension, fname, palette, output_formats)
    except Exception:
        val = "%s, %s" % (fname, file_id)
        print("exception: " + val)
        dbg("exception: " + val, 1)
Code example #2
File: util.py Project: zhangf911/openage
def file_write_multi(file_dict, output_folder=False):
    """
    save the given file dictionary to files

    key: filename
    value: file content
    """

    for file_name, file_data in file_dict.items():

        if output_folder:
            file_path = os.path.join(output_folder, file_name)
        else:
            file_path = file_name

        file_folder, _ = os.path.split(file_path)
        mkdirs(file_folder)

        if type(file_data) == bytes:
            mode = "wb"
        else:
            mode = "w"

        dbg("saving %s.." % file_path, 1)
        with open(file_path, mode) as f:
            f.write(file_data)
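As a quick illustration of the dictionary shape this helper expects, here is a minimal, hypothetical usage sketch (file names and contents are made up; file_write_multi is assumed to be importable from the module above):

files = {
    "info/readme.txt": "plain text content",   # str -> written with mode "w"
    "data/blob.bin": b"\x00\x01\x02",          # bytes -> written with mode "wb"
}
file_write_multi(files, output_folder="out")   # creates out/info and out/data as needed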
Code example #3
    def save(self, output_folder, save_format):
        for idx, texture in enumerate(self.get_textures()):
            fname = os.path.join(output_folder, "mode%02d" % idx)
            dbg("saving blending mode%02d texture -> %s.png" % (idx, fname), 1)
            texture.save(fname, save_format)

        dbg("blending masks exported successfully!", 1)
Code example #4
    def __init__(self, input_data, palette=None):

        dbg("creating Texture from %s" % (repr(input_data)), 3)

        if isinstance(input_data, SLP):

            if palette is None:
                raise Exception("creating a texture from a SLP requires a palette")
            frames = [
                TextureImage(
                    frame.get_picture_data(palette, self.player_id),
                    hotspot=frame.info.hotspot,
                )
                for frame in input_data.frames
            ]
        elif isinstance(input_data, BlendingMode):
            frames = [
                TextureImage(
                    tile.get_picture_data(),
                    hotspot=tuple(terrain_tile_size.tile_halfsize.values())
                )
                for tile in input_data.alphamasks
            ]
        else:
            raise Exception("cannot create Texture from unknown source type")

        self.image_data, (self.width, self.height), self.image_metadata = merge_frames(frames)
Code example #5
File: media_exporter.py Project: duanqn/openage
    def log_fileinfo(source_file: Path, target_file: Path) -> None:
        """
        Log source and target file information to the shell.
        """
        source_format = source_file.suffix[1:].upper()
        target_format = target_file.suffix[1:].upper()

        source_path = source_file.resolve_native_path()
        if source_path:
            source_size = os.path.getsize(source_path)

        else:
            with source_file.open('r') as src:
                src.seek(0, os.SEEK_END)
                source_size = src.tell()

        target_path = target_file.resolve_native_path()
        target_size = os.path.getsize(target_path)

        log = ("Converted: "
               f"{source_file.name} "
               f"({source_format}, {source_size}B) "
               f"-> {target_file.name} "
               f"({target_format}, {target_size}B | "
               f"{(target_size / source_size * 100) - 100:+.1f}%)")

        dbg(log)
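As a hedged arithmetic check of the size-delta expression in the log line above (the byte counts are invented):

source_size, target_size = 2048, 1536
delta = (target_size / source_size * 100) - 100
print(f"{delta:+.1f}%")   # -25.0% -> the converted file is a quarter smaller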
Code example #6
File: blendomatic.py Project: CodyKelly/openage
    def save(self, output_folder, save_format):
        for idx, texture in enumerate(self.get_textures()):
            fname = os.path.join(output_folder, "mode%02d" % idx)
            dbg("saving blending mode%02d texture -> %s.png" % (idx, fname), 1)
            texture.save(fname, save_format)

        dbg("blending masks exported successfully!", 1)
Code example #7
File: empiresdat.py Project: Dotile/openage
    def raw_dump(self, fname):
        """
        save the dat file in uncompressed format.
        """

        dbg("saving uncompressed %s file to %s" % (self.fname, fname), 1)
        with open(fname, "wb") as f:
            f.write(self.content)
Code example #8
File: empiresdat.py Project: abhipranay/openage
    def raw_dump(self, filename):
        """
        save the dat file in uncompressed format.
        """

        rawfile_writepath = file_get_path(filename, write=True)
        dbg("saving uncompressed %s file to %s" % (self.fname, rawfile_writepath), 1)
        file_write(rawfile_writepath, self.content)
Code example #9
File: empiresdat.py Project: zhangf911/openage
    def raw_dump(self, fname):
        """
        save the dat file in uncompressed format.
        """

        dbg("saving uncompressed %s file to %s" % (self.fname, fname), 1)
        with open(fname, "wb") as f:
            f.write(self.content)
Code example #10
File: empiresdat.py Project: CodyKelly/openage
    def raw_dump(self, filename):
        """
        save the dat file in uncompressed format.
        """

        rawfile_writepath = file_get_path(filename, write=True)
        dbg(
            "saving uncompressed %s file to %s" %
            (self.fname, rawfile_writepath), 1)
        file_write(rawfile_writepath, self.content)
Code example #11
    def gen_image(self, draw_text=True, squaresize=100):
        """
        generate a PIL image visualizing this color table (palette).
        """

        from PIL import Image, ImageDraw

        imgside_length = math.ceil(math.sqrt(len(self.palette)))
        imgsize = imgside_length * squaresize

        dbg("generating palette image with size %dx%d" % (imgsize, imgsize))

        palette_image = Image.new('RGBA', (imgsize, imgsize), (255, 255, 255, 0))
        draw = ImageDraw.ImageDraw(palette_image)

        text_padlength = len(str(len(self.palette))) #dirty, i know.
        text_format = "%0" + str(text_padlength) + "d"

        drawn = 0

        # squaresize 1 means draw single pixels
        if squaresize == 1:
            for y in range(imgside_length):
                for x in range(imgside_length):
                    if drawn < len(self.palette):
                        r, g, b = self.palette[drawn]
                        draw.point((x, y), fill=(r, g, b, 255))
                        drawn += 1

        # draw nice squares with the given side length
        elif squaresize > 1:
            for y in range(imgside_length):
                for x in range(imgside_length):
                    if drawn < len(self.palette):
                        sx = x * squaresize - 1
                        sy = y * squaresize - 1
                        ex = sx + squaresize - 1
                        ey = sy + squaresize
                        r, g, b = self.palette[drawn]
                        # vertices: begin top-left, go clockwise
                        vertices = [(sx, sy), (ex, sy), (ex, ey), (sx, ey)]
                        draw.polygon(vertices, fill=(r, g, b, 255))

                        if draw_text and squaresize > 40:
                            # draw the color id

                            # insert the current color id into the label string
                            ctext = text_format % drawn
                            tcolor = (255-r, 255-b, 255-g, 255)

                            # draw the text; TODO: use a custom-sized font
                            draw.text((sx + 3, sy + 1), ctext, fill=tcolor, font=None)

                        drawn += 1

        else:
            raise Exception("squaresize must be a positive integer")

        return palette_image
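A hypothetical caller sketch: palette stands for an instance of the color table class this method belongs to, and since gen_image() returns a PIL image, the usual save() applies:

img = palette.gen_image(draw_text=True, squaresize=100)
img.save("palette.png")   # PIL picks the output format from the extension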
Code example #12
def generate_all(cpp_src_dir):
    for filename, content in generate_all_raw(cpp_src_dir):
        # test whether filename matches the pattern *.gen.*
        basename = os.path.basename(filename)
        try:
            # this will raise a ValueError if basename doesn't have exactly
            # three dot-separated components
            basename, marker, suffix = basename.split('.')

            # if the middle component isn't "gen", raise a ValueError
            # manually
            if not (basename and marker and suffix) or marker != 'gen':
                raise ValueError()
        except ValueError:
            dbg(
                "error in codegen: required filename format is " +
                "[base].gen.[suffix], but filename is %s" % filename, 0)
            exit(1)

        # get absolute filename
        absfilename = "".join((cpp_src_dir, "/", filename))

        # post-process content
        extension = os.path.splitext(filename)[1].lower().strip()
        if extension in {'.h', '.hpp', '.c', '.cpp'}:
            comment_prefix = '//'
        else:
            comment_prefix = '#'

        splitcontent = content.split('\n')
        if "copyright" in splitcontent[0].lower():
            # the content already contains a copyright line
            if splitcontent[1].strip() in {'', comment_prefix}:
                content = '\n'.join(splitcontent[2:])
            else:
                content = '\n'.join(splitcontent[1:])

            copyright = splitcontent[0]
            if copyright.startswith(comment_prefix):
                copyright = copyright[len(comment_prefix):]
            copyright = copyright.strip()

        else:
            year = datetime.datetime.now().year
            copyright = ("Copyright 2013-{} the openage authors. "
                         "See copying.md for legal info.").format(year)

        content = contenttemplate.format(copyrightline=copyright,
                                         prefix=comment_prefix,
                                         code=content)

        yield absfilename, filename, content
Code example #13
    def __init__(self, picture_data, hotspot=None):

        self.width  = picture_data.shape[1]
        self.height = picture_data.shape[0]

        dbg("creating TextureImage with size %d x %d" % (self.width, self.height), 3)

        if hotspot is None:
            self.hotspot = (0, 0)
        else:
            self.hotspot = hotspot

        self.data = picture_data
Code example #14
File: texture.py Project: MrBeardy/openage
    def __init__(self, picture_data, hotspot=None):

        self.width = picture_data.shape[1]
        self.height = picture_data.shape[0]

        dbg("creating TextureImage with size %d x %d" % (self.width, self.height), 3)

        if hotspot is None:
            self.hotspot = (0, 0)
        else:
            self.hotspot = hotspot

        self.data = picture_data
Code example #15
File: codegen.py Project: ArseniyShestakov/openage
def generate_all(cpp_src_dir):
    for filename, content in generate_all_raw(cpp_src_dir):
        # test whether filename matches the pattern *.gen.*
        basename = os.path.basename(filename)
        try:
            # this will raise a ValueError if basename doesn't have exactly
            # three dot-separated components
            basename, marker, suffix = basename.split('.')

            # if the middle component isn't "gen", raise a ValueError
            # manually
            if not (basename and marker and suffix) or marker != 'gen':
                raise ValueError()
        except ValueError:
            dbg("error in codegen: required filename format is " +
                "[base].gen.[suffix], but filename is %s" % filename, 0)
            exit(1)

        # get absolute filename
        absfilename = "".join((cpp_src_dir, "/", filename))

        # post-process content
        extension = os.path.splitext(filename)[1].lower().strip()
        if extension in {'.h', '.hpp', '.c', '.cpp'}:
            comment_prefix = '//'
        else:
            comment_prefix = '#'

        splitcontent = content.split('\n')
        if "copyright" in splitcontent[0].lower():
            # the content already contains a copyright line
            if splitcontent[1].strip() in {'', comment_prefix}:
                content = '\n'.join(splitcontent[2:])
            else:
                content = '\n'.join(splitcontent[1:])

            copyright = splitcontent[0]
            if copyright.startswith(comment_prefix):
                copyright = copyright[len(comment_prefix):]
            copyright = copyright.strip()

        else:
            year = datetime.datetime.now().year
            copyright = ("Copyright 2013-{} the openage authors. "
                         "See copying.md for legal info.").format(year)

        content = contenttemplate.format(copyrightline=copyright,
                                         prefix=comment_prefix,
                                         code=content)

        yield absfilename, filename, content
Code example #16
File: empiresdat.py Project: zhangf911/openage
    def __init__(self, fname):
        self.fname = fname
        dbg("reading empires2*.dat from %s..." % fname, lvl=1)

        f = open(fname, "rb")

        dbg("decompressing data from %s" % fname, lvl=2)

        compressed_data = f.read()
        # decompress content with zlib (note the magic -15)
        # -15: the minus sign means there is no zlib header, 15 is the maximum window size
        self.content = zlib.decompress(compressed_data, -15)
        f.close()

        self.compressed_size = len(compressed_data)
        self.decompressed_size = len(self.content)

        # compressed data no longer needed
        del compressed_data

        dbg("length of compressed data: %d = %d kB" %
            (self.compressed_size, self.compressed_size / 1024),
            lvl=2)
        dbg("length of decompressed data: %d = %d kB" %
            (self.decompressed_size, self.decompressed_size / 1024),
            lvl=2)
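A small, self-contained sketch of the raw-deflate convention used above: negative window bits tell zlib that the stream carries no header or checksum (the payload is made up):

import zlib

co = zlib.compressobj(wbits=-15)                    # produce a raw deflate stream
raw = co.compress(b"example payload") + co.flush()
assert zlib.decompress(raw, -15) == b"example payload"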
Code example #17
File: generated_file.py Project: CodyKelly/openage
    def create_xref_headers(self, file_pool):
        """
        discover and add needed header snippets for type references across files.
        """

        dbg("%s typerefs %s" % (repr(self), repr(self.typerefs)), lvl=3)
        dbg("%s typedefs %s" % (repr(self), repr(self.typedefs)), lvl=3)

        new_resolves = set()
        for include_candidate in file_pool:
            candidate_resolves = include_candidate.typedefs & (self.typerefs -
                                                               self.typedefs)

            if len(candidate_resolves) > 0:
                new_header = include_candidate.get_include_snippet()

                dbg(lazymsg=lambda: "%s: to resolve %s" %
                    (repr(self), candidate_resolves),
                    push="add_header",
                    lvl=3)
                self.add_snippet(new_header, inherit_typedefs=False)
                dbg(pop="add_header")

                new_resolves |= candidate_resolves

        still_missing = ((self.typerefs - self.typedefs) -
                         self.included_typedefs) - new_resolves
        if len(still_missing) > 0:
            raise Exception("still missing types for %s:\n%s" %
                            (self, still_missing))
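The set algebra above is easier to follow with concrete values; this hedged sketch uses made-up type names rather than real openage types:

typedefs_here  = {"coord_t"}                      # types this file defines itself
typerefs_here  = {"coord_t", "tile_t", "unit_t"}  # types this file references
candidate_defs = {"tile_t"}                       # types an include candidate defines

# types the candidate's header would resolve for this file
resolves = candidate_defs & (typerefs_here - typedefs_here)
print(resolves)   # {'tile_t'} -> a non-empty set triggers add_snippet() above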
Code example #18
File: slp.py Project: shuozhifenxi/openage
    def __init__(self, frame_info, data):
        self.info = frame_info

        # for each row:
        # contains (left, right) number of boundary pixels
        self.boundaries = []
        # stores the file offset for the first drawing command
        self.cmd_offsets = []

        # palette index matrix representing the final image
        self.pcolor = list()

        dbg(push="frame", lvl=3)

        # process boundary table
        for i in range(self.info.size[1]):
            outline_entry_position = (self.info.outline_table_offset + i
                                      * SLPFrame.slp_frame_row_edge.size)

            left, right = SLPFrame.slp_frame_row_edge.unpack_from(
                data, outline_entry_position
            )

            # is this row completely transparent?
            if left == 0x8000 or right == 0x8000:
                # TODO: -1 or like should be enough
                self.boundaries.append(SpecialColor.transparent)
            else:
                self.boundaries.append((left, right))

        dbg("boundary values: %s" % self.boundaries)

        # process cmd table
        for i in range(self.info.size[1]):
            cmd_table_position = (self.info.qdl_table_offset + i
                                  * SLPFrame.slp_command_offset.size)
            cmd_offset, = SLPFrame.slp_command_offset.unpack_from(
                data, cmd_table_position
            )
            self.cmd_offsets.append(cmd_offset)

        dbg("cmd_offsets:     %s" % self.cmd_offsets)

        for i in range(self.info.size[1]):
            self.pcolor.append(self.create_palette_color_row(data, i))

        dbg(lazymsg=lambda: "frame color index data:\n%s" % str(self.pcolor),
            lvl=4)
        dbg(pop="frame")
Code example #19
    def __lt__(self, other):
        """
        comparison of two snippets for their ordering
        """

        if isinstance(other, type(self)) or isinstance(self, type(other)):
            if not (other.orderby and self.orderby):
                faild = self if other.orderby else other
                raise Exception("%s doesn't have orderby member set" % (repr(faild)))
            else:
                ret = self.orderby < other.orderby
                dbg(lazymsg=lambda: "%s < %s = %s" % (repr(self), repr(other), ret), lvl=4)
                return ret
        else:
            raise TypeError("unorderable types: %s < %s" % (type(self), type(other)))
Code example #20
    def read_rsrc_tree(self, pos):
        """
        reads a resource directory
        note that the directory may contain subdirectories,
        in which case the function is called recursively

        pos
            position of directory in rsrc section
        returns
            resource directory structure dict
        """

        rdir = resource_directory_table.unpack_from(self.rsrcdata, pos)
        characteristics, timestamp, maj_ver, min_ver,\
            name_entry_count, id_entry_count = rdir
        pos += resource_directory_table.size

        dbg(push="rsrctree", lvl=4)

        entries = {}
        for i in range(name_entry_count + id_entry_count):
            name, rva = resource_directory_entry.unpack_from(
                self.rsrcdata, pos
            )
            pos += resource_directory_entry.size

            # process name
            if i < name_entry_count:
                # TODO get name from RVA entry[0]
                name = 'name@' + str(name)

            # process rva
            if rva & (2 ** 31):
                dbg("dir: " + str(name))
                rva -= 2 ** 31
                # rva points to a subdirectory
                entry = self.read_rsrc_tree(rva)
            else:
                dataentry = resource_data_entry.unpack_from(
                    self.rsrcdata, rva
                )
                data_rva, size, codepage, _ = dataentry
                data_absa = data_rva - self.resdatava
                # rva points to a leaf node
                entry = self.rsrcdata[data_absa:data_absa + size]
                dbg("leaf: %s, metadata @%#x, cp: %d, size: %#x,\
                     addr = %#x, absa = %#x" % (
                    name, rva, codepage, size, data_rva, data_absa
                ))

            entries[name] = entry

        dbg(pop="rsrctree")

        return entries
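A brief illustration of the high-bit test that separates subdirectory entries from leaf entries (the RVA value is invented):

rva = 0x80000060
if rva & (2 ** 31):          # high bit set -> the entry points to a subdirectory
    subdir_pos = rva - 2 ** 31
    print(hex(subdir_pos))   # 0x60 -> offset of the nested directory table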
Code example #21
    def add_required_snippets(self, snippet_list):
        """
        save required snippets for this one by looking at wanted type references

        the available candidates have to be passed as argument
        """

        self.required_snippets |= {s for s in snippet_list if len(self.typerefs & s.typedefs) > 0}

        dbg(lazymsg=lambda: "snippet %s requires %s" % (repr(self), repr(self.required_snippets)), lvl=3)

        resolved_types = set()
        for s in self.required_snippets:
            resolved_types |= (self.typerefs & s.typedefs)

        missing_types  = self.typerefs - resolved_types

        return missing_types
Code example #22
File: util.py Project: abhipranay/openage
def file_write_multi(file_dict, file_prefix=""):
    """
    save the given file dictionary to files

    key: filename
    value: file content
    """

    written_files = list()

    for file_name, file_data in file_dict.items():
        file_name = file_prefix + file_name
        dbg("saving %s.." % file_name, 1)
        file_name = file_get_path(file_name, write=True, mkdir=True)
        file_write(file_name, file_data)

        written_files.append(file_name)

    return written_files
Code example #23
    def __lt__(self, other):
        """
        comparison of two snippets for their ordering
        """

        if isinstance(other, type(self)) or isinstance(self, type(other)):
            if not (other.orderby and self.orderby):
                faild = self if other.orderby else other
                raise Exception("%s doesn't have orderby member set" %
                                (repr(faild)))
            else:
                ret = self.orderby < other.orderby
                dbg(lazymsg=lambda: "%s < %s = %s" %
                    (repr(self), repr(other), ret),
                    lvl=4)
                return ret
        else:
            raise TypeError("unorderable types: %s < %s" %
                            (type(self), type(other)))
Code example #24
def file_write_multi(file_dict, file_prefix=""):
    """
    save the given file dictionary to files

    key: filename
    value: file content
    """

    written_files = list()

    for file_name, file_data in file_dict.items():
        file_name = file_prefix + file_name
        dbg("saving %s.." % file_name, 1)
        file_name = file_get_path(file_name, write=True, mkdir=True)
        file_write(file_name, file_data)

        written_files.append(file_name)

    return written_files
Code example #25
def export_texture(file_data, drsname, file_id, file_extension, fname, palette, output_formats):
    from .slp import SLP
    s = SLP(file_data)

    dbg("%s: %d.%s -> %s -> generating atlas" % (
        drsname, file_id, file_extension, fname), 1)

    # create exportable texture from the slp
    texture = Texture(s, palette)

    # the hotspots of terrain textures have to be fixed:
    if drsname == "terrain":
        for entry in texture.image_metadata:
            entry["cx"] = terrain_tile_size.tile_halfsize["x"]
            entry["cy"] = terrain_tile_size.tile_halfsize["y"]

    # save the image and the corresponding metadata file
    texture.save(fname, output_formats)
    dbg("texture: finished", 1)
    return True
Code example #26
File: exportable.py Project: CodyKelly/openage
    def structs(cls):
        """
        create struct definitions for this class and its subdata references.
        """

        ret = list()
        self_member_count = 0

        dbg(lazymsg=lambda: "%s: generating structs" % (repr(cls)), lvl=2)

        #acquire all struct members, including the included members
        members = cls.get_data_format(allowed_modes=(True, READ_EXPORT,
                                                     NOREAD_EXPORT),
                                      flatten_includes=False)
        for is_parent, export, member_name, member_type in members:
            self_member_count += 1
            dbg(lazymsg=lambda: "%s: exporting member %s<%s>" %
                (repr(cls), member_name, member_type),
                lvl=3)

            if isinstance(member_type, MultisubtypeMember):
                for subtype_name, subtype_class in member_type.class_lookup.items(
                ):
                    if not issubclass(subtype_class, Exportable):
                        raise Exception(
                            "tried to export structs from non-exportable %s" %
                            subtype_class)
                    ret += subtype_class.structs()

            elif isinstance(member_type, GroupMember):
                dbg("entering group/include member %s of %s" %
                    (member_name, cls),
                    lvl=3)
                if not issubclass(member_type.cls, Exportable):
                    raise Exception(
                        "tried to export structs from non-exportable member included class %s"
                        % repr(member_type.cls))
                ret += member_type.cls.structs()

            else:
                continue

        #create struct only when it has members?
        if True or self_member_count > 0:
            new_def = StructDefinition(cls)
            dbg(lazymsg=lambda: "=> %s: created new struct definition: %s" %
                (repr(cls), str(new_def)),
                lvl=3)
            ret.append(new_def)

        return ret
Code example #27
File: blendomatic.py Project: CodyKelly/openage
    def __init__(self, fname):
        self.fname = fname
        dbg("reading blendomatic data from %s" % fname, 1, push="blendomatic")

        fname = file_get_path(fname, write=False)
        f = file_open(fname, binary=True, write=False)

        buf = f.read(Blendomatic.blendomatic_header.size)
        self.header = Blendomatic.blendomatic_header.unpack_from(buf)

        blending_mode_count, tile_count = self.header

        dbg(
            "%d blending modes, each %d tiles" %
            (blending_mode_count, tile_count), 2)

        blending_mode = Struct(endianness + "I %dB" % (tile_count))

        self.blending_modes = list()

        for i in range(blending_mode_count):
            header_data = f.read(blending_mode.size)
            bmode_header = blending_mode.unpack_from(header_data)

            new_mode = BlendingMode(i, f, tile_count, bmode_header)

            self.blending_modes.append(new_mode)

        f.close()
        dbg(pop="blendomatic")
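For reference, the per-mode header format built above expands as follows; this sketch assumes endianness is the little-endian marker "<" (an assumption about the surrounding module) and uses an invented tile count:

from struct import Struct

endianness = "<"   # assumed little-endian
tile_count = 31
blending_mode = Struct(endianness + "I %dB" % tile_count)
print(blending_mode.format)   # '<I 31B' -> one uint32 pixel count plus 31 tile-flag bytes
print(blending_mode.size)     # 35 bytes read per blending mode header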
Code example #28
    def __init__(self, fname):
        self.fname = fname
        dbg("reading blendomatic data from %s" % fname, 1, push="blendomatic")

        fname = file_get_path(fname, write=False)
        f = file_open(fname, binary=True, write=False)

        buf = f.read(Blendomatic.blendomatic_header.size)
        self.header = Blendomatic.blendomatic_header.unpack_from(buf)

        blending_mode_count, tile_count = self.header

        dbg("%d blending modes, each %d tiles" % (blending_mode_count, tile_count), 2)

        blending_mode = Struct(endianness + "I %dB" % (tile_count))

        self.blending_modes = list()

        for i in range(blending_mode_count):
            header_data  = f.read(blending_mode.size)
            bmode_header = blending_mode.unpack_from(header_data)

            new_mode = BlendingMode(i, f, tile_count, bmode_header)

            self.blending_modes.append(new_mode)

        f.close()
        dbg(pop="blendomatic")
Code example #29
    def add_required_snippets(self, snippet_list):
        """
        save required snippets for this one by looking at wanted type references

        the available candidates have to be passed as argument
        """

        self.required_snippets |= {
            s
            for s in snippet_list if len(self.typerefs & s.typedefs) > 0
        }

        dbg(lazymsg=lambda: "snippet %s requires %s" %
            (repr(self), repr(self.required_snippets)),
            lvl=3)

        resolved_types = set()
        for s in self.required_snippets:
            resolved_types |= (self.typerefs & s.typedefs)

        missing_types = self.typerefs - resolved_types

        return missing_types
Code example #30
File: empiresdat.py Project: Dotile/openage
    def __init__(self, fname):
        self.fname = fname
        dbg("reading empires2*.dat from %s..." % fname, lvl=1)

        f = open(fname, "rb")

        dbg("decompressing data from %s" % fname, lvl=2)

        compressed_data = f.read()
        # decompress content with zlib (note the magic -15)
        # -15: the minus sign means there is no zlib header, 15 is the maximum window size
        self.content = zlib.decompress(compressed_data, -15)
        f.close()

        self.compressed_size   = len(compressed_data)
        self.decompressed_size = len(self.content)

        # compressed data no longer needed
        del compressed_data

        dbg("length of compressed data: %d = %d kB" % (self.compressed_size, self.compressed_size / 1024), lvl=2)
        dbg("length of decompressed data: %d = %d kB" % (self.decompressed_size, self.decompressed_size / 1024), lvl=2)
Code example #31
File: exportable.py Project: Dotile/openage
    def structs(cls):
        """
        create struct definitions for this class and its subdata references.
        """

        ret = list()
        self_member_count = 0

        dbg(lazymsg=lambda: "%s: generating structs" % (repr(cls)), lvl=2)

        # acquire all struct members, including the included members
        members = cls.get_data_format(allowed_modes=(True, READ_EXPORT, NOREAD_EXPORT), flatten_includes=False)
        for is_parent, export, member_name, member_type in members:
            self_member_count += 1
            dbg(lazymsg=lambda: "%s: exporting member %s<%s>" % (repr(cls), member_name, member_type), lvl=3)

            if isinstance(member_type, MultisubtypeMember):
                for subtype_name, subtype_class in member_type.class_lookup.items():
                    if not issubclass(subtype_class, Exportable):
                        raise Exception("tried to export structs from non-exportable %s" % subtype_class)
                    ret += subtype_class.structs()

            elif isinstance(member_type, GroupMember):
                dbg("entering group/include member %s of %s" % (member_name, cls), lvl=3)
                if not issubclass(member_type.cls, Exportable):
                    raise Exception("tried to export structs from non-exportable member included class %s" % repr(member_type.cls))
                ret += member_type.cls.structs()

            else:
                continue

        # create struct only when it has members?
        if True or self_member_count > 0:
            new_def = StructDefinition(cls)
            dbg(lazymsg=lambda: "=> %s: created new struct definition: %s" % (repr(cls), str(new_def)), lvl=3)
            ret.append(new_def)

        return ret
Code example #32
File: generated_file.py Project: Dotile/openage
    def create_xref_headers(self, file_pool):
        """
        discover and add needed header snippets for type references across files.
        """

        dbg("%s typerefs %s" % (repr(self), repr(self.typerefs)), lvl=3)
        dbg("%s typedefs %s" % (repr(self), repr(self.typedefs)), lvl=3)

        new_resolves = set()
        for include_candidate in file_pool:
            candidate_resolves = include_candidate.typedefs & (self.typerefs - self.typedefs)

            if len(candidate_resolves) > 0:
                new_header = include_candidate.get_include_snippet()

                dbg(lazymsg=lambda: "%s: to resolve %s" % (repr(self), candidate_resolves), push="add_header", lvl=3)
                self.add_snippet(new_header, inherit_typedefs=False)
                dbg(pop="add_header")

                new_resolves |= candidate_resolves

        still_missing = ((self.typerefs - self.typedefs) - self.included_typedefs) - new_resolves
        if len(still_missing) > 0:
            raise Exception("still missing types for %s:\n%s" % (self, still_missing))
Code example #33
    def get_required_snippets(self, defined=None):
        """
        return all referenced and the snippet itself in the order they
        need to be put in the file.
        """

        #TODO: loop detection
        ret = list()

        dbg(lazymsg=lambda: "required snippets for %s {" % (repr(self)), push=True, lvl=4)

        # sort snippets deterministically by __lt__ function
        for s in sorted(self.required_snippets):
            ret += s.get_required_snippets()

        dbg(pop=True, lvl=4)
        dbg(lazymsg=lambda: "}", lvl=4)

        ret.append(self)
        return ret
Code example #34
    def get_required_snippets(self, defined=None):
        """
        return all referenced and the snippet itself in the order they
        need to be put in the file.
        """

        #TODO: loop detection
        ret = list()

        dbg(lazymsg=lambda: "required snippets for %s {" % (repr(self)),
            push=True,
            lvl=4)

        # sort snippets deterministically by __lt__ function
        for s in sorted(self.required_snippets):
            ret += s.get_required_snippets()

        dbg(pop=True, lvl=4)
        dbg(lazymsg=lambda: "}", lvl=4)

        ret.append(self)
        return ret
Code example #35
File: generated_file.py Project: Dotile/openage
    def generate(self):
        """
        actually generate the content for this file.
        """

        # TODO: create new snippets for resolving cyclic dependencies (forward declarations)

        dbg(push="generation", lvl=2)

        dbg(lazymsg=lambda: "".join((
            "\n=========== generating %s\n" % (repr(self)),
            "content snippets stored to be inserted:\n",
            pprint.pformat(self.snippets),
            "\n-----------",
        )), lvl=3)

        # apply preference overrides
        prefs = self.default_preferences.copy()
        prefs.update(self.output_preferences[self.format])

        snippets_header = {s for s in self.snippets if s.section == ContentSnippet.section_header}
        snippets_body   = self.snippets - snippets_header

        if len(snippets_body) == 0:
            raise Exception("generated file %s has no body snippets!" % (repr(self)))

        # type references in this file that could not be resolved
        missing_types = set()

        # put snippets into list in correct order
        # snippets will be written according to this [(snippet, prio), ...] list
        snippets_priorized = list()

        # determine each snippet's priority by number of type references and definitions
        # smaller prio means written earlier in the file.
        # also, find snippet dependencies
        dbg("assigning snippet priorities:", push="snippetprio", lvl=4)
        for s in sorted(snippets_body):
            snippet_prio = len(s.typerefs) - len(s.typedefs)
            snippets_priorized.append((s, snippet_prio))
            dbg(lazymsg=lambda: "prio %3.d => %s" % (snippet_prio, repr(s)), lvl=4)

            # let each snippet find others as dependencies
            missing_types |= s.add_required_snippets(self.snippets)

        dbg(pop="snippetprio")

        if len(missing_types) > 0:
            raise Exception("missing types for %s:\n%s" % (repr(self), pprint.pformat(missing_types)))

        # sort snippets according to their priority determined above
        snippets_priorized_sorted = sorted(snippets_priorized, key=lambda s: s[1])

        # create list of snippets to be put in the generated file.
        # [(snippet, prio)]
        snippets_body_sorted = list()
        snippets_body_set = set()

        # fetch list of all required snippets for all snippets to put in the file
        for snippet, prio in snippets_priorized_sorted:
            snippet_candidates = snippet.get_required_snippets()

            dbg(lazymsg=lambda: "required dependency snippet candidates: %s" % (pprint.pformat(snippet_candidates)), lvl=3)
            for s in snippet_candidates:
                if s.section == ContentSnippet.section_header:
                    if s not in snippets_header:
                        dbg(lazymsg=lambda: " `-> ADD  header snippet %s" % (repr(s)), lvl=4)
                        snippets_header.add(s)
                        continue

                elif s.section == ContentSnippet.section_body:
                    if s not in snippets_body_set:
                        snippets_body_sorted.append(s)
                        snippets_body_set.add(s)
                        dbg(lazymsg=lambda: " `-> ADD  body snippet %s" % (repr(s)), lvl=4)
                        continue

                dbg(lazymsg=lambda: " `-> SKIP snippet %s" % (repr(s)), lvl=4)

        # these snippets will be written outside the namespace
        # in the #include section
        snippets_header_sorted = sorted(snippets_header, key=lambda h: (not h.is_global, h.name))

        dbg(lazymsg=lambda: "".join((
            "\n-----------\n",
            "snippets after ordering for %s:\n" % (repr(self)),
            pprint.pformat(snippets_header_sorted + snippets_body_sorted),
            "\n===========",
        )), lvl=3)

        # merge file contents
        header_data = "".join(header.get_data() for header in snippets_header_sorted)
        file_data   = "\n".join(snippet.get_data() for snippet in snippets_body_sorted)

        namespace    = self.namespace
        header_guard = "".join((namespace.upper(), "_", self.file_name.replace("/", "_").upper()))

        # fill file header and footer with the generated file_name
        content_prefix = prefs["content_prefix"].substitute(header_guard=header_guard, namespace=namespace, headers=header_data)
        content_suffix = prefs["content_suffix"].substitute(header_guard=header_guard, namespace=namespace)

        # this is the final file content
        file_data = "".join((content_prefix, file_data, content_suffix))

        # determine output file name
        output_file_name_parts = [
            prefs["folder"],
            "%s%s" % (self.file_name, prefs["file_suffix"])
        ]

        dbg(pop="generation")

        # whee, return the (file_name, content)
        return (os.path.join(*output_file_name_parts), file_data)
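The snippet priority computed above is just a set-size difference; a hedged numeric example with invented type names:

typerefs = {"coord_t", "tile_t", "unit_t"}   # types the snippet references
typedefs = {"coord_t"}                       # types the snippet defines
snippet_prio = len(typerefs) - len(typedefs)
print(snippet_prio)   # 2 -> written later than snippets with smaller priorities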
Code example #36
File: slp.py Project: abhipranay/openage
    def process_drawing_cmds(self, data, rowid,
                             first_cmd_offset, missing_pixels):
        """
        create palette indices (colors) for the drawing commands
        found for this row in the SLP frame.
        """

        # position in the data blob, we start at the first command of this row
        dpos = first_cmd_offset

        # this array gets filled with palette indices by the cmds
        pcolor_list = []

        # is the end of the current row reached?
        eor = False

        # work through commands till end of row.
        while not eor:
            if len(pcolor_list) > missing_pixels:
                raise Exception("Only %d pixels should be drawn in row %d!" % (
                    missing_pixels, rowid))

            # fetch drawing instruction
            cmd = self.get_byte_at(data, dpos)

            lower_nibble = 0x0f & cmd
            higher_nibble = 0xf0 & cmd
            lower_bits = 0b00000011 & cmd

            dbg(lazymsg=lambda: "opcode: %#x, rowid: %d" % (cmd, rowid), lvl=4)

            if lower_nibble == 0x0f:
                # eol (end of line) command, this row is finished now.

                dbg("end of row reached.", 4)
                eor = True
                continue

            elif lower_bits == 0b00000000:
                # color_list command
                # draw the following bytes as palette colors

                pixel_count = cmd >> 2
                for i in range(pixel_count):
                    dpos += 1
                    color = self.get_byte_at(data, dpos)
                    pcolor_list.append(color)

            elif lower_bits == 0b00000001:
                # skip command
                # draw 'count' transparent pixels
                # count = cmd >> 2; if count == 0: count = nextbyte

                pixel_count, dpos = self.cmd_or_next(cmd, 2, data, dpos)
                pcolor_list += [SpecialColor.transparent] * pixel_count

            elif lower_nibble == 0x02:
                # big_color_list command
                # draw (higher_nibble << 4 + nextbyte) following palette colors

                dpos += 1
                nextbyte = self.get_byte_at(data, dpos)
                pixel_count = (higher_nibble << 4) + nextbyte

                for i in range(pixel_count):
                    dpos += 1
                    color = self.get_byte_at(data, dpos)
                    pcolor_list.append(color)

            elif lower_nibble == 0x03:
                # big_skip command
                # draw (higher_nibble << 4 + nextbyte)
                # transparent pixels

                dpos += 1
                nextbyte = self.get_byte_at(data, dpos)
                pixel_count = (higher_nibble << 4) + nextbyte

                pcolor_list += [SpecialColor.transparent] * pixel_count

            elif lower_nibble == 0x06:
                # player_color_list command
                # we have to draw the player color for cmd>>4 times,
                # or if that is 0, as often as the next byte says.

                pixel_count, dpos = self.cmd_or_next(cmd, 4, data, dpos)
                for i in range(pixel_count):
                    dpos += 1
                    color = self.get_byte_at(data, dpos)

                    # the SpecialColor class preserves the calculation with
                    # player * 16 + color, this is the palette offset
                    # for tinted player colors.
                    entry = SpecialColor(special_id=SpecialColor.player_color,
                                         base_color=color)
                    pcolor_list.append(entry)

            elif lower_nibble == 0x07:
                # fill command
                # draw 'count' pixels with color of next byte

                pixel_count, dpos = self.cmd_or_next(cmd, 4, data, dpos)

                dpos += 1
                color = self.get_byte_at(data, dpos)

                pcolor_list += [color] * pixel_count

            elif lower_nibble == 0x0A:
                # fill player color command
                # draw the player color for 'count' times

                pixel_count, dpos = self.cmd_or_next(cmd, 4, data, dpos)

                dpos += 1
                color = self.get_byte_at(data, dpos)

                # TODO: verify this. might be incorrect.
                # color = ((color & 0b11001100) | 0b00110011)

                # SpecialColor class preserves the calculation of
                # player*16 + color
                entry = SpecialColor(special_id=SpecialColor.player_color,
                                     base_color=color)
                pcolor_list += [entry] * pixel_count

            elif lower_nibble == 0x0B:
                # shadow command
                # draw a transparent shadow pixel for 'count' times

                pixel_count, dpos = self.cmd_or_next(cmd, 4, data, dpos)

                pcolor_list += [SpecialColor.shadow] * pixel_count

            elif lower_nibble == 0x0E:
                # "extended" commands. higher nibble specifies the instruction.

                if higher_nibble == 0x00:
                    # render hint xflip command
                    # render hint: only draw the following command,
                    # if this sprite is not flipped left to right
                    dbg("render hint: xfliptest", 2)

                elif higher_nibble == 0x10:
                    # render h notxflip command
                    # render hint: only draw the following command,
                    # if this sprite IS flipped left to right.
                    dbg("render hint: !xfliptest", 2)

                elif higher_nibble == 0x20:
                    # table use normal command
                    # set the transform color table to normal,
                    # for the standard drawing commands
                    dbg("image wants normal color table now", 2)

                elif higher_nibble == 0x30:
                    # table use alternate command
                    # set the transform color table to alternate,
                    # this affects all following standard commands
                    dbg("image wants alternate color table now", 2)

                elif higher_nibble == 0x40:
                    # outline_1 command
                    # the next pixel shall be drawn as special color 1,
                    # if it is obstructed later in rendering
                    pcolor_list.append(SpecialColor(1))

                elif higher_nibble == 0x60:
                    # outline_2 command
                    # same as above, but special color 2
                    pcolor_list.append(SpecialColor(2))

                elif higher_nibble == 0x50:
                    # outline_span_1 command
                    # same as above, but span special color 1 nextbyte times.

                    dpos += 1
                    pixel_count = self.get_byte_at(data, dpos)

                    pcolor_list += [SpecialColor(1)] * pixel_count

                elif higher_nibble == 0x70:
                    # outline_span_2 command
                    # same as above, using special color 2

                    dpos += 1
                    pixel_count = self.get_byte_at(data, dpos)

                    pcolor_list += [SpecialColor(2)] * pixel_count

            else:
                dbg("stored in this row so far: %s" % pcolor_list, 2)
                raise Exception("wtf! unknown slp drawing command read:"
                                " %#x in row %d" % (cmd, rowid))

            dpos += 1

        # end of row reached, return the created pixel array.
        return pcolor_list
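To make the nibble bookkeeping above concrete, this is how a single made-up opcode byte splits into the three fields the dispatcher checks:

cmd = 0x27                        # arbitrary example byte, not taken from a real SLP
lower_nibble  = 0x0f & cmd        # 0x07 -> would match the "fill" branch above
higher_nibble = 0xf0 & cmd        # 0x20
lower_bits    = 0b00000011 & cmd  # 0b11 -> neither color_list nor skip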
Code example #37
File: data_formatter.py Project: MrBeardy/openage
    def export(self, requested_formats):
        """
        generate content snippets that will be saved to generated files

        output: {file_name: GeneratedFile, ...}
        """

        # storage of all needed content snippets
        generate_files = list()

        for format in requested_formats:
            files = dict()

            snippets = list()

            # iterate over all stored data sets and
            # generate all data snippets for the requested output formats.
            for data_set in self.data:

                # resolve data xrefs for this data_set
                data_set.dynamic_ref_update(self.typedefs)

                # generate one output chunk list for each requested format
                if format == "csv":
                    new_snippets = data_set.generate_csv(self)

                elif format == "struct":
                    new_snippets = data_set.generate_struct(self)

                elif format == "structimpl":
                    new_snippets = data_set.generate_struct_implementation(self)

                else:
                    raise Exception("unknown export format %s requested" % format)

                snippets.extend(new_snippets)

            # create snippets for the encountered type definitions
            for type_name, type_definition in sorted(self.typedefs.items()):
                dbg(
                    lazymsg=lambda: "getting type definition snippets for %s<%s>.." % (type_name, type_definition),
                    lvl=4,
                )
                type_snippets = type_definition.get_snippets(type_definition.file_name, format)
                dbg(lazymsg=lambda: "`- got %d snippets" % (len(type_snippets)), lvl=4)

                snippets.extend(type_snippets)

            # assign all snippets to generated files
            for snippet in snippets:

                # if this file was not yet created, do it nao
                if snippet.file_name not in files:
                    files[snippet.file_name] = GeneratedFile(snippet.file_name, format)

                files[snippet.file_name].add_snippet(snippet)

            generate_files.extend(files.values())

        # files is currently:
        # [GeneratedFile, ...]

        # find xref header includes
        for gen_file in generate_files:
            # only create headers for non-data files
            if gen_file.format not in ("csv",):
                dbg("%s: creating needed xref headers:" % (repr(gen_file)), push="includegen", lvl=3)
                gen_file.create_xref_headers(generate_files)
                gen_file.create_forward_declarations(generate_files)
                dbg(pop="includegen")

        # actually generate the files
        ret = dict()

        # we now invoke the content generation for each generated file
        for gen_file in generate_files:
            file_name, content = gen_file.generate()
            ret[file_name] = content

        # return {file_name: content, ...}
        return ret
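A hypothetical caller sketch (the formatter instance and prefix are invented): the {file_name: content} dict returned by export() has the shape expected by the file_write_multi() variants shown in code examples #2 and #22:

generated = formatter.export(("csv", "struct", "structimpl"))
file_write_multi(generated, file_prefix="gen/")   # signature as in code example #22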
Code example #38
File: exportable.py Project: zhangf911/openage
    def read(self, raw, offset, cls=None, members=None):
        """
        recursively read defined binary data from raw at given offset.

        this is used to fill the python classes with data from the binary input.
        """

        if cls:
            target_class = cls
        else:
            target_class = self

        dbg(lazymsg=lambda: "-> 0x%08x => reading %s" % (offset, repr(cls)), lvl=3)

        # break out of the current reading loop when members don't exist in source data file
        stop_reading_members = False

        if not members:
            members = target_class.get_data_format(allowed_modes=(True, READ_EXPORT, READ, READ_UNKNOWN), flatten_includes=False)

        for is_parent, export, var_name, var_type in members:

            if stop_reading_members:
                if isinstance(var_type, DataMember):
                    replacement_value = var_type.get_empty_value()
                else:
                    replacement_value = 0

                setattr(self, var_name, replacement_value)
                continue

            if isinstance(var_type, GroupMember):
                if not issubclass(var_type.cls, Exportable):
                    raise Exception("class where members should be included is not exportable: %s" % var_type.cls.__name__)

                if isinstance(var_type, IncludeMembers):
                    # call the read function of the referenced class (cls),
                    # but store the data to the current object (self).
                    offset = var_type.cls.read(self, raw, offset, cls=var_type.cls)
                else:
                    # create new instance of referenced class (cls),
                    # use its read method to store data to itself,
                    # then save the result as a reference named `var_name`
                    # TODO: constructor argument passing may be required here.
                    grouped_data = var_type.cls()
                    offset = grouped_data.read(raw, offset)

                    setattr(self, var_name, grouped_data)

            elif isinstance(var_type, MultisubtypeMember):
                # subdata reference implies recursive call for reading the binary data

                # arguments passed to the next-level constructor.
                varargs = dict()

                if var_type.passed_args:
                    if type(var_type.passed_args) == str:
                        var_type.passed_args = set(var_type.passed_args)
                    for passed_member_name in var_type.passed_args:
                        varargs[passed_member_name] = getattr(self, passed_member_name)

                # subdata list length has to be defined beforehand as a object member OR number.
                # it's name or count is specified at the subdata member definition by length.
                list_len = var_type.get_length(self)

                # prepare result storage lists
                if isinstance(var_type, SubdataMember):
                    # single-subtype child data list
                    setattr(self, var_name, list())
                    single_type_subdata = True
                else:
                    # multi-subtype child data list
                    setattr(self, var_name, gen_dict_key2lists(var_type.class_lookup.keys()))
                    single_type_subdata = False

                # check if entries need offset checking
                if var_type.offset_to:
                    offset_lookup = getattr(self, var_type.offset_to[0])
                else:
                    offset_lookup = None

                for i in range(list_len):

                    # if datfile offset == 0, entry has to be skipped.
                    if offset_lookup:
                        if not var_type.offset_to[1](offset_lookup[i]):
                            continue
                        # TODO: don't read sequentially, use the lookup as new offset?

                    if single_type_subdata:
                        # append single data entry to the subdata object list
                        new_data_class = var_type.class_lookup[None]
                    else:
                        # to determine the subtype class, read the binary definition
                        # this utilizes an on-the-fly definition of the data to be read.
                        offset = self.read(
                            raw, offset, cls=target_class,
                            members=(((False,) + var_type.subtype_definition),)
                        )

                        # read the variable set by the above read call to
                        # use the read data to determine the denomination of the member type
                        subtype_name = getattr(self, var_type.subtype_definition[1])

                        # look up the type name to get the subtype class
                        new_data_class = var_type.class_lookup[subtype_name]

                    if not issubclass(new_data_class, Exportable):
                        raise Exception("dumped data is not exportable: %s" % new_data_class.__name__)

                    # create instance of submember class
                    new_data = new_data_class(**varargs)

                    # dbg(lazymsg=lambda: "%s: calling read of %s..." % (repr(self), repr(new_data)), lvl=4)

                    # recursive call, read the subdata.
                    offset = new_data.read(raw, offset, new_data_class)

                    # append the new data to the appropriate list
                    if single_type_subdata:
                        getattr(self, var_name).append(new_data)
                    else:
                        getattr(self, var_name)[subtype_name].append(new_data)

            else:
                # reading binary data, as this member is no reference but actual content.

                data_count = 1
                is_custom_member = False

                if type(var_type) == str:
                    # TODO: generate and save member type on the fly
                    # instead of just reading
                    is_array = vararray_match.match(var_type)

                    if is_array:
                        struct_type = is_array.group(1)
                        data_count  = is_array.group(2)
                        if struct_type == "char":
                            struct_type = "char[]"

                        if integer_match.match(data_count):
                            # integer length
                            data_count = int(data_count)
                        else:
                            # dynamic length specified by member name
                            data_count = getattr(self, data_count)

                    else:
                        struct_type = var_type
                        data_count  = 1

                elif isinstance(var_type, DataMember):
                    # special type requires having set the raw data type
                    struct_type = var_type.raw_type
                    data_count  = var_type.get_length(self)
                    is_custom_member = True

                else:
                    raise Exception("unknown data member definition %s for member '%s'" % (var_type, var_name))

                if data_count < 0:
                    raise Exception("invalid length %d < 0 in %s for member '%s'" % (data_count, var_type, var_name))

                if struct_type not in struct_type_lookup:
                    raise Exception("%s: member %s requests unknown data type %s" % (repr(self), var_name, struct_type))

                if export == READ_UNKNOWN:
                    # for unknown variables, generate uid for the unknown memory location
                    var_name = "unknown-0x%08x" % offset

                # lookup c type to python struct scan type
                symbol = struct_type_lookup[struct_type]

                # read that stuff!!11
                dbg(lazymsg=lambda: "        @0x%08x: reading %s<%s> as '< %d%s'" % (offset, var_name, var_type, data_count, symbol), lvl=4)

                struct_format = "< %d%s" % (data_count, symbol)
                result        = struct.unpack_from(struct_format, raw, offset)

                dbg(lazymsg=lambda: "                \_ = %s" % (result, ), lvl=4)

                if is_custom_member:
                    if not var_type.verify_read_data(self, result):
                        raise Exception("invalid data when reading %s at offset %# 08x" % (var_name, offset))

                # TODO: move these into a read entry hook/verification method
                if symbol == "s":
                    # stringify char array
                    result = zstr(result[0])
                elif data_count == 1:
                    # store first tuple element
                    result = result[0]

                    if symbol == "f":
                        import math
                        if not math.isfinite(result):
                            raise Exception("invalid float when reading %s at offset %# 08x" % (var_name, offset))

                # increase the current file position by the size we just read
                offset += struct.calcsize(struct_format)

                # run entry hook for non-primitive members
                if is_custom_member:
                    result = var_type.entry_hook(result)

                    if result == ContinueReadMember.ABORT:
                        # don't go through all other members of this class!
                        stop_reading_members = True

                # store member's data value
                setattr(self, var_name, result)

        dbg(lazymsg=lambda: "<- 0x%08x <= finished %s" % (offset, repr(cls)), lvl=3)
        return offset
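
For orientation: the loop above builds a format string "< %d%s" from the member's element count and struct symbol, unpacks it with struct.unpack_from, and then advances the offset by struct.calcsize. A minimal self-contained sketch of that unpack-and-advance pattern, on a made-up buffer (a little-endian uint16 count followed by that many unsigned bytes):

import struct

# hypothetical buffer for illustration only: uint16 count, then <count> bytes
raw = struct.pack("< H 3B", 3, 10, 20, 30)

offset = 0
(count,) = struct.unpack_from("< H", raw, offset)
offset += struct.calcsize("< H")

# same format-building scheme as read() above: "< %d%s" % (data_count, symbol)
struct_format = "< %d%s" % (count, "B")
result = struct.unpack_from(struct_format, raw, offset)
offset += struct.calcsize(struct_format)

print(count, result, offset)   # 3 (10, 20, 30) 5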
Code example #39
0
File: blendomatic.py Project: CodyKelly/openage
    def __init__(self, idx, data_file, tile_count, header):
        """
        initialize one blending mode,
        consisting of multiple frames for all blending directions

        the bitmasks were used to decide whether this pixel has
        to be used for calculations.

        the alphamask is used to determine the alpha amount for blending.
        """

        #should be 2353 -> number of pixels (single alpha byte values)
        self.pxcount = header[0]
        #tile_flags = header[1:]  #TODO what do they do?

        dbg("tiles in blending mode %d have %d pixels" % (idx, self.pxcount),
            2)

        #as we draw in isometric tile format, this is the row count
        row_count = int(math.sqrt(self.pxcount)) + 1  #should be 49

        #alpha_masks_raw is an array of bytes that will draw 32 images,
        #which are bit masks.
        #
        #one of these masks also has 2353 pixels
        #the storage of the bit masks is 4*tilesize, here's why:
        #
        #4 * 8bit * 2353 pixels = 75296 bitpixels
        #==> 75296/(32 images) = 2353 bitpixel/image
        #
        #this means if we interpret the 75296 bitpixels as 32 images,
        #each of these images gets 2353 bits as data.
        #TODO: why 32 images? isn't that depending on tile_count?

        bitmask_buf_size = self.pxcount * 4
        dbg("reading 1bit masks -> %d bytes" % (bitmask_buf_size), 2)
        alpha_masks_buf = data_file.read(bitmask_buf_size)
        alpha_masks_raw = unpack_from("%dB" % (bitmask_buf_size),
                                      alpha_masks_buf)

        #list of alpha-mask tiles
        self.alphamasks = list()

        dbg(
            "reading 8bit masks for %d tiles -> %d bytes" %
            (tile_count, self.pxcount * tile_count), 2)

        #draw mask tiles for this blending mode
        for j in range(tile_count):
            tile_buf = data_file.read(self.pxcount)
            pixels = unpack_from("%dB" % self.pxcount, tile_buf)
            self.alphamasks.append(self.get_tile_from_data(row_count, pixels))

        bitvalues = list()
        for i in alpha_masks_raw:
            for b_id in range(7, -1, -1):
                #bitmask from 0b00000001 to 0b10000000
                bit_mask = 2**b_id
                bit = i & bit_mask
                bitvalues.append(bit)

        #list of bit-mask tiles
        self.bitmasks = list()

        #TODO: is 32 really hardcoded?
        for i in range(32):
            data_begin = i * self.pxcount
            data_end = (i + 1) * self.pxcount
            pixels = bitvalues[data_begin:data_end]

            self.bitmasks.append(self.get_tile_from_data(row_count, pixels))
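
A minimal standalone sketch of the MSB-first bit unpacking performed above: one packed byte from alpha_masks_raw expands into 8 mask values, each either 0 or the corresponding power of two (the byte value below is made up).

packed_byte = 0b10110001   # example value, not from a real blendomatic.dat

bits = []
for b_id in range(7, -1, -1):
    # bitmask from 0b10000000 down to 0b00000001
    bits.append(packed_byte & (2 ** b_id))

print(bits)   # [128, 0, 32, 16, 0, 0, 0, 1] -- nonzero entries are "set" pixels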
Code example #40
0
def media_convert(args):
    """\
    perform asset conversion.
    requires original assets and stores them in usable and free formats.
    """

    # assume to extract all files when nothing specified.
    if not args.extract:
        args.extract.append('*:*.*')

    extraction_rules = [ExtractionRule(e) for e in args.extract]

    dbg("age2 input directory: %s" % (args.srcdir, ), 1)

    # soon to be replaced by a sane version detection
    drsmap = {
        "graphics": "graphics.drs",
        "interface": "interfac.drs",
        "sounds0": "sounds.drs",
        "sounds1": "sounds_x1.drs",
        "gamedata0": "gamedata.drs",
        "gamedata1": "gamedata_x1.drs",
        "gamedata2": "gamedata_x1_p1.drs",
        "terrain": "terrain.drs",
    }

    drsfiles = {
        k: util.ifilepath(args.srcdir, os.path.join("data", v), True)
        for k, v in drsmap.items()
    }

    # gamedata.drs does not exist in HD edition,
    # but its contents are in gamedata_x1.drs instead,
    # so we can ignore this file if it doesn't exist
    drsfiles = {k: DRS(p, drsmap[k]) for k, p in drsfiles.items() if p}

    # this is the ingame color palette file id,
    # 256 color lookup for all graphics pixels
    palette_id = 50500
    palette = ColorTable(drsfiles["interface"].get_file_data(
        'bin', palette_id))

    # metadata dumping output format, more to come?
    output_formats = ("csv", )

    termcolortable = ColorTable(termcolors.urxvtcoltable)

    # saving files is disabled by default
    write_enabled = False

    if args.output:
        dbg("storing files to %s" % args.output, 1)
        write_enabled = True

        player_palette = PlayerColorTable(palette)

        if args.extrafiles:
            palette.save_visualization('info/colortable.pal.png')
            player_palette.save_visualization('info/playercolortable.pal.png')

        from . import blendomatic

        # HD Edition has a blendomatic_x1.dat in addition to its new
        # blendomatic.dat blendomatic_x1.dat is the same file as AoK:TC's
        # blendomatic.dat, and HD does not have blendomatic.dat, so we try
        # _x1 first and fall back to the AoK:TC way if it does not exist
        blend_file = util.ifilepath(args.srcdir, "data/blendomatic_x1.dat",
                                    True)
        if not blend_file:
            blend_file = util.ifilepath(args.srcdir, "data/blendomatic.dat")

        blend_data = blendomatic.Blendomatic(blend_file)
        blend_data.save(os.path.join(args.output, "blendomatic.dat/"),
                        output_formats)

        from .stringresource import StringResource
        stringres = StringResource()

        # AoK:TC uses .DLL files for its string resources,
        # HD uses plaintext files
        lang_dll = util.ifilepath(args.srcdir, "language.dll", True)
        if lang_dll:
            from .pefile import PEFile
            for l in ["language.dll", "language_x1.dll", "language_x1_p1.dll"]:
                lpath = util.ifilepath(args.srcdir, l)
                stringres.fill_from(PEFile(lpath))

        else:
            from .hdlanguagefile import HDLanguageFile
            bindir = util.ifilepath(args.srcdir, "bin")
            for lang in os.listdir(bindir):
                langfile = "%s/%s/%s-language.txt" % (bindir, lang, lang)

                # there are some "base language" files in HD that we don't
                # need and only the dir for the language that's currently in
                # use contains a language file
                if os.path.isfile(langfile):
                    stringres.fill_from(HDLanguageFile(langfile, lang))

        # TODO: transform and cleanup the read strings...
        # (strip html, insert formatchars/identifiers, ...)

        # create the dump for the dat file
        from .gamedata import empiresdat

        # try to use cached version?
        parse_empiresdat = False
        if args.use_dat_cache:
            dbg("trying to use cache file %s..." % (dat_cache_file), lvl=1)
            try:
                with open(dat_cache_file, "rb") as f:
                    gamedata = pickle.load(f)
                    dbg("could successfully load cached gamedata!", lvl=1)

            except FileNotFoundError as err:
                parse_empiresdat = True

        if not args.use_dat_cache or parse_empiresdat:
            datfile_name = util.ifilepath(
                args.srcdir, os.path.join("data", "empires2_x1_p1.dat"))
            datfile = empiresdat.EmpiresDatGzip(datfile_name)
            gamedata = empiresdat.EmpiresDatWrapper()

            if args.extrafiles:
                datfile.raw_dump('raw/empires2x1p1.raw')

            dbg("reading main data file %s..." % (datfile_name), lvl=1)
            gamedata.read(datfile.content, 0)

            # store the datfile serialization for caching
            with open(dat_cache_file, "wb") as f:
                pickle.dump(gamedata, f)

        # modify the read contents of datfile
        dbg("repairing some values in the main data file...", lvl=1)
        from . import fix_data
        gamedata.empiresdat[0] = fix_data.fix_data(gamedata.empiresdat[0])

        # dbg("transforming main data file %s..." % (datfile_name), lvl=1)
        # TODO: data transformation nao! (merge stuff, etcetc)

        dbg("formatting output data...", lvl=1)
        data_formatter = DataFormatter()

        # dump metadata information
        data_dump = list()
        data_dump += blend_data.dump("blending_modes")
        data_dump += player_palette.dump("player_palette_%d" % palette_id)
        data_dump += termcolortable.dump("termcolors")
        data_dump += stringres.dump("string_resources")
        data_formatter.add_data(data_dump)

        # dump gamedata datfile data
        gamedata_dump = gamedata.dump("gamedata")
        data_formatter.add_data(gamedata_dump[0], prefix="gamedata/")

        output_data = data_formatter.export(output_formats)

        # save the meta files
        dbg("saving output data files...", lvl=1)
        file_write_multi(output_data, args.output)

    file_list = defaultdict(lambda: list())
    media_files_extracted = 0

    # iterate over all available files in the drs, check whether they should
    # be extracted
    for drsname, drsfile in drsfiles.items():
        for file_extension, file_id in drsfile.files:
            if not any(
                    er.matches(drsname, file_id, file_extension)
                    for er in extraction_rules):
                continue

            # append this file to the list result
            if args.list_files:
                file_list[file_id].append((drsfile.fname, file_extension))
                continue

            # generate output filename where data will be stored in
            if write_enabled:
                fbase = os.path.join(args.output, "Data", drsfile.name,
                                     str(file_id))
                fname = "%s.%s" % (fbase, file_extension)

                # create output folder
                util.mkdirs(os.path.split(fbase)[0])

                dbg("Extracting to %s..." % (fname), 2)
                file_data = drsfile.get_file_data(file_extension, file_id)
            else:
                continue

            # create an image file
            if file_extension == 'slp':
                from .slp import SLP
                s = SLP(file_data)

                dbg(
                    "%s: %d.%s -> %s -> generating atlas" %
                    (drsname, file_id, file_extension, fname), 1)

                # create exportable texture from the slp
                texture = Texture(s, palette)

                # the hotspots of terrain textures have to be fixed:
                if drsname == "terrain":
                    for entry in texture.image_metadata:
                        entry["cx"] = terrain_tile_size.tile_halfsize["x"]
                        entry["cy"] = terrain_tile_size.tile_halfsize["y"]

                # save the image and the corresponding metadata file
                texture.save(fname, output_formats)

            # create a sound file
            elif file_extension == 'wav':
                sound_filename = fname

                dbg(
                    "%s: %d.%s -> %s -> storing wav file" %
                    (drsname, file_id, file_extension, fname), 1)

                with open(fname, "wb") as f:
                    f.write(file_data)

                if not args.no_opus:
                    file_extension = "opus"
                    sound_filename = "%s.%s" % (fbase, file_extension)

                    # opusenc invocation (TODO: ffmpeg? some python-lib?)
                    opus_convert_call = ('opusenc', fname, sound_filename)
                    dbg("opus convert: %s -> %s ..." % (fname, sound_filename),
                        1)

                    # TODO: when the output is big enough, this deadlocks.
                    oc = subprocess.Popen(opus_convert_call,
                                          stdout=subprocess.PIPE,
                                          stderr=subprocess.PIPE)
                    oc_out, oc_err = oc.communicate()

                    if ifdbg(2):
                        oc_out = oc_out.decode("utf-8")
                        oc_err = oc_err.decode("utf-8")

                        dbg(oc_out + "\n" + oc_err, 2)

                    # remove extracted original wave file
                    os.remove(fname)

            else:
                # format does not require conversion, store it as plain blob
                with open(fname, "wb") as f:
                    f.write(file_data)

            media_files_extracted += 1

    if write_enabled:
        dbg("media files extracted: %d" % (media_files_extracted), 0)

    # was a file listing requested?
    if args.list_files:
        for idx, f in file_list.items():
            print("%d = [ %s ]" %
                  (idx, ", ".join("%s/%d.%s" % (file_name, idx, file_extension)
                                  for file_name, file_extension in f)))
Code example #41
0
File: exportable.py Project: zhangf911/openage
    def dump(self, filename):
        """
        main data dumping function, the magic happens in here.

        recursively dumps all object members as DataDefinitions.

        returns [DataDefinition, ..]
        """

        ret = list()        # returned list of data definitions
        self_data = dict()  # data of the current object

        members = self.get_data_format(allowed_modes=(True, READ_EXPORT, NOREAD_EXPORT), flatten_includes=True)
        for is_parent, export, member_name, member_type in members:

            # gather data members of the currently queried object
            self_data[member_name] = getattr(self, member_name)

            if isinstance(member_type, MultisubtypeMember):
                dbg(lazymsg=lambda: "%s => entering member %s" % (filename, member_name), lvl=3)

                current_member_filename = "%s-%s" % (filename, member_name)

                if isinstance(member_type, SubdataMember):
                    is_single_subdata  = True
                    subdata_item_iter  = self_data[member_name]

                    # filename for the file containing the single subdata type entries:
                    submember_filename = current_member_filename

                else:
                    is_single_subdata  = False

                    # TODO: bad design, move import to better place:
                    from .multisubtype_base import MultisubtypeBaseFile

                multisubtype_ref_file_data = list()  # file names for ref types
                subdata_definitions = list()         # subdata member DataDefinitions
                for subtype_name, submember_class in member_type.class_lookup.items():
                    # if we are in a subdata member, this for loop will only run through once.
                    # else, do the actions for each subtype

                    if not is_single_subdata:
                        dbg(lazymsg=lambda: "%s => entering multisubtype member %s" % (filename, subtype_name), lvl=3)

                        # iterate over the data for the current subtype
                        subdata_item_iter  = self_data[member_name][subtype_name]

                        # filename for the file containing one of the subtype data entries:
                        submember_filename = "%s/%s" % (filename, subtype_name)

                    submember_data = list()
                    for idx, submember_data_item in enumerate(subdata_item_iter):
                        if not isinstance(submember_data_item, Exportable):
                            raise Exception("tried to dump object not inheriting from Exportable")

                        # generate output filename for next-level files
                        nextlevel_filename = "%s/%04d" % (submember_filename, idx)

                        # recursive call, fetches DataDefinitions and the next-level data dict
                        data_sets, data = submember_data_item.dump(nextlevel_filename)

                        # store recursively generated DataDefinitions to the flat list
                        ret += data_sets

                        # append the next-level entry to the list
                        # that will contain the data for the current level DataDefinition
                        if len(data.keys()) > 0:
                            submember_data.append(data)

                    # always create a file, even if it contains 0 entries.
                    # create DataDefinition for the next-level data pile.
                    subdata_definition = DataDefinition(
                        submember_class,
                        submember_data,
                        submember_filename,
                    )

                    if not is_single_subdata:
                        # create entry for type file index.
                        # for each subtype, create entry in the subtype data file lookup file
                        # sync this with MultisubtypeBaseFile!
                        multisubtype_ref_file_data.append({
                            MultisubtypeBaseFile.data_format[0][1]: subtype_name,
                            MultisubtypeBaseFile.data_format[1][1]: "%s%s" % (
                                subdata_definition.name_data_file, GeneratedFile.output_preferences["csv"]["file_suffix"]
                            ),
                        })

                    subdata_definitions.append(subdata_definition)

                    if not is_single_subdata:
                        dbg(lazymsg=lambda: "%s => leaving multisubtype member %s" % (filename, subtype_name), lvl=3)

                # store filename instead of data list
                # is used to determine the file to read next.
                # -> multisubtype members: type file index
                # -> subdata members:      filename of subdata
                self_data[member_name] = current_member_filename

                # for multisubtype members, append data definition for storing references to all the subtype files
                if not is_single_subdata and len(multisubtype_ref_file_data) > 0:

                    # this is the type file index.
                    multisubtype_ref_file = DataDefinition(
                        MultisubtypeBaseFile,
                        multisubtype_ref_file_data,
                        self_data[member_name],                          # create file to contain refs to subtype files
                    )

                    subdata_definitions.append(multisubtype_ref_file)

                # store all created submembers to the flat list
                ret += subdata_definitions

                dbg(lazymsg=lambda: "%s => leaving member %s" % (filename, member_name), lvl=3)

        # return flat list of DataDefinitions and dict of {member_name: member_value, ...}
        return ret, self_data
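
A tiny sketch of the output-name scheme dump() uses for nested members: a member file is named "<parent>-<member>", each subtype file "<parent>/<subtype>", and each entry gets a zero-padded index below that. The concrete names here are made up for illustration; only the format strings come from the code above.

filename     = "gamedata/civ"    # hypothetical parent name
member_name  = "units"           # hypothetical member
subtype_name = "creatable"       # hypothetical subtype

current_member_filename = "%s-%s" % (filename, member_name)
submember_filename      = "%s/%s" % (filename, subtype_name)
nextlevel_filename      = "%s/%04d" % (submember_filename, 7)

print(current_member_filename)   # gamedata/civ-units
print(submember_filename)        # gamedata/civ-creatable
print(nextlevel_filename)        # gamedata/civ-creatable/0007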
Code example #42
0
File: generated_file.py Project: CodyKelly/openage
    def add_snippet(self, snippet, inherit_typedefs=True):
        if not isinstance(snippet, ContentSnippet):
            raise Exception(
                "only ContentSnippets can be added to generated files, tried %s"
                % type(snippet))

        if snippet.file_name != self.file_name and snippet.file_name is not True:
            raise Exception(
                "only snippets with the same target file_name can be put into the same generated file."
            )

        if snippet not in (self.snippets):
            self.snippets.add(snippet)

            if inherit_typedefs:
                self.typedefs |= snippet.typedefs
                self.typerefs |= snippet.typerefs
            else:
                self.included_typedefs |= snippet.typedefs

            dbg(lazymsg=lambda: "adding snippet to %s:" % (repr(self)), lvl=2)
            dbg(lazymsg=lambda: " %s" % repr(snippet), lvl=2)
            dbg(lazymsg=lambda: " `- typedefs:  %s" % snippet.typedefs, lvl=3)
            dbg(lazymsg=lambda: " `- typerefs:  %s" % snippet.typerefs, lvl=3)
            dbg(lazymsg=lambda: " `- includes:  %s {" % repr(snippet.includes),
                push="snippet_add",
                lvl=3)

            #add all included snippets, namely HeaderSnippets for #include lol.h
            for s in snippet.includes:
                self.add_snippet(s, inherit_typedefs=False)

            dbg(pop="snippet_add", lazymsg=lambda: "}", lvl=3)
        else:
            dbg(lazymsg=lambda: "skipping already present snippet %s" %
                (repr(snippet)),
                lvl=2)
Code example #43
0
    def gen_image(self, draw_text=True, squaresize=100):
        #writes this color table (palette) to a png image.

        from PIL import Image, ImageDraw

        imgside_length = math.ceil(math.sqrt(len(self.palette)))
        imgsize = imgside_length * squaresize

        dbg("generating palette image with size %dx%d" % (imgsize, imgsize))

        palette_image = Image.new('RGBA', (imgsize, imgsize),
                                  (255, 255, 255, 0))
        draw = ImageDraw.ImageDraw(palette_image)

        text_padlength = len(str(len(self.palette)))  #dirty, i know.
        text_format = "%0" + str(text_padlength) + "d"

        drawn = 0

        #squaresize 1 means draw single pixels
        if squaresize == 1:
            for y in range(imgside_length):
                for x in range(imgside_length):
                    if drawn < len(self.palette):
                        r, g, b = self.palette[drawn]
                        draw.point((x, y), fill=(r, g, b, 255))
                        drawn = drawn + 1

        #draw nice squares with given side length
        elif squaresize > 1:
            for y in range(imgside_length):
                for x in range(imgside_length):
                    if drawn < len(self.palette):
                        sx = x * squaresize - 1
                        sy = y * squaresize - 1
                        ex = sx + squaresize - 1
                        ey = sy + squaresize
                        r, g, b = self.palette[drawn]
                        vertices = [(sx, sy), (ex, sy), (ex, ey),
                                    (sx, ey)]  #(begin top-left, go clockwise)
                        draw.polygon(vertices, fill=(r, g, b, 255))

                        if draw_text and squaresize > 40:
                            #draw the color id

                            ctext = text_format % drawn  #insert current color id into string
                            tcolor = (255 - r, 255 - g, 255 - b, 255)

                            #draw the text  #TODO: use a custom-sized font
                            draw.text((sx + 3, sy + 1),
                                      ctext,
                                      fill=tcolor,
                                      font=None)

                        drawn = drawn + 1

        else:
            raise Exception(
                "squaresize must be a positive integer.")

        return palette_image
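
A worked example of the layout arithmetic in gen_image(), assuming a 256-entry palette and the default squaresize of 100:

import math

palette_len = 256
squaresize = 100

imgside_length = math.ceil(math.sqrt(palette_len))   # 16 squares per side
imgsize = imgside_length * squaresize                # 1600x1600 px image

# color index n ends up at grid cell (n % imgside_length, n // imgside_length)
n = 50
print(imgside_length, imgsize, (n % imgside_length, n // imgside_length))   # 16 1600 (2, 3)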
Code example #44
0
File: mediafile.py Project: ArseniyShestakov/openage
def media_convert(args):
    #assume to extract all files when nothing specified.
    if not args.extract:
        args.extract.append('*:*.*')

    extraction_rules = [ ExtractionRule(e) for e in args.extract ]

    #set path in utility class
    dbg("setting age2 input directory to " + args.srcdir, 1)
    util.set_read_dir(args.srcdir)

    drsfiles = {
        "graphics":  DRS("Data/graphics.drs"),
        "interface": DRS("Data/interfac.drs"),
        "sounds0":   DRS("Data/sounds.drs"),
        "sounds1":   DRS("Data/sounds_x1.drs"),
        "gamedata1": DRS("Data/gamedata_x1.drs"),
        "gamedata2": DRS("Data/gamedata_x1_p1.drs"),
        "terrain":   DRS("Data/terrain.drs")
    }

    #gamedata.drs does not exist in HD edition, but its contents are
    #in gamedata_x1.drs instead, so we can ignore this file if it doesn't exist
    if os.path.isfile(util.file_get_path("Data/gamedata.drs")):
        drsfiles["gamedata0"] = DRS("Data/gamedata.drs")

    #this is the ingame color palette file id, 256 color lookup for all graphics pixels
    palette_id = 50500
    palette = ColorTable(drsfiles["interface"].get_file_data('bin', palette_id))

    #metadata dumping output format, more to come?
    output_formats = ("csv",)

    termcolortable = ColorTable(termcolors.urxvtcoltable)
    #write mode is disabled by default, unless destdir is set

    #saving files is disabled by default
    write_enabled = False

    if args.output:
        from .slp import SLP

        write_enabled = True

        dbg("setting write dir to " + args.output, 1)
        util.set_write_dir(args.output)

        player_palette = PlayerColorTable(palette)

        if args.extrafiles:
            palette.save_visualization('info/colortable.pal.png')
            player_palette.save_visualization('info/playercolortable.pal.png')

        from . import blendomatic

        #HD Edition has a blendomatic_x1.dat in addition to its new blendomatic.dat
        #blendomatic_x1.dat is the same file as AoK:TC's blendomatic.dat, and TC does not have
        #blendomatic.dat, so we try _x1 first and fall back to the AoK:TC way if it does not exist
        blend_file = "Data/blendomatic_x1.dat"
        if not os.path.isfile(util.file_get_path(blend_file)):
            blend_file = "Data/blendomatic.dat"
        blend_data = blendomatic.Blendomatic(blend_file)
        blend_data.save(os.path.join(asset_folder, "blendomatic.dat/"), output_formats)

        from .stringresource import StringResource
        stringres = StringResource()

        #AoK:TC uses .DLL files for its string resources,
        #HD uses plaintext files
        if os.path.isfile(util.file_get_path("language.dll")):
            from .pefile import PEFile
            stringres.fill_from(PEFile("language.dll"))
            stringres.fill_from(PEFile("language_x1.dll"))
            stringres.fill_from(PEFile("language_x1_p1.dll"))
            #stringres.fill_from(PEFile("Games/Forgotten Empires/Data/language_x1_p1.dll"))
        else:
            from .hdlanguagefile import HDLanguageFile
            for lang in os.listdir(util.file_get_path("Bin")):
                langfile = "Bin/%s/%s-language.txt" % (lang, lang)

                #there are some "base language" files in HD that we don't need
                #and only the dir for the language that's currently in use contains a language file
                if os.path.isdir(util.file_get_path("Bin/%s" % (lang))) and os.path.isfile(util.file_get_path(langfile)):
                    stringres.fill_from(HDLanguageFile(langfile, lang))

        #TODO: transform and cleanup the read strings... (strip html, insert formatchars, ...)

        #create the dump for the dat file
        from .gamedata import empiresdat
        datfile_name = "empires2_x1_p1.dat"

        #try to use cached version?
        parse_empiresdat = False
        if args.use_dat_cache:
            dbg("trying to use cache file %s..." % (dat_cache_file), lvl=1)
            try:
                with open(dat_cache_file, "rb") as f:
                    gamedata = pickle.load(f)
                    dbg("could successfully load cached gamedata!", lvl=1)

            except FileNotFoundError as err:
                parse_empiresdat = True

        if not args.use_dat_cache or parse_empiresdat:
            datfile = empiresdat.EmpiresDatGzip("Data/%s" % datfile_name)
            gamedata = empiresdat.EmpiresDatWrapper()

            if args.extrafiles:
                datfile.raw_dump('raw/empires2x1p1.raw')

            dbg("reading main data file %s..." % (datfile_name), lvl=1)
            gamedata.read(datfile.content, 0)

            #store the datfile serialization for caching
            with open(dat_cache_file, "wb") as f:
                pickle.dump(gamedata, f)

        #modify the read contents of datfile
        dbg("repairing some values in main data file %s..." % (datfile_name), lvl=1)
        from . import fix_data
        gamedata.empiresdat[0] = fix_data.fix_data(gamedata.empiresdat[0])

        #dbg("transforming main data file %s..." % (datfile_name), lvl=1)
        #TODO: data transformation nao! (merge stuff, etcetc)

        dbg("formatting output data...", lvl=1)
        data_formatter = DataFormatter()

        #dump metadata information
        data_dump = list()
        data_dump += blend_data.dump("blending_modes")
        data_dump += player_palette.dump("player_palette_%d" % palette_id)
        data_dump += termcolortable.dump("termcolors")
        data_dump += stringres.dump("string_resources")
        data_formatter.add_data(data_dump)

        #dump gamedata datfile data
        gamedata_dump = gamedata.dump("gamedata")
        data_formatter.add_data(gamedata_dump[0], prefix="gamedata/")

        output_data = data_formatter.export(output_formats)

        #save the meta files
        dbg("saving output data files...", lvl=1)
        util.file_write_multi(output_data, file_prefix=asset_folder)

    file_list = defaultdict(lambda: list())
    media_files_extracted = 0

    sound_list = filelist.SoundList()

    #iterate over all available files in the drs, check whether they should be extracted
    for drsname, drsfile in drsfiles.items():
        for file_extension, file_id in drsfile.files:
            if not any(er.matches(drsname, file_id, file_extension) for er in extraction_rules):
                continue

            #append this file to the list result
            if args.list_files:
                file_list[file_id].append((drsfile.fname, file_extension))
                continue

            #generate output filename where data will be stored in
            if write_enabled:
                fbase = os.path.join(asset_folder, drsfile.fname, str(file_id))
                fname = "%s.%s" % (fbase, file_extension)

                dbg("Extracting to %s..." % (fname), 2)
                file_data = drsfile.get_file_data(file_extension, file_id)
            else:
                continue

            if file_extension == 'slp':
                s = SLP(file_data)
                out_file_tmp = "%s: %d.%s" % (drsname, file_id, file_extension)

                dbg("%s -> %s -> generating atlas" % (out_file_tmp, fname), 1)

                #create exportable texture from the slp
                texture = Texture(s, palette)

                # the hotspots of terrain textures have to be fixed:
                if drsname == "terrain":
                    for entry in texture.image_metadata:
                        entry["cx"] = 48
                        entry["cy"] = 24

                #save the image and the corresponding metadata file
                texture.save(fname, output_formats)

            elif file_extension == 'wav':
                sound_filename = fname

                wav_output_file = util.file_get_path(fname, write=True)
                util.file_write(wav_output_file, file_data)

                if not args.no_opus:
                    file_extension   = "opus"
                    sound_filename   = "%s.%s" % (fbase, file_extension)
                    opus_output_file = util.file_get_path(sound_filename, write=True)

                    #opusenc invocation (TODO: ffmpeg?)
                    opus_convert_call = ['opusenc', wav_output_file, opus_output_file]
                    dbg("opus convert: %s -> %s ..." % (fname, sound_filename), 1)

                    oc = subprocess.Popen(opus_convert_call, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
                    oc_out, oc_err = oc.communicate()

                    if ifdbg(2):
                        oc_out = oc_out.decode("utf-8")
                        oc_err = oc_err.decode("utf-8")

                        dbg(oc_out + "\n" + oc_err, 2)

                    #remove original wave file
                    os.remove(wav_output_file)

                #TODO: this is redundant here, but we need to strip the assets/ part..
                filelist_fname = "%s.%s" % (os.path.join(drsfile.fname, str(file_id)), file_extension)
                sound_list.add_sound(file_id, filelist_fname, file_extension)

            else:
                #format does not require conversion, store it as plain blob
                output_file = util.file_get_path(fname, write=True)
                util.file_write(output_file, file_data)

            media_files_extracted += 1

    if write_enabled:
        sound_formatter = DataFormatter()
        sound_formatter.add_data(sound_list.dump())
        util.file_write_multi(sound_formatter.export(output_formats), file_prefix=asset_folder)

        dbg("media files extracted: %d" % (media_files_extracted), 0)

    #was a file listing requested?
    if args.list_files:
        for idx, f in file_list.items():
            print("%d = [ %s ]" % (idx, ", ".join(
                "%s/%d.%s" % (file_name, idx, file_extension) for file_name, file_extension in f)))
Code example #45
0
File: generated_file.py Project: Dotile/openage
    def add_snippet(self, snippet, inherit_typedefs=True):
        if not isinstance(snippet, ContentSnippet):
            raise Exception("only ContentSnippets can be added to generated files, tried %s" % type(snippet))

        if snippet.file_name != self.file_name and snippet.file_name is not True:
            raise Exception("only snippets with the same target file_name can be put into the same generated file.")

        if snippet not in (self.snippets):
            self.snippets.add(snippet)

            if inherit_typedefs:
                self.typedefs |= snippet.typedefs
                self.typerefs |= snippet.typerefs
            else:
                self.included_typedefs |= snippet.typedefs

            dbg(lazymsg=lambda: "adding snippet to %s:" % (repr(self)), lvl=2)
            dbg(lazymsg=lambda: " %s"                   % repr(snippet), lvl=2)
            dbg(lazymsg=lambda: " `- typedefs:  %s"     % snippet.typedefs, lvl=3)
            dbg(lazymsg=lambda: " `- typerefs:  %s"     % snippet.typerefs, lvl=3)
            dbg(lazymsg=lambda: " `- includes:  %s {"   % repr(snippet.includes), push="snippet_add", lvl=3)

            # add all included snippets, namely HeaderSnippets for #include lol.h
            for s in snippet.includes:
                self.add_snippet(s, inherit_typedefs=False)

            dbg(pop="snippet_add", lazymsg=lambda: "}", lvl=3)
        else:
            dbg(lazymsg=lambda: "skipping already present snippet %s" % (repr(snippet)), lvl=2)
Code example #46
0
File: drs.py Project: MrBeardy/openage
    def __init__(self, fname, name):
        # queried from the outside:
        self.fname = fname
        self.name = name

        # (extension, id): (data offset, size)
        self.files = dict()

        f = open(fname, "rb")

        # read header
        buf = f.read(DRS.drs_header.size)
        self.header = DRS.drs_header.unpack(buf)

        dbg("DRS header [%s]" % (name), 1, push="drs")
        dbg("copyright:          %s" % util.zstr(self.header[0]))
        dbg("version:            %s" % util.zstr(self.header[1]))
        dbg("ftype:              %s" % util.zstr(self.header[2]))
        dbg("table count:        %d" % (self.header[3]))
        dbg("file offset:        %d" % (self.header[4]))
        dbg("")

        # read table info
        table_count = self.header[3]

        table_header_buf = f.read(table_count * DRS.drs_table_info.size)
        for i in range(table_count):
            table_header = DRS.drs_table_info.unpack_from(table_header_buf, i * DRS.drs_table_info.size)

            file_type, file_extension, file_info_offset, file_count = table_header

            # flip the extension... it's stored that way...
            file_extension = file_extension.decode("latin-1").lower()[::-1]

            dbg("Table header [%d]" % i, 2, push="table")
            dbg("file type:        0x%s" % hexlify(file_type).decode("utf-8"))
            dbg("file extension:   %s" % (file_extension))
            dbg("file_info_offset: %#08x" % (file_info_offset))
            dbg("file_count:       %d" % file_count)
            dbg("")

            f.seek(file_info_offset)
            file_info_buf = f.read(file_count * DRS.drs_file_info.size)

            for j in range(file_count):
                file_header = DRS.drs_file_info.unpack_from(file_info_buf, j * DRS.drs_file_info.size)
                file_id, file_data_offset, file_size = file_header

                dbg("File info header [%d]" % j, 3, push="fileinfo")
                dbg("file id:        %d" % (file_id))
                dbg("data offset:    %d" % (file_data_offset))
                dbg("file size:      %d" % (file_size))
                dbg("")

                self.files[(file_extension, file_id)] = (file_data_offset, file_size)

                dbg(pop="fileinfo")

            dbg(pop="table")

        self.f = f

        dbg(pop="drs")
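
Given the (data offset, size) table built above, a lookup like the get_file_data(extension, id) calls used elsewhere in this listing can be sketched as follows; this is a hedged approximation, not necessarily the project's exact method body.

    def get_file_data(self, file_extension, file_id):
        # sketch only: seek to the stored offset and read the stored size
        file_data_offset, file_size = self.files[(file_extension, file_id)]
        self.f.seek(file_data_offset)
        return self.f.read(file_size)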
Code example #47
0
    def __init__(self, fname):
        self.defaultcultureid = 0
        self.defaultcp = 0
        self.todefaultcharsetcd = 0
        self.fromdefaultcharsetcd = 0

        with open(fname, "rb") as f:
            data = f.read()

        # read DOS header
        dosheader = image_dos_header.unpack_from(data, 0)

        dbg("PE file [%s]" % (fname), 1)

        # read PE header (e_lfanew)
        headerpos = dosheader[30]

        peheader = image_file_header.unpack_from(data, headerpos)

        signature, machine, number_of_sections, time_stamp, symbol_table_ptr,\
            symbol_count, size_of_optional_header, characteristics = peheader

        if signature != PE_SIGNATURE:
            raise Exception("Invalid PE signature")

        dbg("PE header: " + repr(peheader), push="pe")

        # read optional header
        optheaderpos = headerpos + image_file_header.size

        if size_of_optional_header != SUPPORTED_OPTIONAL_HEADER_SIZE:
            raise Exception("Unsupported optional header size")

        optheader = image_optional_header32.unpack_from(data, optheaderpos)
        if optheader[0] != IMAGE_OPTIONAL_HDR32_MAGIC:
            raise Exception("Bad magic number for optional header")
        number_of_rva_and_sizes = optheader[-1]

        dbg("Optional header: " + repr(optheader))

        # read data directory
        datadirectorypos = optheaderpos + image_optional_header32.size
        datadirectory = []
        for i in range(number_of_rva_and_sizes):
            entry = image_data_directory.unpack_from(
                data, datadirectorypos + i * image_data_directory.size
            )
            datadirectory.append(entry)

        # read section headers
        secttablepos = datadirectorypos +\
            (number_of_rva_and_sizes * image_data_directory.size)

        # number of sections is known from PE header
        sections = {}
        dbg("sections", 3, push="sections")
        for i in range(number_of_sections):
            sectionheader = image_section_header.unpack_from(
                data, secttablepos + image_section_header.size * i
            )
            sectionname = sectionheader[0].decode('ascii').strip('\0')
            sectionheader = sectionheader[1:]

            dbg("%s: %s" % (
                sectionname,
                (", ".join(["%#x" % i for i in sectionheader]))
            ))

            # read section data
            virtual_size, virtual_address, size_of_raw_data,\
                pointer_to_raw_data, pointer_to_relocations,\
                pointer_to_linenumbers, number_of_relocations,\
                number_of_linenumbers, characteristics = sectionheader

            rawdata = data[pointer_to_raw_data:][:virtual_size]

            sections[sectionname] = sectionheader, rawdata
        dbg(pop="sections")

        ressectionheader, self.rsrcdata = sections[SECTION_NAME_RESOURCE]
        self.resdatava = ressectionheader[1]

        self.data = data
        self.dosheader = dosheader
        self.peheader = peheader
        self.optheader = optheader
        self.datadirectory = datadirectory
        self.sections = sections

        self.rootnode = self.read_rsrc_tree(0)
        self.strings = self.parse_rsrc_strings()

        for lang, strs in self.strings.items():
            dbg("%s: %d resource strings" % (lang, len(strs)))

        dbg(pop="pe")
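
For reference, the DOS-to-PE chaining above (dosheader[30] is e_lfanew) can also be checked directly against the raw bytes: e_lfanew is a 32-bit little-endian value at offset 0x3C of the DOS header, and the "PE\0\0" signature must sit at that position. A minimal sketch:

import struct

def pe_header_offset(data):
    # e_lfanew: offset of the PE header, stored at 0x3C in the DOS header
    (e_lfanew,) = struct.unpack_from("<I", data, 0x3C)

    if data[e_lfanew:e_lfanew + 4] != b"PE\0\0":
        raise Exception("Invalid PE signature")

    return e_lfanew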
Code example #48
0
File: texture.py Project: MrBeardy/openage
def merge_frames(frames, max_width=0, max_height=0):
    """
    merge all given frames of this slp to a single image file.

    frames = [TextureImage, ...]

    returns = TextureImage, (width, height), [drawn_frames_meta]
    """

    import numpy

    # TODO: actually optimize free space on the texture.
    # if you ever wanted to implement a combinatoric optimisation
    # algorithm, go for it, this function just waits for you.
    # https://en.wikipedia.org/wiki/Bin_packing_problem
    #
    # for now, using max values for solving bin packing problem
    # after determining the maximum frame width and height,
    # each frame will be placed in a grid with fields of these sizes.
    # this wastes storage, but works. If your blood boils now because you
    # wanna optimize for the best alignment, read the above notice again,
    # and implement a better version.

    if len(frames) == 0:
        raise Exception("cannot create texture with empty input frame list")

    # single-frame texture, no merging needed
    elif len(frames) == 1:
        cx, cy = frames[0].hotspot
        w, h = frames[0].width, frames[0].height
        return frames[0], (w, h), [subtexture_meta(0, 0, w, h, cx, cy)]

    # if not predefined, get maximum frame size by checking all frames
    if max_width == 0 or max_height == 0:
        max_width = max([teximg.width for teximg in frames])
        max_height = max([teximg.height for teximg in frames])

    max_per_row = math.ceil(math.sqrt(len(frames)))
    num_rows = math.ceil(len(frames) / max_per_row)

    # we leave 1 pixel free in between two sprites
    free_space_px = 1
    width = (max_width + free_space_px) * max_per_row
    height = (max_height + free_space_px + 1) * num_rows

    dbg(
        "merging %d frames to %dx%d atlas, %d pics per row, %d rows."
        % (len(frames), width, height, max_per_row, num_rows),
        2,
    )

    # resulting draw pane
    draw_data = list()
    for _ in range(height):
        row_data = list()
        for _ in range(width):
            row_data.append((0, 0, 0, 0))
        draw_data.append(row_data)

    pos_x = 0
    pos_y = 0

    drawn_frames_meta = list()
    drawn_current_row = 0

    for sub_frame in frames:
        sub_w = sub_frame.width
        sub_h = sub_frame.height

        dbg("drawing frame %03d on atlas at %d x %d..." % (len(drawn_frames_meta), pos_x, pos_y), 3)

        for y, row_data in enumerate(sub_frame.data):
            for x, pixel_data in enumerate(row_data):
                draw_data[y + pos_y][x + pos_x] = pixel_data

                # print(pixel_data)

        # generate subtexture meta information object
        hotspot_x, hotspot_y = sub_frame.hotspot
        drawn_frames_meta.append(subtexture_meta(pos_x, pos_y, sub_w, sub_h, hotspot_x, hotspot_y))

        drawn_current_row += 1

        # place the subtexture with a 1px border
        pos_x += max_width + free_space_px

        # see if we have to start a new row now
        if drawn_current_row > max_per_row - 1:
            drawn_current_row = 0
            pos_x = 0
            pos_y += max_height + free_space_px

    atlas_data = numpy.array(draw_data, dtype=numpy.uint8)
    atlas = TextureImage(atlas_data)

    dbg("successfully merged %d frames to atlas." % (len(frames)), 2)

    return atlas, (width, height), drawn_frames_meta
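
A worked example of the placement math above, with made-up numbers: 50 frames whose largest frame is 97x61 pixels, and the 1 px spacing used in merge_frames().

import math

frame_count = 50
max_width, max_height = 97, 61   # hypothetical largest frame
free_space_px = 1

max_per_row = math.ceil(math.sqrt(frame_count))        # 8 frames per row
num_rows = math.ceil(frame_count / max_per_row)        # 7 rows

width = (max_width + free_space_px) * max_per_row      # 784 px atlas width
height = (max_height + free_space_px + 1) * num_rows   # 441 px atlas height

# frame i is drawn at this top-left corner:
i = 20
pos = ((i % max_per_row) * (max_width + free_space_px),
       (i // max_per_row) * (max_height + free_space_px))
print(max_per_row, num_rows, width, height, pos)       # 8 7 784 441 (392, 124)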
Code example #49
0
File: __main__.py Project: ArseniyShestakov/openage
def main():
    """
    this codegen script auto-generates sourcefiles in the cpp/ subtree,
    and is designed for usage by the build system
    (see buildsystem/codegen.cmake).

    invocation synopsis:

    python3 -m openage.codegen

        (mandatory)

        --target-cache=filea
        --depend-cache=fileb
        --cpp-src-dir=dira

        (commands, optional)

        --write-to-sourcedir
        --touch-file-on-cache-change=CMakeLists.txt
        --force-rerun-on-targetcache-change
        --clean

    all file and directory names SHOULD be absolute paths.
    this is not enforced, but relative paths may violate assumptions made by
    codegen.cmake.

    for each invocation, all code generation is performed, and the generated
    sourcefiles are stored in an internal dict.

    in addition, text data is written to the specified cache files:

    - a list of all generated files (targets) to target-cache
    - a list of all loaded python module files to depend-cache

    depending on the specified invocation commands,

    - generated sources are written to the source dir
    - generated sources are cleaned
    - cmake re-builds are triggered if a cache has changed
    """

    # parse arguments

    ap = argparse.ArgumentParser(
        description=("generates c++ code within the source tree. "
                     "designed to be used by [buildsystem/codegen.cmake]"),
        epilog=("all file and directory names should be absolute; otherwise, "
                "assumptions made by this script or the cmake script might "
                "not be fulfilled"))

    ap.add_argument("--target-cache", required=True,
                    help=("filename for target cache. a list of all generated "
                          "sources is written there for every invocation. if "
                          "the list changes, --touch-file-on-cache-change and "
                          "--force-rerun-on-targetcache-change trigger cmake "
                          "re-runs"))
    ap.add_argument("--depend-cache", required=True,
                    help=("filename for dependency cache. a list of all "
                          "python files and other resources that were used "
                          "during source generation. if the list changes, "
                          "--touch-file-on-cache-change will trigger cmake "
                          "re-runs"))
    ap.add_argument("--cpp-src-dir", required=True,
                    help=("c++ source directory; used to determine generated "
                          "file names"))
    ap.add_argument("--write-to-sourcedir", action='store_true',
                    help=("causes the sources to be actually written to "
                          "cpp-src-dir. otherwise, a dry run is performed. "
                          "even during a dry run, all code generation is "
                          "performed in order to determine generation targets "
                          "and dependencies."))
    ap.add_argument("--clean", action='store_true',
                    help=("all generated files are deleted from the source "
                          "directory"))
    ap.add_argument("--touch-file-on-cache-change",
                    help=("the file passed here will be touched if one of the "
                          "caches changes. designed for use with a "
                          "CMakeLists.txt file, to trigger cmake re-runs"))
    ap.add_argument("--force-rerun-on-targetcache-change", action='store_true',
                    help=("a bit more drastic than --touch-file-on-cache-change, "
                          "this causes codegen to abort with an error message "
                          "if the target cache has changed."))

    ap.add_argument("--verbose", "-v", action='count', default=0)

    args = ap.parse_args()

    # process and validate arguments
    if not args.verbose and 'VERBOSE' in os.environ:
        try:
            args.verbose = int(os.environ['VERBOSE'])
        except ValueError:
            args.verbose = 2

    log.set_verbosity(args.verbose)

    file_to_touch = args.touch_file_on_cache_change
    if file_to_touch and not os.path.isfile(file_to_touch):
        ap.error("file doesn't exist: %s" % file_to_touch)

    cache_actions_requested = (file_to_touch or
                               args.force_rerun_on_targetcache_change)

    old_target_cache = set()
    try:
        with open(args.target_cache) as f:
            for target in f:
                old_target_cache.add(target.strip())
    except:
        if cache_actions_requested:
            log.dbg("warning: cache actions were requested, " +
                "but the target cache could not be read!", 0)

    old_depend_cache = set()
    try:
        with open(args.depend_cache) as f:
            for depend in f:
                old_depend_cache.add(depend.strip())
    except:
        if cache_actions_requested:
            log.dbg("warning: cache actions were requested, " +
                "but the depends cache could not be read!", 0)

    cpp_src_dir = args.cpp_src_dir
    if not os.path.isdir(cpp_src_dir):
        ap.error("not a directory: %s" % cpp_src_dir)

    # arguments are OK.

    # generate sources
    generated_files = {}
    from . import codegen
    for absfilename, filename, content in codegen.generate_all(cpp_src_dir):
        generated_files[absfilename] = filename, content

    # calculate dependencies (all used python modules)
    new_depend_cache = set()
    depend_cache_file = open(args.depend_cache, 'w')
    for depend in codegen.get_depends():
        depend_cache_file.write("%s\n" % depend)
        new_depend_cache.add(depend)

    # calculate targets
    new_target_cache = set()
    target_cache_file = open(args.target_cache, 'w')
    for filename in generated_files:
        target_cache_file.write(filename)
        target_cache_file.write('\n')
        new_target_cache.add(filename)

    # check whether the cache has changed
    def print_set_difference(fun, old, new):
        if old:
            if old - new:
                fun("removed:\n\t%s" % "\n\t".join(old - new))
            if new - old:
                fun("added:\n\t%s" % "\n\t".join(new - old))
        else:
            fun("\n\t".join(new))

    depend_cache_changed = False
    if old_depend_cache != new_depend_cache:
        depend_cache_changed = True
        log.dbg("codegen dependencies:", 1)
        print_set_difference(lambda s: log.dbg(s, 1),
                             old_depend_cache, new_depend_cache)

    target_cache_changed = False
    if old_target_cache != new_target_cache:
        target_cache_changed = True
        log.dbg("codegen target sources:", 1)
        print_set_difference(lambda s: log.dbg(s, 1),
                             old_target_cache, new_target_cache)

    if file_to_touch and (depend_cache_changed or target_cache_changed):
        try:
            os.utime(file_to_touch)
        except:
            log.dbg("warning: couldn't update the timestamp for %s"
                % file_to_touch, 0)

    if target_cache_changed and args.force_rerun_on_targetcache_change:
        print("""\n\n\n\
The list of generated sourcefiles has changed.
A build update has been triggered; you need to build again.
\n\n\n""")
        # fail
        exit(1)

    # clean sourcedir
    if args.clean:
        for absfilename, (filename, content) in generated_files.items():
            if os.path.isfile(absfilename):
                log.dbg("cleaning file: %s" % filename, 0)
                os.unlink(absfilename)

    # write generated files to sourcedir
    if args.write_to_sourcedir:
        for absfilename, (filename, content) in generated_files.items():
            if os.path.isfile(absfilename):
                with open(absfilename) as f:
                    if f.read() == content:
                        log.dbg("file unchanged: %s" % filename, 1)
                        continue

            log.dbg("generating file: %s" % filename, 0)
            with open(absfilename, 'w') as f:
                f.write(content)
Code example #50
0
    def __init__(self, idx, data_file, tile_count, header):
        """
        initialize one blending mode,
        consisting of multiple frames for all blending directions

        the bitmasks were used to decide whether this pixel has
        to be used for calculations.

        the alphamask is used to determine the alpha amount for blending.
        """

        #should be 2353 -> number of pixels (single alpha byte values)
        self.pxcount = header[0]
        #tile_flags = header[1:]  #TODO what do they do?

        dbg("tiles in blending mode %d have %d pixels" % (idx, self.pxcount), 2)

        #as we draw in isometric tile format, this is the row count
        row_count = int(math.sqrt(self.pxcount)) + 1  #should be 49

        #alpha_masks_raw is an array of bytes that will draw 32 images,
        #which are bit masks.
        #
        #one of these masks also has 2353 pixels
        #the storage of the bit masks is 4*tilesize, here's why:
        #
        #4 * 8bit * 2353 pixels = 75296 bitpixels
        #==> 75296/(32 images) = 2353 bitpixel/image
        #
        #this means if we interpret the 75296 bitpixels as 32 images,
        #each of these images gets 2353 bits as data.
        #TODO: why 32 images? isn't that depending on tile_count?

        bitmask_buf_size = self.pxcount * 4
        dbg("reading 1bit masks -> %d bytes" % (bitmask_buf_size), 2)
        alpha_masks_buf = data_file.read(bitmask_buf_size)
        alpha_masks_raw = unpack_from("%dB" % (bitmask_buf_size), alpha_masks_buf)

        #list of alpha-mask tiles
        self.alphamasks = list()

        dbg("reading 8bit masks for %d tiles -> %d bytes" % (tile_count, self.pxcount * tile_count), 2)

        #draw mask tiles for this blending mode
        for j in range(tile_count):
            tile_buf = data_file.read(self.pxcount)
            pixels   = unpack_from("%dB" % self.pxcount, tile_buf)
            self.alphamasks.append(self.get_tile_from_data(row_count, pixels))

        bitvalues = list()
        for i in alpha_masks_raw:
            for b_id in range(7, -1, -1):
                #bitmask from 0b00000001 to 0b10000000
                bit_mask = 2 ** b_id
                bit      = i & bit_mask
                bitvalues.append(bit)

        #list of bit-mask tiles
        self.bitmasks = list()

        #TODO: is 32 really hardcoded?
        for i in range(32):
            data_begin =  i    * self.pxcount
            data_end   = (i+1) * self.pxcount
            pixels = bitvalues[ data_begin : data_end ]

            self.bitmasks.append(self.get_tile_from_data(row_count, pixels))
Code example #51
0
File: generated_file.py Project: CodyKelly/openage
    def generate(self):
        """
        actually generate the content for this file.
        """

        #TODO: create new snippets for resolving cyclic dependencies (forward declarations)

        dbg(push="generation", lvl=2)

        dbg(lazymsg=lambda: "".join((
            "\n=========== generating %s\n" % (repr(self)),
            "content snippets stored to be inserted:\n",
            pprint.pformat(self.snippets),
            "\n-----------",
        )),
            lvl=3)

        #apply preference overrides
        prefs = self.default_preferences.copy()
        prefs.update(self.output_preferences[self.format])

        snippets_header = {
            s
            for s in self.snippets
            if s.section == ContentSnippet.section_header
        }
        snippets_body = self.snippets - snippets_header

        if len(snippets_body) == 0:
            raise Exception("generated file %s has no body snippets!" %
                            (repr(self)))

        #type references in this file that could not be resolved
        missing_types = set()

        #put snippets into list in correct order
        #snippets will be written according to this [(snippet, prio), ...] list
        snippets_priorized = list()

        #determine each snippet's priority by number of type references and definitions
        #smaller prio means written earlier in the file.
        #also, find snippet dependencies
        dbg("assigning snippet priorities:", push="snippetprio", lvl=4)
        for s in sorted(snippets_body):
            snippet_prio = len(s.typerefs) - len(s.typedefs)
            snippets_priorized.append((s, snippet_prio))
            dbg(lazymsg=lambda: "prio %3.d => %s" % (snippet_prio, repr(s)),
                lvl=4)

            #let each snippet find others as dependencies
            missing_types |= s.add_required_snippets(self.snippets)

        dbg(pop="snippetprio")

        if len(missing_types) > 0:
            raise Exception("missing types for %s:\n%s" %
                            (repr(self), pprint.pformat(missing_types)))

        #sort snippets according to their priority determined above
        snippets_priorized_sorted = sorted(snippets_priorized,
                                           key=lambda s: s[1])

        #create list of snippets to be put in the generated file.
        #[(snippet, prio)]
        snippets_body_sorted = list()
        snippets_body_set = set()

        #fetch list of all required snippets for all snippets to put in the file
        for snippet, prio in snippets_priorized_sorted:
            snippet_candidates = snippet.get_required_snippets()

            dbg(lazymsg=lambda: "required dependency snippet candidates: %s" %
                (pprint.pformat(snippet_candidates)),
                lvl=3)
            for s in snippet_candidates:
                if s.section == ContentSnippet.section_header:
                    if s not in snippets_header:
                        dbg(lazymsg=lambda: " `-> ADD  header snippet %s" %
                            (repr(s)),
                            lvl=4)
                        snippets_header.add(s)
                        continue

                elif s.section == ContentSnippet.section_body:
                    if s not in snippets_body_set:
                        snippets_body_sorted.append(s)
                        snippets_body_set.add(s)
                        dbg(lazymsg=lambda: " `-> ADD  body snippet %s" %
                            (repr(s)),
                            lvl=4)
                        continue

                dbg(lazymsg=lambda: " `-> SKIP snippet %s" % (repr(s)), lvl=4)

        #these snippets will be written outside the namespace
        #in the #include section
        snippets_header_sorted = sorted(snippets_header,
                                        key=lambda h:
                                        (not h.is_global, h.name))

        dbg(lazymsg=lambda: "".join((
            "\n-----------\n",
            "snippets after ordering for %s:\n" % (repr(self)),
            pprint.pformat(snippets_header_sorted + snippets_body_sorted),
            "\n===========",
        )),
            lvl=3)

        #merge file contents
        header_data = "".join(header.get_data()
                              for header in snippets_header_sorted)
        file_data = "\n".join(snippet.get_data()
                              for snippet in snippets_body_sorted)

        namespace = self.namespace
        header_guard = "".join(
            (namespace.upper(), "_", self.file_name.replace("/", "_").upper()))
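        #e.g. namespace "openage" and a file_name like "gamedata/unit"
        #would yield the guard "OPENAGE_GAMEDATA_UNIT"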

        #fill the file header and footer templates (header guard, namespace, includes)
        content_prefix = prefs["content_prefix"].substitute(
            header_guard=header_guard,
            namespace=namespace,
            headers=header_data)
        content_suffix = prefs["content_suffix"].substitute(
            header_guard=header_guard, namespace=namespace)

        #this is the final file content
        file_data = "".join((content_prefix, file_data, content_suffix))

        #determine output file name
        output_file_name_parts = [
            prefs["folder"],
            "%s%s" % (self.file_name, prefs["file_suffix"])
        ]

        dbg(pop="generation")

        #whee, return the (file_name, content)
        return (os.path.join(*output_file_name_parts), file_data)
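The ordering rule above (a snippet's priority is its number of type references minus its type definitions; smaller values are written earlier) can be isolated into a small sketch. The Snippet stand-in below is hypothetical and only carries the two attributes the priority computation inspects:

class Snippet:
    """hypothetical stand-in with only the fields the ordering uses"""
    def __init__(self, name, typerefs=(), typedefs=()):
        self.name     = name
        self.typerefs = set(typerefs)
        self.typedefs = set(typedefs)

    def __repr__(self):
        return self.name

def order_snippets(snippets):
    #smaller priority (fewer unresolved references, more definitions)
    #means the snippet is written earlier in the generated file
    prioritized = [(s, len(s.typerefs) - len(s.typedefs)) for s in snippets]
    return [s for s, _ in sorted(prioritized, key=lambda entry: entry[1])]

#usage sketch: the snippet defining "unit_t" is emitted before its user
print(order_snippets([Snippet("unit_user",   typerefs={"unit_t"}),
                      Snippet("unit_struct", typedefs={"unit_t"})]))
#-> [unit_struct, unit_user]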
Code example #52
0
File: mediafile.py Project: CodyKelly/openage
def media_convert(args):
    #assume that all files should be extracted when nothing is specified.
    if not args.extract:
        args.extract.append('*:*.*')

    extraction_rules = [ExtractionRule(e) for e in args.extract]

    #set path in utility class
    dbg("setting age2 input directory to " + args.srcdir, 1)
    util.set_read_dir(args.srcdir)

    drsfiles = {
        "graphics": DRS("Data/graphics.drs"),
        "interface": DRS("Data/interfac.drs"),
        "sounds0": DRS("Data/sounds.drs"),
        "sounds1": DRS("Data/sounds_x1.drs"),
        "gamedata1": DRS("Data/gamedata_x1.drs"),
        "gamedata2": DRS("Data/gamedata_x1_p1.drs"),
        "terrain": DRS("Data/terrain.drs")
    }

    #gamedata.drs does not exist in HD edition, but its contents are
    #in gamedata_x1.drs instead, so we can ignore this file if it doesn't exist
    if os.path.isfile(util.file_get_path("Data/gamedata.drs")):
        drsfiles["gamedata0"] = DRS("Data/gamedata.drs")

    #this is the ingame color palette file id, 256 color lookup for all graphics pixels
    palette_id = 50500
    palette = ColorTable(drsfiles["interface"].get_file_data(
        'bin', palette_id))

    #metadata dumping output format, more to come?
    output_formats = ("csv", )

    termcolortable = ColorTable(termcolors.urxvtcoltable)
        #saving files (write mode) is disabled by default;
        #it is enabled below when an output directory (args.output) is given
        write_enabled = False

    if args.output:
        from .slp import SLP

        write_enabled = True

        dbg("setting write dir to " + args.output, 1)
        util.set_write_dir(args.output)

        player_palette = PlayerColorTable(palette)

        if args.extrafiles:
            palette.save_visualization('info/colortable.pal.png')
            player_palette.save_visualization('info/playercolortable.pal.png')

        from . import blendomatic

        #HD Edition has a blendomatic_x1.dat in addition to its new blendomatic.dat
        #blendomatic_x1.dat is the same file as AoK:TC's blendomatic.dat, and TC does not have
        #blendomatic_x1.dat, so we try _x1 first and fall back to the AoK:TC way if it does not exist
        blend_file = "Data/blendomatic_x1.dat"
        if not os.path.isfile(util.file_get_path(blend_file)):
            blend_file = "Data/blendomatic.dat"
        blend_data = blendomatic.Blendomatic(blend_file)
        blend_data.save(os.path.join(asset_folder, "blendomatic.dat/"),
                        output_formats)

        from .stringresource import StringResource
        stringres = StringResource()

        #AoK:TC uses .DLL files for its string resources,
        #HD uses plaintext files
        if os.path.isfile(util.file_get_path("language.dll")):
            from .pefile import PEFile
            stringres.fill_from(PEFile("language.dll"))
            stringres.fill_from(PEFile("language_x1.dll"))
            stringres.fill_from(PEFile("language_x1_p1.dll"))
            #stringres.fill_from(PEFile("Games/Forgotten Empires/Data/language_x1_p1.dll"))
        else:
            from .hdlanguagefile import HDLanguageFile
            for lang in os.listdir(util.file_get_path("Bin")):
                langfile = "Bin/%s/%s-language.txt" % (lang, lang)

                #there are some "base language" files in HD that we don't need,
                #and only the dir for the language that's currently in use contains a language file
                if os.path.isdir(util.file_get_path(
                        "Bin/%s" %
                    (lang))) and os.path.isfile(util.file_get_path(langfile)):
                    stringres.fill_from(HDLanguageFile(langfile, lang))

        #TODO: transform and cleanup the read strings... (strip html, insert formatchars, ...)

        #create the dump for the dat file
        from .gamedata import empiresdat
        datfile_name = "empires2_x1_p1.dat"

        #try to use cached version?
        parse_empiresdat = False
        if args.use_dat_cache:
            dbg("trying to use cache file %s..." % (dat_cache_file), lvl=1)
            try:
                with open(dat_cache_file, "rb") as f:
                    gamedata = pickle.load(f)
                    dbg("could successfully load cached gamedata!", lvl=1)

            except FileNotFoundError:
                parse_empiresdat = True

        if not args.use_dat_cache or parse_empiresdat:
            datfile = empiresdat.EmpiresDatGzip("Data/%s" % datfile_name)
            gamedata = empiresdat.EmpiresDatWrapper()

            if args.extrafiles:
                datfile.raw_dump('raw/empires2x1p1.raw')

            dbg("reading main data file %s..." % (datfile_name), lvl=1)
            gamedata.read(datfile.content, 0)

            #store the datfile serialization for caching
            with open(dat_cache_file, "wb") as f:
                pickle.dump(gamedata, f)

        #modify the read contents of datfile
        dbg("repairing some values in main data file %s..." % (datfile_name),
            lvl=1)
        from . import fix_data
        gamedata.empiresdat[0] = fix_data.fix_data(gamedata.empiresdat[0])

        #dbg("transforming main data file %s..." % (datfile_name), lvl=1)
        #TODO: data transformation nao! (merge stuff, etcetc)

        dbg("formatting output data...", lvl=1)
        data_formatter = DataFormatter()

        #dump metadata information
        data_dump = list()
        data_dump += blend_data.dump("blending_modes")
        data_dump += player_palette.dump("player_palette_%d" % palette_id)
        data_dump += termcolortable.dump("termcolors")
        data_dump += stringres.dump("string_resources")
        data_formatter.add_data(data_dump)

        #dump gamedata datfile data
        gamedata_dump = gamedata.dump("gamedata")
        data_formatter.add_data(gamedata_dump[0], prefix="gamedata/")

        output_data = data_formatter.export(output_formats)

        #save the meta files
        dbg("saving output data files...", lvl=1)
        util.file_write_multi(output_data, file_prefix=asset_folder)

    file_list = defaultdict(lambda: list())
    media_files_extracted = 0

    sound_list = filelist.SoundList()

    #iterate over all available files in the drs, check whether they should be extracted
    for drsname, drsfile in drsfiles.items():
        for file_extension, file_id in drsfile.files:
            if not any(
                    er.matches(drsname, file_id, file_extension)
                    for er in extraction_rules):
                continue

            #append this file to the list result
            if args.list_files:
                file_list[file_id].append((drsfile.fname, file_extension))
                continue

            #generate output filename where data will be stored in
            if write_enabled:
                fbase = os.path.join(asset_folder, drsfile.fname, str(file_id))
                fname = "%s.%s" % (fbase, file_extension)

                dbg("Extracting to %s..." % (fname), 2)
                file_data = drsfile.get_file_data(file_extension, file_id)
            else:
                continue

            if file_extension == 'slp':
                s = SLP(file_data)
                out_file_tmp = "%s: %d.%s" % (drsname, file_id, file_extension)

                dbg("%s -> %s -> generating atlas" % (out_file_tmp, fname), 1)

                #create exportable texture from the slp
                texture = Texture(s, palette)

                # the hotspots of terrain textures have to be fixed:
                if drsname == "terrain":
                    for entry in texture.image_metadata:
                        entry["cx"] = 48
                        entry["cy"] = 24

                #save the image and the corresponding metadata file
                texture.save(fname, output_formats)

            elif file_extension == 'wav':
                sound_filename = fname

                wav_output_file = util.file_get_path(fname, write=True)
                util.file_write(wav_output_file, file_data)

                if not args.no_opus:
                    file_extension = "opus"
                    sound_filename = "%s.%s" % (fbase, file_extension)
                    opus_output_file = util.file_get_path(sound_filename,
                                                          write=True)

                    #opusenc invocation (TODO: ffmpeg?)
                    opus_convert_call = [
                        'opusenc', wav_output_file, opus_output_file
                    ]
                    dbg("opus convert: %s -> %s ..." % (fname, sound_filename),
                        1)

                    oc = subprocess.Popen(opus_convert_call,
                                          stdout=subprocess.PIPE,
                                          stderr=subprocess.PIPE)
                    oc_out, oc_err = oc.communicate()

                    if ifdbg(2):
                        oc_out = oc_out.decode("utf-8")
                        oc_err = oc_err.decode("utf-8")

                        dbg(oc_out + "\n" + oc_err, 2)

                    #remove original wave file
                    os.remove(wav_output_file)

                #TODO: this is redundant here, but we need to strip the assets/ part..
                filelist_fname = "%s.%s" % (os.path.join(
                    drsfile.fname, str(file_id)), file_extension)
                sound_list.add_sound(file_id, filelist_fname, file_extension)

            else:
                #format does not require conversion, store it as plain blob
                output_file = util.file_get_path(fname, write=True)
                util.file_write(output_file, file_data)

            media_files_extracted += 1

    if write_enabled:
        sound_formatter = DataFormatter()
        sound_formatter.add_data(sound_list.dump())
        util.file_write_multi(sound_formatter.export(output_formats),
                              file_prefix=asset_folder)

        dbg("media files extracted: %d" % (media_files_extracted), 0)

    #was a file listing requested?
    if args.list_files:
        for idx, f in file_list.items():
            print("%d = [ %s ]" %
                  (idx, ", ".join("%s/%d.%s" % (file_name, idx, file_extension)
                                  for file_name, file_extension in f)))
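The extraction rules above are built from patterns of the form drsname:fileid.extension, where '*' acts as a wildcard (the default '*:*.*' extracts everything). The ExtractionRule class itself is not part of this excerpt; a minimal matcher with the same interface, under that assumed pattern syntax, might look like this:

class SimpleExtractionRule:
    """sketch of a rule matcher for patterns like 'graphics:3836.slp',
    'terrain:*.slp' or '*:*.*'; not the actual ExtractionRule class"""

    def __init__(self, rule):
        drsname, fname = rule.split(':')
        file_id, file_extension = fname.rsplit('.', 1)

        self.drsname        = None if drsname == '*' else drsname
        self.file_id        = None if file_id == '*' else int(file_id)
        self.file_extension = None if file_extension == '*' else file_extension

    def matches(self, drsname, file_id, file_extension):
        return ((self.drsname is None or self.drsname == drsname) and
                (self.file_id is None or self.file_id == file_id) and
                (self.file_extension is None or self.file_extension == file_extension))

#usage sketch, mirroring the loop over drsfile.files above:
rule = SimpleExtractionRule("terrain:*.slp")
print(rule.matches("terrain", 15000, "slp"))   #-> True
print(rule.matches("sounds0", 5036, "wav"))    #-> False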
Code example #53
0
    def __init__(self, target):
        """
        create a struct definition from an Exportable
        """

        dbg("generating struct definition from %s" % (repr(target)), lvl=3)

        self.name_struct_file = target.name_struct_file  # !< name of file where generated struct will be placed
        self.name_struct = target.name_struct  # !< name of generated C struct
        self.struct_description = target.struct_description  # !< comment placed above generated C struct
        self.prefix = None
        self.target = target  # !< target Exportable class that defines the data format

        # create ordered dict of member type objects from structure definition
        self.members = OrderedDict()
        self.inherited_members = list()
        self.parent_classes = list()

        target_members = target.get_data_format(allowed_modes=(True,
                                                               READ_EXPORT,
                                                               NOREAD_EXPORT),
                                                flatten_includes=True)
        for is_parent, export, member_name, member_type in target_members:

            if isinstance(member_type, IncludeMembers):
                raise Exception(
                    "something went very wrong, inheritance should be flattened at this point."
                )

            if type(member_name) is not str:
                raise Exception(
                    "member name has to be a string, currently: %s<%s>" %
                    (str(member_name), type(member_name)))

            # create member type class according to the defined member type
            if type(member_type) == str:
                array_match = vararray_match.match(member_type)
                if array_match:
                    array_type = array_match.group(1)
                    array_length = array_match.group(2)

                    if array_type == "char":
                        member = CharArrayMember(array_length)
                    elif array_type in NumberMember.type_scan_lookup:
                        # member = ArrayMember(ref_type=NumberMember, length=array_length, ref_type_params=[array_type])
                        # BIG BIG TODO
                        raise Exception("TODO: implement exporting arrays!")
                    else:
                        raise Exception("member %s has unknown array type %s" %
                                        (member_name, member_type))

                elif member_type == "std::string":
                    member = StringMember()
                else:
                    member = NumberMember(member_type)

            elif isinstance(member_type, DataMember):
                member = member_type

            else:
                raise Exception("unknown member type specification!")

            if member is None:
                raise Exception("member %s of struct %s is None" %
                                (member_name, self.name_struct))

            self.members[member_name] = member

            if is_parent:
                self.inherited_members.append(member_name)

        members = target.get_data_format(flatten_includes=False)
        for _, _, _, member_type in members:
            if isinstance(member_type, IncludeMembers):
                self.parent_classes.append(member_type.cls)
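vararray_match itself is not part of this excerpt; judging from how it is used above, it matches member type strings such as "char[40]" and captures the element type and the array length. An assumed equivalent, for illustration only:

import re

#assumed shape of vararray_match: capture element type and array length
vararray_match = re.compile(r"^([a-zA-Z0-9_]+) *\[ *([^\]]+) *\] *$")

match = vararray_match.match("char[40]")
print(match.group(1), match.group(2))        #-> char 40

#"std::string" and plain number types do not match, so the code above
#falls through to the StringMember/NumberMember branches
print(vararray_match.match("std::string"))   #-> None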