Code example #1
0
def create_dds_struct():
    """Create a DdsFile pre-configured as a DX10 2D texture container.

    Sets the "DDS " magic, zeroed reserved words, the header and
    pixel-format flags, and the DX10 extension fields; width, height and
    mip data are left for the caller to fill in.
    """
    dds = DdsFile()
    dds.header_string.data = b"DDS "
    dds.reserved_1 = Array([0] * 11)

    # header flags
    for flag_name in ("height", "width", "mipmap_count", "linear_size"):
        setattr(dds.flags, flag_name, 1)

    # pixel format flags: fourCC marks the presence of the DX10 extension header
    dds.pixel_format.flags.four_c_c = 1
    dds.pixel_format.four_c_c = FourCC.DX10

    # possibly the two 1s in header_3_0
    dds.dx_10.resource_dimension = D3D10ResourceDimension.D3D10_RESOURCE_DIMENSION_TEXTURE2D
    # array_size > 1 is not properly supported by paint net and PS, only gimp
    # header.dx_10.array_size = header_7.array_size
    dds.dx_10.array_size = 1

    # caps 1
    dds.caps_1.texture = 0
    return dds
Code example #2
0
    def load_dds(self, file_path):
        """Inject the pixel data of an external DDS file into this texture.

        Validates that the DDS dimensions match the archive tex header,
        then repacks the mip chain into the sorted stream buffers.
        """
        logging.info(f"Loading DDS {file_path}")
        tex_header, tex_buffers, header_7 = self.get_tex_structs(
            self.sized_str_entry)

        # depth is forced to 1; tex_header.one_0 / header_7.depth were
        # candidate sources but are not used
        tex_d = 1
        tex_h, tex_w = header_7.height, header_7.width
        tex_a = header_7.array_size
        comp = tex_header.compression_type.name
        tex_w = align_to(tex_w, comp)

        # the archive tex header is authoritative for the mip count, even
        # when users import a DDS that carries mips it should not have
        dds_file = DdsFile()
        dds_file.load(file_path)
        self.ensure_size_match(dds_file, tex_h, tex_w, tex_d, tex_a, comp)

        sorted_streams = self.get_sorted_streams()
        if not is_pc(self.ovl):
            out_bytes = dds_file.pack_mips(header_7.num_mips)
            sum_of_buffers = sum(stream.size for stream in sorted_streams)
            if len(out_bytes) != sum_of_buffers:
                logging.warning(
                    f"Packing of MipMaps failed. OVL expects {sum_of_buffers} bytes, but packing generated {len(out_bytes)} bytes."
                )
            # slice the packed mip blob back into the per-stream buffers
            with io.BytesIO(out_bytes) as reader:
                for stream in sorted_streams:
                    self.overwrite_buffer(stream, reader.read(stream.size))
        else:
            # PC layout: each stream has its own mip header and packing
            for stream, tex_header_3 in zip(sorted_streams, tex_buffers):
                self.overwrite_buffer(
                    stream, dds_file.pack_mips_pc(tex_header_3.num_mips))
Code example #3
0
def load_dds(ovl_data, dds_file_path, tex_sized_str_entry, is_2K,
             ovs_sized_str_entry):
    archive = ovl_data.ovs_files[0]

    if archive.is_pc():
        header_3_0, headers_3_1, header_7 = get_tex_structs_pc(
            tex_sized_str_entry)
        tex_h = header_7.height
        tex_w = header_7.width
        tex_d = header_3_0.one_0
        tex_a = header_7.array_size
    else:
        header_3_0, header_3_1, header_7 = get_tex_structs(tex_sized_str_entry)
        tex_h = header_7.height
        tex_w = header_7.width
        tex_d = header_7.depth
        tex_a = header_7.array_size
    comp = header_3_0.compression_type.name
    tex_w = align_to(tex_w, comp)

    # read archive tex header to make sure we have the right mip count
    # even when users import DDS with mips when it should have none
    if is_2K:
        tex_to_2K(tex_sized_str_entry, ovs_sized_str_entry)

    # load dds
    dds_file = DdsFile()
    dds_file.load(dds_file_path)
    ensure_size_match(os.path.basename(dds_file_path), dds_file, tex_h, tex_w,
                      tex_d, tex_a, comp)
    if archive.is_pc():
        for buffer, tex_header_3 in zip(tex_sized_str_entry.data_entry.buffers,
                                        headers_3_1):
            dds_buff = dds_file.pack_mips_pc(tex_header_3.num_mips)
            if len(dds_buff) < buffer.size:
                print(
                    f"Last {buffer.size - len(dds_buff)} bytes of DDS buffer are not overwritten!"
                )
                dds_buff = dds_buff + buffer.data[len(dds_buff):]
            buffer.update_data(dds_buff)
    else:
        out_bytes = dds_file.pack_mips(header_7.num_mips)
        # with dds_file.writer(dds_file_path+"dump.dds") as stream:
        # 	dds_file.write(stream)
        # 	stream.write(out_bytes)

        sum_of_buffers = sum(
            buffer.size for buffer in tex_sized_str_entry.data_entry.buffers)
        if len(out_bytes) != sum_of_buffers:
            print(
                f"Packing of MipMaps failed. OVL expects {sum_of_buffers} bytes, but packing generated {len(out_bytes)} bytes."
            )

        with io.BytesIO(out_bytes) as reader:
            for buffer in tex_sized_str_entry.data_entry.buffers:
                dds_buff = reader.read(buffer.size)
                if len(dds_buff) < buffer.size:
                    print(
                        f"Last {buffer.size - len(dds_buff)} bytes of DDS buffer are not overwritten!"
                    )
                    dds_buff = dds_buff + buffer.data[len(dds_buff):]
                buffer.update_data(dds_buff)
Code example #4
0
    def extract(self, out_dir, show_temp_files, progress_callback):
        """Extract this texture entry to a raw .tex dump and .dds/.png files.

        :param out_dir: callable mapping a file name to an output path.
        :param show_temp_files: when True, the intermediate .tex and .dds
            paths are included in the returned list.
        :param progress_callback: unused here; presumably part of the shared
            extractor interface — TODO confirm.
        :return: list of written file paths (PNG post-processing output is
            always included when the PNG exists).
        """
        tex_name = self.sized_str_entry.name
        basename = os.path.splitext(tex_name)[0]
        dds_name = basename + ".dds"
        logging.info(f"Writing {tex_name}")

        # get joined output buffer
        buffer_data = b"".join(
            [buffer.data for buffer in self.get_sorted_streams()])

        out_files = []
        tex_path = out_dir(tex_name)
        if show_temp_files:
            out_files.append(tex_path)
        # dump raw tex: header, root pointer data, fragment pointers, pixels
        with open(tex_path, "wb") as tex_file:
            tex_file.write(self.pack_header(b"TEX"))
            # num_buffers
            # tex_file.write(struct.pack("I", 1+len(self.file_entry.streams)))
            tex_file.write(self.sized_str_entry.pointers[0].data)
            for frag in self.sized_str_entry.fragments:
                tex_file.write(frag.pointers[1].data)
            tex_file.write(buffer_data)

        # re-parse the dump to get a structured view of the tex header
        # (note: rebinds the name tex_file from file handle to TexFile)
        tex_file = TexFile(self.ovl.context)
        tex_file.load(tex_path)
        # print(tex_file)
        # return out_files
        dds_file = DdsFile()
        dds_file.buffer = buffer_data

        # per-game header layouts: pick the struct that carries image info
        if is_dla(self.ovl):
            tex_info = tex_file.tex_info
            dds_file.width = tex_info.width
            dds_file.height = tex_info.height
            dds_file.mipmap_count = tex_info.num_mips
            dds_file.linear_size = len(buffer_data)
            dds_file.depth = 1
        elif is_pc(self.ovl) or is_ztuac(self.ovl):
            tex_info = tex_file.frag_01[0]
            dds_file.width = tex_info.width
            # hack until we have proper support for array_size on the image editors
            # todo - this is most assuredly not array size for ED
            dds_file.height = tex_info.height  # * max(1, header_7.array_size)
            dds_file.mipmap_count = tex_info.mip_index
            dds_file.linear_size = len(buffer_data)
            dds_file.depth = 1
        else:
            tex_info = tex_file.frag_11
            if not len(buffer_data) == tex_info.data_size:
                print(
                    f"7_1 data size ({tex_info.data_size}) and actual data size of combined buffers ({len(buffer_data)}) do not match up (bug)"
                )
            dds_file.width = tex_info.width
            # hack until we have proper support for array_size on the image editors
            dds_file.height = tex_info.height * tex_info.array_size
            dds_file.depth = tex_info.depth
            dds_file.linear_size = tex_info.data_size
            dds_file.mipmap_count = tex_info.num_mips

        # map the archive compression type to a DXGI format; on failure,
        # fall back to brute-forcing every known format
        try:
            dds_type = tex_file.tex_info.compression_type.name
            logging.info(tex_file.tex_info.compression_type)
            # account for aliases
            if dds_type.endswith(("_B", "_C")):
                dds_type = dds_type[:-2]
            dds_compression_types = ((dds_type, DxgiFormat[dds_type]), )
        except KeyError:
            dds_compression_types = [(x.name, x) for x in DxgiFormat]
            logging.warning(
                f"Unknown compression type {tex_file.tex_info.compression_type}, trying all compression types"
            )
        logging.debug(f"dds_compression_type {dds_compression_types}")

        # write out everything for each compression type
        for dds_type, dds_value in dds_compression_types:
            # print(dds_file.width)
            # header attribs
            if not is_ztuac(self.ovl):
                dds_file.width = align_to(dds_file.width, dds_type)

            # dx 10 stuff
            dds_file.dx_10.dxgi_format = dds_value

            # start out
            dds_path = out_dir(dds_name)
            # disambiguate when brute-forcing multiple candidate formats
            if len(dds_compression_types) > 1:
                dds_path += f"_{dds_type}.dds"

            # write dds
            dds_file.save(dds_path)
            # print(dds_file)
            if show_temp_files:
                out_files.append(dds_path)

            # convert the dds to PNG, PNG must be visible so put it in out_dir
            png_file_path = texconv.dds_to_png(dds_path, dds_file.height)

            if os.path.isfile(png_file_path):
                # postprocessing of the png
                # NOTE(review): frag_11 is passed unconditionally, even on the
                # dla/pc/ztuac paths that read tex_info elsewhere — confirm intended
                out_files.extend(
                    imarray.wrapper(png_file_path, tex_file.frag_11, self.ovl))
        return out_files