Example #1
    def unpack(self, reader: BinaryReader, **kwargs):
        data = reader.unpack_struct(self.STRUCT)
        self.version = FSBHeaderVersion(data["version"])
        self.mode_flags = data["mode_flags"]
        self.bank_hash = struct.unpack(">Q", data["bank_hash"])[0]
        self.guid = "-".join((
            data["_guid"][3::-1].hex(),  # first three chunks need to be reversed
            data["_guid"][5:3:-1].hex(),
            data["_guid"][7:5:-1].hex(),
            data["_guid"][8:10].hex(),
            data["_guid"][10:].hex(),
        ))

        data_offset = reader.position + data["sample_headers_size"]
        file_size = data_offset + data["sample_data_size"]

        self.samples = []
        for i in range(data["sample_count"]):
            if self.mode_flags & FSBHeaderMode.BASICHEADERS and i > 0:
                # Clone of first sample, with new length/compressed length information.
                sample = copy.deepcopy(self.samples[0])
                basic_sample_header = reader.unpack_struct(self.BASIC_SAMPLE_STRUCT)
                sample.header.length = basic_sample_header["length"]
                sample.header.compressed_length = basic_sample_header["length"]
            else:
                # New sample.
                sample = FSBSample.unpack_from(reader, data_offset=data_offset)
                data_offset += sample.header.compressed_length
            self.samples.append(sample)
        if data_offset != file_size:
            raise ValueError(f"Sample data end offset ({data_offset}) does not equal expected file size ({file_size}).")
Example #2
    def unpack(self, esd_reader: BinaryReader, **kwargs):

        header = esd_reader.unpack_struct(self.EXTERNAL_HEADER_STRUCT)
        # Internal offsets start here, so we reset the buffer.
        esd_reader = BinaryReader(esd_reader.read())

        internal_header = esd_reader.unpack_struct(self.INTERNAL_HEADER_STRUCT)
        self.magic = internal_header["magic"]
        state_machine_headers = esd_reader.unpack_structs(
            self.STATE_MACHINE_HEADER_STRUCT, count=header["state_machine_count"]
        )

        for state_machine_header in state_machine_headers:
            states = self.State.unpack(
                esd_reader,
                state_machine_header["state_machine_offset"],
                count=state_machine_header["state_count"],
            )
            self.state_machines[state_machine_header["state_machine_index"]] = states

        if internal_header["esd_name_length"] > 0:
            esd_name_offset = internal_header["esd_name_offset"]
            esd_name_length = internal_header["esd_name_length"]
            # Note the given length is the length of the final string. The actual UTF-16 encoded bytes are twice that.
            self.esd_name = esd_reader.unpack_string(
                offset=esd_name_offset, length=2 * esd_name_length, encoding="utf-16le"
            )
            esd_reader.seek(esd_name_offset + 2 * esd_name_length)
            self.file_tail = esd_reader.read()
        else:
            self.esd_name = ""
            esd_reader.seek(header["unk_offset_1"])  # after packed EZL
            self.file_tail = esd_reader.read()
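The doubling of the name length above follows from UTF-16 itself; a quick standalone check (the name is hypothetical, and the 2x rule assumes characters within the Basic Multilingual Plane):

    name = "chr_behavior"  # hypothetical ESD name
    encoded = name.encode("utf-16le")
    assert len(encoded) == 2 * len(name)  # each BMP character takes two bytes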
Example #3
    def unpack(self, msb_reader: BinaryReader):
        part_offset = msb_reader.position

        header = msb_reader.unpack_struct(self.PART_HEADER_STRUCT)
        if header["__part_type"] != self.ENTRY_SUBTYPE:
            raise ValueError(f"Unexpected part type enum {header['part_type']} for class {self.__class__.__name__}.")
        self._instance_index = header["_instance_index"]
        self._model_index = header["_model_index"]
        self._part_type_index = header["_part_type_index"]
        for transform in ("translate", "rotate", "scale"):
            setattr(self, transform, Vector3(header[transform]))
        self._draw_groups = int_group_to_bit_set(header["__draw_groups"], assert_size=8)
        self._display_groups = int_group_to_bit_set(header["__display_groups"], assert_size=8)
        self._backread_groups = int_group_to_bit_set(header["__backread_groups"], assert_size=8)
        self.description = msb_reader.unpack_string(
            offset=part_offset + header["__description_offset"], encoding="utf-16-le",
        )
        self.name = msb_reader.unpack_string(
            offset=part_offset + header["__name_offset"], encoding="utf-16-le",
        )
        self.sib_path = msb_reader.unpack_string(
            offset=part_offset + header["__sib_path_offset"], encoding="utf-16-le",
        )

        msb_reader.seek(part_offset + header["__base_data_offset"])
        base_data = msb_reader.unpack_struct(self.PART_BASE_DATA_STRUCT)
        self.set(**base_data)

        msb_reader.seek(part_offset + header["__type_data_offset"])
        self.unpack_type_data(msb_reader)

        self._unpack_gparam_data(msb_reader, part_offset, header)
        self._unpack_scene_gparam_data(msb_reader, part_offset, header)
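The `int_group_to_bit_set` helper is defined elsewhere in the library; as an assumption-labeled sketch, a group of 8 unsigned 32-bit integers can be flattened into a set of 256 draw/display/backread bit indices like this:

    def int_group_to_bit_set_sketch(group, assert_size=None):
        """Hypothetical stand-in for `int_group_to_bit_set`: treats `group` as
        consecutive 32-bit words and returns the set of enabled flat bit indices."""
        if assert_size is not None and len(group) != assert_size:
            raise ValueError(f"Expected a group of {assert_size} integers.")
        return {
            32 * word_index + bit
            for word_index, word in enumerate(group)
            for bit in range(32)
            if word & (1 << bit)
        }

    assert int_group_to_bit_set_sketch([0b101, 0, 0, 0, 0, 0, 0, 1], assert_size=8) == {0, 2, 224}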
Example #4
 def unpack(self, msb_reader: BinaryReader):
     event_offset = msb_reader.position
     header = msb_reader.unpack_struct(self.EVENT_HEADER_STRUCT)
     if header["__event_type"] != self.ENTRY_SUBTYPE:
         raise ValueError(f"Unexpected MSB event type value {header['__event_type']} for {self.__class__.__name__}.")
     msb_reader.seek(event_offset + header["__base_data_offset"])
     base_data = msb_reader.unpack_struct(self.EVENT_BASE_DATA_STRUCT)
     name_offset = event_offset + header["__name_offset"]
     self.name = msb_reader.unpack_string(offset=name_offset, encoding=self.NAME_ENCODING)
     self.set(**header)
     self.set(**base_data)
     msb_reader.seek(event_offset + header["__type_data_offset"])
     self.unpack_type_data(msb_reader)
Example #5
def decompress(dcx_source: ReadableTyping) -> tuple[bytes, DCXType]:
    """Decompress the given file path, raw bytes, or buffer/reader.

    Returns a tuple containing the decompressed `bytes` and the detected `DCXType`, which can be
    used later to compress the data again with the same DCX type/parameters.
    """
    reader = BinaryReader(dcx_source, byte_order=">")  # always big-endian
    dcx_type = DCXType.detect(reader)

    if dcx_type == DCXType.Unknown:
        raise ValueError("Unknown DCX type. Cannot decompress.")

    header = reader.unpack_struct(DCX_HEADER_STRUCTS[dcx_type], byte_order=">")
    compressed = reader.read(header["compressed_size"])  # TODO: do I need to rstrip nulls?

    if dcx_type == DCXType.DCX_KRAK:
        decompressed = oodle.decompress(compressed, header["decompressed_size"])
    else:
        decompressed = zlib.decompressobj().decompress(compressed)

    if len(decompressed) != header["decompressed_size"]:
        raise ValueError("Decompressed DCX data size does not match size in header.")
    return decompressed, dcx_type
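A hedged usage sketch (the file name is hypothetical; only the `decompress` signature above is taken from the source, and `DCXType` is assumed to be an enum with a `.name` attribute):

    # Decompress a DCX file from disk and keep the detected type so the data
    # can later be recompressed with the same parameters.
    data, dcx_type = decompress("c2240.chrbnd.dcx")
    print(f"Decompressed {len(data)} bytes ({dcx_type.name}).")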
Example #6
    def unpack(self, emevd_reader: BinaryReader, **kwargs):
        header = emevd_reader.unpack_struct(self.HEADER_STRUCT)

        emevd_reader.seek(header["event_table_offset"])
        event_dict = self.Event.unpack_event_dict(
            emevd_reader,
            header["instruction_table_offset"],
            header["base_arg_data_offset"],
            header["event_arg_table_offset"],
            header["event_layers_table_offset"],
            count=header["event_count"],
        )

        self.events.update(event_dict)

        if header["packed_strings_size"] != 0:
            emevd_reader.seek(header["packed_strings_offset"])
            self.packed_strings = emevd_reader.read(header["packed_strings_size"])

        if header["linked_files_count"] != 0:
            emevd_reader.seek(header["linked_files_table_offset"])
            # These are relative offsets into the packed string data.
            for _ in range(header["linked_files_count"]):
                self.linked_file_offsets.append(struct.unpack("<Q", emevd_reader.read(8))[0])

        # Parse event args for `RunEvent` and `RunCommonEvent` instructions.
        for event in self.events.values():
            event.update_evs_function_args()
        for event in self.events.values():
            event.update_run_event_instructions()
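The linked file offsets above are relative offsets into the packed string data; a standalone illustration (the strings and the shift_jis_2004 decoding are assumptions made for the demo):

    packed_strings = b"N:\\FRPG\\data.bnd\0common.emevd\0"
    linked_file_offsets = [0, 17]
    names = [
        packed_strings[offset:packed_strings.index(b"\0", offset)].decode("shift_jis_2004")
        for offset in linked_file_offsets
    ]
    print(names)  # ['N:\\FRPG\\data.bnd', 'common.emevd']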
Example #7
    def unpack(self, reader: BinaryReader, remove_empty_entries=True):
        header = reader.unpack_struct(self.HEADER_STRUCT)

        # Groups of contiguous text string IDs are defined by ranges (first ID, last ID) to save space.
        ranges = reader.unpack_structs(self.RANGE_STRUCT, count=header["range_count"])
        if reader.position != header["string_offsets_offset"]:
            _LOGGER.warning(
                "Range data did not end at string data offset given in FMG header."
            )
        string_offsets = reader.unpack_structs(self.STRING_OFFSET_STRUCT, count=header["string_count"])

        # Text pointer table corresponds to all the IDs (joined together) of the above ranges, in order.
        for string_range in ranges:
            i = string_range["first_index"]
            for string_id in range(string_range["first_id"], string_range["last_id"] + 1):
                if string_id in self.entries:
                    raise ValueError(f"Malformed FMG: entry ID {string_id} appears more than once.")
                string_offset = string_offsets[i]["offset"]
                if string_offset == 0:
                    if not remove_empty_entries:
                        # Empty text string. These will trigger in-game error messages, like ?PlaceName?.
                        # Distinct from ' ', which is intentionally blank text data (e.g. the unused area subtitles).
                        self.entries[string_id] = ""
                else:
                    string = reader.unpack_string(offset=string_offset, encoding="utf-16le")
                    if string or not remove_empty_entries:
                        self.entries[string_id] = string
                i += 1
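The range-to-ID expansion above can be shown in isolation (the ranges are made up): indices into the string offset table run on from one range to the next, while IDs restart at each range's `first_id`.

    ranges = [
        {"first_index": 0, "first_id": 10000, "last_id": 10002},
        {"first_index": 3, "first_id": 20000, "last_id": 20001},
    ]
    pairs = []
    for string_range in ranges:
        i = string_range["first_index"]
        for string_id in range(string_range["first_id"], string_range["last_id"] + 1):
            pairs.append((i, string_id))
            i += 1
    print(pairs)  # [(0, 10000), (1, 10001), (2, 10002), (3, 20000), (4, 20001)]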
Example #8
    def unpack(self, reader: BinaryReader, bounding_box_has_unknown: bool = None):
        mesh = reader.unpack_struct(self.STRUCT)

        bounding_box_offset = mesh.pop("__bounding_box_offset")
        if bounding_box_offset == 0:
            self.bounding_box = None
        else:
            with reader.temp_offset(bounding_box_offset):
                if bounding_box_has_unknown:
                    self.bounding_box = BoundingBoxWithUnknown(reader)
                else:
                    self.bounding_box = BoundingBox(reader)

        bone_count = mesh.pop("__bone_count")
        with reader.temp_offset(mesh.pop("__bone_offset")):
            self.bone_indices = list(reader.unpack(f"<{bone_count}i"))

        face_set_count = mesh.pop("__face_set_count")
        with reader.temp_offset(mesh.pop("__face_set_offset")):
            self._face_set_indices = list(reader.unpack(f"<{face_set_count}i"))

        vertex_buffer_count = mesh.pop("__vertex_buffer_count")
        with reader.temp_offset(mesh.pop("__vertex_buffer_offset")):
            self._vertex_buffer_indices = list(reader.unpack(f"<{vertex_buffer_count}i"))

        self.set(**mesh)
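`temp_offset` is used throughout these examples; judging purely from its usage, it behaves like the following hypothetical context manager (a sketch, not the library's actual implementation):

    import contextlib

    @contextlib.contextmanager
    def temp_offset_sketch(reader, offset):
        """Seek to `offset`, yield, then restore the original position."""
        old_position = reader.position
        reader.seek(offset)
        try:
            yield
        finally:
            reader.seek(old_position)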
Example #9
    def unpack(self, reader: BinaryReader, start_offset: int, depth=0):
        data = reader.unpack_struct(self.STRUCT)
        name_length = data.pop("__name_length")
        self.name = reader.unpack_string(length=name_length, encoding="ascii")

        self.size = self.STRUCT.size + name_length
        self.depth = depth

        # TODO: Use `_properties` and `field` properties, which inspect the node name, etc.
        self._properties = [FBXProperty.unpack(reader) for _ in range(data.pop("__property_count"))]

        self.size += data.pop("__property_list_size")

        self.children = []
        end_offset = data.pop("__end_offset")
        while start_offset + self.size < end_offset:
            child = self.__class__(reader, start_offset=start_offset + self.size, depth=self.depth + 1)
            self.size += child.size
            if start_offset + self.size == end_offset:
                break  # empty node is not kept
            self.children.append(child)

        if self.name == "P":
            if self.children:
                raise ValueError("`FBXNode` named 'P' should not have any children.")
            name, *args = [p.value for p in self._properties]
            self._field = FBXPropertyField(name, *args)
        else:
            self._field = None
Example #10
 def unpack(self, reader: BinaryReader, struct_offset: int):
     layout_member = reader.unpack_struct(self.STRUCT)
     binary_struct_offset = layout_member.pop("__struct_offset")
     if struct_offset != binary_struct_offset:
         raise ValueError(
             f"`LayoutMember` binary struct offset ({binary_struct_offset}) does not match passed struct offset "
             f"({struct_offset}).")
     self.set(**layout_member)
Example #11
    def unpack(self, reader: BinaryReader):
        buffer_layout = reader.unpack_struct(self.STRUCT)

        with reader.temp_offset(buffer_layout.pop("__member_offset")):
            struct_offset = 0
            self.members = []
            for _ in range(buffer_layout.pop("__member_count")):
                member = LayoutMember(reader, struct_offset=struct_offset)
                self.members.append(member)
                struct_offset += member.layout_type.size()
Example #12
 def unpack(self, reader: BinaryReader, color_is_argb=None):
     if color_is_argb is None:
         raise ValueError("`color_is_argb` (bool) must be given to `Dummy.unpack()`.")
     data = reader.unpack_struct(self.STRUCT)
     if color_is_argb:
         alpha, red, green, blue = data.pop("__color")
     else:
         blue, green, red, alpha = data.pop("__color")
     self.color = ColorRGBA8(red, green, blue, alpha)
     self.set(**data)
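A standalone illustration of the two channel orders handled above (the bytes are made up):

    raw = (0xFF, 0x10, 0x20, 0x30)  # hypothetical unpacked "__color" tuple

    alpha, red, green, blue = raw  # ARGB layout
    assert (red, green, blue, alpha) == (0x10, 0x20, 0x30, 0xFF)

    blue, green, red, alpha = raw  # BGRA layout
    assert (red, green, blue, alpha) == (0x30, 0x20, 0x10, 0xFF)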
Example #13
 def unpack_type_data(self, msb_reader: BinaryReader):
     data = msb_reader.unpack_struct(self.PART_TYPE_DATA_STRUCT, exclude_asserted=True)
     self.set(**data)
     self.area_name_id = abs(data["__area_name_id"]) if data["__area_name_id"] != -1 else -1
     self._force_area_banner = data["__area_name_id"] < 0  # Custom field.
     if data["__play_region_id"] > -10:
         self._play_region_id = data["__play_region_id"]
         self._stable_footing_flag = 0
     else:
         self._play_region_id = 0
         self._stable_footing_flag = -data["__play_region_id"] - 10
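The `__play_region_id` field above packs two values into one integer. The inverse mapping (a sketch assuming the same convention) makes the round trip explicit:

    def pack_play_region(play_region_id: int, stable_footing_flag: int) -> int:
        """Hypothetical inverse of the split above: a non-zero stable footing flag
        is stored as -(flag + 10); otherwise the play region ID is stored as-is."""
        if stable_footing_flag != 0:
            return -stable_footing_flag - 10
        return play_region_id

    assert pack_play_region(0, 3) == -13  # unpacks to flag 3, region 0
    assert pack_play_region(7, 0) == 7    # unpacks to region 7, flag 0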
Example #14
    def unpack(self, msb_reader: BinaryReader):
        header = msb_reader.unpack_struct(self.MAP_ENTITY_LIST_HEADER)
        entry_offsets = [
            msb_reader.unpack_struct(self.MAP_ENTITY_ENTRY_OFFSET)["entry_offset"]
            for _ in range(header["entry_offset_count"] - 1)  # 'entry_offset_count' includes the tail offset
        ]
        next_entry_list_offset = msb_reader.unpack_struct(
            self.MAP_ENTITY_LIST_TAIL
        )["next_entry_list_offset"]
        self.name = msb_reader.unpack_string(offset=header["name_offset"], encoding=self.NAME_ENCODING)

        self._entries = []

        for entry_offset in entry_offsets:
            msb_reader.seek(entry_offset)
            entry = self.ENTRY_CLASS(msb_reader)
            self._entries.append(entry)

        msb_reader.seek(next_entry_list_offset)
Example #15
    def unpack(cls, reader: BinaryReader, event_layers_offset):
        """Unpack event layer bit field as <a, b, c, ...> where a, b, c, ... are the little-endian bit
        zero-based indices of the event layer bit field. 

        e.g. field 01001...110 would be {1, 4, 29, 30}.
        """
        reader.seek(event_layers_offset)
        d = reader.unpack_struct(cls.HEADER_STRUCT)
        enabled_event_layers_list = []
        for i in range(32):
            if (2 ** i) & d["event_layers"]:
                enabled_event_layers_list.append(i)
        return cls(enabled_event_layers_list)
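The loop above is a plain bit scan; the same result can be had with shifts (the field value here is illustrative and matches the docstring example):

    event_layers = 0b0110_0000_0000_0000_0000_0000_0001_0010
    enabled = [i for i in range(32) if event_layers & (1 << i)]
    print(enabled)  # [1, 4, 29, 30]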
Example #16
 def unpack(self, msb_reader: BinaryReader):
     model_offset = msb_reader.position
     model_data = msb_reader.unpack_struct(self.MODEL_STRUCT)
     self.name = msb_reader.unpack_string(
         offset=model_offset + model_data["__name_offset"], encoding=self.NAME_ENCODING
     )
     self.sib_path = msb_reader.unpack_string(
         offset=model_offset + model_data["__sib_path_offset"], encoding=self.NAME_ENCODING,
     )
     try:
         self.ENTRY_SUBTYPE = MSBModelSubtype(model_data["__model_type"])
     except ValueError:
         raise ValueError(f"Unrecognized MSB model type: {model_data['__model_type']}")
     self.set(**model_data)
Example #17
 def unpack(self, msb_reader: BinaryReader):
     model_offset = msb_reader.position
     header = msb_reader.unpack_struct(self.MODEL_STRUCT)
     self.name = msb_reader.unpack_string(
         offset=model_offset + header["__name_offset"], encoding=self.NAME_ENCODING
     )
     self.sib_path = msb_reader.unpack_string(
         offset=model_offset + header["__sib_path_offset"],
         encoding=self.NAME_ENCODING,
     )
     if header["__model_type"] != self.ENTRY_SUBTYPE.value:
         raise ValueError(
             f"Unexpected MSB model type value {header['__model_type']} for {self.__class__.__name__}. "
             f"Expected {self.ENTRY_SUBTYPE.value}.")
     self.set(**header)
Example #18
    def unpack(cls, esd_reader: BinaryReader, condition_pointers_offset, count=1):
        """Returns a list of `Condition` instances."""
        conditions = []
        if condition_pointers_offset == -1:
            return conditions
        pointers = esd_reader.unpack_structs(
            cls.POINTER_STRUCT, count=count, offset=condition_pointers_offset
        )
        for p in pointers:
            d = esd_reader.unpack_struct(cls.STRUCT, offset=p["condition_offset"])
            pass_commands = cls.Command.unpack(
                esd_reader,
                d["pass_commands_offset"],
                count=d["pass_commands_count"],
            )
            subconditions = cls.unpack(  # safe recursion
                esd_reader,
                d["subcondition_pointers_offset"],
                count=d["subcondition_pointers_count"],
            )
            test_ezl = esd_reader.unpack_bytes(offset=d["test_ezl_offset"], length=d["test_ezl_size"])
            if d["next_state_offset"] > 0:
                next_state_index = esd_reader.unpack_struct(
                    cls.STATE_ID_STRUCT, offset=d["next_state_offset"]
                )["state_id"]
            else:
                next_state_index = -1
            conditions.append(cls(next_state_index, test_ezl, pass_commands, subconditions))

        return conditions
Example #19
    def unpack(self, msb_reader: BinaryReader):
        header = msb_reader.unpack_struct(self.MAP_ENTITY_LIST_HEADER)
        entry_offsets = [
            msb_reader.unpack_struct(self.MAP_ENTITY_ENTRY_OFFSET)["entry_offset"]
            for _ in range(header["entry_offset_count"] - 1)  # 'entry_offset_count' includes the tail offset
        ]
        next_entry_list_offset = msb_reader.unpack_struct(
            self.MAP_ENTITY_LIST_TAIL
        )["next_entry_list_offset"]
        name = msb_reader.unpack_string(offset=header["name_offset"], encoding=self.NAME_ENCODING)
        if name != self.INTERNAL_NAME:
            raise ValueError(
                f"MSB entry list internal name '{name}' does not match known name '{self.INTERNAL_NAME}'."
            )
        self._entries = []

        for entry_offset in entry_offsets:
            msb_reader.seek(entry_offset)
            entry = self.ENTRY_CLASS(msb_reader)
            self._entries.append(entry)

        msb_reader.seek(next_entry_list_offset)
Example #20
    def unpack(self, reader: BinaryReader, **kwargs):
        header = reader.unpack_struct(self.HEADER_STRUCT)
        if header["version"] > self.MAX_VERSION:
            raise NotImplementedError(
                f"Cannot unpack FBX version {header['version']}. Last supported version is {self.MAX_VERSION}."
            )
        self.node_class = FBXNode64 if header["version"] >= 7700 else FBXNode32

        start_offset = self.HEADER_STRUCT.size
        self.root_nodes = []
        while True:
            node = self.node_class(reader, start_offset=start_offset)
            start_offset += node.size
            if node.is_empty:
                break  # empty node is not kept
            self.root_nodes.append(node)
Example #21
 def unpack(self, reader: BinaryReader, big_endian=False):
     self.big_endian = self._check_big_endian(reader)
     header = reader.unpack_struct(self.HEADER_STRUCT, byte_order=">" if self.big_endian else "<")
     if self._check_use_struct_64(reader, header["goal_count"]):
         goal_struct = self.GOAL_STRUCT_64
     else:
         goal_struct = self.GOAL_STRUCT_32
     self.goals = []
     for _ in range(header["goal_count"]):
         goal = self.unpack_goal(reader, goal_struct)
         if goal.script_name in [g.script_name for g in self.goals]:
             _LOGGER.warning(
                 f"Goal '{goal.goal_id}' is referenced multiple times in LuaInfo (same ID and type). Each goal ID "
                 f"should have (at most) one 'battle' goal and one 'logic' goal. All goal entries after the first "
                 f"will be ignored.")
         else:
             self.goals.append(goal)
Example #22
 def unpack_goal(self, reader: BinaryReader, goal_struct: BinaryStruct) -> LuaGoal:
     goal = reader.unpack_struct(goal_struct, byte_order=">" if self.big_endian else "<")
     name = reader.unpack_string(offset=goal["name_offset"], encoding=self.encoding)
     if goal["logic_interrupt_name_offset"] > 0:
         logic_interrupt_name = reader.unpack_string(
             offset=goal["logic_interrupt_name_offset"], encoding=self.encoding
         )
     else:
         logic_interrupt_name = ""
     return LuaGoal(
         goal_id=goal["goal_id"],
         goal_name=name,
         has_battle_interrupt=goal["has_battle_interrupt"],
         has_logic_interrupt=goal["has_logic_interrupt"],
         logic_interrupt_name=logic_interrupt_name,
     )
Example #23
 def unpack(self, dcx_reader: BinaryReader):
     if self.magic:
         raise ValueError("`DCX.magic` cannot be set manually before unpack.")
     header = dcx_reader.unpack_struct(self.HEADER_STRUCT)
     self.magic = header["magic"]
     compressed = dcx_reader.read().rstrip(b"\0")  # nulls stripped from the end
     if len(compressed) != header["compressed_size"]:
         # No error raised. This happens in some files.
         file_path = f" {self.dcx_path}" if self.dcx_path else ""
         _LOGGER.warning(
             f"Compressed data size ({len(compressed)}) does not match size in header "
             f"({header['compressed_size']}) in DCX-compressed file{file_path}."
         )
     self.data = zlib.decompressobj().decompress(compressed)
     if len(self.data) != header["decompressed_size"]:
         raise ValueError("Decompressed data size does not match size in header.")
Example #24
 def unpack(self, paramdef_reader: BinaryReader, **kwargs):
     header = paramdef_reader.unpack_struct(self.HEADER_STRUCT)
     if "param_name" in header:
         self.param_type = header["param_name"]
     else:
         self.param_type = paramdef_reader.unpack_string(
             offset=header["param_name_offset"],
             encoding="shift_jis_2004",  # never unicode
         )
     self.data_version = header["data_version"]
     self.format_version = header["format_version"]
     self.unicode = header["unicode"]
     self.fields = self.FIELD_CLASS.unpack_fields(
         self.param_type,
         paramdef_reader,
         header["field_count"],
         self.format_version,
         self.unicode,
         self.BYTE_ORDER,
     )
Example #25
 def unpack(
     self,
     reader: BinaryReader,
     encoding: str,
     version: Version,
     gx_lists: tp.List[GXList],
     gx_list_indices: tp.Dict[int, int],
 ):
     material = reader.unpack_struct(self.STRUCT)
     self.name = reader.unpack_string(offset=material.pop("__name__z"), encoding=encoding)
     self.mtd_path = reader.unpack_string(offset=material.pop("__mtd_path__z"), encoding=encoding)
     gx_offset = material.pop("__gx_offset")
     if gx_offset == 0:
         self.gx_index = -1
     elif gx_offset in gx_list_indices:
         self.gx_index = gx_list_indices[gx_offset]
     else:
         self.gx_index = gx_list_indices[gx_offset] = len(gx_lists)
         with reader.temp_offset(gx_offset):
             gx_lists.append(GXList(reader, version))
     self.set(**material)
Example #26
    def unpack(self, reader: BinaryReader, header_vertex_index_size: int,
               vertex_data_offset: int):
        face_set = reader.unpack_struct(self.STRUCT)

        vertex_index_size = face_set.pop("__vertex_index_size")
        if vertex_index_size == 0:
            vertex_index_size = header_vertex_index_size

        if vertex_index_size == 8:
            raise NotImplementedError("Soulstruct cannot support edge-compressed FLVER face sets.")
        elif vertex_index_size in {16, 32}:
            vertex_indices_count = face_set.pop("__vertex_indices_count")
            vertex_indices_offset = face_set.pop("__vertex_indices_offset")
            with reader.temp_offset(vertex_data_offset + vertex_indices_offset):
                fmt = f"<{vertex_indices_count}{'H' if vertex_index_size == 16 else 'I'}"
                self.vertex_indices = list(reader.unpack(fmt))
        else:
            raise ValueError(f"Unsupported face set index size: {vertex_index_size}")

        self.set(**face_set)
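The format string above selects 16-bit ('H') or 32-bit ('I') unsigned indices; a standalone check with `struct` (the indices are made up):

    import struct

    vertex_indices = (0, 1, 2, 2, 1, 3)
    for vertex_index_size, type_char in ((16, "H"), (32, "I")):
        fmt = f"<{len(vertex_indices)}{type_char}"
        packed = struct.pack(fmt, *vertex_indices)
        assert len(packed) == len(vertex_indices) * vertex_index_size // 8
        assert struct.unpack(fmt, packed) == vertex_indices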
Example #27
    def unpack(self, msb_reader: BinaryReader):
        region_offset = msb_reader.position
        base_data = msb_reader.unpack_struct(self.REGION_STRUCT)
        self.name = msb_reader.unpack_string(
            offset=region_offset + base_data["name_offset"],
            encoding=self.NAME_ENCODING,
        )
        self._region_index = base_data["__region_index"]
        self.translate = Vector3(base_data["translate"])
        self.rotate = Vector3(base_data["rotate"])
        self.check_null_field(msb_reader, region_offset + base_data["unknown_offset_1"])
        self.check_null_field(msb_reader, region_offset + base_data["unknown_offset_2"])

        if base_data["type_data_offset"] != 0:
            msb_reader.seek(region_offset + base_data["type_data_offset"])
            self.unpack_type_data(msb_reader)

        msb_reader.seek(region_offset + base_data["entity_id_offset"])
        self.entity_id = msb_reader.unpack_value("i")

        return region_offset + base_data["entity_id_offset"]
Example #28
 def _unpack_scene_gparam_data(self, msb_reader: BinaryReader, part_offset, header):
     if header["__scene_gparam_data_offset"] == 0:
         raise ValueError(f"Zero SceneGParam offset found in SceneGParam-supporting part {self.name}.")
     msb_reader.seek(part_offset + header["__scene_gparam_data_offset"])
     scene_gparam_data = msb_reader.unpack_struct(self.PART_SCENE_GPARAM_STRUCT)
     self.set(**scene_gparam_data)
Example #29
 def unpack_type_data(self, msb_reader: BinaryReader):
     """This unpacks simple attributes by default, but some Parts need to process these values more."""
     self.set(**msb_reader.unpack_struct(self.PART_TYPE_DATA_STRUCT, exclude_asserted=True))
Example #30
    def unpack(self, reader: BinaryReader, **kwargs):
        self.byte_order = reader.byte_order = ">" if reader.unpack_value("B", offset=44) == 255 else "<"
        version_info = reader.unpack("bbb", offset=45)
        self.flags1 = ParamFlags1(version_info[0])
        self.flags2 = ParamFlags2(version_info[1])
        self.paramdef_format_version = version_info[2]
        header_struct = self.GET_HEADER_STRUCT(self.flags1, self.byte_order)
        header = reader.unpack_struct(header_struct)
        try:
            self.param_type = header["param_type"]
        except KeyError:
            self.param_type = reader.unpack_string(offset=header["param_type_offset"], encoding="utf-8")
        self.paramdef_data_version = header["paramdef_data_version"]
        self.unknown = header["unknown"]
        # Row data offset in header not used. (It's an unsigned short, yet doesn't limit row count to 5461.)
        name_data_offset = header["name_data_offset"]  # CANNOT BE TRUSTED IN VANILLA FILES! Off by +12 bytes.

        # Load row pointer data.
        row_struct = self.ROW_STRUCT_64 if self.flags1.LongDataOffset else self.ROW_STRUCT_32
        row_pointers = reader.unpack_structs(row_struct, count=header["row_count"])
        row_data_offset = reader.position  # Reliable row data offset.

        # Row size is lazily determined. TODO: Unpack row data in sequence and associate with names separately.
        if len(row_pointers) == 0:
            return
        elif len(row_pointers) == 1:
            # NOTE: The only vanilla param in Dark Souls with one row is LEVELSYNC_PARAM_ST (Remastered only),
            # for which the row size is hard-coded here. Otherwise, we can trust the repacked offset from Soulstruct
            # (and SoulsFormats, etc.).
            if self.param_type == "LEVELSYNC_PARAM_ST":
                row_size = 220
            else:
                row_size = name_data_offset - row_data_offset
        else:
            row_size = row_pointers[1]["data_offset"] - row_pointers[0]["data_offset"]

        # Note that we no longer need to track reader offset.
        name_encoding = self.get_name_encoding()
        for row_pointer in row_pointers:  # renamed to avoid shadowing `row_struct` above
            reader.seek(row_pointer["data_offset"])
            row_data = reader.read(row_size)
            if row_pointer["name_offset"] != 0:
                try:
                    name = reader.unpack_string(
                        offset=row_pointer["name_offset"],
                        encoding=name_encoding,
                        reset_old_offset=False,  # no need to reset
                    )
                except UnicodeDecodeError as ex:
                    if ex.object in self.undecodable_row_names:
                        name = reader.unpack_bytes(
                            offset=row_pointer["name_offset"],
                            reset_old_offset=False,  # no need to reset
                        )
                    else:
                        raise
                except ValueError:
                    reader.seek(row_pointer["name_offset"])
                    _LOGGER.error(
                        f"Error encountered while parsing row name string in {self.param_type}.\n"
                        f"    Header: {header}\n"
                        f"    Row Pointer: {row_pointer}\n"
                        f"    First 30 bytes of name data: {' '.join(f'{x:02x}' for x in reader.read(30))}"
                    )
                    raise
            else:
                name = ""
            self.rows[row_pointer["id"]] = ParamRow(row_data, self.paramdef, name=name)
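The row size inference above relies on row data being stored contiguously; sketched in isolation (the offsets are hypothetical):

    row_pointers = [
        {"id": 1000, "data_offset": 0x140, "name_offset": 0x800},
        {"id": 1001, "data_offset": 0x1B8, "name_offset": 0x80C},
    ]
    # With at least two rows, the gap between consecutive data offsets is the row size.
    row_size = row_pointers[1]["data_offset"] - row_pointers[0]["data_offset"]
    assert row_size == 0x78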