def get_raw_reflexive_offsets(desc, two_byte_offs, four_byte_offs, off=0):
    '''
    Recursively walk the struct descriptor desc, appending the byte
    offset of every byteswap-needing 2-byte field to two_byte_offs and
    every 4-byte field to four_byte_offs.

    Returns the running byte offset after the final field, so nested
    descriptors can continue from where this one left off.
    '''
    if INCLUDE in desc:
        # flatten any INCLUDE'd attributes into a copy of the
        # descriptor so its entries can be indexed directly
        desc = BlockDef.include_attrs(None, dict(desc))
        BlockDef.set_entry_count(None, desc)

    for i in range(desc.get(ENTRIES, 0)):
        entry = desc[i]
        entry_type = entry[TYPE]
        entry_size = entry.get(SIZE, entry_type.size)

        if not entry_type.is_data:
            if entry_type is Pad:
                off += entry_size
            else:
                # nested descriptor: recurse, carrying the running offset
                off = get_raw_reflexive_offsets(
                    entry, two_byte_offs, four_byte_offs, off)
            continue

        big, little = entry_type.big, entry_type.little
        if not (big is little or big.enc == little.enc):
            # endianness is not forced, so this field must be swapped
            if entry_size == 4:
                four_byte_offs.append(off)
            elif entry_size == 2:
                two_byte_offs.append(off)
        off += entry_size

    return off
def __init__(self, def_id, *desc_entries, **kwargs):
    '''
    Initializes a TagDef.

    Pops the TagDef-specific keywords out of kwargs, then forwards
    def_id, all positional arguments, and every remaining keyword
    argument to BlockDef.__init__

    Positional arguments:
    # str: def_id --------- Identifier string used for naming and
                            keeping track of BlockDefs. Used as the
                            NAME entry in the top level of the
                            descriptor if one doesnt exist.

    Optional positional arguments:
    # dict: *desc_entries - Dictionaries formatted as descriptors,
                            keyed under the index they occupy in
                            desc_entries. If no FieldType is supplied
                            under the TYPE keyword, Container is used.
                            Do not also supply a descriptor through
                            the "descriptor" keyword, as doing so
                            raises a TypeError.

    Optional keyword arguments:
    # bool: incomplete ---- The definition is only partially written;
                            editing must be done to a copy of the
                            original data so all undefined data stays
                            intact. Blocks SHOULD NEVER be added to or
                            deleted from data mapped out with an
                            incomplete definition, though this library
                            will not prevent you from doing so.
    # str:  ext ----------- Extension used when writing a Tag to a file.
    # type: tag_cls ------- The Tag class constructor to build
                            instances of.
    '''
    ext = kwargs.pop('ext', self.ext)
    incomplete = kwargs.pop('incomplete', self.incomplete)
    tag_cls = kwargs.pop('tag_cls', self.tag_cls)

    self.ext = str(ext)
    self.incomplete = bool(incomplete)
    self.tag_cls = tag_cls

    BlockDef.__init__(self, def_id, *desc_entries, **kwargs)
def make_dependency_os_block(class_name="NONE", tag_id=0xFFffFFff,
                             tag_path="", tag_path_pointer=0xFFffFFff,
                             block_def=BlockDef(dependency_os())):
    '''
    Same as make_dependency_block, except block_def defaults to a
    BlockDef built from the dependency_os() descriptor.
    '''
    return make_dependency_block(
        class_name=class_name, tag_id=tag_id, tag_path=tag_path,
        tag_path_pointer=tag_path_pointer, block_def=block_def)
def make_dependency_block(class_name="NONE", tag_id=0xFFffFFff, tag_path="",
                          tag_path_pointer=0xFFffFFff,
                          block_def=BlockDef(dependency())):
    '''
    Build a dependency block from block_def, fill its fields in
    from the given arguments, and return it.
    '''
    block = block_def.build()

    # best-effort: ignore failures setting the tag_class enumerator
    # (e.g. when class_name isn't one of its valid options)
    try:
        block.tag_class.set_to(class_name)
    except Exception:
        pass

    # mask the id and pointer down to unsigned 32bit
    block.id = 0xFFffFFff & tag_id
    block.path_pointer = 0xFFffFFff & tag_path_pointer
    block.filepath = str(tag_path)
    return block
def reflexive(name, substruct, max_count=MAX_REFLEXIVE_COUNT, *names, **desc):
    '''
    Macro for creating a reflexive descriptor wrapping substruct.

    Extra positional names become a NAME_MAP on the array descriptor,
    mapping each sanitized name to its index. A DYN_NAME_PATH keyword
    is moved off the reflexive and onto the array descriptor.
    '''
    array_desc = ReflexiveArray(name + "_array",
                                SIZE=".size", MAX=max_count,
                                SUB_STRUCT=substruct,
                                WIDGET=ReflexiveFrame)
    desc.update(INCLUDE=reflexive_struct, STEPTREE=array_desc, SIZE=12)

    if DYN_NAME_PATH in desc:
        # the dynamic name path applies to the array, not the reflexive
        desc[STEPTREE][DYN_NAME_PATH] = desc.pop(DYN_NAME_PATH)

    if names:
        desc[STEPTREE][NAME_MAP] = {
            BlockDef.str_to_name(None, n): i
            for i, n in enumerate(names)}

    return Reflexive(name, **desc)
def make_subdefs(self, replace_subdefs=True):
    '''Delegate to BlockDef.make_subdefs with replace_subdefs
    defaulting to True.'''
    # NOTE(review): the only apparent purpose of this override is the
    # changed default for replace_subdefs — confirm against the
    # signature of BlockDef.make_subdefs.
    BlockDef.make_subdefs(self, replace_subdefs)
import os import struct from time import time from tkinter import * from tkinter.filedialog import askdirectory from traceback import format_exc from supyr_struct.defs.constants import PATHDIV from supyr_struct.defs.block_def import BlockDef from reclaimer.os_v3_hek.defs.coll import fast_coll_def as coll_def from reclaimer.stubbs.defs.coll import fast_coll_def as stubbs_coll_def from reclaimer.common_descs import tag_header_os tag_header_def = BlockDef(tag_header_os) PATHDIV = PATHDIV curr_dir = os.path.abspath(os.curdir) + PATHDIV # maps stubbs material numbers 0-34 to halo names to change them to material_map = ( "dirt", "sand", "stone", "snow", "wood", "metal_hollow", "metal_thin", "metal_thick", "rubber",
import os from collections import OrderedDict from traceback import format_exc from array import array from reclaimer.common_descs import rawdata_ref_struct from reclaimer.field_types import FieldType, RawdataRef, Reflexive, TagRef from supyr_struct.defs.block_def import BlockDef from supyr_struct.tag import Tag from ..wrappers.map_pointer_converter import MapPointerConverter empty_rawdata_def = BlockDef("samples_stub", INCLUDE=rawdata_ref_struct) REMOVED_RSRC_TAG_DIR = "!rem\\" def set_tag_meta_pointers(block, meta_pointer, raw_pointer_base=0): if not (hasattr(block, "desc") and hasattr(block, "__iter__")): return meta_pointer, raw_pointer_base item_indices = list(range(len(block))) if hasattr(block, "STEPTREE"): item_indices.append("STEPTREE") for i in item_indices: typ = block.get_desc("TYPE", i) sub_block = block[i] if typ == RawdataRef: if getattr(sub_block, "data", None): orig_size = sub_block.size
"tag index", UInt32("tag index offset"), UInt32("scenario tag id"), UInt32("map id"), # normally unused, but can be used # for spoofing the maps checksum. UInt32("tag count"), UInt32("vertex parts count"), UInt32("model data offset"), UInt32("index parts count"), UInt32("vertex data size"), UInt32("model data size"), UInt32("tag sig", EDITABLE=False, DEFAULT='tags'), SIZE=40, STEPTREE=tag_index_array) #tag_index_pc = tipc = dict(tag_index_xbox) #tipc['ENTRIES'] += 1; tipc['SIZE'] += 4 #tipc[7] = UInt32("vertex data size") #tipc[9] = tipc[8]; tipc[8] = UInt32("model data size") map_header_def = BlockDef(map_header) map_header_anni_def = BlockDef(map_header, endian=">") map_header_demo_def = BlockDef(map_header_demo) map_header_vap_def = BlockDef(map_header_vap) tag_index_xbox_def = BlockDef(tag_index_xbox) tag_index_pc_def = BlockDef(tag_index_pc) tag_index_anni_def = BlockDef(tag_index_pc, endian=">") map_header_vap_def = BlockDef(map_header_vap)
''' Most byteswapping is handeled by supyr_struct by changing the endianness, but certain chunks of raw data are significantly faster to just write byteswapping routines for, like raw vertex, triangle, and animation data. ''' import array from supyr_struct.field_types import BytearrayRaw from supyr_struct.defs.block_def import BlockDef raw_block_def = BlockDef("raw_block", BytearrayRaw('data', SIZE=lambda node, *a, **kw: 0 if node is None else len(node)) ) def byteswap_raw_reflexive(refl): desc = refl.desc struct_size, two_byte_offs, four_byte_offs = desc.get( "RAW_REFLEXIVE_INFO", (0, (), ())) if not two_byte_offs and not four_byte_offs: return data = refl.STEPTREE refl.STEPTREE = swapped = bytearray(data) for refl_off in range(0, refl.size*struct_size, struct_size): for tmp_off in two_byte_offs: tmp_off += refl_off swapped[tmp_off] = data[tmp_off+1] swapped[tmp_off+1] = data[tmp_off]
from reclaimer.hek.defs.mod2 import mod2_def from reclaimer.hek.defs.mode import mode_def from reclaimer.stubbs.defs.mode import mode_def as stubbs_mode_def from reclaimer.stubbs.defs.coll import coll_def as stubbs_coll_def from reclaimer.common_descs import tag_header from reclaimer.util.matrices import quaternion_to_matrix, Matrix from supyr_struct.buffer import get_rawdata from supyr_struct.defs.block_def import BlockDef from supyr_struct.field_types import FieldType, BytearrayRaw window_base_class = tk.Toplevel if __name__ == "__main__": window_base_class = tk.Tk tag_header_def = BlockDef(tag_header) def undef_size(node, *a, **kwa): if node is None: return 0 return len(node) raw_block_def = BlockDef("raw_block", BytearrayRaw('data', SIZE=undef_size) ) class Permutation(): name = ""
# Reclaimer is free software under the GNU General Public License v3.0. # See LICENSE for more information. # from supyr_struct.defs.block_def import BlockDef from supyr_struct.field_types import QStruct, Struct, Container, Void,\ UBitInt, BitStruct, Float, UInt16, UInt32,\ UInt32Array, UInt16Array, FloatArray # these structure definitions aren't really used in any code, but # are a good way to illustrate the structure of the compressed data. # for uncompressed animations, the data is stored as: # rotation first, then translation, and finally scale keyframe_header_def = BlockDef("keyframe_header", UBitInt("keyframe_count", SIZE=12), UBitInt("keyframe_data_offset", SIZE=20), SIZE=4, TYPE=BitStruct) # compressed quaternion rotation # Bits AND bytes are ordered left to right as most significant to least # bbbbbbbb aaaaaaaa dddddddd cccccccc ffffffff eeeeeeee # iiiiiiii iiiijjjj jjjjjjjj kkkkkkkk kkkkwwww wwwwwwww quaternion48_def = BlockDef( "quaternion48", # you'll need to read the 6 bytes as 3 little endian ints, then bit shift # them together like so: compressed_quat = w2 + (w1<<16) + (w0<<32) UInt16("w0"), UInt16("w1"), UInt16("w2"), SIZE=6,
UInt16("node_size", DEFAULT=20), UInt8("is_valid", DEFAULT=1), UInt8("identifier_zero_invalid"), # zero? UInt16("unused"), UInt32("sig", DEFAULT="d@t@"), UInt16("next_node"), # zero? UInt16("last_node"), BytesRaw("next", SIZE=4), Pointer32("first"), SIZE=56, STEPTREE=WhileArray("nodes", SUB_STRUCT=fast_script_node)) h1_script_syntax_data_os = dict(h1_script_syntax_data) h1_script_syntax_data_os[1] = UInt16("max_nodes", DEFAULT=28501) h1_script_syntax_data_def = BlockDef(h1_script_syntax_data) h1_script_syntax_data_os_def = BlockDef(h1_script_syntax_data_os) def cast_uint32_to_float(uint32, packer=MethodType(pack, "<I"), unpacker=MethodType(unpack, "<f")): return unpacker(packer(uint32))[0] def cast_uint32_to_sint16(uint32): return ((uint32 + 0x8000) % 0x10000) - 0x8000 def cast_uint32_to_sint32(uint32): return ((uint32 + 0x80000000) % 0x100000000) - 0x80000000
256) fast_sbsp_body[40] = raw_reflexive("pathfinding_surfaces", pathfinding_surface, 131072) fast_sbsp_body[41] = raw_reflexive("pathfinding_edges", pathfinding_edge, 262144) fast_sbsp_body[47] = raw_reflexive("markers", marker, 1024, DYN_NAME_PATH='.name') sbsp_meta_header_def = BlockDef( "sbsp_meta_header", # to convert these pointers to offsets, do: pointer - bsp_magic UInt32("meta_pointer"), UInt32("uncompressed_lightmap_materials_count"), UInt32("uncompressed_lightmap_materials_pointer"), # name is a guess UInt32("compressed_lightmap_materials_count"), UInt32("compressed_lightmap_materials_pointer"), # name is a guess UInt32("sig", DEFAULT="sbsp"), SIZE=24, TYPE=QStruct) def get(): return sbsp_def sbsp_def = TagDef( "sbsp", blam_header("sbsp", 5), sbsp_body,
def sanitize_test():
    '''
    Exercise the descriptor sanitization error catching and reporting
    routines: each try block builds a BlockDef from a deliberately
    invalid descriptor and expects a SanitizationError. A raised error
    counts the test as passed; a successful build counts it as failed.
    '''
    # definitions to test sanitize error catching and reporting routines
    try:
        # endianness characters must be one of '<', '>', ''
        _test = BlockDef('test', ENDIAN=None)
        _error_test_fail('endian_test')
    except SanitizationError:
        _error_test_pass('endian_test')

    try:
        # cant have non-struct bit_based data outside a bitstruct
        _test = BlockDef('test', Bit('bit'))
        _error_test_fail('bit_based_test1')
    except SanitizationError:
        _error_test_pass('bit_based_test1')

    try:
        # bitstructs cant contain non-bit_based data
        _test = BlockDef('test', BitStruct('bitstruct', UInt8('int8')))
        _error_test_fail('bit_based_test2')
    except SanitizationError:
        _error_test_pass('bit_based_test2')

    try:
        # bitstructs cannot contain bitstructs
        _test = BlockDef(
            'test', BitStruct('bitstruct_outer', BitStruct('bitstruct_inner')))
        _error_test_fail('bit_based_test3')
    except SanitizationError:
        _error_test_pass('bit_based_test3')

    try:
        # cannot use oe_size fields inside a struct
        _test = BlockDef(
            'test', Struct(
                'struct', StreamAdapter('stream', DECODER=lambda: 0,
                                        SUB_STRUCT=Struct('s'))))
        _error_test_fail('oe_size_test')
    except SanitizationError:
        _error_test_pass('oe_size_test')

    try:
        # cannot use containers inside structs
        _test = BlockDef('test', Struct('struct', Array('array')))
        _error_test_fail('container_inside_struct_test')
    except SanitizationError:
        _error_test_pass('container_inside_struct_test')

    try:
        # variable size data must have its size defined
        _test = BlockDef('test', BytesRaw('data'))
        _error_test_fail('var_size_test1')
    except SanitizationError:
        _error_test_pass('var_size_test1')

    try:
        # var_size data in a struct must have its size statically defined
        _test = BlockDef('test', Struct('struct', BytesRaw('data', SIZE=None)))
        _error_test_fail('var_size_test2')
    except SanitizationError:
        _error_test_pass('var_size_test2')

    try:
        # non-open ended arrays must have a defined size
        _test = BlockDef('test', Array('array', SUB_STRUCT=None))
        _error_test_fail('array_test1')
    except SanitizationError:
        _error_test_pass('array_test1')

    try:
        # arrays must have a SUB_STRUCT entry
        _test = BlockDef('test', Array('array', SIZE=0))
        _error_test_fail('array_test2')
    except SanitizationError:
        _error_test_pass('array_test2')

    try:
        # all fields must be given names
        _test = BlockDef('test', {TYPE: UInt8})
        _error_test_fail('name_test1')
    except SanitizationError:
        _error_test_pass('name_test1')

    try:
        # all names must be strings(dur)
        _test = BlockDef('test', {TYPE: UInt8, NAME: None})
        _error_test_fail('name_test2')
    except SanitizationError:
        _error_test_pass('name_test2')

    try:
        # names cannot be descriptor keywords
        _test = BlockDef(NAME, UInt8('test'))
        _error_test_fail('name_test3')
    except SanitizationError:
        _error_test_pass('name_test3')

    try:
        # names cannot be empty strings
        _test = BlockDef('', UInt8('test'))
        _error_test_fail('name_test4')
    except SanitizationError:
        _error_test_pass('name_test4')
Pad(2), Float("float"), # auto aligns to 16 Double("double"), # auto aligns to 24 StrRawUtf8("str1", SIZE=3), # auto aligns to 32 StrRawUtf16("str2", SIZE=10), # auto aligns to 36 StrRawUtf32("str3", SIZE=4), # auto aligns to 48 Pad(1), Pointer64("pointer1"), # auto aligns to 56 Pad(1), Pointer32("pointer2"), # auto aligns to 68 Pad(1)) # a definition to test automatic structure alignment auto_align_test_def = BlockDef( 'auto_align_test', align_test_struct, # size should pad to 80 bytes when auto alignment happens align_mode=ALIGN_AUTO) no_align_test_def = BlockDef( 'no_align_test', align_test_struct, # size should pad to 54 bytes align_mode=ALIGN_NONE) def auto_align_test(): # test the align test and make sure the automatic alignment works if auto_align_test_def.descriptor[0][SIZE] == 80: print("Passed 'auto_align' test.") pass_fail['pass'] += 1
"lightmap", ), UInt8("tex_page_index"), # page the fullsize image is stored in. # mipmaps are always stored in primary. UEnum32("format", *pixelformat_typecodes) ) s_tag_d3d_texture = Struct("s_tag_d3d_texture", Struct("primary_page_data", INCLUDE=rawdata_ref_struct), Struct("secondary_page_data", INCLUDE=rawdata_ref_struct), Struct("texture", INCLUDE=d3d_texture) ) s_tag_d3d_texture_interleaved = Struct("s_tag_d3d_texture_interleaved", Struct("primary_page_data", INCLUDE=rawdata_ref_struct), Struct("secondary_page_data", INCLUDE=rawdata_ref_struct), Struct("texture0", INCLUDE=d3d_texture), Struct("texture1", INCLUDE=d3d_texture), ) s_tag_d3d_texture_def = BlockDef( s_tag_d3d_texture, endian=">" ) s_tag_d3d_texture_interleaved_def = BlockDef( s_tag_d3d_texture_interleaved, endian=">" )
# A full JFIF image: start marker, a run of streams, end marker.
# NOTE(review): SOI/EOI are presumably the 2-byte JPEG start/end-of-image
# markers — confirm against their definitions elsewhere in this module.
jfif_image = Container('jfif_image',
    BytesRaw('image_start',  # marks the start of a jfif image
             SIZE=2, DEFAULT=SOI),
    WhileArray('jfif_streams',
               CASE=has_next_jfif_stream,
               SUB_STRUCT=jfif_stream),
    BytesRaw('image_end',  # marks the end of a jfif image
             SIZE=2, DEFAULT=EOI)
    )

# 12-byte header preceding the image data in each thumbnail stream
thumb_stream_header = QuickStruct('header',
    UInt32('header_len', DEFAULT=12),
    UInt32('unknown'),  # seems to always be 1
    UInt32('stream_len'),
    )

# fully parses the jfif image stream
thumb_stream_def = BlockDef('thumb_stream',
    thumb_stream_header,
    jfif_image
    )

# a variant structure which doesnt attempt to parse the
# jfif image stream, but rather just treats it as a
# bytes object with a length defined in the header.
fast_thumb_stream_def = BlockDef('fast_thumb_stream',
    thumb_stream_header,
    BytesRaw('data_stream', SIZE='.header.stream_len')
    )

# The directory in a thumbnails file seems to consist of a
# specific pattern of directory entries. This is the pattern:
#   Root Entry
#   1
#   Catalog
from reclaimer.hek.defs.mod2 import mod2_def from reclaimer.hek.defs.mod2 import part as part_desc from supyr_struct.defs.block_def import BlockDef part_def = BlockDef(part_desc, endian=">") import sys import copy import time mod2_ext = ".gbxmodel" # Returns a list of indices for each shader. If there is more than one entry for the same shader # the list entries corresponding to the duplicates will contain the index of the first occurrence. def ListShaderIds(shaders_block): shaders = shaders_block.STEPTREE shader_count = shaders_block.size shader_ids = [0] * shader_count #preallocate list for i in range(shader_count): shader_ids[i] = i for j in range(i): if (shaders[i].shader.filepath == shaders[j].shader.filepath and shaders[i].shader.tag_class == shaders[j].shader.tag_class and shaders[i].permutation_index == shaders[j].permutation_index): shader_ids[i] = j break return shader_ids # Returns a condensed shader block and a list for use when translating # the shader indices in the tag to match the new block def BuildCondensedShaderBlock(shaders_block):