def test_decode(self):
    """Ensures that a simple model can be decoded successfully from JSON."""
    # Arrange
    payload = '{"asset": {"version": "2.1"}, "buffers": [{ "uri": "triangle.bin", "byteLength": 44 }]}'
    # Act
    decoded = GLTFModel.from_json(payload)
    # Assert
    expected = GLTFModel(
        asset=Asset(version='2.1'),
        buffers=[Buffer(uri='triangle.bin', byteLength=44)],
    )
    self.assertEqual(decoded, expected)
def test_to_json_removes_properties_set_to_None(self):
    """
    Ensures that any properties in the model that are set to None are deleted
    when encoding the model to JSON.
    """
    # Arrange
    model = GLTFModel(asset=Asset(generator=None, minVersion=None),
                      buffers=None)
    # Act
    encoded = model.to_json()
    # Assert
    self.assertDictEqual(json.loads(encoded), {'asset': {'version': '2.0'}})
def test_to_json_retains_empty_strings_lists_and_dicts(self):
    """
    Ensures that any properties in the model that are set to an empty string,
    list, or dictionary are retained when encoding the model to JSON.
    """
    # Arrange
    model = GLTFModel(asset=Asset(generator="", minVersion=None),
                      buffers=[],
                      extensions={})
    # Act
    encoded = model.to_json()
    # Assert
    expected = {
        'asset': {'version': '2.0', 'generator': ''},
        'buffers': [],
        'extensions': {},
    }
    self.assertDictEqual(json.loads(encoded), expected)
def test_to_json_removes_empty_properties(self):
    """
    Ensures that any properties in the model that are "empty" (empty strings,
    lists, etc.) are deleted when encoding the model to JSON.
    """
    # Arrange
    model = GLTFModel(asset=Asset(generator='', minVersion=None), buffers=[])
    # Act
    encoded = model.to_json()
    # Assert
    self.assertDictEqual(json.loads(encoded), {'asset': {'version': '2.0'}})
def __init__(self):
    """Initialize an empty glTF model skeleton plus per-file caches."""
    resources = []
    # Start from a model whose collection properties are all present (but
    # empty) so later converters can append without None checks; a single
    # default Sampler is pre-registered.
    model = GLTFModel(
        asset=Asset(version="2.0"),
        scenes=[],
        nodes=[],
        buffers=[],
        bufferViews=[],
        cameras=[],
        images=[],
        materials=[],
        meshes=[],
        samplers=[Sampler()],
        skins=[],
        textures=[],
        extensionsRequired=[],
        extensionsUsed=[],
        accessors=[],
    )
    GLTF0.__init__(self, model, resources)
    # Lookup caches keyed by content hash / filename to avoid duplicating
    # identical assets.
    self.md52PV = {}
    self.fn2node = {}
    self.fn2texture_index = {}
    self.cacheMaterial = {}
def test_asset_version(self):
    """Ensures asset version is retained if a value is passed in"""
    # Act
    result = GLTFModel(asset=Asset(version='2.1'))
    # Assert
    self.assertEqual(result.asset.version, '2.1')
def test_asset_version_default(self):
    """Ensures asset version is initialized as 2.0 if not passed in"""
    # Act
    result = GLTFModel(asset=Asset())
    # Assert
    self.assertEqual(result.asset.version, '2.0')
def test_init(self):
    """
    Basic test ensuring the successful initialization of a GLTF2 model when
    all required properties are passed in. Note the only required property
    in a GLTF2 model is the asset.
    """
    # Act
    result = GLTFModel(asset=Asset())
    # Assert
    self.assertIsInstance(result, GLTFModel)
def test_decode_missing_required_property(self):
    """
    Ensures that an error is raised when decoding a model from JSON if any
    required properties are missing. In this case, the version property on
    the asset is missing.
    """
    # Arrange
    payload = '{}'
    # Act/Assert
    with self.assertRaisesRegex(TypeError, 'version.*Asset'):
        GLTFModel.from_json(payload)
def test_decode_missing_required_property(self):
    """
    Ensures that a warning is emitted when decoding a model from JSON if any
    required properties are missing. In this case, the "asset" property on
    the model is missing.
    """
    # Arrange
    payload = '{}'
    # Act/Assert
    with self.assertWarnsRegex(RuntimeWarning, "non-optional type asset"):
        GLTFModel.from_json(payload)
def _convert_animations(rsm_version: int,
                        frame_rate_per_second: Optional[float],
                        nodes: List[AbstractNode],
                        gltf_model: GLTFModel) -> List[FileResource]:
    """Convert per-node RSM key frames into a single glTF animation.

    Rotation key frames are always converted; scale key frames exist from
    RSM version 0x106 onwards and translation key frames from 0x203.
    Buffers, buffer views, accessors and the Animation are appended to
    ``gltf_model`` in place.

    Parameters
    ----------
    rsm_version : int
        RSM format version; gates which key-frame kinds exist and the
        quaternion component order.
    frame_rate_per_second : Optional[float]
        Source frame rate; a falsy value falls back to a 1 ms frame delay.
    nodes : List[AbstractNode]
        Model nodes whose ``impl`` holds the RSM key-frame data; the index
        of each node in this list is used as the glTF node id.
    gltf_model : GLTFModel
        glTF model being built; mutated in place.

    Returns
    -------
    List[FileResource]
        File resources holding the animation input/output binary data.
    """
    gltf_resources = []
    if frame_rate_per_second:
        delay_between_frames = 1.0 / frame_rate_per_second
    else:
        delay_between_frames = 1.0 / 1000.0
    model_anim = Animation(name="animation", samplers=[], channels=[])

    # Buffer ids are allocated lazily, only when at least one node actually
    # has key frames of the corresponding kind.
    input_buffer_id = None
    input_stream = io.BytesIO()
    rot_buffer_id = None
    rot_output_stream = io.BytesIO()
    scale_buffer_id = None
    scale_output_stream = io.BytesIO()
    pos_buffer_id = None
    pos_output_stream = io.BytesIO()

    for node_id, node in enumerate(nodes):
        rsm_node = node.impl

        # Rotation
        rotation_frame_count = len(rsm_node.rot_key_frames)
        if rotation_frame_count > 0:
            if input_buffer_id is None:
                input_buffer_id = len(gltf_model.buffers)
                gltf_model.buffers.append(Buffer(byteLength=0))
            if rot_buffer_id is None:
                rot_buffer_id = len(gltf_model.buffers)
                gltf_model.buffers.append(Buffer(byteLength=0))
            input_values = [
                delay_between_frames * rot_frame.frame_id
                for rot_frame in rsm_node.rot_key_frames
            ]
            input_view_offset = input_stream.tell()
            input_written = serialize_floats(input_values, input_stream)
            output_view_offset = rot_output_stream.tell()
            output_written = 0
            for frame in rsm_node.rot_key_frames:
                if rsm_version < 0x200:
                    # Older RSM versions store quaternions with the Y and Z
                    # components swapped relative to glTF.
                    gltf_quat = [
                        frame.quaternion[0],
                        frame.quaternion[2],
                        frame.quaternion[1],
                        frame.quaternion[3],
                    ]
                else:
                    gltf_quat = frame.quaternion
                for value in gltf_quat:
                    output_written += rot_output_stream.write(
                        struct.pack('f', value))
            curr_buffer_view_id = len(gltf_model.bufferViews)
            gltf_model.bufferViews += [
                BufferView(buffer=input_buffer_id,
                           byteOffset=input_view_offset,
                           byteLength=input_written),
                BufferView(buffer=rot_buffer_id,
                           byteOffset=output_view_offset,
                           byteLength=output_written)
            ]
            curr_accessor_id = len(gltf_model.accessors)
            gltf_model.accessors += [
                Accessor(bufferView=curr_buffer_view_id,
                         byteOffset=0,
                         componentType=ComponentType.FLOAT.value,
                         count=rotation_frame_count,
                         type=AccessorType.SCALAR.value,
                         min=[min(input_values)],
                         max=[max(input_values)]),
                Accessor(bufferView=curr_buffer_view_id + 1,
                         byteOffset=0,
                         componentType=ComponentType.FLOAT.value,
                         count=rotation_frame_count,
                         type=AccessorType.VEC4.value)
            ]
            rot_sampler = AnimationSampler(input=curr_accessor_id,
                                           output=curr_accessor_id + 1)
            sampler_id = len(model_anim.samplers)
            rot_channel = Channel(sampler=sampler_id,
                                  target=Target(path="rotation",
                                                node=node_id))
            model_anim.samplers.append(rot_sampler)
            model_anim.channels.append(rot_channel)

        # Scale (key frames only exist from RSM 0x106 onwards)
        if rsm_version >= 0x106:
            scale_frame_count = len(rsm_node.scale_key_frames)
            if scale_frame_count > 0:
                if input_buffer_id is None:
                    input_buffer_id = len(gltf_model.buffers)
                    gltf_model.buffers.append(Buffer(byteLength=0))
                if scale_buffer_id is None:
                    scale_buffer_id = len(gltf_model.buffers)
                    gltf_model.buffers.append(Buffer(byteLength=0))
                input_values = [
                    delay_between_frames * scale_frame.frame_id
                    for scale_frame in rsm_node.scale_key_frames
                ]
                input_view_offset = input_stream.tell()
                input_written = serialize_floats(input_values, input_stream)
                output_view_offset = scale_output_stream.tell()
                output_written = 0
                for frame in rsm_node.scale_key_frames:
                    for value in frame.scale:
                        output_written += scale_output_stream.write(
                            struct.pack('f', value))
                curr_buffer_view_id = len(gltf_model.bufferViews)
                gltf_model.bufferViews += [
                    BufferView(buffer=input_buffer_id,
                               byteOffset=input_view_offset,
                               byteLength=input_written),
                    BufferView(buffer=scale_buffer_id,
                               byteOffset=output_view_offset,
                               byteLength=output_written)
                ]
                curr_accessor_id = len(gltf_model.accessors)
                gltf_model.accessors += [
                    Accessor(bufferView=curr_buffer_view_id,
                             byteOffset=0,
                             componentType=ComponentType.FLOAT.value,
                             count=scale_frame_count,
                             type=AccessorType.SCALAR.value,
                             min=[min(input_values)],
                             max=[max(input_values)]),
                    Accessor(bufferView=curr_buffer_view_id + 1,
                             byteOffset=0,
                             componentType=ComponentType.FLOAT.value,
                             count=scale_frame_count,
                             type=AccessorType.VEC3.value)
                ]
                scale_sampler = AnimationSampler(input=curr_accessor_id,
                                                 output=curr_accessor_id + 1)
                sampler_id = len(model_anim.samplers)
                scale_channel = Channel(sampler=sampler_id,
                                        target=Target(path="scale",
                                                      node=node_id))
                model_anim.samplers.append(scale_sampler)
                model_anim.channels.append(scale_channel)

        # Translation (key frames only exist from RSM 0x203 onwards)
        if rsm_version >= 0x203:
            translation_frame_count = len(rsm_node.pos_key_frames)
            if translation_frame_count > 0:
                if input_buffer_id is None:
                    input_buffer_id = len(gltf_model.buffers)
                    gltf_model.buffers.append(Buffer(byteLength=0))
                if pos_buffer_id is None:
                    pos_buffer_id = len(gltf_model.buffers)
                    gltf_model.buffers.append(Buffer(byteLength=0))
                input_values = [
                    delay_between_frames * pos_frame.frame_id
                    for pos_frame in rsm_node.pos_key_frames
                ]
                input_view_offset = input_stream.tell()
                input_written = serialize_floats(input_values, input_stream)
                output_view_offset = pos_output_stream.tell()
                output_written = 0
                for frame in rsm_node.pos_key_frames:
                    for value in frame.position:
                        output_written += pos_output_stream.write(
                            struct.pack('f', value))
                curr_buffer_view_id = len(gltf_model.bufferViews)
                gltf_model.bufferViews += [
                    BufferView(buffer=input_buffer_id,
                               byteOffset=input_view_offset,
                               byteLength=input_written),
                    BufferView(buffer=pos_buffer_id,
                               byteOffset=output_view_offset,
                               byteLength=output_written)
                ]
                curr_accessor_id = len(gltf_model.accessors)
                gltf_model.accessors += [
                    Accessor(bufferView=curr_buffer_view_id,
                             byteOffset=0,
                             componentType=ComponentType.FLOAT.value,
                             count=translation_frame_count,
                             type=AccessorType.SCALAR.value,
                             min=[min(input_values)],
                             max=[max(input_values)]),
                    Accessor(bufferView=curr_buffer_view_id + 1,
                             byteOffset=0,
                             componentType=ComponentType.FLOAT.value,
                             count=translation_frame_count,
                             type=AccessorType.VEC3.value)
                ]
                pos_sampler = AnimationSampler(input=curr_accessor_id,
                                               output=curr_accessor_id + 1)
                sampler_id = len(model_anim.samplers)
                pos_channel = Channel(sampler=sampler_id,
                                      target=Target(path="translation",
                                                    node=node_id))
                model_anim.samplers.append(pos_sampler)
                model_anim.channels.append(pos_channel)

    # Flush accumulated binary data into file resources and patch the
    # lazily-allocated buffers.
    # Bug fix: these checks previously used plain truthiness
    # (`if input_buffer_id:`), which would wrongly skip a buffer that was
    # allocated index 0 (possible when gltf_model.buffers starts empty).
    if input_buffer_id is not None:
        # Add input data
        input_stream.seek(0)
        input_data = input_stream.read()
        input_file_name = "anim_in.bin"
        gltf_resources.append(FileResource(input_file_name, data=input_data))
        gltf_model.buffers[input_buffer_id].uri = input_file_name
        gltf_model.buffers[input_buffer_id].byteLength = len(input_data)
    # Add rotation data
    if rot_buffer_id is not None:
        rot_output_stream.seek(0)
        rot_data = rot_output_stream.read()
        rot_file_name = 'anim_rot.bin'
        gltf_resources.append(FileResource(rot_file_name, data=rot_data))
        gltf_model.buffers[rot_buffer_id].uri = rot_file_name
        gltf_model.buffers[rot_buffer_id].byteLength = len(rot_data)
    # Add scale data
    if scale_buffer_id is not None:
        scale_output_stream.seek(0)
        scale_data = scale_output_stream.read()
        scale_file_name = 'anim_scale.bin'
        gltf_resources.append(
            FileResource(scale_file_name, data=scale_data))
        gltf_model.buffers[scale_buffer_id].uri = scale_file_name
        gltf_model.buffers[scale_buffer_id].byteLength = len(scale_data)
    # Add translation data
    if pos_buffer_id is not None:
        pos_output_stream.seek(0)
        pos_data = pos_output_stream.read()
        pos_file_name = 'anim_pos.bin'
        gltf_resources.append(FileResource(pos_file_name, data=pos_data))
        gltf_model.buffers[pos_buffer_id].uri = pos_file_name
        gltf_model.buffers[pos_buffer_id].byteLength = len(pos_data)

    # NOTE(review): the animation is attached even when no node had key
    # frames (empty samplers/channels) — kept as-is to preserve behavior;
    # confirm whether downstream consumers tolerate an empty animation.
    gltf_model.animations = [model_anim]
    return gltf_resources
def convert_rsm(rsm_file: str,
                data_folder: str = "data",
                glb: bool = False) -> None:
    """
    Converts an RSM file to glTF 2.0

    Parameters
    ----------
    rsm_file : string
        Path to the RSM file to convert
    data_folder : string
        Path to the data folder containing texture files
    glb : boolean
        Export as GLB (single binary file)
    """
    # NOTE(review): CLI entry point — configures logging globally and exits
    # the process on any failure (and on success via the final sys.exit()).
    logging.basicConfig(level=logging.INFO)
    _LOGGER.info(f"Converting RSM file '{rsm_file}'")
    rsm_file_path = Path(rsm_file)
    try:
        rsm_obj = _parse_rsm_file(rsm_file_path)
    except FileNotFoundError:
        _LOGGER.error(f"'{rsm_file_path}' isn't a file or doesn't exist")
        sys.exit(1)
    except ValidationNotEqualError as ex:
        _LOGGER.error(f"Invalid RSM file: {ex}")
        sys.exit(1)
    # Base model skeleton; the texture/node/animation converters below
    # append to these (initially empty) collections in place.
    gltf_model = GLTFModel(
        asset=Asset(version='2.0', generator="rag2gltf"),
        samplers=[
            Sampler(
                magFilter=9729,  # LINEAR
                minFilter=9987,  # LINEAR_MIPMAP_LINEAR
                wrapS=33071,  # CLAMP_TO_EDGE
                wrapT=33071  # CLAMP_TO_EDGE
            )
        ],
        nodes=[],
        meshes=[],
        buffers=[],
        bufferViews=[],
        accessors=[],
        images=[],
        textures=[],
        materials=[])
    gltf_resources: List[FileResource] = []
    _LOGGER.info("Converting textures ...")
    try:
        resources, tex_id_by_node = _convert_textures(rsm_obj,
                                                      Path(data_folder),
                                                      gltf_model)
    except FileNotFoundError as ex:
        _LOGGER.error(f"Cannot find texture file: {ex}")
        sys.exit(1)
    gltf_resources += resources
    _LOGGER.info("Converting 3D model ...")
    nodes = extract_nodes(rsm_obj)
    resources, root_nodes = _convert_nodes(rsm_obj.version, nodes,
                                           tex_id_by_node, gltf_model)
    gltf_model.scenes = [Scene(nodes=root_nodes)]
    gltf_resources += resources
    # Convert animations
    # The per-model frame rate field only exists from RSM version 0x202.
    if rsm_obj.version >= 0x202:
        fps = rsm_obj.frame_rate_per_second
    else:
        fps = None
    _LOGGER.info("Converting animations ...")
    resources = _convert_animations(rsm_obj.version, fps, nodes, gltf_model)
    gltf_resources += resources
    # Output is written to the current working directory, named after the
    # input file (only the extension changes).
    if glb:
        destination_path = rsm_file_path.with_suffix(".glb").name
    else:
        destination_path = rsm_file_path.with_suffix(".gltf").name
    gltf = GLTF(model=gltf_model, resources=gltf_resources)
    gltf.export(destination_path)
    _LOGGER.info(f"Converted model has been saved as '{destination_path}'")
    sys.exit()
def _convert_nodes(
        rsm_version: int, nodes: List[AbstractNode],
        tex_id_by_node: List[List[int]],
        gltf_model: GLTFModel) -> Tuple[List[FileResource], List[int]]:
    """Convert RSM nodes into glTF meshes and nodes.

    Serializes vertex and texture-coordinate data into two shared binary
    buffers, creates one mesh (one primitive per texture) and one glTF node
    per RSM node, and wires up the parent/child hierarchy by node name.

    Returns a tuple of (file resources backing the two vertex buffers,
    ids of the root nodes).
    """
    root_nodes = []
    model_bbox = calculate_model_bounding_box(rsm_version, nodes)
    # Transform matrices are computed top-down starting from root nodes.
    for node in nodes:
        if node.parent is None:
            # NOTE(review): `len(nodes) == 0` is always False inside this
            # loop (the body only runs when nodes is non-empty); possibly
            # `len(nodes) == 1` was intended — confirm against
            # _compute_transform_matrices' third parameter.
            _compute_transform_matrices(rsm_version, node,
                                        len(nodes) == 0, model_bbox)
    nodes_children: Dict[str, List[int]] = {}
    vertex_bytearray = bytearray()
    tex_vertex_bytearray = bytearray()
    byteoffset = 0
    tex_byteoffset = 0
    for node_id, node in enumerate(nodes):
        rsm_node = node.impl
        node_name = decode_string(rsm_node.name)
        # Record this node's id under its parent's name so children can be
        # attached to the corresponding glTF nodes at the end.
        if node.parent is not None:
            parent_name = decode_string(node.parent.impl.name)
            if parent_name in nodes_children:
                nodes_children[parent_name] += [node_id]
            else:
                nodes_children[parent_name] = [node_id]
        # From 0x203 texture ids come from the global per-node table rather
        # than from the node itself.
        if rsm_version >= 0x203:
            node_tex_ids = tex_id_by_node[node_id]
        else:
            node_tex_ids = rsm_node.texture_ids
        vertices_by_texture = _sort_vertices_by_texture(rsm_node,
                                                        node_tex_ids)
        gltf_primitives = []
        for tex_id, vertices in vertices_by_texture.items():
            # Model vertices
            bytelen = _serialize_vertices(vertices[0], vertex_bytearray)
            # Texture vertices
            tex_bytelen = _serialize_vertices(vertices[1],
                                              tex_vertex_bytearray)
            (mins, maxs) = _calculate_vertices_bounds(vertices[0])
            (tex_mins, tex_maxs) = _calculate_vertices_bounds(vertices[1])
            gltf_model.bufferViews.append(
                BufferView(
                    buffer=0,  # Vertices
                    byteOffset=byteoffset,
                    byteLength=bytelen,
                    target=BufferTarget.ARRAY_BUFFER.value))
            gltf_model.bufferViews.append(
                BufferView(
                    buffer=1,  # Texture vertices
                    byteOffset=tex_byteoffset,
                    byteLength=tex_bytelen,
                    target=BufferTarget.ARRAY_BUFFER.value))
            # NOTE(review): despite its name, `buffer_view_id` is
            # len(accessors) and is used both as the new accessor index and
            # as the matching bufferView index. This only works because
            # accessors and bufferViews are appended in lockstep (both start
            # empty and grow by two per texture) — fragile invariant,
            # confirm before reusing elsewhere.
            buffer_view_id = len(gltf_model.accessors)
            gltf_model.accessors.append(
                Accessor(bufferView=buffer_view_id,
                         byteOffset=0,
                         componentType=ComponentType.FLOAT.value,
                         count=len(vertices[0]),
                         type=AccessorType.VEC3.value,
                         min=mins,
                         max=maxs))
            gltf_model.accessors.append(
                Accessor(bufferView=buffer_view_id + 1,
                         byteOffset=0,
                         componentType=ComponentType.FLOAT.value,
                         count=len(vertices[1]),
                         type=AccessorType.VEC2.value,
                         min=tex_mins,
                         max=tex_maxs))
            gltf_primitives.append(
                Primitive(attributes=Attributes(
                    POSITION=buffer_view_id + 0,
                    TEXCOORD_0=buffer_view_id + 1),
                          material=tex_id))
            byteoffset += bytelen
            tex_byteoffset += tex_bytelen
        gltf_model.meshes.append(Mesh(primitives=gltf_primitives))
        # Decompose matrix to TRS
        translation, rotation, scale = decompose_matrix(
            node.gltf_transform_matrix)
        gltf_model.nodes.append(
            Node(name=node_name,
                 mesh=node_id,
                 translation=translation.to_list() if translation else None,
                 rotation=rotation.to_list() if rotation else None,
                 scale=scale.to_list() if scale else None))
        if node.parent is None:
            root_nodes.append(node_id)
    # Register vertex buffers
    vtx_file_name = 'vertices.bin'
    tex_vtx_file_name = 'tex_vertices.bin'
    gltf_resources = [
        FileResource(vtx_file_name, data=vertex_bytearray),
        FileResource(tex_vtx_file_name, data=tex_vertex_bytearray)
    ]
    # NOTE(review): this REPLACES the model's buffer list (it does not
    # append), so it must run before any other converter allocates buffers.
    gltf_model.buffers = [
        Buffer(byteLength=byteoffset, uri=vtx_file_name),
        Buffer(byteLength=tex_byteoffset, uri=tex_vtx_file_name)
    ]
    # Update nodes' children
    # Note(LinkZ): Consume children with `pop` to avoid issues with models
    # containing multiple nodes with the same name
    for gltf_node in gltf_model.nodes:
        gltf_node.children = nodes_children.pop(gltf_node.name, None)
    return gltf_resources, root_nodes
def export_gltf(icon, filename, metadata=None):
    """Export a parsed PS2 save icon as glTF 2.0.

    Produces ``filename`` (the .gltf JSON) plus two sibling resources named
    after its stem: a ``.bin`` holding interleaved vertex data (and, when
    the icon has more than one animation shape, keyframe data) and a
    ``.png`` texture.

    Parameters
    ----------
    icon : parsed icon object (vertices, frames, animation shapes, texture).
    filename : destination path for the .gltf file.
    metadata : optional save metadata; when given, it is normalized and
        embedded into ``model.extras``.
    """
    basename = PurePath(filename).stem
    # Interleaved vertex layout: one position per animation shape, then
    # normal (3f), UV (2f) and vertex color (3f).
    vertex_info_format = ("3f" * icon.animation_shapes) + "3f 2f 3f"
    float_size = struct.calcsize("f")
    animation_speed = 0.1  # seconds per animation frame
    animation_present = icon.animation_shapes > 1

    model_data = bytearray()
    # Per-shape component-wise bounds (required on POSITION accessors).
    mins = {}
    maxs = {}
    for vertex in icon.vertices:
        for j, position in enumerate(vertex.positions):
            if j == 0:
                values_basis = [
                    position.x / 4096, -position.y / 4096, -position.z / 4096
                ]
                values = values_basis
            else:
                # Subtract basis position to compensate for shape keys being
                # relative to basis
                values = [
                    position.x / 4096 - values_basis[0],
                    -position.y / 4096 - values_basis[1],
                    -position.z / 4096 - values_basis[2]
                ]
            if j not in mins:
                mins[j] = values.copy()
                maxs[j] = values.copy()
            else:
                mins[j] = [min(m, v) for m, v in zip(mins[j], values)]
                maxs[j] = [max(m, v) for m, v in zip(maxs[j], values)]
            model_data.extend(struct.pack("3f", *values))
        model_data.extend(
            struct.pack("3f 2f 3f", vertex.normal.x / 4096,
                        -vertex.normal.y / 4096, -vertex.normal.z / 4096,
                        1.0 - (vertex.tex_coord.u / 4096),
                        1.0 - (vertex.tex_coord.v / 4096),
                        vertex.color.r / 255, vertex.color.g / 255,
                        vertex.color.b / 255))

    # Generate animation data if multiple animation shapes are present
    if animation_present:
        animation_offset = len(model_data)
        # Keyframe timestamps: one per frame plus a closing repeat.
        for i in range(icon.frame_count + 1):
            model_data.extend(struct.pack("f", i * animation_speed))
        # Morph-target weights: one row of (shapes - 1) floats per frame,
        # one-hot on the active non-basis shape (shape 0 is the basis).
        for frame in icon.frames + [icon.frames[0]]:
            segment = [struct.pack("f", 0.0)] * (icon.animation_shapes - 1)
            if frame.shape_id != 0:
                segment[frame.shape_id - 1] = struct.pack("f", 1.0)
            for item in segment:
                model_data.extend(item)
        animation_length = len(model_data) - animation_offset

    # Generate texture
    if isinstance(icon.texture, Ps2ico.CompressedTexture):
        image_data = convert_compressed_texture_data(icon.texture.size,
                                                     icon.texture.data)
    elif isinstance(icon.texture, Ps2ico.UncompressedTexture):
        image_data = convert_uncompressed_texture_data(icon.texture.data)
    else:
        # Previously an unknown texture type fell through to a NameError on
        # image_data; fail early with a clear message instead.
        raise TypeError(
            f"Unsupported texture type: {type(icon.texture).__name__}")
    with BytesIO() as png:
        PILImage.frombytes("RGB", (128, 128), image_data).save(png, "png")
        texture_data = png.getvalue()

    # Basic glTF info
    model = GLTFModel()
    model.asset = Asset(version="2.0", generator=f"ico2gltf v{VERSION}")
    model.scenes = [Scene(nodes=[0])]
    model.scene = 0
    model.nodes = [Node(mesh=0)]

    # If present, embed metadata
    if metadata:
        # Normalize title: turn japanese full-width characters into normal
        # ones and insert the line break
        title = unicodedata.normalize("NFKC", metadata.title).rstrip("\x00")
        title = title[:metadata.offset_2nd_line // 2] + "\n" + title[
            metadata.offset_2nd_line // 2:]
        model.extras = {
            "title": title,
            "background_opacity": metadata.bg_opacity / 0x80,
            "background_bottom_left_color": [
                metadata.bg_color_lowerleft.r / 0x80,
                metadata.bg_color_lowerleft.g / 0x80,
                metadata.bg_color_lowerleft.b / 0x80,
                metadata.bg_color_lowerleft.a / 0x80
            ],
            "background_bottom_right_color": [
                metadata.bg_color_lowerright.r / 0x80,
                metadata.bg_color_lowerright.g / 0x80,
                metadata.bg_color_lowerright.b / 0x80,
                metadata.bg_color_lowerright.a / 0x80
            ],
            "background_top_left_color": [
                metadata.bg_color_upperleft.r / 0x80,
                metadata.bg_color_upperleft.g / 0x80,
                metadata.bg_color_upperleft.b / 0x80,
                metadata.bg_color_upperleft.a / 0x80
            ],
            "background_top_right_color": [
                metadata.bg_color_upperright.r / 0x80,
                metadata.bg_color_upperright.g / 0x80,
                metadata.bg_color_upperright.b / 0x80,
                # Bug fix: the alpha component previously read from
                # bg_color_lowerright (copy/paste error).
                metadata.bg_color_upperright.a / 0x80
            ],
            "ambient_color": [
                metadata.light_ambient_color.r,
                metadata.light_ambient_color.g,
                metadata.light_ambient_color.b
            ],
            "light1_direction": [
                metadata.light1_direction.x, metadata.light1_direction.y,
                metadata.light1_direction.z
            ],
            "light1_color": [
                metadata.light1_color.r, metadata.light1_color.g,
                metadata.light1_color.b, metadata.light1_color.a
            ],
            "light2_direction": [
                metadata.light2_direction.x, metadata.light2_direction.y,
                metadata.light2_direction.z
            ],
            "light2_color": [
                metadata.light2_color.r, metadata.light2_color.g,
                metadata.light2_color.b, metadata.light2_color.a
            ],
            "light3_direction": [
                metadata.light3_direction.x, metadata.light3_direction.y,
                metadata.light3_direction.z
            ],
            "light3_color": [
                metadata.light3_color.r, metadata.light3_color.g,
                metadata.light3_color.b, metadata.light3_color.a
            ],
        }

    # Meshes — accessor indices: 0..shapes-1 positions, then normal/uv/color.
    primitive = Primitive(attributes=Attributes(
        POSITION=0,
        NORMAL=icon.animation_shapes,
        TEXCOORD_0=icon.animation_shapes + 1,
        COLOR_0=icon.animation_shapes + 2),
                          material=0)
    if animation_present:
        # Morph targets reference the non-basis position accessors.
        primitive.targets = [{
            "POSITION": i + 1
        } for i in range(icon.animation_shapes - 1)]
    model.meshes = [Mesh(name="Icon", primitives=[primitive])]

    # Buffers
    model.buffers = [
        Buffer(uri=f"{basename}.bin", byteLength=len(model_data)),
        Buffer(uri=f"{basename}.png", byteLength=len(texture_data))
    ]

    # Materials
    model.images = [Image(bufferView=1, mimeType="image/png")]
    model.textures = [Texture(source=0)]
    model.materials = [
        Material(name="Material",
                 pbrMetallicRoughness=PBRMetallicRoughness(
                     baseColorTexture=TextureInfo(index=0),
                     roughnessFactor=1,
                     metallicFactor=0))
    ]

    # Animations
    if animation_present:
        model.animations = [
            Animation(name="Default",
                      samplers=[
                          AnimationSampler(
                              input=icon.animation_shapes + 3,
                              output=icon.animation_shapes + 4,
                              interpolation=Interpolation.LINEAR.value)
                      ],
                      channels=[
                          Channel(sampler=0,
                                  target=Target(node=0, path="weights"))
                      ]),
        ]

    # Buffer Views
    model.bufferViews = [
        BufferView(name="Data",
                   buffer=0,
                   byteStride=struct.calcsize(vertex_info_format),
                   byteLength=len(model_data)),
        BufferView(name="Texture", buffer=1, byteLength=len(texture_data)),
    ]
    if animation_present:
        model.bufferViews.append(
            BufferView(name="Animation",
                       buffer=0,
                       byteOffset=animation_offset,
                       byteLength=animation_length),
        )

    # Accessors
    model.accessors = [
        Accessor(name=f"Position {i}",
                 bufferView=0,
                 byteOffset=i * 3 * float_size,
                 min=mins[i],
                 max=maxs[i],
                 count=len(icon.vertices),
                 componentType=ComponentType.FLOAT.value,
                 type=AccessorType.VEC3.value)
        for i in range(icon.animation_shapes)
    ]
    model.accessors.extend([
        Accessor(name="Normal",
                 bufferView=0,
                 byteOffset=((icon.animation_shapes - 1) * 3 * float_size) +
                 3 * float_size,
                 count=len(icon.vertices),
                 componentType=ComponentType.FLOAT.value,
                 type=AccessorType.VEC3.value),
        Accessor(name="UV",
                 bufferView=0,
                 byteOffset=((icon.animation_shapes - 1) * 3 * float_size) +
                 6 * float_size,
                 count=len(icon.vertices),
                 componentType=ComponentType.FLOAT.value,
                 type=AccessorType.VEC2.value),
        Accessor(name="Color",
                 bufferView=0,
                 byteOffset=((icon.animation_shapes - 1) * 3 * float_size) +
                 8 * float_size,
                 count=len(icon.vertices),
                 componentType=ComponentType.FLOAT.value,
                 type=AccessorType.VEC3.value),
    ])
    if animation_present:
        model.accessors.extend([
            Accessor(name="Animation Time",
                     bufferView=2,
                     byteOffset=0,
                     min=[0.0],
                     max=[(icon.frame_count) * animation_speed],
                     count=(icon.frame_count + 1),
                     componentType=ComponentType.FLOAT.value,
                     type=AccessorType.SCALAR.value),
            Accessor(name="Animation Data",
                     bufferView=2,
                     byteOffset=(icon.frame_count + 1) * float_size,
                     min=[0.0],
                     max=[1.0],
                     count=(icon.frame_count + 1) *
                     (icon.animation_shapes - 1),
                     componentType=ComponentType.FLOAT.value,
                     type=AccessorType.SCALAR.value)
        ])

    resources = [
        FileResource(f"{basename}.bin", data=model_data),
        FileResource(f"{basename}.png", data=texture_data)
    ]
    gltf = GLTF(model=model, resources=resources)
    gltf.export(filename)
def test_init_missing_property(self):
    """Ensures model initialization results in error if a required property is missing"""
    # Act/Assert
    with self.assertRaisesRegex(TypeError, 'asset'):
        GLTFModel()