Example #1
def _convert_animations(rsm_version: int,
                        frame_rate_per_second: Optional[float],
                        nodes: List[AbstractNode],
                        gltf_model: GLTFModel) -> List[FileResource]:
    gltf_resources = []

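    # When no frame rate is provided, fall back to treating key frame IDs as milliseconds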
    if frame_rate_per_second:
        delay_between_frames = 1.0 / frame_rate_per_second
    else:
        delay_between_frames = 1.0 / 1000.0

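    # Build a single glTF animation; key frame times share one input buffer, while
    # rotation, scale and translation outputs each accumulate into their own buffer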
    model_anim = Animation(name="animation", samplers=[], channels=[])
    input_buffer_id = None
    input_stream = io.BytesIO()
    rot_buffer_id = None
    rot_output_stream = io.BytesIO()
    scale_buffer_id = None
    scale_output_stream = io.BytesIO()
    pos_buffer_id = None
    pos_output_stream = io.BytesIO()

    for node_id, node in enumerate(nodes):
        rsm_node = node.impl
        node_name = decode_string(rsm_node.name)

        # Rotation
        rotation_frame_count = len(rsm_node.rot_key_frames)
        if rotation_frame_count > 0:
            if input_buffer_id is None:
                input_buffer_id = len(gltf_model.buffers)
                gltf_model.buffers.append(Buffer(byteLength=0))
            if rot_buffer_id is None:
                rot_buffer_id = len(gltf_model.buffers)
                gltf_model.buffers.append(Buffer(byteLength=0))

            input_values = [
                delay_between_frames * rot_frame.frame_id
                for rot_frame in rsm_node.rot_key_frames
            ]

            input_view_offset = input_stream.tell()
            input_written = serialize_floats(input_values, input_stream)
            output_view_offset = rot_output_stream.tell()
            output_written = 0
            for frame in rsm_node.rot_key_frames:
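                # RSM versions prior to 2.0 store quaternions with the Y and Z
                # components swapped relative to glTF, so reorder them here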
                if rsm_version < 0x200:
                    gltf_quat = [
                        frame.quaternion[0],
                        frame.quaternion[2],
                        frame.quaternion[1],
                        frame.quaternion[3],
                    ]
                else:
                    gltf_quat = frame.quaternion
                for value in gltf_quat:
                    output_written += rot_output_stream.write(
                        struct.pack('f', value))

            curr_buffer_view_id = len(gltf_model.bufferViews)
            gltf_model.bufferViews += [
                BufferView(buffer=input_buffer_id,
                           byteOffset=input_view_offset,
                           byteLength=input_written),
                BufferView(buffer=rot_buffer_id,
                           byteOffset=output_view_offset,
                           byteLength=output_written)
            ]
            curr_accessor_id = len(gltf_model.accessors)
            gltf_model.accessors += [
                Accessor(bufferView=curr_buffer_view_id,
                         byteOffset=0,
                         componentType=ComponentType.FLOAT.value,
                         count=rotation_frame_count,
                         type=AccessorType.SCALAR.value,
                         min=[min(input_values)],
                         max=[max(input_values)]),
                Accessor(bufferView=curr_buffer_view_id + 1,
                         byteOffset=0,
                         componentType=ComponentType.FLOAT.value,
                         count=rotation_frame_count,
                         type=AccessorType.VEC4.value)
            ]

            rot_sampler = AnimationSampler(input=curr_accessor_id,
                                           output=curr_accessor_id + 1)
            sampler_id = len(model_anim.samplers)
            rot_channel = Channel(sampler=sampler_id,
                                  target=Target(path="rotation", node=node_id))

            model_anim.samplers.append(rot_sampler)
            model_anim.channels.append(rot_channel)

        # Scale
        if rsm_version >= 0x106:
            scale_frame_count = len(rsm_node.scale_key_frames)
            if scale_frame_count > 0:
                if input_buffer_id is None:
                    input_buffer_id = len(gltf_model.buffers)
                    gltf_model.buffers.append(Buffer(byteLength=0))
                if scale_buffer_id is None:
                    scale_buffer_id = len(gltf_model.buffers)
                    gltf_model.buffers.append(Buffer(byteLength=0))

                input_values = [
                    delay_between_frames * scale_frame.frame_id
                    for scale_frame in rsm_node.scale_key_frames
                ]

                input_view_offset = input_stream.tell()
                input_written = serialize_floats(input_values, input_stream)
                output_view_offset = scale_output_stream.tell()
                output_written = 0
                for frame in rsm_node.scale_key_frames:
                    for value in frame.scale:
                        output_written += scale_output_stream.write(
                            struct.pack('f', value))

                curr_buffer_view_id = len(gltf_model.bufferViews)
                gltf_model.bufferViews += [
                    BufferView(buffer=input_buffer_id,
                               byteOffset=input_view_offset,
                               byteLength=input_written),
                    BufferView(buffer=scale_buffer_id,
                               byteOffset=output_view_offset,
                               byteLength=output_written)
                ]
                curr_accessor_id = len(gltf_model.accessors)
                gltf_model.accessors += [
                    Accessor(bufferView=curr_buffer_view_id,
                             byteOffset=0,
                             componentType=ComponentType.FLOAT.value,
                             count=scale_frame_count,
                             type=AccessorType.SCALAR.value,
                             min=[min(input_values)],
                             max=[max(input_values)]),
                    Accessor(bufferView=curr_buffer_view_id + 1,
                             byteOffset=0,
                             componentType=ComponentType.FLOAT.value,
                             count=scale_frame_count,
                             type=AccessorType.VEC3.value)
                ]

                scale_sampler = AnimationSampler(input=curr_accessor_id,
                                                 output=curr_accessor_id + 1)
                sampler_id = len(model_anim.samplers)
                scale_channel = Channel(sampler=sampler_id,
                                        target=Target(path="scale",
                                                      node=node_id))

                model_anim.samplers.append(scale_sampler)
                model_anim.channels.append(scale_channel)

        # Translation
        if rsm_version >= 0x203:
            translation_frame_count = len(rsm_node.pos_key_frames)
            if translation_frame_count > 0:
                if input_buffer_id is None:
                    input_buffer_id = len(gltf_model.buffers)
                    gltf_model.buffers.append(Buffer(byteLength=0))
                if pos_buffer_id is None:
                    pos_buffer_id = len(gltf_model.buffers)
                    gltf_model.buffers.append(Buffer(byteLength=0))

                input_values = [
                    delay_between_frames * pos_frame.frame_id
                    for pos_frame in rsm_node.pos_key_frames
                ]

                input_view_offset = input_stream.tell()
                input_written = serialize_floats(input_values, input_stream)
                output_view_offset = pos_output_stream.tell()
                output_written = 0
                for frame in rsm_node.pos_key_frames:
                    for value in frame.position:
                        output_written += pos_output_stream.write(
                            struct.pack('f', value))

                curr_buffer_view_id = len(gltf_model.bufferViews)
                gltf_model.bufferViews += [
                    BufferView(buffer=input_buffer_id,
                               byteOffset=input_view_offset,
                               byteLength=input_written),
                    BufferView(buffer=pos_buffer_id,
                               byteOffset=output_view_offset,
                               byteLength=output_written)
                ]
                curr_accessor_id = len(gltf_model.accessors)
                gltf_model.accessors += [
                    Accessor(bufferView=curr_buffer_view_id,
                             byteOffset=0,
                             componentType=ComponentType.FLOAT.value,
                             count=translation_frame_count,
                             type=AccessorType.SCALAR.value,
                             min=[min(input_values)],
                             max=[max(input_values)]),
                    Accessor(bufferView=curr_buffer_view_id + 1,
                             byteOffset=0,
                             componentType=ComponentType.FLOAT.value,
                             count=translation_frame_count,
                             type=AccessorType.VEC3.value)
                ]

                pos_sampler = AnimationSampler(input=curr_accessor_id,
                                               output=curr_accessor_id + 1)
                sampler_id = len(model_anim.samplers)
                pos_channel = Channel(sampler=sampler_id,
                                      target=Target(path="translation",
                                                    node=node_id))

                model_anim.samplers.append(pos_sampler)
                model_anim.channels.append(pos_channel)

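    # Flush the accumulated streams into external .bin resources and point the
    # corresponding glTF buffers at them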
    if input_buffer_id is not None:
        # Add input data
        input_stream.seek(0)
        input_data = input_stream.read()
        input_file_name = "anim_in.bin"
        gltf_resources.append(FileResource(input_file_name, data=input_data))
        gltf_model.buffers[input_buffer_id].uri = input_file_name
        gltf_model.buffers[input_buffer_id].byteLength = len(input_data)

        # Add rotation data
        if rot_buffer_id is not None:
            rot_output_stream.seek(0)
            rot_data = rot_output_stream.read()
            rot_file_name = 'anim_rot.bin'
            gltf_resources.append(FileResource(rot_file_name, data=rot_data))
            gltf_model.buffers[rot_buffer_id].uri = rot_file_name
            gltf_model.buffers[rot_buffer_id].byteLength = len(rot_data)

        # Add scale data
        if scale_buffer_id is not None:
            scale_output_stream.seek(0)
            scale_data = scale_output_stream.read()
            scale_file_name = 'anim_scale.bin'
            gltf_resources.append(
                FileResource(scale_file_name, data=scale_data))
            gltf_model.buffers[scale_buffer_id].uri = scale_file_name
            gltf_model.buffers[scale_buffer_id].byteLength = len(scale_data)

        # Add translation data
        if pos_buffer_id is not None:
            pos_output_stream.seek(0)
            pos_data = pos_output_stream.read()
            pos_file_name = 'anim_pos.bin'
            gltf_resources.append(FileResource(pos_file_name, data=pos_data))
            gltf_model.buffers[pos_buffer_id].uri = pos_file_name
            gltf_model.buffers[pos_buffer_id].byteLength = len(pos_data)

        gltf_model.animations = [model_anim]

    return gltf_resources
Example #2
def export_gltf(icon, filename, metadata=None):
    basename = PurePath(filename).stem

    vertex_info_format = ("3f" * icon.animation_shapes) + "3f 2f 3f"
    float_size = struct.calcsize("f")
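    # Time step between animation key frames, in seconds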
    animation_speed = 0.1

    animation_present = icon.animation_shapes > 1

    model_data = bytearray()

    mins = {}
    maxs = {}

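    # Convert fixed-point vertex data (1/4096 units) to floats, flipping the Y and Z
    # axes; non-basis shape key positions are stored relative to the basis shape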
    for i, vertex in enumerate(icon.vertices):
        for j, position in enumerate(vertex.positions):
            if j == 0:
                values_basis = [
                    position.x / 4096, -position.y / 4096, -position.z / 4096
                ]
                values = values_basis
            else:
                # Subtract basis position to compensate for shape keys being relative to basis
                values = [
                    position.x / 4096 - values_basis[0],
                    -position.y / 4096 - values_basis[1],
                    -position.z / 4096 - values_basis[2]
                ]

            if j not in mins:
                mins[j] = values.copy()
            else:
                mins[j] = [min(m, v) for m, v in zip(mins[j], values)]

            if j not in maxs:
                maxs[j] = values.copy()
            else:
                maxs[j] = [max(m, v) for m, v in zip(maxs[j], values)]

            model_data.extend(struct.pack("3f", *values))

        model_data.extend(
            struct.pack("3f 2f 3f", vertex.normal.x / 4096,
                        -vertex.normal.y / 4096, -vertex.normal.z / 4096,
                        1.0 - (vertex.tex_coord.u / 4096),
                        1.0 - (vertex.tex_coord.v / 4096),
                        vertex.color.r / 255, vertex.color.g / 255,
                        vertex.color.b / 255))

    # Generate animation data if multiple animation shapes are present

    if animation_present:
        animation_offset = len(model_data)

        for i in range(icon.frame_count + 1):
            model_data.extend(struct.pack("f", i * animation_speed))

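        # One row of morph target weights per key frame (the first frame is repeated
        # to close the loop); the active shape gets weight 1.0, all others 0.0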
        for i, frame in enumerate(icon.frames + [icon.frames[0]]):
            segment = [struct.pack("f", 0.0)] * (icon.animation_shapes - 1)

            if frame.shape_id != 0:
                segment[frame.shape_id - 1] = struct.pack("f", 1.0)

            for item in segment:
                model_data.extend(item)

        animation_length = len(model_data) - animation_offset

    # Generate texture

    if isinstance(icon.texture, Ps2ico.CompressedTexture):
        image_data = convert_compressed_texture_data(icon.texture.size,
                                                     icon.texture.data)
    elif isinstance(icon.texture, Ps2ico.UncompressedTexture):
        image_data = convert_uncompressed_texture_data(icon.texture.data)
    else:
        raise TypeError(f"Unsupported texture type: {type(icon.texture)}")

    with BytesIO() as png:
        PILImage.frombytes("RGB", (128, 128), image_data).save(png, "png")
        texture_data = png.getvalue()

    # Basic glTF info

    model = GLTFModel()

    model.asset = Asset(version="2.0", generator=f"ico2gltf v{VERSION}")

    model.scenes = [Scene(nodes=[0])]

    model.scene = 0

    model.nodes = [Node(mesh=0)]

    # If present, embed metadata

    if metadata:
        # Normalize the title: convert Japanese full-width characters to their regular
        # equivalents and insert the second-line break
        title = unicodedata.normalize("NFKC", metadata.title).rstrip("\x00")
        line_break = metadata.offset_2nd_line // 2
        title = title[:line_break] + "\n" + title[line_break:]

        model.extras = {
            "title": title,
            "background_opacity": metadata.bg_opacity / 0x80,
            "background_bottom_left_color": [
                metadata.bg_color_lowerleft.r / 0x80,
                metadata.bg_color_lowerleft.g / 0x80,
                metadata.bg_color_lowerleft.b / 0x80,
                metadata.bg_color_lowerleft.a / 0x80
            ],
            "background_bottom_right_color": [
                metadata.bg_color_lowerright.r / 0x80,
                metadata.bg_color_lowerright.g / 0x80,
                metadata.bg_color_lowerright.b / 0x80,
                metadata.bg_color_lowerright.a / 0x80
            ],
            "background_top_left_color": [
                metadata.bg_color_upperleft.r / 0x80,
                metadata.bg_color_upperleft.g / 0x80,
                metadata.bg_color_upperleft.b / 0x80,
                metadata.bg_color_upperleft.a / 0x80
            ],
            "background_top_right_color": [
                metadata.bg_color_upperright.r / 0x80,
                metadata.bg_color_upperright.g / 0x80,
                metadata.bg_color_upperright.b / 0x80,
                metadata.bg_color_upperright.a / 0x80
            ],
            "ambient_color": [
                metadata.light_ambient_color.r, metadata.light_ambient_color.g,
                metadata.light_ambient_color.b
            ],
            "light1_direction": [
                metadata.light1_direction.x, metadata.light1_direction.y,
                metadata.light1_direction.z
            ],
            "light1_color": [
                metadata.light1_color.r, metadata.light1_color.g,
                metadata.light1_color.b, metadata.light1_color.a
            ],
            "light2_direction": [
                metadata.light2_direction.x, metadata.light2_direction.y,
                metadata.light2_direction.z
            ],
            "light2_color": [
                metadata.light2_color.r, metadata.light2_color.g,
                metadata.light2_color.b, metadata.light2_color.a
            ],
            "light3_direction": [
                metadata.light3_direction.x, metadata.light3_direction.y,
                metadata.light3_direction.z
            ],
            "light3_color": [
                metadata.light3_color.r, metadata.light3_color.g,
                metadata.light3_color.b, metadata.light3_color.a
            ],
        }

    # Meshes

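    # Accessor layout: index 0 is the basis POSITION, indices 1..animation_shapes-1
    # are the morph target positions, followed by NORMAL, TEXCOORD_0 and COLOR_0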
    primitive = Primitive(attributes=Attributes(
        POSITION=0,
        NORMAL=icon.animation_shapes,
        TEXCOORD_0=icon.animation_shapes + 1,
        COLOR_0=icon.animation_shapes + 2),
                          material=0)

    if animation_present:
        primitive.targets = [{
            "POSITION": i + 1
        } for i in range(icon.animation_shapes - 1)]

    model.meshes = [Mesh(name="Icon", primitives=[primitive])]

    # Buffers

    model.buffers = [
        Buffer(uri=f"{basename}.bin", byteLength=len(model_data)),
        Buffer(uri=f"{basename}.png", byteLength=len(texture_data))
    ]

    # Materials

    model.images = [Image(bufferView=1, mimeType="image/png")]

    model.textures = [Texture(source=0)]

    model.materials = [
        Material(name="Material",
                 pbrMetallicRoughness=PBRMetallicRoughness(
                     baseColorTexture=TextureInfo(index=0),
                     roughnessFactor=1,
                     metallicFactor=0))
    ]

    # Animations

    if animation_present:
        model.animations = [
            Animation(name="Default",
                      samplers=[
                          AnimationSampler(
                              input=icon.animation_shapes + 3,
                              output=icon.animation_shapes + 4,
                              interpolation=Interpolation.LINEAR.value)
                      ],
                      channels=[
                          Channel(sampler=0,
                                  target=Target(node=0, path="weights"))
                      ]),
        ]

    # Buffer Views

    model.bufferViews = [
        BufferView(name="Data",
                   buffer=0,
                   byteStride=struct.calcsize(vertex_info_format),
                   byteLength=len(model_data)),
        BufferView(name="Texture", buffer=1, byteLength=len(texture_data)),
    ]

    if animation_present:
        model.bufferViews.append(
            BufferView(name="Animation",
                       buffer=0,
                       byteOffset=animation_offset,
                       byteLength=animation_length), )

    # Accessors

    model.accessors = [
        Accessor(name=f"Position {i}",
                 bufferView=0,
                 byteOffset=i * 3 * float_size,
                 min=mins[i],
                 max=maxs[i],
                 count=len(icon.vertices),
                 componentType=ComponentType.FLOAT.value,
                 type=AccessorType.VEC3.value)
        for i in range(icon.animation_shapes)
    ]

    model.accessors.extend([
        Accessor(name="Normal",
                 bufferView=0,
                 byteOffset=((icon.animation_shapes - 1) * 3 * float_size) +
                 3 * float_size,
                 count=len(icon.vertices),
                 componentType=ComponentType.FLOAT.value,
                 type=AccessorType.VEC3.value),
        Accessor(name="UV",
                 bufferView=0,
                 byteOffset=((icon.animation_shapes - 1) * 3 * float_size) +
                 6 * float_size,
                 count=len(icon.vertices),
                 componentType=ComponentType.FLOAT.value,
                 type=AccessorType.VEC2.value),
        Accessor(name="Color",
                 bufferView=0,
                 byteOffset=((icon.animation_shapes - 1) * 3 * float_size) +
                 8 * float_size,
                 count=len(icon.vertices),
                 componentType=ComponentType.FLOAT.value,
                 type=AccessorType.VEC3.value),
    ])

    if animation_present:
        model.accessors.extend([
            Accessor(name="Animation Time",
                     bufferView=2,
                     byteOffset=0,
                     min=[0.0],
                     max=[(icon.frame_count) * animation_speed],
                     count=(icon.frame_count + 1),
                     componentType=ComponentType.FLOAT.value,
                     type=AccessorType.SCALAR.value),
            Accessor(name="Animation Data",
                     bufferView=2,
                     byteOffset=(icon.frame_count + 1) * float_size,
                     min=[0.0],
                     max=[1.0],
                     count=(icon.frame_count + 1) *
                     (icon.animation_shapes - 1),
                     componentType=ComponentType.FLOAT.value,
                     type=AccessorType.SCALAR.value)
        ])

    resources = [
        FileResource(f"{basename}.bin", data=model_data),
        FileResource(f"{basename}.png", data=texture_data)
    ]

    gltf = GLTF(model=model, resources=resources)
    gltf.export(filename)