# Imports assumed by these excerpts. Module paths follow the Blender glTF 2.0
# exporter add-on (io_scene_gltf2); exact locations may vary between versions.
import math
import typing

import bpy
import mathutils

from io_scene_gltf2.io.com import gltf2_io
from io_scene_gltf2.blender.exp import gltf2_blender_get
from io_scene_gltf2.io.exp.gltf2_io_user_extensions import export_user_extensions
from io_scene_gltf2.blender.exp import gltf2_blender_export_keys  # used by the last example


def gather_animation_sampler(channels: typing.Tuple[bpy.types.FCurve],
                             blender_object: bpy.types.Object,
                             bake_bone: typing.Union[str, None],
                             bake_channel: typing.Union[str, None],
                             bake_range_start,
                             bake_range_end,
                             action_name: str,
                             driver_obj,
                             export_settings
                             ) -> gltf2_io.AnimationSampler:
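    # A glTF animation sampler pairs keyframe times ("input") with sampled
    # values ("output") for one animated property of one node.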

    blender_object_if_armature = blender_object if blender_object.type == "ARMATURE" else None
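    # Resolve the pose bone targeted by the F-Curves (or the explicitly baked
    # bone); drivers and non-armature objects are handled at object level.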
    if blender_object_if_armature is not None and driver_obj is None:
        if bake_bone is None:
            pose_bone_if_armature = gltf2_blender_get.get_object_from_datapath(blender_object_if_armature,
                                                                               channels[0].data_path)
        else:
            pose_bone_if_armature = blender_object.pose.bones[bake_bone]
    else:
        pose_bone_if_armature = None
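    # Components of the animated property that have no F-Curve still need
    # values in the export, so collect them from the current object/bone state.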
    non_keyed_values = __gather_non_keyed_values(channels, blender_object,
                                                 blender_object_if_armature, pose_bone_if_armature,
                                                 bake_channel,
                                                 driver_obj,
                                                 export_settings)


    sampler = gltf2_io.AnimationSampler(
        extensions=__gather_extensions(channels, blender_object_if_armature, export_settings, bake_bone, bake_channel),
        extras=__gather_extras(channels, blender_object_if_armature, export_settings, bake_bone, bake_channel),
        input=__gather_input(channels, blender_object_if_armature, non_keyed_values,
                             bake_bone, bake_channel, bake_range_start, bake_range_end, action_name, driver_obj, export_settings),
        interpolation=__gather_interpolation(channels, blender_object_if_armature, export_settings, bake_bone, bake_channel),
        output=__gather_output(channels, blender_object.matrix_parent_inverse.copy().freeze(),
                               blender_object_if_armature,
                               non_keyed_values,
                               bake_bone,
                               bake_channel,
                               bake_range_start,
                               bake_range_end,
                               action_name,
                               driver_obj,
                               export_settings)
    )

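    # Give registered user extensions a chance to post-process the sampler.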
    export_user_extensions('gather_animation_sampler_hook',
                            export_settings,
                            sampler,
                            channels,
                            blender_object,
                            bake_bone,
                            bake_channel,
                            bake_range_start,
                            bake_range_end,
                            action_name)

    return sampler
Example #2
def gather_animation_sampler(channels: typing.Tuple[bpy.types.FCurve],
                             blender_object: bpy.types.Object,
                             export_settings
                             ) -> gltf2_io.AnimationSampler:
    return gltf2_io.AnimationSampler(
        extensions=__gather_extensions(channels, blender_object, export_settings),
        extras=__gather_extras(channels, blender_object, export_settings),
        input=__gather_input(channels, blender_object, export_settings),
        interpolation=__gather_interpolation(channels, blender_object, export_settings),
        output=__gather_output(channels, blender_object, export_settings)
    )
Example #3
def gather_animation_sampler(action_group: bpy.types.ActionGroup,
                             blender_object: bpy.types.Object,
                             export_settings) -> gltf2_io.AnimationSampler:
    return gltf2_io.AnimationSampler(
        extensions=__gather_extensions(action_group, blender_object,
                                       export_settings),
        extras=__gather_extras(action_group, blender_object, export_settings),
        input=__gather_input(action_group, blender_object, export_settings),
        interpolation=__gather_interpolation(action_group, blender_object,
                                             export_settings),
        output=__gather_output(action_group, blender_object, export_settings))
Example #4
def gather_animation_sampler(channels: typing.Tuple[bpy.types.FCurve],
                             blender_object: bpy.types.Object,
                             export_settings
                             ) -> gltf2_io.AnimationSampler:
    return gltf2_io.AnimationSampler(
        extensions=__gather_extensions(channels, blender_object, export_settings),
        extras=__gather_extras(channels, blender_object, export_settings),
        input=__gather_input(channels, export_settings),
        interpolation=__gather_interpolation(channels, blender_object, export_settings),
        output=__gather_output(channels, blender_object.matrix_parent_inverse.copy().freeze(),
                               blender_object if blender_object.type == "ARMATURE" else None,
                               export_settings)
    )
def __gather_animation(pre_anim, nodes, export_settings):
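    # Bakes one named NLA animation: solo the matching tracks, sample every
    # animated node's TRS at each frame step, then emit glTF samplers/channels.
    # (node.__blender_data and node.__temp_nla_track are attached to the glTF
    # nodes earlier by this exporter.)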
    anim_name, (frame_start, frame_end) = pre_anim
    print(f'exporting animation track {anim_name}')

    # Star all the tracks named anim_name. Objects without an anim_name
    # track are considered unanimated. Star the empty temp track for those.
    anim_nodes = []
    for node in nodes:
        if node.__blender_data[0] not in ('OBJECT', 'BONE'):
            continue
        ob = node.__blender_data[1]
        if not ob.animation_data:
            continue
        for track in ob.animation_data.nla_tracks:
            if track.name == anim_name:
                track.is_solo = True
                if ob.type == 'ARMATURE' and node.__blender_data[0] == 'BONE':
                    # only append bones that are animated in current anim
                    actions = __get_blender_actions(ob)
                    for action in actions:
                        if action[1] == anim_name:
                            for fcurve in action[0].fcurves:
                                bone_path = fcurve.data_path.rpartition('.')[0]
                                bone = ob.path_resolve(bone_path)
                                if node.name == bone.name:
                                    if node not in anim_nodes:
                                        anim_nodes.append(node)
                else:
                    anim_nodes.append(node)
                break
        else:
            if node.__blender_data[0] == 'OBJECT':
                node.__temp_nla_track.is_solo = True

    f_start = math.floor(frame_start)
    f_end = math.ceil(frame_end) + 1
    f_step = export_settings['gltf_frame_step']

    # Stores TRS values for each node at each frame
    data = {}
    data['translation'] = [[] for _node in nodes]
    data['rotation'] = [[] for _node in nodes]
    data['scale'] = [[] for _node in nodes]

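    # Step the scene through the frame range; frame_set() re-evaluates the
    # soloed NLA tracks, so the sampled TRS reflects only this animation.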
    for f in range(f_start, f_end, f_step):
        bpy.context.scene.frame_set(f)
        for i, node in enumerate(anim_nodes):
            if node.__blender_data[0] == 'OBJECT':
                t, r, s = __get_gltf_trs_from_object(node.__blender_data[1],
                                                     export_settings)
            elif node.__blender_data[0] == 'BONE':
                arma_ob = node.__blender_data[1]
                pbone = arma_ob.pose.bones[node.__blender_data[2]]
                t, r, s = __get_gltf_trs_from_bone(pbone, export_settings)
            else:
                assert False
            data['translation'][i].append(t)
            data['rotation'][i].append(r)
            data['scale'][i].append(s)

    # Put it all together to get the glTF animation

    channels = []
    samplers = []

    for i, node in enumerate(anim_nodes):
        # Get paths used in the NLA track
        actions = __get_blender_actions(node.__blender_data[1])
        paths = []

        pathTypes = {
            'delta_location': 'translation',
            'delta_rotation_euler': 'rotation',
            'location': 'translation',
            'rotation_axis_angle': 'rotation',
            'rotation_euler': 'rotation',
            'rotation_quaternion': 'rotation',
            'scale': 'scale'
        }

        for action in actions:
            if action[1] == anim_name:
                for fcurve in action[0].fcurves:
                    data_path = fcurve.data_path
                    if node.__blender_data[0] == 'OBJECT':
                        paths.append(pathTypes.get(data_path))
                    else:  # for armatures
                        paths.append(
                            pathTypes.get(data_path.rpartition('.')[2]))

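        # Emit one sampler + channel per TRS path that this action actually keys.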
        for path in ['translation', 'rotation', 'scale']:
            if path in paths:
                sampler = gltf2_io.AnimationSampler(
                    input=__get_keyframe_accessor(f_start, f_end, f_step),
                    output=__encode_output_accessor(data[path][i], path),
                    interpolation='LINEAR',
                    extensions=None,
                    extras=None,
                )
                samplers.append(sampler)
                channel = gltf2_io.AnimationChannel(
                    sampler=len(samplers) - 1,
                    target=gltf2_io.AnimationChannelTarget(
                        node=node,
                        path=path,
                        extensions=None,
                        extras=None,
                    ),
                    extensions=None,
                    extras=None,
                )
                channels.append(channel)

    animation = gltf2_io.Animation(
        name=anim_name,
        channels=channels,
        samplers=samplers,
        extensions=None,
        extras=None,
    )

    return animation
def gather_animation_sampler(channels: typing.Tuple[bpy.types.FCurve],
                             obj_uuid: str, bake_bone: typing.Union[str, None],
                             bake_channel: typing.Union[str, None],
                             bake_range_start, bake_range_end,
                             force_range: bool, action_name: str,
                             driver_obj_uuid, node_channel_is_animated: bool,
                             need_rotation_correction,
                             export_settings) -> typing.Optional[gltf2_io.AnimationSampler]:

    blender_object = export_settings['vtree'].nodes[obj_uuid].blender_object
    is_armature = blender_object.type == "ARMATURE"
    blender_object_if_armature = blender_object if is_armature else None
    if is_armature and driver_obj_uuid is None:
        if bake_bone is None:
            pose_bone_if_armature = gltf2_blender_get.get_object_from_datapath(
                blender_object_if_armature, channels[0].data_path)
        else:
            pose_bone_if_armature = blender_object.pose.bones[bake_bone]
    else:
        pose_bone_if_armature = None
    non_keyed_values = __gather_non_keyed_values(channels, blender_object,
                                                 blender_object_if_armature,
                                                 pose_bone_if_armature,
                                                 bake_channel, driver_obj_uuid,
                                                 export_settings)
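    # Blender stores the parent inverse outside the object's local transform;
    # glTF has no equivalent, so it is folded into the sampled output values.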
    if blender_object.parent is not None:
        matrix_parent_inverse = blender_object.matrix_parent_inverse.copy().freeze()
    else:
        matrix_parent_inverse = mathutils.Matrix.Identity(4).freeze()

    input = __gather_input(channels, obj_uuid, is_armature, non_keyed_values,
                           bake_bone, bake_channel, bake_range_start,
                           bake_range_end, force_range, action_name,
                           driver_obj_uuid, node_channel_is_animated,
                           export_settings)

    if input is None:
        # After check, no need to animate this node for this channel
        return None

    sampler = gltf2_io.AnimationSampler(
        extensions=__gather_extensions(channels, blender_object_if_armature,
                                       export_settings, bake_bone,
                                       bake_channel),
        extras=__gather_extras(channels, blender_object_if_armature,
                               export_settings, bake_bone, bake_channel),
        input=input,
        interpolation=__gather_interpolation(channels,
                                             blender_object_if_armature,
                                             export_settings, bake_bone,
                                             bake_channel),
        output=__gather_output(channels, matrix_parent_inverse, obj_uuid,
                               is_armature, non_keyed_values, bake_bone,
                               bake_channel, bake_range_start, bake_range_end,
                               force_range, action_name, driver_obj_uuid,
                               node_channel_is_animated,
                               need_rotation_correction, export_settings))

    export_user_extensions('gather_animation_sampler_hook', export_settings,
                           sampler, channels, blender_object, bake_bone,
                           bake_channel, bake_range_start, bake_range_end,
                           action_name)

    return sampler
Example #7
def gather_animation_sampler(channels: typing.Tuple[bpy.types.FCurve],
                             blender_object: bpy.types.Object,
                             bake_bone: typing.Union[str, None],
                             bake_channel: typing.Union[str, None],
                             bake_range_start, bake_range_end,
                             action_name: str, driver_obj,
                             export_settings) -> gltf2_io.AnimationSampler:

    if blender_object.animation_data and blender_object.animation_data.nla_tracks and export_settings[
            gltf2_blender_export_keys.FRAME_RANGE]:
        # Attempt to adjust the bake range to match the nla track strip that matches the action_name.
        for track in blender_object.animation_data.nla_tracks:
            if not track.strips:
                continue
            for strip in track.strips:
                if strip.name == action_name:
                    bake_range_start = strip.frame_start
                    bake_range_end = strip.frame_end
                    break

    blender_object_if_armature = blender_object if blender_object.type == "ARMATURE" else None
    if blender_object_if_armature is not None and driver_obj is None:
        if bake_bone is None:
            pose_bone_if_armature = gltf2_blender_get.get_object_from_datapath(
                blender_object_if_armature, channels[0].data_path)
        else:
            pose_bone_if_armature = blender_object.pose.bones[bake_bone]
    else:
        pose_bone_if_armature = None
    non_keyed_values = __gather_non_keyed_values(channels, blender_object,
                                                 blender_object_if_armature,
                                                 pose_bone_if_armature,
                                                 bake_channel, driver_obj,
                                                 export_settings)
    if blender_object.parent is not None:
        matrix_parent_inverse = blender_object.matrix_parent_inverse.copy().freeze()
    else:
        matrix_parent_inverse = mathutils.Matrix.Identity(4).freeze()

    sampler = gltf2_io.AnimationSampler(
        extensions=__gather_extensions(channels, blender_object_if_armature,
                                       export_settings, bake_bone,
                                       bake_channel),
        extras=__gather_extras(channels, blender_object_if_armature,
                               export_settings, bake_bone, bake_channel),
        input=__gather_input(channels, blender_object_if_armature,
                             non_keyed_values, bake_bone, bake_channel,
                             bake_range_start, bake_range_end, action_name,
                             driver_obj, export_settings),
        interpolation=__gather_interpolation(channels,
                                             blender_object_if_armature,
                                             export_settings, bake_bone,
                                             bake_channel),
        output=__gather_output(channels, matrix_parent_inverse,
                               blender_object_if_armature, non_keyed_values,
                               bake_bone, bake_channel, bake_range_start,
                               bake_range_end, action_name, driver_obj,
                               export_settings))

    export_user_extensions('gather_animation_sampler_hook', export_settings,
                           sampler, channels, blender_object, bake_bone,
                           bake_channel, bake_range_start, bake_range_end,
                           action_name)

    return sampler