コード例 #1
0
ファイル: pis.py プロジェクト: bbigii/BlenderTools
def _fill_bones_sections(bones, export_scale):
    """Creates "Bones" section."""
    section = _SectionData("Bones")
    for armature_bone in bones:
        # Rest matrix converted from Blender into SCS space, scaled for export
        # and transposed for serialization.
        scs_mat = (Matrix.Scale(export_scale, 4) *
                   _convert_utils.scs_to_blend_matrix().inverted() *
                   armature_bone.matrix_local).transposed()
        section.data.append(("__bone__", armature_bone.name, armature_bone.parent, scs_mat))
    return section
コード例 #2
0
ファイル: pip.py プロジェクト: P-casper1/BlenderTools
def _fill_node_sections(data_list, offset_matrix):
    """Fills up "Node" sections."""
    sections = []
    for locator in data_list:
        node = _SectionData("Node")
        node.props.append(("Index", int(locator.scs_props.locator_prefab_con_node_index)))
        position = _convert_utils.convert_location_to_scs(locator.location, offset_matrix)
        node.props.append(("Position", ["&&", position]))
        # Locator's local +Y axis rotated into world space, then mapped into SCS space.
        facing = _convert_utils.scs_to_blend_matrix().inverted() * (locator.matrix_world.to_quaternion() * Vector((0, 1, 0)))
        node.props.append(("Direction", ["&&", facing]))
        # Lane wiring is not exported here; write eight unset (-1) slots each way.
        node.props.append(("InputLanes", ["ii", (-1, -1, -1, -1, -1, -1, -1, -1)]))
        node.props.append(("OutputLanes", ["ii", (-1, -1, -1, -1, -1, -1, -1, -1)]))
        node.props.append(("TerrainPointCount", 0))
        node.props.append(("StreamCount", 0))
        node.props.append(("", ""))
        sections.append(node)
    return sections
コード例 #3
0
ファイル: pis.py プロジェクト: coola69/BlenderTools
def _fill_bones_sections(bones, export_scale):
    """Creates "Bones" section."""
    section = _SectionData("Bones")
    # Blender->SCS conversion combined with export scale; identical for every bone.
    conversion = Matrix.Scale(export_scale, 4) * _convert_utils.scs_to_blend_matrix().inverted()
    for bone in bones:
        bone_mat = (conversion * bone.matrix_local).transposed()
        section.data.append(("__bone__", bone.name, bone.parent, bone_mat))
    return section
コード例 #4
0
ファイル: pis.py プロジェクト: P-casper1/BlenderTools
def _fill_bones_sections(scs_root_obj, armature_obj, used_bones, export_scale):
    """Creates "Bones" section."""
    section = _SectionData("Bones")

    # Armature object transform relative to the SCS root; bone rest matrices are
    # given in armature space only, so this offset is folded into every bone.
    root_to_armature = scs_root_obj.matrix_world.inverted() * armature_obj.matrix_world

    # Blender->SCS conversion with export scale, hoisted out of the loop.
    conversion = Matrix.Scale(export_scale, 4) * _convert_utils.scs_to_blend_matrix().inverted()

    for bone_name in used_bones:
        bone = armature_obj.data.bones[bone_name]
        bone_mat = conversion * root_to_armature * bone.matrix_local
        section.data.append(("__bone__", bone.name, bone.parent, bone_mat.transposed()))
    return section
コード例 #5
0
def _fill_piece_sections(convex_coll_locators, export_scale):
    """Fills up "Piece" sections for convex colliders.

    :param convex_coll_locators: convex collision locator objects to export
    :param export_scale: global export scale factor
    :return: (total vertex count, total face count, list of "Piece" sections)
    """

    len_vertices = 0
    len_faces = 0
    piece_sections = []
    for index, item in enumerate(convex_coll_locators):
        # FIX: default to empty tuples instead of 0 — the previous default made
        # len(verts)/len(faces) raise TypeError when a property was missing.
        verts = item.scs_props.get("coll_convex_verts", ())
        faces = item.scs_props.get("coll_convex_faces", ())
        stream_cnt = 1 if verts else 0

        len_vertices += len(verts)
        len_faces += len(faces)

        section = _SectionData("Piece")
        section.props.append(("Index", index))
        section.props.append(("Material", 0))
        section.props.append(("VertexCount", len(verts)))
        section.props.append(("TriangleCount", len(faces)))
        section.props.append(("StreamCount", stream_cnt))
        section.props.append(("", ""))

        # VERTICES: convert each position from Blender into SCS space.
        if verts:
            # loop-invariant Blender->SCS transform, hoisted out of the vertex loop
            to_scs = Matrix.Scale(export_scale, 4) * _convert_utils.scs_to_blend_matrix().inverted()
            vector_verts = [Vector(to_scs * Vector(vert)) for vert in verts]
            section.sections.append(
                _pix_container.make_stream_section(vector_verts, "_POSITION", ()))

        # FACES (TRIANGLES): winding order must be flipped for SCS.
        if faces:
            flipped_faces = _mesh_utils.flip_faceverts(faces)
            section.sections.append(
                _pix_container.make_triangle_stream(flipped_faces))

        piece_sections.append(section)
    return len_vertices, len_faces, piece_sections
コード例 #6
0
ファイル: pia.py プロジェクト: bbigii/BlenderTools
def _get_delta_matrix(bone_rest_matrix, bone_rest_matrix_scs, parent_bone_rest_matrix_scs, bone_animation_matrix_scs, import_scale):
    """Compute the animation delta matrix for one bone frame.

    Maps the SCS-space animation matrix into the bone's Blender rest space,
    applying the import scale and dividing out the SCS rest pose's scale.
    """
    # Scale component of the SCS rest pose, adjusted by the import scale.
    _, _, rest_scale = bone_rest_matrix_scs.decompose()
    rest_scale = rest_scale * import_scale
    # Diagonal matrix dividing the rest scale back out, per axis.
    scale_removal_matrix = Matrix()
    scale_removal_matrix[0] = (1.0 / rest_scale[0], 0, 0, 0)
    scale_removal_matrix[1] = (0, 1.0 / rest_scale[1], 0, 0)
    scale_removal_matrix[2] = (0, 0, 1.0 / rest_scale[2], 0)
    scale_removal_matrix[3] = (0, 0, 0, 1)
    scale_matrix = Matrix.Scale(import_scale, 4)
    delta = bone_rest_matrix.inverted() * scale_matrix
    delta = delta * _convert_utils.scs_to_blend_matrix()
    delta = delta * parent_bone_rest_matrix_scs
    delta = delta * bone_animation_matrix_scs
    return delta * scale_removal_matrix
コード例 #7
0
def _fill_bones_sections(scs_root_obj, armature_obj, used_bones, export_scale):
    """Creates "Bones" section."""
    section = _SectionData("Bones")

    # Armature transform relative to the SCS root: bone rest matrices are given
    # in armature space, so this offset has to be baked into every bone.
    armature_mat = scs_root_obj.matrix_world.inverted() @ armature_obj.matrix_world

    # Blender->SCS conversion with export scale, identical for every bone.
    conversion = Matrix.Scale(export_scale, 4) @ _convert_utils.scs_to_blend_matrix().inverted()

    for bone_name in used_bones:
        bone = armature_obj.data.bones[bone_name]
        bone_mat = conversion @ armature_mat @ bone.matrix_local
        section.data.append(("__bone__", bone.name, bone.parent, bone_mat.transposed()))
    return section
コード例 #8
0
ファイル: pia.py プロジェクト: coola69/BlenderTools
def _get_delta_matrix(bone_rest_matrix, bone_rest_matrix_scs,
                      parent_bone_rest_matrix_scs, bone_animation_matrix_scs,
                      import_scale):
    """Compute the animation delta matrix for one bone frame.

    Maps the SCS-space animation matrix into the bone's Blender rest space,
    applying the import scale and dividing out the SCS rest pose's scale.
    """
    _, _, scs_rest_scale = bone_rest_matrix_scs.decompose()
    scs_rest_scale = scs_rest_scale * import_scale

    # Diagonal matrix that divides the (scaled) rest scale back out.
    scale_removal_matrix = Matrix()
    scale_removal_matrix[0] = (1.0 / scs_rest_scale[0], 0, 0, 0)
    scale_removal_matrix[1] = (0, 1.0 / scs_rest_scale[1], 0, 0)
    scale_removal_matrix[2] = (0, 0, 1.0 / scs_rest_scale[2], 0)
    scale_removal_matrix[3] = (0, 0, 0, 1)

    return (bone_rest_matrix.inverted() *
            Matrix.Scale(import_scale, 4) *
            _convert_utils.scs_to_blend_matrix() *
            parent_bone_rest_matrix_scs *
            bone_animation_matrix_scs *
            scale_removal_matrix)
コード例 #9
0
ファイル: pic.py プロジェクト: P-casper1/BlenderTools
def _fill_piece_sections(convex_coll_locators, export_scale):
    """Fills up "Piece" sections for convex colliders.

    :param convex_coll_locators: convex collision locator objects to export
    :param export_scale: global export scale factor
    :return: (total vertex count, total face count, list of "Piece" sections)
    """

    len_vertices = 0
    len_faces = 0
    piece_sections = []
    for index, item in enumerate(convex_coll_locators):
        # FIX: default to empty tuples instead of 0 — the previous default made
        # len(verts)/len(faces) raise TypeError when a property was missing.
        verts = item.scs_props.get("coll_convex_verts", ())
        faces = item.scs_props.get("coll_convex_faces", ())
        stream_cnt = 1 if verts else 0

        len_vertices += len(verts)
        len_faces += len(faces)

        section = _SectionData("Piece")
        section.props.append(("Index", index))
        section.props.append(("Material", 0))
        section.props.append(("VertexCount", len(verts)))
        section.props.append(("TriangleCount", len(faces)))
        section.props.append(("StreamCount", stream_cnt))
        section.props.append(("", ""))

        # VERTICES: convert each position from Blender into SCS space.
        if verts:
            # loop-invariant Blender->SCS transform, hoisted out of the vertex loop
            to_scs = Matrix.Scale(export_scale, 4) * _convert_utils.scs_to_blend_matrix().inverted()
            vector_verts = [Vector(to_scs * Vector(vert)) for vert in verts]
            section.sections.append(_pix_container.make_stream_section(vector_verts, "_POSITION", ()))

        # FACES (TRIANGLES): winding order must be flipped for SCS.
        if faces:
            flipped_faces = _mesh_utils.flip_faceverts(faces)
            section.sections.append(_pix_container.make_triangle_stream(flipped_faces))

        piece_sections.append(section)
    return len_vertices, len_faces, piece_sections
コード例 #10
0
ファイル: pip.py プロジェクト: coola69/BlenderTools
def _fill_node_sections(data_list, offset_matrix):
    """Fills up "Node" sections."""
    unset_lanes = (-1, -1, -1, -1, -1, -1, -1, -1)
    sections = []
    for item in data_list:
        node_index = int(item.scs_props.locator_prefab_con_node_index)
        position = _convert_utils.convert_location_to_scs(item.location, offset_matrix)
        # Locator's local +Y axis in world space, mapped into SCS space.
        heading = _convert_utils.scs_to_blend_matrix().inverted() * (
            item.matrix_world.to_quaternion() * Vector((0, 1, 0)))

        section = _SectionData("Node")
        section.props.append(("Index", node_index))
        section.props.append(("Position", ["&&", position]))
        section.props.append(("Direction", ["&&", heading]))
        # Lane wiring is not exported here; write eight unset (-1) slots each way.
        section.props.append(("InputLanes", ["ii", unset_lanes]))
        section.props.append(("OutputLanes", ["ii", unset_lanes]))
        section.props.append(("TerrainPointCount", 0))
        section.props.append(("StreamCount", 0))
        section.props.append(("", ""))
        sections.append(section)
    return sections
コード例 #11
0
ファイル: pia.py プロジェクト: bbigii/BlenderTools
def _get_bone_channels(bone_list, action, export_scale):
    """Takes a bone list and action and returns bone channels.

    Each bone's F-curves (grouped by bone name in the action) are sampled every
    "anim_export_step" frames between the action's start and end frames, and the
    sampled local transform is converted into an SCS-space frame matrix.

    bone_channels structure example:
    [("Bone", [("_TIME", [0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1]), ("_MATRIX", [])])]

    :param bone_list: bones to export channels for
    :param action: Blender action whose curve groups are matched by bone name
    :param export_scale: global export scale factor
    :return: list of (bone_name, (("_TIME", timings), ("_MATRIX", matrices)))
    """
    bone_channels = []
    frame_start = action.frame_range[0]
    frame_end = action.frame_range[1]
    total_time = action.scs_props.action_length
    anim_export_step = action.scs_props.anim_export_step
    # NOTE(review): when frame_end == frame_start this is 0 and the
    # "total_time / total_frames" below divides by zero — confirm callers
    # guarantee a non-empty frame range.
    total_frames = (frame_end - frame_start) / anim_export_step
    # print(' -- action: %r' % str(action))
    for bone in bone_list:
        # print(' oo bone: %r' % str(bone))
        if bone:
            # print(' -- bone_name: %r' % bone.name)
            bone_name = bone.name
            bone_rest_mat = bone.matrix_local
            if bone.parent:
                # Parent's rest pose converted into SCS space with export scale applied.
                parent_bone_rest_mat = Matrix.Scale(export_scale, 4) * _convert_utils.scs_to_blend_matrix().inverted() * bone.parent.matrix_local
            else:
                parent_bone_rest_mat = Matrix()
            for group in action.groups:
                if group.name == bone_name:
                    # print(' -- group: %r' % str(group))

                    # GET CHANNELS' CURVES
                    # Index each transform component's F-curve by its array index
                    # so every component can be evaluated independently per frame.
                    loc_curves = {}
                    euler_rot_curves = {}
                    quat_rot_curves = {}
                    sca_curves = {}
                    rot_mode = ''
                    for channel in group.channels:
                        data_path = channel.data_path
                        array_index = channel.array_index
                        # channel_start = channel.range()[0]
                        # channel_end = channel.range()[1]
                        # print('      channel: %r (%s) [%s - %s]' % (data_path, array_index, channel_start, channel_end))
                        if data_path.endswith("location"):
                            loc_curves[array_index] = channel
                        elif data_path.endswith("rotation_euler"):
                            euler_rot_curves[array_index] = channel
                            rot_mode = 'euler'
                        elif data_path.endswith("rotation_quaternion"):
                            quat_rot_curves[array_index] = channel
                            rot_mode = 'quat'
                        elif data_path.endswith("scale"):
                            sca_curves[array_index] = channel

                    # GO THROUGH FRAMES
                    actual_frame = frame_start
                    timings_stream = []
                    matrices_stream = []
                    while actual_frame <= frame_end:
                        mat_loc = Matrix()
                        mat_rot = Matrix()
                        mat_sca = Matrix()

                        # LOCATION MATRIX
                        # Components without a curve keep their default (0.0).
                        if len(loc_curves) > 0:
                            location = Vector()
                            for index in range(3):
                                if index in loc_curves:
                                    location[index] = loc_curves[index].evaluate(actual_frame)
                            mat_loc = Matrix.Translation(location)

                        # ROTATION MATRIX
                        if rot_mode == 'euler' and len(euler_rot_curves) > 0:
                            rotation = Euler()
                            for index in range(3):
                                if index in euler_rot_curves:
                                    rotation[index] = euler_rot_curves[index].evaluate(actual_frame)
                            mat_rot = Euler(rotation, 'XYZ').to_matrix().to_4x4()  # TODO: Solve the other rotation modes.
                        if rot_mode == 'quat' and len(quat_rot_curves) > 0:
                            rotation = Quaternion()
                            for index in range(4):
                                if index in quat_rot_curves:
                                    rotation[index] = quat_rot_curves[index].evaluate(actual_frame)
                            mat_rot = rotation.to_matrix().to_4x4()

                        # SCALE MATRIX
                        if len(sca_curves) > 0:
                            scale = Vector((1.0, 1.0, 1.0))
                            for index in range(3):
                                if index in sca_curves:
                                    scale[index] = sca_curves[index].evaluate(actual_frame)
                            mat_sca = Matrix()
                            mat_sca[0] = (scale[0], 0, 0, 0)
                            # NOTE(review): Y and Z scale components are swapped
                            # below — presumably part of the Blender->SCS axis
                            # conversion; confirm this is intended.
                            mat_sca[1] = (0, scale[2], 0, 0)
                            mat_sca[2] = (0, 0, scale[1], 0)
                            mat_sca[3] = (0, 0, 0, 1)

                        # BLENDER FRAME MATRIX
                        mat = mat_loc * mat_rot * mat_sca

                        # SCALE REMOVAL MATRIX
                        # Divides the bone's (export-scaled) rest scale back out of the frame.
                        rest_location, rest_rotation, rest_scale = bone_rest_mat.decompose()
                        # print(' BONES rest_scale: %s' % str(rest_scale))
                        rest_scale = rest_scale * export_scale
                        scale_removal_matrix = Matrix()
                        scale_removal_matrix[0] = (1.0 / rest_scale[0], 0, 0, 0)
                        scale_removal_matrix[1] = (0, 1.0 / rest_scale[1], 0, 0)
                        scale_removal_matrix[2] = (0, 0, 1.0 / rest_scale[2], 0)
                        scale_removal_matrix[3] = (0, 0, 0, 1)

                        # SCALE MATRIX
                        scale_matrix = Matrix.Scale(export_scale, 4)

                        # COMPUTE SCS FRAME MATRIX
                        frame_matrix = (parent_bone_rest_mat.inverted() * _convert_utils.scs_to_blend_matrix().inverted() *
                                        scale_matrix.inverted() * bone_rest_mat * mat * scale_removal_matrix.inverted())

                        # print('          actual_frame: %s - value: %s' % (actual_frame, frame_matrix))
                        # Every sample gets an equal slice of the total action length.
                        timings_stream.append(("__time__", total_time / total_frames), )
                        matrices_stream.append(("__matrix__", frame_matrix.transposed()), )
                        actual_frame += anim_export_step
                    anim_timing = ("_TIME", timings_stream)
                    anim_matrices = ("_MATRIX", matrices_stream)
                    bone_anim = (anim_timing, anim_matrices)
                    bone_data = (bone_name, bone_anim)
                    bone_channels.append(bone_data)
        else:
            # NOTE(review): "bone" is falsy here, yet bone.name is read for the
            # warning — if falsy means None this raises AttributeError; verify.
            lprint('W bone %r is not part of the actual Armature!', bone.name)
            # print(' -- bone.name: %r' % (bone.name))
    return bone_channels
コード例 #12
0
def _get_bone_channels(bone_list, action, export_scale):
    """Takes a bone list and action and returns bone channels.

    Each bone's F-curves (grouped by bone name in the action) are sampled every
    "anim_export_step" frames between the action's start and end frames, and the
    sampled local transform is converted into an SCS-space frame matrix.

    bone_channels structure example:
    [("Bone", [("_TIME", [0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1]), ("_MATRIX", [])])]

    :param bone_list: bones to export channels for
    :param action: Blender action whose curve groups are matched by bone name
    :param export_scale: global export scale factor
    :return: list of (bone_name, (("_TIME", timings), ("_MATRIX", matrices)))
    """
    bone_channels = []
    frame_start = action.frame_range[0]
    frame_end = action.frame_range[1]
    total_time = action.scs_props.action_length
    anim_export_step = action.scs_props.anim_export_step
    # NOTE(review): when frame_end == frame_start this is 0 and the
    # "total_time / total_frames" below divides by zero — confirm callers
    # guarantee a non-empty frame range.
    total_frames = (frame_end - frame_start) / anim_export_step
    # print(' -- action: %r' % str(action))
    for bone in bone_list:
        # print(' oo bone: %r' % str(bone))
        if bone:
            # print(' -- bone_name: %r' % bone.name)
            bone_name = bone.name
            bone_rest_mat = bone.matrix_local
            if bone.parent:
                # Parent's rest pose converted into SCS space with export scale applied.
                parent_bone_rest_mat = Matrix.Scale(
                    export_scale, 4) * _convert_utils.scs_to_blend_matrix(
                    ).inverted() * bone.parent.matrix_local
            else:
                parent_bone_rest_mat = Matrix()
            for group in action.groups:
                if group.name == bone_name:
                    # print(' -- group: %r' % str(group))

                    # GET CHANNELS' CURVES
                    # Index each transform component's F-curve by its array index
                    # so every component can be evaluated independently per frame.
                    loc_curves = {}
                    euler_rot_curves = {}
                    quat_rot_curves = {}
                    sca_curves = {}
                    rot_mode = ''
                    for channel in group.channels:
                        data_path = channel.data_path
                        array_index = channel.array_index
                        # channel_start = channel.range()[0]
                        # channel_end = channel.range()[1]
                        # print('      channel: %r (%s) [%s - %s]' % (data_path, array_index, channel_start, channel_end))
                        if data_path.endswith("location"):
                            loc_curves[array_index] = channel
                        elif data_path.endswith("rotation_euler"):
                            euler_rot_curves[array_index] = channel
                            rot_mode = 'euler'
                        elif data_path.endswith("rotation_quaternion"):
                            quat_rot_curves[array_index] = channel
                            rot_mode = 'quat'
                        elif data_path.endswith("scale"):
                            sca_curves[array_index] = channel

                    # GO THROUGH FRAMES
                    actual_frame = frame_start
                    timings_stream = []
                    matrices_stream = []
                    while actual_frame <= frame_end:
                        mat_loc = Matrix()
                        mat_rot = Matrix()
                        mat_sca = Matrix()

                        # LOCATION MATRIX
                        # Components without a curve keep their default (0.0).
                        if len(loc_curves) > 0:
                            location = Vector()
                            for index in range(3):
                                if index in loc_curves:
                                    location[index] = loc_curves[
                                        index].evaluate(actual_frame)
                            mat_loc = Matrix.Translation(location)

                        # ROTATION MATRIX
                        if rot_mode == 'euler' and len(euler_rot_curves) > 0:
                            rotation = Euler()
                            for index in range(3):
                                if index in euler_rot_curves:
                                    rotation[index] = euler_rot_curves[
                                        index].evaluate(actual_frame)
                            mat_rot = Euler(rotation, 'XYZ').to_matrix(
                            ).to_4x4()  # TODO: Solve the other rotation modes.
                        if rot_mode == 'quat' and len(quat_rot_curves) > 0:
                            rotation = Quaternion()
                            for index in range(4):
                                if index in quat_rot_curves:
                                    rotation[index] = quat_rot_curves[
                                        index].evaluate(actual_frame)
                            mat_rot = rotation.to_matrix().to_4x4()

                        # SCALE MATRIX
                        if len(sca_curves) > 0:
                            scale = Vector((1.0, 1.0, 1.0))
                            for index in range(3):
                                if index in sca_curves:
                                    scale[index] = sca_curves[index].evaluate(
                                        actual_frame)
                            mat_sca = Matrix()
                            mat_sca[0] = (scale[0], 0, 0, 0)
                            # NOTE(review): Y and Z scale components are swapped
                            # below — presumably part of the Blender->SCS axis
                            # conversion; confirm this is intended.
                            mat_sca[1] = (0, scale[2], 0, 0)
                            mat_sca[2] = (0, 0, scale[1], 0)
                            mat_sca[3] = (0, 0, 0, 1)

                        # BLENDER FRAME MATRIX
                        mat = mat_loc * mat_rot * mat_sca

                        # SCALE REMOVAL MATRIX
                        # Divides the bone's (export-scaled) rest scale back out of the frame.
                        rest_location, rest_rotation, rest_scale = bone_rest_mat.decompose(
                        )
                        # print(' BONES rest_scale: %s' % str(rest_scale))
                        rest_scale = rest_scale * export_scale
                        scale_removal_matrix = Matrix()
                        scale_removal_matrix[0] = (1.0 / rest_scale[0], 0, 0,
                                                   0)
                        scale_removal_matrix[1] = (0, 1.0 / rest_scale[1], 0,
                                                   0)
                        scale_removal_matrix[2] = (0, 0, 1.0 / rest_scale[2],
                                                   0)
                        scale_removal_matrix[3] = (0, 0, 0, 1)

                        # SCALE MATRIX
                        scale_matrix = Matrix.Scale(export_scale, 4)

                        # COMPUTE SCS FRAME MATRIX
                        frame_matrix = (
                            parent_bone_rest_mat.inverted() *
                            _convert_utils.scs_to_blend_matrix().inverted() *
                            scale_matrix.inverted() * bone_rest_mat * mat *
                            scale_removal_matrix.inverted())

                        # print('          actual_frame: %s - value: %s' % (actual_frame, frame_matrix))
                        # Every sample gets an equal slice of the total action length.
                        timings_stream.append(
                            ("__time__", total_time / total_frames), )
                        matrices_stream.append(
                            ("__matrix__", frame_matrix.transposed()), )
                        actual_frame += anim_export_step
                    anim_timing = ("_TIME", timings_stream)
                    anim_matrices = ("_MATRIX", matrices_stream)
                    bone_anim = (anim_timing, anim_matrices)
                    bone_data = (bone_name, bone_anim)
                    bone_channels.append(bone_data)
        else:
            # NOTE(review): "bone" is falsy here, yet bone.name is read for the
            # warning — if falsy means None this raises AttributeError; verify.
            lprint('W bone %r is not part of the actual Armature!', bone.name)
            # print(' -- bone.name: %r' % (bone.name))
    return bone_channels
コード例 #13
0
ファイル: pis.py プロジェクト: P-casper1/BlenderTools
def load(filepath, armature, get_only=False):
    """Load an SCS PIS (skeleton) file and build its bones on the given armature.

    :param filepath: path to the .pis file to import
    :param armature: armature object to build/align bones on (may be None when
        get_only is True)
    :param get_only: when True only the parsed bones dict is returned and no
        armature editing is done (used when importing PIA from panel)
    :return: bones dict on success; ({'CANCELLED'}, None) when no armature is given
    """
    scs_globals = _get_scs_globals()
    import_scale = scs_globals.import_scale
    bone_import_scale = scs_globals.bone_import_scale
    connected_bones = scs_globals.connected_bones

    print("\n************************************")
    print("**      SCS PIS Importer          **")
    print("**      (c)2014 SCS Software      **")
    print("************************************\n")

    # scene = context.scene
    ind = '    '
    # Parse the PIS file into a container of sections.
    pis_container = _pix_container.get_data_from_file(filepath, ind)

    # TEST PRINTOUTS
    # ind = '  '
    # for section in pis_container:
    # print('SEC.: "%s"' % section.type)
    # for prop in section.props:
    # print('%sProp: %s' % (ind, prop))
    # for data in section.data:
    # print('%sdata: %s' % (ind, data))
    # for sec in section.sections:
    # print_section(sec, ind)
    # print('\nTEST - Source: "%s"' % pis_container[0].props[1][1])
    # print('')

    # TEST EXPORT
    # path, file = os.path.splitext(filepath)
    # export_filepath = str(path + '_reex' + file)
    # result = pix_write.write_data(pis_container, export_filepath, ind)
    # if result == {'FINISHED'}:
    # Print(dump_level, '\nI Test export succesful! The new file:\n  "%s"', export_filepath)
    # else:
    # Print(dump_level, '\nE Test export failed! File:\n  "%s"', export_filepath)

    # LOAD HEADER
    '''
    NOTE: skipped for now as no data needs to be readed
    format_version, source, f_type, f_name, source_filename, author = _get_header(pis_container)
    '''

    # LOAD GLOBALS
    '''
    NOTE: skipped for now as no data needs to be readed
    # bone_count = _get_global(pis_container)
    '''

    # LOAD BONES
    # bones maps bone name -> [parent_name, matrix, (optionally) children]
    bones = _get_bones(pis_container)

    if get_only:  # only return bones (used when importing PIA from panel)
        return bones

    # PROVIDE AN ARMATURE
    if not armature:
        lprint('\nE No Armature for file "%s"!', (os.path.basename(filepath),))
        return {'CANCELLED'}, None

    # NOTE(review): "scene.objects.active" is the Blender 2.7x API; edit-bone
    # changes below require the armature to be active and in EDIT mode.
    bpy.context.scene.objects.active = armature
    bpy.ops.object.mode_set(mode='EDIT')

    # CONNECTED BONES - Add information about all children...
    if connected_bones:
        for bone in bones:
            # print('  bone: %r - %r\n%s\n' % (bone, bones[bone][0], str(bones[bone][1])))
            # Collect every bone whose parent entry names this bone.
            children = []
            for item in bones:
                if bone == bones[item][0]:
                    children.append(item)
            bones[bone].append(children)
            # print('  bone: %r - %r\n%s\n' % (bone, bones[bone][0], str(bones[bone][2])))

    for bone_i, bone in enumerate(armature.data.bones):
        # print('----- bone: %r ------------------------------' % bone.name)

        # SET PARENT
        if bones[bone.name][0] != "":  # if bone has parent...
            # print('  %r --> %r' % (bone.name, bones[bone.name][0]))
            # armature.data.edit_bones[bone.name].use_connect = False
            armature.data.edit_bones[bone.name].parent = armature.data.edit_bones[bones[bone.name][0]]
            # else:
            # print('  %r - NO parent' % bone.name)

        # COMPUTE BONE TRANSFORMATION
        # Stored matrix is SCS-space and transposed; convert it back to Blender space.
        matrix = bones[bone.name][1]
        bone_matrix = _convert_utils.scs_to_blend_matrix() * matrix.transposed()
        axis, angle = _convert_utils.mat3_to_vec_roll(bone_matrix)
        # print(' * %r - angle: %s' % (bone.name, angle))

        # SET BONE TRANSFORMATION
        # Head from the matrix translation; tail placed along the bone axis so the
        # bone gets a nonzero length proportional to the import scales.
        armature.data.edit_bones[bone.name].head = bone_matrix.to_translation().to_3d() * import_scale
        armature.data.edit_bones[bone.name].tail = (armature.data.edit_bones[bone.name].head +
                                                    Vector(axis).normalized() *
                                                    bone_import_scale *
                                                    import_scale)
        armature.data.edit_bones[bone.name].roll = angle

        # CONNECTED BONES
        # NOTE: Doesn't work as expected! Disabled for now in UI.
        # Child bones gets position offset and there is also a problem when translation
        # is animated, for which connected bones doesn't allow.
        if connected_bones:
            # Only connect chains with exactly one child; the tail is snapped to
            # the child's head position.
            if len(bones[bone.name][2]) == 1:
                matrix = bones[bones[bone.name][2][0]][1]
                bone_matrix = _convert_utils.scs_to_blend_matrix() * matrix.transposed()
                armature.data.edit_bones[bone.name].tail = bone_matrix.to_translation().to_3d() * import_scale
                armature.data.edit_bones[bones[bone.name][2][0]].use_connect = True

    bpy.ops.object.mode_set(mode='OBJECT')
    armature.data.show_axes = True
    armature.draw_type = 'WIRE'

    # WARNING PRINTOUTS
    # if piece_count < 0: Print(dump_level, '\nW More Pieces found than were declared!')
    # if piece_count > 0: Print(dump_level, '\nW Some Pieces not found, but were declared!')
    # if dump_level > 1: print('')

    print("************************************")
    return bones
コード例 #14
0
ファイル: pip.py プロジェクト: P-casper1/BlenderTools
def _fill_nav_curve_sections(nav_point_list, offset_matrix):
    """Fills up (navigation) "Curve" sections.

    :param nav_point_list: navigation point locator objects
    :param offset_matrix: matrix used for localizing positions against SCS root
    :return: list of "Curve" sections ordered by curve index
    """

    _INDEX = "index"
    _START = "start"
    _END = "end"
    _PREV_CURVES = "prev_curves"
    _NEXT_CURVES = "next_curves"

    curves_dict = _connections_group_wrapper.get_curves(nav_point_list, _INDEX, _START, _END, _NEXT_CURVES, _PREV_CURVES)

    # prepare empty sections for curves so each can later be placed directly on right index
    # (comprehension makes every slot a distinct placeholder, instead of one shared instance)
    sections = [_SectionData("Dummy") for _ in range(len(curves_dict))]
    for connection_key in curves_dict.keys():

        curve = curves_dict[connection_key]

        start_loc = bpy.data.objects[curve[_START]]
        end_loc = bpy.data.objects[curve[_END]]

        section = _SectionData("Curve")
        section.props.append(("Index", curve[_INDEX]))
        section.props.append(("Name", _name_utils.tokenize_name(curve[_START])))
        section.props.append(("", ""))
        section.props.append(("#", "Flags:"))
        section.props.append(("Flags", _get_np_flags(start_loc, end_loc)))
        section.props.append(("", ""))
        section.props.append(("LeadsToNodes", 0))  # TODO SIMON: make it happen when you know what it means

        speed_limit = _get_np_speed_limit(start_loc)
        if speed_limit:
            section.props.append(("", ""))
            # FIX: the computed speed limit was previously dropped and an empty
            # float tuple written; serialize the value like "Length" below.
            section.props.append(("SpeedLimit", ["&", (speed_limit, )]))

        traffic_light = _get_np_traffic_light_id(start_loc)
        if traffic_light != -1:
            section.props.append(("", ""))
            # FIX: previously only a one-element tuple ("TrafficLightID",) was
            # appended, losing the id; write the (name, value) pair instead.
            section.props.append(("TrafficLightID", traffic_light))

        section.props.append(("", ""))
        section.props.append(("NextCurves", ["ii", _get_np_prev_next_curves(curves_dict, curve[_NEXT_CURVES], _INDEX)]))
        section.props.append(("PrevCurves", ["ii", _get_np_prev_next_curves(curves_dict, curve[_PREV_CURVES], _INDEX)]))
        section.props.append(("", ""))
        section.props.append(("Length", ["&", (_get_np_length(start_loc, end_loc), )]))
        section.props.append(("", ""))
        bezier_section = _SectionData("Bezier")

        # START NODE: position and SCS-space facing direction (locator's +Y axis).
        start_section = _SectionData("Start")
        loc = _convert_utils.convert_location_to_scs(start_loc.location, offset_matrix)
        start_section.props.append(("Position", ["&&", loc]))
        direction_vector = _convert_utils.scs_to_blend_matrix().inverted() * (start_loc.matrix_world.to_quaternion() * Vector((0, 1, 0)))
        start_section.props.append(("Direction", ["&&", (direction_vector[0], direction_vector[1], direction_vector[2])]))

        # END NODE: same layout as the start node.
        end_section = _SectionData("End")
        loc = _convert_utils.convert_location_to_scs(end_loc.location, offset_matrix)
        end_section.props.append(("Position", ["&&", loc]))
        direction_vector = _convert_utils.scs_to_blend_matrix().inverted() * (end_loc.matrix_world.to_quaternion() * Vector((0, 1, 0)))
        end_section.props.append(("Direction", ["&&", (direction_vector[0], direction_vector[1], direction_vector[2])]))

        bezier_section.sections.append(start_section)
        bezier_section.sections.append(end_section)
        section.sections.append(bezier_section)

        # make sure that current section is placed on right place
        sections[curve[_INDEX]] = section

    return sections
コード例 #15
0
ファイル: pia.py プロジェクト: P-casper1/BlenderTools
def _get_bone_channels(scs_root_obj, armature, scs_animation, action, export_scale):
    """Takes armature and action and returns sampled bone channels for animation export.

    bone_channels structure example:
    [("Bone", [("_TIME", [0.1, 0.1, ...]), ("_MATRIX", [matrix, matrix, ...])])]

    :param scs_root_obj: SCS Root Object against which bone transformations are computed
    :type scs_root_obj: bpy.types.Object
    :param armature: armature object whose bones are sampled
    :type armature: bpy.types.Object
    :param scs_animation: SCS animation entry providing start/end frames and length
    :type scs_animation: object
    :param action: action holding the F-curves to sample
    :type action: bpy.types.Action
    :param export_scale: global export scale factor
    :type export_scale: float
    :return: bone channels list; empty list if invalid data (e.g. negative scale) was detected
    :rtype: list
    """
    bone_channels = []
    frame_start = scs_animation.anim_start
    frame_end = scs_animation.anim_end
    anim_export_step = action.scs_props.anim_export_step
    total_frames = (frame_end - frame_start) / anim_export_step

    # armature matrix stores transformation of armature object against scs root
    # and has to be added to all bones as they carry only armature-space transformations
    armature_mat = scs_root_obj.matrix_world.inverted() * armature.matrix_world

    invalid_data = False  # flag to indicate invalid data state
    curves_per_bone = {}  # store all the curves we are interested in per bone names

    for bone in armature.data.bones:
        for fcurve in action.fcurves:

            # check if curve belongs to bone
            # (pose bone data paths look like: 'pose.bones["BoneName"].location')
            if '["' + bone.name + '"]' in fcurve.data_path:

                data_path = fcurve.data_path
                array_index = fcurve.array_index

                if data_path.endswith("location"):
                    curve_type = "location"
                elif data_path.endswith("rotation_euler"):
                    curve_type = "euler_rotation"
                elif data_path.endswith("rotation_quaternion"):
                    curve_type = "quat_rotation"
                elif data_path.endswith("scale"):
                    curve_type = "scale"
                else:
                    curve_type = None

                # write only recognized curves
                if curve_type is not None:
                    if bone.name not in curves_per_bone:
                        curves_per_bone[bone.name] = {
                            "location": {},
                            "euler_rotation": {},
                            "quat_rotation": {},
                            "scale": {}
                        }

                    # curves are keyed by their component index (0=x, 1=y, 2=z; quaternion: 0=w)
                    curves_per_bone[bone.name][curve_type][array_index] = fcurve

    for bone_name, bone_curves in curves_per_bone.items():

        bone = armature.data.bones[bone_name]
        pose_bone = armature.pose.bones[bone_name]
        loc_curves = bone_curves["location"]
        euler_rot_curves = bone_curves["euler_rotation"]
        quat_rot_curves = bone_curves["quat_rotation"]
        sca_curves = bone_curves["scale"]

        # rest matrices: bone rest pose in SCS root space; parent rest pose is
        # additionally converted to SCS space and export scale for the final transform
        bone_rest_mat = armature_mat * bone.matrix_local
        if bone.parent:
            parent_bone_rest_mat = (Matrix.Scale(export_scale, 4) * _convert_utils.scs_to_blend_matrix().inverted() *
                                    armature_mat * bone.parent.matrix_local)
        else:
            parent_bone_rest_mat = Matrix()

        # GO THOUGH FRAMES
        actual_frame = frame_start
        timings_stream = []
        matrices_stream = []
        while actual_frame <= frame_end:
            mat_loc = Matrix()
            mat_rot = Matrix()
            mat_sca = Matrix()

            # LOCATION MATRIX
            # (components without an F-curve simply stay at 0)
            if len(loc_curves) > 0:
                location = Vector()
                for index in range(3):
                    if index in loc_curves:
                        location[index] = loc_curves[index].evaluate(actual_frame)
                mat_loc = Matrix.Translation(location)

            # ROTATION MATRIX
            if len(euler_rot_curves) > 0:
                rotation = Euler()
                for index in range(3):
                    if index in euler_rot_curves:
                        rotation[index] = euler_rot_curves[index].evaluate(actual_frame)
                mat_rot = Euler(rotation, pose_bone.rotation_mode).to_matrix().to_4x4()  # calc rotation by pose rotation mode

            elif len(quat_rot_curves) > 0:
                rotation = Quaternion()
                for index in range(4):
                    if index in quat_rot_curves:
                        rotation[index] = quat_rot_curves[index].evaluate(actual_frame)
                mat_rot = rotation.to_matrix().to_4x4()

            # SCALE MATRIX
            if len(sca_curves) > 0:
                scale = Vector((1.0, 1.0, 1.0))
                for index in range(3):
                    if index in sca_curves:
                        scale[index] = sca_curves[index].evaluate(actual_frame)

                        if scale[index] < 0:
                            lprint(str("E Negative scale detected on bone %r:\n\t   "
                                       "(Action: %r, keyframe no.: %s, SCS Animation: %r)."),
                                   (bone_name, action.name, actual_frame, scs_animation.name))
                            invalid_data = True

                # NOTE(review): Y and Z scale components are swapped here -
                # presumably part of the Blender->SCS axis conversion; confirm
                # against _convert_utils.scs_to_blend_matrix()
                mat_sca = Matrix()
                mat_sca[0] = (scale[0], 0, 0, 0)
                mat_sca[1] = (0, scale[2], 0, 0)
                mat_sca[2] = (0, 0, scale[1], 0)
                mat_sca[3] = (0, 0, 0, 1)

            # BLENDER FRAME MATRIX
            mat = mat_loc * mat_rot * mat_sca

            # SCALE REMOVAL MATRIX
            # (cancels the bone's scaled rest scale so it is not applied twice)
            rest_location, rest_rotation, rest_scale = bone_rest_mat.decompose()
            # print(' BONES rest_scale: %s' % str(rest_scale))
            rest_scale = rest_scale * export_scale
            scale_removal_matrix = Matrix()
            scale_removal_matrix[0] = (1.0 / rest_scale[0], 0, 0, 0)
            scale_removal_matrix[1] = (0, 1.0 / rest_scale[1], 0, 0)
            scale_removal_matrix[2] = (0, 0, 1.0 / rest_scale[2], 0)
            scale_removal_matrix[3] = (0, 0, 0, 1)

            # SCALE MATRIX
            scale_matrix = Matrix.Scale(export_scale, 4)

            # COMPUTE SCS FRAME MATRIX
            frame_matrix = (parent_bone_rest_mat.inverted() * _convert_utils.scs_to_blend_matrix().inverted() *
                            scale_matrix.inverted() * bone_rest_mat * mat * scale_removal_matrix.inverted())

            # print('          actual_frame: %s - value: %s' % (actual_frame, frame_matrix))
            # every sampled frame gets a uniform time step (animation length spread evenly)
            timings_stream.append(("__time__", scs_animation.length / total_frames), )
            matrices_stream.append(("__matrix__", frame_matrix.transposed()), )
            actual_frame += anim_export_step

        anim_timing = ("_TIME", timings_stream)
        anim_matrices = ("_MATRIX", matrices_stream)
        bone_anim = (anim_timing, anim_matrices)
        bone_data = (bone_name, bone_anim)
        bone_channels.append(bone_data)

    # return empty bone channels if data are invalid
    if invalid_data:
        return []

    return bone_channels
コード例 #16
0
ファイル: pip.py プロジェクト: coola69/BlenderTools
def _fill_nav_curve_sections(nav_point_list, offset_matrix):
    """Fills up (navigation) "Curve" sections.

    :param nav_point_list: navigation point locator objects to build curves from
    :type nav_point_list: list of bpy.types.Object
    :param offset_matrix: Matrix for specifying of pivot point
    :type offset_matrix: mathutils.Matrix
    :return: list of "Curve" sections placed at their curve indices
    :rtype: list of SectionData
    """

    _INDEX = "index"
    _START = "start"
    _END = "end"
    _PREV_CURVES = "prev_curves"
    _NEXT_CURVES = "next_curves"

    curves_dict = _connections_group_wrapper.get_curves(nav_point_list, _INDEX, _START, _END, _NEXT_CURVES, _PREV_CURVES)

    # prepare empty placeholder sections so each curve section can later be placed
    # directly at its final index.
    # FIX: use a comprehension instead of sequence multiplication, which would put
    # the very same SectionData instance into every list slot
    sections = [_SectionData("Dummy") for _ in range(len(curves_dict))]
    for connection_key in curves_dict.keys():

        curve = curves_dict[connection_key]

        # resolve start/end locator objects by their stored names
        start_loc = bpy.data.objects[curve[_START]]
        end_loc = bpy.data.objects[curve[_END]]

        section = _SectionData("Curve")
        section.props.append(("Index", curve[_INDEX]))
        section.props.append(("Name", _name_utils.tokenize_name(curve[_START])))
        section.props.append(("", ""))
        section.props.append(("#", "Flags:"))
        section.props.append(("Flags", _get_np_flags(start_loc, end_loc)))
        section.props.append(("", ""))
        section.props.append(("LeadsToNodes", 0))  # TODO SIMON: make it happen when you know what it means

        speed_limit = _get_np_speed_limit(start_loc)
        if speed_limit:
            section.props.append(("", ""))
            # FIX: write the actual speed limit value; it was computed but dropped from the prop
            section.props.append(("SpeedLimit", ["&", (speed_limit, )]))

        traffic_light = _get_np_traffic_light_id(start_loc)
        if traffic_light != -1:
            section.props.append(("", ""))
            # FIX: write the actual traffic light ID; it was computed but dropped from the prop
            section.props.append(("TrafficLightID", traffic_light))

        section.props.append(("", ""))
        section.props.append(("NextCurves", ["ii", _get_np_prev_next_curves(curves_dict, curve[_NEXT_CURVES], _INDEX)]))
        section.props.append(("PrevCurves", ["ii", _get_np_prev_next_curves(curves_dict, curve[_PREV_CURVES], _INDEX)]))
        section.props.append(("", ""))
        section.props.append(("Length", ["&", (_get_np_length(start_loc, end_loc), )]))
        section.props.append(("", ""))
        bezier_section = _SectionData("Bezier")

        # START NODE
        start_section = _SectionData("Start")
        loc = _convert_utils.convert_location_to_scs(start_loc.location, offset_matrix)
        start_section.props.append(("Position", ["&&", loc]))
        # locator forward axis (local +Y) converted into SCS space
        direction_vector = _convert_utils.scs_to_blend_matrix().inverted() * (start_loc.matrix_world.to_quaternion() * Vector((0, 1, 0)))
        start_section.props.append(("Direction", ["&&", (direction_vector[0], direction_vector[1], direction_vector[2])]))

        # END NODE
        end_section = _SectionData("End")
        loc = _convert_utils.convert_location_to_scs(end_loc.location, offset_matrix)
        end_section.props.append(("Position", ["&&", loc]))
        direction_vector = _convert_utils.scs_to_blend_matrix().inverted() * (end_loc.matrix_world.to_quaternion() * Vector((0, 1, 0)))
        end_section.props.append(("Direction", ["&&", (direction_vector[0], direction_vector[1], direction_vector[2])]))

        bezier_section.sections.append(start_section)
        bezier_section.sections.append(end_section)
        section.sections.append(bezier_section)

        # make sure that current section is placed on right place
        sections[curve[_INDEX]] = section

    return sections
コード例 #17
0
def _get_geometry_dict(root_object, obj, mesh, offset_matrix, material_dict,
                       used_materials, bone_list, scs_globals):
    """Builds per-material piece geometry (deduplicated vertices and faces) and skinning data from the given mesh.

    :param root_object: SCS Root Object
    :type root_object: bpy.types.Object
    :param obj: Actual Object data
    :type obj: bpy.types.Object
    :param mesh: Object's Mesh data
    :type mesh: bpy.types.Mesh
    :param offset_matrix: Matrix for specifying of pivot point
    :type offset_matrix: Matrix
    :param material_dict: Materials used in current Object
    :type material_dict: dict
    :param used_materials: All Materials used in 'SCS Game Object'
    :type used_materials: list
    :param bone_list: Bones for export
    :type bone_list: list
    :param scs_globals: SCS Tools Globals
    :type scs_globals: GlobalSCSProps
    :return: Piece dictionary, Skin list, Skin weight count, Skin clone count
    :rtype: list
    """
    # Create Index => Material Dictionary...
    index_material_dict = _create_index_material_dict(material_dict)

    # Create Piece (Material) Dictionary...
    # One piece per distinct local material index; each piece stores:
    #   'material_index' - index into 'used_materials'
    #   'hash_dict'      - mesh vertex index -> list of exported vertex indices
    #   'verts'          - exported vertices as (common_data, unique_data) pairs
    #   'faces'          - faces as lists of exported vertex indices
    piece_dict = {}
    for material in material_dict:
        material_i = loc_material_i = material_dict[material][
            0]  # Eliminates multiple Material assignment

        # Set correct Material index for Piece
        for used_mat_i, used_mat in enumerate(used_materials):
            if material == used_mat:
                material_i = used_mat_i

        # print(' "%s" = %s => %s' % (str(material), str(material_dict[material]), str(loc_material_i)))

        piece_dict[loc_material_i] = {}
        piece_dict[loc_material_i]['material_index'] = material_i
        piece_dict[loc_material_i]['hash_dict'] = {}
        piece_dict[loc_material_i]['verts'] = []
        piece_dict[loc_material_i]['faces'] = []

    # DATA LAYERS
    uv_layers = mesh.tessface_uv_textures
    vc_layers = mesh.tessface_vertex_colors
    vertex_groups = obj.vertex_groups

    # Make sure if everything is recalculated...
    mesh.calc_tessface()
    mesh.calc_normals()
    mesh.calc_normals_split()

    for tessface in mesh.tessfaces:
        material = index_material_dict[tessface.material_index]
        loc_material_i = material_dict[material][
            0]  # Eliminates multiple Material assignment
        # print('tessface [%s]: %s' % (str(tessface.index).rjust(2, "0"), str(tessface.vertices)))
        # print(' "%s" = %s => %s' % (str(material), str(material_dict[material]), str(material_i)))
        face_verts = []

        for facevert_i, facevert in enumerate(tessface.vertices):
            facevert_common_data = {}
            facevert_unique_data = {}
            # POSITION
            facevert_co = offset_matrix.inverted(
            ) * obj.matrix_world * mesh.vertices[facevert].co
            # print('  facevert [%s] - position: %s' % (str(facevert).rjust(2, "0"), str(facevert_co)))
            scs_position = (Matrix.Scale(scs_globals.export_scale, 4) *
                            _convert_utils.scs_to_blend_matrix().inverted() *
                            facevert_co)
            facevert_common_data['_POSITION'] = scs_position

            # NORMAL
            facevert_no = mesh.vertices[facevert].normal
            # print('    normal: %s' % str(facevert_no))
            scs_normal = (_convert_utils.scs_to_blend_matrix().inverted() *
                          offset_matrix.inverted() * obj.matrix_world *
                          facevert_no)

            # normalize normal vector and set it
            facevert_common_data['_NORMAL'] = Vector(scs_normal).normalized()

            # VERTEX GROUPS - for every vertices we need weight for every existing vertex group
            # print('    vg : %s len(%i)' % (str(vertex_groups), len(vertex_groups)))
            vert_grp = {}
            unused_groups = vertex_groups.keys(
            )  # store all groups that are not yet used
            for vg_elem in mesh.vertices[facevert].groups:
                vert_grp[vertex_groups[vg_elem.group].name] = vg_elem.weight
                unused_groups.remove(
                    vertex_groups[vg_elem.group].name)  # remove it from unused
            for group_name in unused_groups:  # for all unused groups that are left write weight 0
                vert_grp[group_name] = 0.0

            # print('      vert_grp: %s' % str(vert_grp))
            facevert_common_data['_VG'] = vert_grp

            # VERTEX UV LAYERS
            # print('    uv: %s len(%i)' % (str(uv_layers), len(uv_layers)))
            uv_lyr = {}
            if scs_globals.active_uv_only:
                uv = uv_layers.active.data[tessface.index].uv[facevert_i][:]
                scs_uv = _convert_utils.change_to_scs_uv_coordinates(uv)
                uv_lyr[uv_layers.active.name] = Vector(scs_uv)
            else:
                for layer_i, layer in enumerate(uv_layers.keys()):
                    uv = uv_layers[layer_i].data[
                        tessface.index].uv[facevert_i][:]
                    # print('      uv%i: %r %s' % (layer_i, layer, str(uv)))
                    scs_uv = _convert_utils.change_to_scs_uv_coordinates(uv)
                    uv_lyr[layer] = Vector(scs_uv)
            # print('      uv_lyr: %s' % str(uv_lyr))
            facevert_unique_data['_UV'] = uv_lyr

            # VERTEX COLOR LAYERS
            # NOTE: In current PIM version 5 there should be only one Color layer present,
            # but I'll leave the multilayer solution here just for the case it could
            # be used in the future.
            active_vc_only = True
            # print('    vc : %s len(%i)' % (str(vc_layers), len(vc_layers)))
            # NOTE: 'vc' is only assigned for facevert_i 0..3, which covers the
            # tris/quads produced by mesh tessellation
            if vc_layers and scs_globals.export_vertex_color:
                vc_lyr = {}
                if active_vc_only:
                    if facevert_i == 0:
                        vc = vc_layers.active.data[tessface.index].color1[:]
                    elif facevert_i == 1:
                        vc = vc_layers.active.data[tessface.index].color2[:]
                    elif facevert_i == 2:
                        vc = vc_layers.active.data[tessface.index].color3[:]
                    elif facevert_i == 3:
                        vc = vc_layers.active.data[tessface.index].color4[:]
                    if scs_globals.export_vertex_color_type == 'rgbda':
                        vc = (vc[0], vc[1], vc[2], 1.0)
                    # print('      vc%i: %r %s' % (layer_i, layer, str(vc)))
                    vc_lyr[vc_layers.active.name] = Vector(vc)
                else:
                    for layer_i, layer in enumerate(vc_layers.keys()):
                        if facevert_i == 0:
                            vc = vc_layers[layer_i].data[
                                tessface.index].color1[:]
                        elif facevert_i == 1:
                            vc = vc_layers[layer_i].data[
                                tessface.index].color2[:]
                        elif facevert_i == 2:
                            vc = vc_layers[layer_i].data[
                                tessface.index].color3[:]
                        elif facevert_i == 3:
                            vc = vc_layers[layer_i].data[
                                tessface.index].color4[:]
                        if scs_globals.export_vertex_color_type == 'rgbda':
                            vc = (vc[0], vc[1], vc[2], 1.0)
                        # print('      vc%i: %r %s' % (layer_i, layer, str(vc)))
                        vc_lyr[layer] = Vector(vc)
                # print('      vc_lyr: %s' % str(vc_lyr))
                facevert_unique_data['_RGBA'] = vc_lyr

            # DATA EVALUATION
            # Deduplicate: reuse an already exported vertex when one with identical
            # per-face data (UVs, vertex colors) exists for this mesh vertex;
            # otherwise clone it as a new exported vertex.
            # print(' *** (%s) *** (%s) ***' % (str(facevert).rjust(3, "0"), str(tessface.vertices[facevert_i]).rjust(3, "0")))
            if facevert in piece_dict[loc_material_i]['hash_dict']:
                for vert in piece_dict[loc_material_i]['hash_dict'][facevert]:
                    # print(' %s > UD: %s' % (str(facevert).rjust(3, "0"), str(facevert_unique_data)))
                    vert_facevert_unique_data = piece_dict[loc_material_i][
                        'verts'][vert][1]
                    # print(' %s < UD: %s' % (str(facevert).rjust(3, "0"), str(vert_facevert_unique_data)))
                    if facevert_unique_data == vert_facevert_unique_data:
                        # print(' %s O MATCH!' % str(facevert).rjust(3, "0"))
                        face_verts.append(vert)
                        break
                else:  # for-else: runs only when no existing clone matched
                    # print(' %s - NOT in existing record...' % str(facevert).rjust(3, "0"))
                    new_vert_index = len(piece_dict[loc_material_i]['verts'])
                    piece_dict[loc_material_i]['hash_dict'][facevert].append(
                        new_vert_index
                    )  # Add the new vertex index to "hash_dict" record
                    face_verts.append(
                        new_vert_index)  # Add the vertex to the actual face
                    piece_dict[loc_material_i]['verts'].append(
                        (facevert_common_data, facevert_unique_data
                         ))  # Create a new vertex to 'verts'
            else:
                # print(' %s | NOT a record... %s' % (str(facevert).rjust(3, "0"), str(facevert_common_data['_POSITION'][:])))
                new_vert_index = len(piece_dict[loc_material_i]['verts'])
                piece_dict[loc_material_i]['hash_dict'][facevert] = [
                    new_vert_index
                ]  # Create a new "hash_dict" record
                face_verts.append(
                    new_vert_index)  # Add vertex to the actual face
                piece_dict[loc_material_i]['verts'].append(
                    (facevert_common_data,
                     facevert_unique_data))  # Create a new vertex to 'verts'

        # FACES
        face_verts = face_verts[::
                                -1]  # NOTE: Vertex order is swapped here to make the face normal flipped! Needs a check if it is right.
        if len(face_verts) == 4:  # Simple triangulation...
            piece_dict[loc_material_i]['faces'].append(face_verts[:3])
            piece_dict[loc_material_i]['faces'].append(
                (face_verts[2], face_verts[3], face_verts[0]))
        else:
            piece_dict[loc_material_i]['faces'].append(face_verts)

    # BONE NAME LIST
    # Skinning data is built only for animated SCS roots: for every mesh vertex
    # collect its scaled position, its bone weights and its exported vertex clones.
    skin_list = []
    skin_weights_cnt = skin_clones_cnt = 0
    if bone_list:
        # if _get_scs_globals().export_anim_file == 'anim':
        if root_object.scs_props.scs_root_animated == 'anim':
            bone_name_list = []
            for bone_i, bone in enumerate(bone_list):
                bone_name_list.append(bone.name)
                # print('%s bone: %r' % (str(bone_i).rjust(3, "0"), str(bone.name)))

            # SKINNING DATA
            for vert in mesh.vertices:

                # SKIN VECTOR
                # position = Vector(mesh.vertices[vert_i].co)
                scs_position = (
                    Matrix.Scale(scs_globals.export_scale, 4) *
                    _convert_utils.scs_to_blend_matrix().inverted() *
                    obj.matrix_world * vert.co)
                # NOTE: Vertex position - when exported from Maya the value get scaled *10, but it is old & unused in game engine anyway.
                skin_vector = scs_position
                # print('    vertex: %s: %s' % (str(vert.index), str(piece_dict[material_i]['hash_dict'][vert.index])))

                # SKIN WEIGHTS
                skin_weights = []
                for vg_elem in vert.groups:
                    group_name = vertex_groups[vg_elem.group].name
                    if group_name in bone_name_list:
                        # print('      group: %r - %s' % (group_name, str(group.weight)))
                        bone_index = _get_vertex_group_index(
                            bone_name_list, group_name)
                        skin_weights.append((bone_index, vg_elem.weight))
                        skin_weights_cnt += 1
                    else:
                        print(
                            'WARNING - Vertex Group %r is not a bone weight...'
                            % group_name
                        )  # TODO: Maybe handle this case? Useful?
                skin_clones = []

                # SKIN CLONES
                # NOTE(review): 'loc_material_i' carries its value from the last
                # tessface loop iteration here - presumably all pieces share the
                # same hash_dict entries per mesh vertex; confirm
                if loc_material_i is not None:
                    for v in piece_dict[loc_material_i]['hash_dict'][
                            vert.index]:
                        skin_clones.append((0, v))
                        skin_clones_cnt += 1
                else:
                    print(
                        'ERROR - Material indices incorrect! (get_geometry_dict())'
                    )

                skin_list.append((skin_vector, skin_weights, skin_clones))

    mesh.free_normals_split()

    # print(' ** piece_dict: %s' % str(piece_dict))
    # print('')

    return piece_dict, skin_list, skin_weights_cnt, skin_clones_cnt
コード例 #18
0
ファイル: pim.py プロジェクト: P-casper1/BlenderTools
def _get_geometry_dict(root_object, obj, mesh, offset_matrix, material_dict, used_materials, bone_list, scs_globals):
    """Builds per-material piece geometry (deduplicated vertices and faces) and skinning data from the given mesh.

    :param root_object: SCS Root Object
    :type root_object: bpy.types.Object
    :param obj: Actual Object data
    :type obj: bpy.types.Object
    :param mesh: Object's Mesh data
    :type mesh: bpy.types.Mesh
    :param offset_matrix: Matrix for specifying of pivot point
    :type offset_matrix: Matrix
    :param material_dict: Materials used in current Object
    :type material_dict: dict
    :param used_materials: All Materials used in 'SCS Game Object'
    :type used_materials: list
    :param bone_list: Bones for export
    :type bone_list: list
    :param scs_globals: SCS Tools Globals
    :type scs_globals: GlobalSCSProps
    :return: Piece dictionary, Skin list, Skin weight count, Skin clone count
    :rtype: list
    """
    # Create Index => Material Dictionary...
    index_material_dict = _create_index_material_dict(material_dict)

    # Create Piece (Material) Dictionary...
    # One piece per distinct local material index; each piece stores:
    #   'material_index' - index into 'used_materials'
    #   'hash_dict'      - mesh vertex index -> list of exported vertex indices
    #   'verts'          - exported vertices as (common_data, unique_data) pairs
    #   'faces'          - faces as lists of exported vertex indices
    piece_dict = {}
    for material in material_dict:
        material_i = loc_material_i = material_dict[material][0]  # Eliminates multiple Material assignment

        # Set correct Material index for Piece
        for used_mat_i, used_mat in enumerate(used_materials):
            if material == used_mat:
                material_i = used_mat_i

        # print(' "%s" = %s => %s' % (str(material), str(material_dict[material]), str(loc_material_i)))

        piece_dict[loc_material_i] = {}
        piece_dict[loc_material_i]['material_index'] = material_i
        piece_dict[loc_material_i]['hash_dict'] = {}
        piece_dict[loc_material_i]['verts'] = []
        piece_dict[loc_material_i]['faces'] = []

    # DATA LAYERS
    uv_layers = mesh.tessface_uv_textures
    vc_layers = mesh.tessface_vertex_colors
    vertex_groups = obj.vertex_groups

    # Make sure if everything is recalculated...
    mesh.calc_tessface()
    mesh.calc_normals()
    mesh.calc_normals_split()

    for tessface in mesh.tessfaces:
        material = index_material_dict[tessface.material_index]
        loc_material_i = material_dict[material][0]  # Eliminates multiple Material assignment
        # print('tessface [%s]: %s' % (str(tessface.index).rjust(2, "0"), str(tessface.vertices)))
        # print(' "%s" = %s => %s' % (str(material), str(material_dict[material]), str(material_i)))
        face_verts = []

        for facevert_i, facevert in enumerate(tessface.vertices):
            facevert_common_data = {}
            facevert_unique_data = {}
            # POSITION
            facevert_co = offset_matrix.inverted() * obj.matrix_world * mesh.vertices[facevert].co
            # print('  facevert [%s] - position: %s' % (str(facevert).rjust(2, "0"), str(facevert_co)))
            scs_position = (Matrix.Scale(scs_globals.export_scale, 4) *
                            _convert_utils.scs_to_blend_matrix().inverted() *
                            facevert_co)
            facevert_common_data['_POSITION'] = scs_position

            # NORMAL
            facevert_no = mesh.vertices[facevert].normal
            # print('    normal: %s' % str(facevert_no))
            scs_normal = (_convert_utils.scs_to_blend_matrix().inverted() *
                          offset_matrix.inverted() *
                          obj.matrix_world *
                          facevert_no)

            # normalize normal vector and set it
            facevert_common_data['_NORMAL'] = Vector(scs_normal).normalized()

            # VERTEX GROUPS - for every vertices we need weight for every existing vertex group
            # print('    vg : %s len(%i)' % (str(vertex_groups), len(vertex_groups)))
            vert_grp = {}
            unused_groups = vertex_groups.keys()  # store all groups that are not yet used
            for vg_elem in mesh.vertices[facevert].groups:
                vert_grp[vertex_groups[vg_elem.group].name] = vg_elem.weight
                unused_groups.remove(vertex_groups[vg_elem.group].name)  # remove it from unused
            for group_name in unused_groups:  # for all unused groups that are left write weight 0
                vert_grp[group_name] = 0.0

            # print('      vert_grp: %s' % str(vert_grp))
            facevert_common_data['_VG'] = vert_grp

            # VERTEX UV LAYERS
            # print('    uv: %s len(%i)' % (str(uv_layers), len(uv_layers)))
            uv_lyr = {}
            if scs_globals.active_uv_only:
                uv = uv_layers.active.data[tessface.index].uv[facevert_i][:]
                scs_uv = _convert_utils.change_to_scs_uv_coordinates(uv)
                uv_lyr[uv_layers.active.name] = Vector(scs_uv)
            else:
                for layer_i, layer in enumerate(uv_layers.keys()):
                    uv = uv_layers[layer_i].data[tessface.index].uv[facevert_i][:]
                    # print('      uv%i: %r %s' % (layer_i, layer, str(uv)))
                    scs_uv = _convert_utils.change_to_scs_uv_coordinates(uv)
                    uv_lyr[layer] = Vector(scs_uv)
            # print('      uv_lyr: %s' % str(uv_lyr))
            facevert_unique_data['_UV'] = uv_lyr

            # VERTEX COLOR LAYERS
            # NOTE: In current PIM version 5 there should be only one Color layer present,
            # but I'll leave the multilayer solution here just for the case it could
            # be used in the future.
            active_vc_only = True
            # print('    vc : %s len(%i)' % (str(vc_layers), len(vc_layers)))
            # NOTE: 'vc' is only assigned for facevert_i 0..3, which covers the
            # tris/quads produced by mesh tessellation
            if vc_layers and scs_globals.export_vertex_color:
                vc_lyr = {}
                if active_vc_only:
                    if facevert_i == 0:
                        vc = vc_layers.active.data[tessface.index].color1[:]
                    elif facevert_i == 1:
                        vc = vc_layers.active.data[tessface.index].color2[:]
                    elif facevert_i == 2:
                        vc = vc_layers.active.data[tessface.index].color3[:]
                    elif facevert_i == 3:
                        vc = vc_layers.active.data[tessface.index].color4[:]
                    if scs_globals.export_vertex_color_type == 'rgbda':
                        vc = (vc[0], vc[1], vc[2], 1.0)
                    # print('      vc%i: %r %s' % (layer_i, layer, str(vc)))
                    vc_lyr[vc_layers.active.name] = Vector(vc)
                else:
                    for layer_i, layer in enumerate(vc_layers.keys()):
                        if facevert_i == 0:
                            vc = vc_layers[layer_i].data[tessface.index].color1[:]
                        elif facevert_i == 1:
                            vc = vc_layers[layer_i].data[tessface.index].color2[:]
                        elif facevert_i == 2:
                            vc = vc_layers[layer_i].data[tessface.index].color3[:]
                        elif facevert_i == 3:
                            vc = vc_layers[layer_i].data[tessface.index].color4[:]
                        if scs_globals.export_vertex_color_type == 'rgbda':
                            vc = (vc[0], vc[1], vc[2], 1.0)
                        # print('      vc%i: %r %s' % (layer_i, layer, str(vc)))
                        vc_lyr[layer] = Vector(vc)
                # print('      vc_lyr: %s' % str(vc_lyr))
                facevert_unique_data['_RGBA'] = vc_lyr

            # DATA EVALUATION
            # Deduplicate: reuse an already exported vertex when one with identical
            # per-face data (UVs, vertex colors) exists for this mesh vertex;
            # otherwise clone it as a new exported vertex.
            # print(' *** (%s) *** (%s) ***' % (str(facevert).rjust(3, "0"), str(tessface.vertices[facevert_i]).rjust(3, "0")))
            if facevert in piece_dict[loc_material_i]['hash_dict']:
                for vert in piece_dict[loc_material_i]['hash_dict'][facevert]:
                    # print(' %s > UD: %s' % (str(facevert).rjust(3, "0"), str(facevert_unique_data)))
                    vert_facevert_unique_data = piece_dict[loc_material_i]['verts'][vert][1]
                    # print(' %s < UD: %s' % (str(facevert).rjust(3, "0"), str(vert_facevert_unique_data)))
                    if facevert_unique_data == vert_facevert_unique_data:
                        # print(' %s O MATCH!' % str(facevert).rjust(3, "0"))
                        face_verts.append(vert)
                        break
                else:  # for-else: runs only when no existing clone matched
                    # print(' %s - NOT in existing record...' % str(facevert).rjust(3, "0"))
                    new_vert_index = len(piece_dict[loc_material_i]['verts'])
                    piece_dict[loc_material_i]['hash_dict'][facevert].append(new_vert_index)  # Add the new vertex index to "hash_dict" record
                    face_verts.append(new_vert_index)  # Add the vertex to the actual face
                    piece_dict[loc_material_i]['verts'].append((facevert_common_data, facevert_unique_data))  # Create a new vertex to 'verts'
            else:
                # print(' %s | NOT a record... %s' % (str(facevert).rjust(3, "0"), str(facevert_common_data['_POSITION'][:])))
                new_vert_index = len(piece_dict[loc_material_i]['verts'])
                piece_dict[loc_material_i]['hash_dict'][facevert] = [new_vert_index]  # Create a new "hash_dict" record
                face_verts.append(new_vert_index)  # Add vertex to the actual face
                piece_dict[loc_material_i]['verts'].append((facevert_common_data, facevert_unique_data))  # Create a new vertex to 'verts'

        # FACES
        face_verts = face_verts[::-1]  # NOTE: Vertex order is swapped here to make the face normal flipped! Needs a check if it is right.
        if len(face_verts) == 4:  # Simple triangulation...
            piece_dict[loc_material_i]['faces'].append(face_verts[:3])
            piece_dict[loc_material_i]['faces'].append((face_verts[2], face_verts[3], face_verts[0]))
        else:
            piece_dict[loc_material_i]['faces'].append(face_verts)

    # BONE NAME LIST
    # Skinning data is built only for animated SCS roots: for every mesh vertex
    # collect its scaled position, its bone weights and its exported vertex clones.
    skin_list = []
    skin_weights_cnt = skin_clones_cnt = 0
    if bone_list:
        # if _get_scs_globals().export_anim_file == 'anim':
        if root_object.scs_props.scs_root_animated == 'anim':
            bone_name_list = []
            for bone_i, bone in enumerate(bone_list):
                bone_name_list.append(bone.name)
                # print('%s bone: %r' % (str(bone_i).rjust(3, "0"), str(bone.name)))

            # SKINNING DATA
            for vert in mesh.vertices:

                # SKIN VECTOR
                # position = Vector(mesh.vertices[vert_i].co)
                scs_position = (Matrix.Scale(scs_globals.export_scale, 4) *
                                _convert_utils.scs_to_blend_matrix().inverted() *
                                obj.matrix_world *
                                vert.co)
                # NOTE: Vertex position - when exported from Maya the value get scaled *10, but it is old & unused in game engine anyway.
                skin_vector = scs_position
                # print('    vertex: %s: %s' % (str(vert.index), str(piece_dict[material_i]['hash_dict'][vert.index])))

                # SKIN WEIGHTS
                skin_weights = []
                for vg_elem in vert.groups:
                    group_name = vertex_groups[vg_elem.group].name
                    if group_name in bone_name_list:
                        # print('      group: %r - %s' % (group_name, str(group.weight)))
                        bone_index = _get_vertex_group_index(bone_name_list, group_name)
                        skin_weights.append((bone_index, vg_elem.weight))
                        skin_weights_cnt += 1
                    else:
                        print('WARNING - Vertex Group %r is not a bone weight...' % group_name)  # TODO: Maybe handle this case? Useful?
                skin_clones = []

                # SKIN CLONES
                # NOTE(review): 'loc_material_i' carries its value from the last
                # tessface loop iteration here - presumably all pieces share the
                # same hash_dict entries per mesh vertex; confirm
                if loc_material_i is not None:
                    for v in piece_dict[loc_material_i]['hash_dict'][vert.index]:
                        skin_clones.append((0, v))
                        skin_clones_cnt += 1
                else:
                    print('ERROR - Material indices incorrect! (get_geometry_dict())')

                skin_list.append((skin_vector, skin_weights, skin_clones))

    mesh.free_normals_split()

    # print(' ** piece_dict: %s' % str(piece_dict))
    # print('')

    return piece_dict, skin_list, skin_weights_cnt, skin_clones_cnt
コード例 #19
0
def _create_piece(
    context,
    name,
    mesh_vertices,
    mesh_normals,
    mesh_tangents,
    mesh_rgb,
    mesh_rgba,
    mesh_scalars,
    object_skinning,
    mesh_uv,
    mesh_uv_aliases,
    mesh_tuv,
    mesh_faces,
    mesh_face_materials,
    mesh_edges,
    terrain_points_trans,
    materials_data,
):
    """Create a Blender object for one PIM version 7 (exchange format) piece.

    Builds the mesh (vertices, faces, sharp edges, UV layers, vertex color
    layers), applies imported split normals when enabled, links the object to
    the active collection and fills vertex groups for scalar layers, terrain
    points and skinning. Finally material slots are created and assigned to
    the faces.

    :param context: Blender context used for progress reporting and object linking
    :param name: name for the new mesh/object
    :param mesh_vertices: vertex positions in SCS coordinates
    :param mesh_normals: per polygon per loop normals; read only when
        "import_use_normals" is enabled
    :param mesh_tangents: unused
    :param mesh_rgb: vertex color layers (RGB), keyed by layer name
    :param mesh_rgba: vertex color layers (RGBA), keyed by layer name
    :param mesh_scalars: scalar layers imported as vertex groups
    :param object_skinning: skinning data dictionary keyed by piece name
    :param mesh_uv: UV layers dictionary keyed by layer name
    :param mesh_uv_aliases: texture aliases per material index and UV layer
    :param mesh_tuv: unused
    :param mesh_faces: faces as lists of vertex indices
    :param mesh_face_materials: material index for every face
    :param mesh_edges: sharp edges as vertex index pairs
    :param terrain_points_trans: terrain points transitional structure
    :param materials_data: materials data list indexed by material index
    :return: the created and linked Blender object
    """
    import re  # hoisted out of the per-material alias loop below

    # NOTE: "mesh_normals" is intentionally NOT declared unused (unlike
    # tangents/tuv), because it is read when "import_use_normals" is enabled.
    handle_unused_arg(__file__, _create_piece.__name__, "mesh_tangents",
                      mesh_tangents)
    handle_unused_arg(__file__, _create_piece.__name__, "mesh_tuv", mesh_tuv)

    context.window_manager.progress_begin(0.0, 1.0)
    context.window_manager.progress_update(0)

    import_scale = _get_scs_globals().import_scale
    mesh = bpy.data.meshes.new(name)

    # COORDINATES TRANSFORMATION
    transformed_mesh_vertices = [
        _convert_utils.change_to_scs_xyz_coordinates(vec, import_scale)
        for vec in mesh_vertices
    ]

    context.window_manager.progress_update(0.1)

    # VISUALISE IMPORTED NORMALS (DEBUG)
    # NOTE: NOT functional for PIM version 7 since mesh normals are not provided in per vertex fashion!
    # visualise_normals(name, transformed_mesh_vertices, mesh_normals, import_scale)

    bm = bmesh.new()

    # VERTICES
    _mesh_utils.bm_make_vertices(bm, transformed_mesh_vertices)
    context.window_manager.progress_update(0.2)

    # FACES
    mesh_faces, back_faces = _mesh_utils.bm_make_faces(bm, mesh_faces, [])
    context.window_manager.progress_update(0.3)

    # SHARP EDGES
    # normalize index pairs into a set once, so each bmesh edge is tested
    # in O(1) instead of scanning the whole edge list twice per edge
    sharp_edges = {(min(e[0], e[1]), max(e[0], e[1])) for e in mesh_edges}
    for edge in bm.edges:
        vert0_i = edge.verts[0].index
        vert1_i = edge.verts[1].index
        if (min(vert0_i, vert1_i), max(vert0_i, vert1_i)) in sharp_edges:
            edge.smooth = False
    context.window_manager.progress_update(0.4)

    # UV LAYERS
    if mesh_uv:
        for uv_layer_name in mesh_uv:
            _mesh_utils.bm_make_uv_layer(7, bm, mesh_faces, uv_layer_name,
                                         mesh_uv[uv_layer_name])
    context.window_manager.progress_update(0.5)

    # VERTEX COLOR
    mesh_rgb_final = {}
    if mesh_rgba:
        mesh_rgb_final.update(mesh_rgba)
    if mesh_rgb:
        mesh_rgb_final.update(mesh_rgb)

    for vc_layer_name in mesh_rgb_final:
        # colors are stored doubled, thus values are halved while searching
        # for the multiplier that normalizes them back into 0..1 range
        max_value = mesh_rgb_final[vc_layer_name][0][0][0] / 2

        for vc_entry in mesh_rgb_final[vc_layer_name]:
            for v_i in vc_entry:
                for i, value in enumerate(v_i):
                    if max_value < value / 2:
                        max_value = value / 2

        if max_value > mesh.scs_props.vertex_color_multiplier:
            mesh.scs_props.vertex_color_multiplier = max_value

        _mesh_utils.bm_make_vc_layer(7, bm, vc_layer_name,
                                     mesh_rgb_final[vc_layer_name],
                                     mesh.scs_props.vertex_color_multiplier)

    bm.to_mesh(mesh)
    mesh.update()
    bm.free()

    # NORMALS - has to be applied after bmesh creation as they are set directly to mesh
    if _get_scs_globals().import_use_normals:

        mesh.create_normals_split()

        # first set normals directly to loops
        for poly_i, poly in enumerate(mesh.polygons):

            for poly_loop_i, loop_i in enumerate(poly.loop_indices):

                curr_n = _convert_utils.scs_to_blend_matrix() @ Vector(
                    mesh_normals[poly_i][poly_loop_i])
                mesh.loops[loop_i].normal[:] = curr_n

        # then we have to go trough very important step they say,
        # as without validation we get wrong result for some normals
        mesh.validate(clean_customdata=False
                      )  # *Very* important to not remove lnors here!

        # set polygons to use smooth representation
        mesh.polygons.foreach_set("use_smooth", [True] * len(mesh.polygons))

        # finally fill clnors from loops normals and apply them (taken from official Blenders scripts)
        clnors = array.array('f', [0.0] * (len(mesh.loops) * 3))
        mesh.loops.foreach_get("normal", clnors)
        mesh.normals_split_custom_set(tuple(zip(*(iter(clnors), ) * 3)))
        mesh.use_auto_smooth = True

        mesh.free_normals_split()
    else:
        # set polygons to use smooth representation only
        mesh.polygons.foreach_set("use_smooth", [True] * len(mesh.polygons))

    context.window_manager.progress_update(0.6)

    # Create object out of mesh and link it to active layer collection.
    obj = bpy.data.objects.new(mesh.name, mesh)
    obj.scs_props.object_identity = obj.name
    obj.location = (0.0, 0.0, 0.0)
    context.view_layer.active_layer_collection.collection.objects.link(obj)

    obj.select_set(True)
    bpy.context.view_layer.objects.active = obj

    # SCALAR LAYERS
    if mesh_scalars:
        for sca_layer_name in mesh_scalars:
            vertex_group = obj.vertex_groups.new(name=sca_layer_name)
            for val_i, val in enumerate(mesh_scalars[sca_layer_name]):
                val = float(val[0])
                if val != 0.0:
                    vertex_group.add([val_i], val, "ADD")
    context.window_manager.progress_update(0.7)

    # TERRAIN POINTS (VERTEX GROUPS)
    for vertex_i, vertex_pos in enumerate(mesh_vertices):

        tp_entries = terrain_points_trans.get(vertex_pos)

        # add current vertex to all combinations of variants/nodes
        # from found terrain points transitional structures
        for tp_entry in tp_entries:

            # first 6 chars in vertex group name will represent variant index
            # this way we will be able to identify variant during vertex groups
            # cleanup if this vertex will be set to multiple variants
            vg_name = str(tp_entry.variant_i).zfill(
                6) + _OP_consts.TerrainPoints.vg_name_prefix + str(
                    tp_entry.node_i)

            if vg_name not in obj.vertex_groups:
                obj.vertex_groups.new(name=vg_name)

            vertex_group = obj.vertex_groups[vg_name]
            vertex_group.add([vertex_i], 1.0, "REPLACE")

    # SKINNING (VERTEX GROUPS)
    if object_skinning:
        if name in object_skinning:
            for vertex_group_name in object_skinning[name]:
                vertex_group = obj.vertex_groups.new(name=vertex_group_name)
                for vertex_i, vertex in enumerate(
                        object_skinning[name][vertex_group_name]):
                    weight = object_skinning[name][vertex_group_name][vertex]
                    if weight != 0.0:
                        vertex_group.add([vertex], weight, "ADD")
        else:
            lprint('\nE Missing skin group %r! Skipping...', name)

    # ADD EDGE SPLIT MODIFIER
    bpy.ops.object.shade_smooth()
    bpy.ops.object.modifier_add(type='EDGE_SPLIT')
    bpy.context.object.modifiers["EdgeSplit"].use_edge_angle = False
    bpy.context.object.modifiers["EdgeSplit"].name = "ES_" + name

    # MATERIALS
    used_mat_indices = set(mesh_face_materials)
    context.window_manager.progress_update(0.8)

    # ADD MATERIALS TO SLOTS
    mat_index_to_mat_slot_map = {}
    if len(materials_data) > 0:
        for used_mat_idx in used_mat_indices:
            material_name = materials_data[used_mat_idx][0]
            bpy.ops.object.material_slot_add()  # Add a material slot
            last_slot = len(obj.material_slots) - 1

            # now as we created slot and we know index of it, write down indices of material slots to dictionary
            # for later usage by assigning faces to proper slots
            mat_index_to_mat_slot_map[used_mat_idx] = last_slot

            obj.material_slots[last_slot].material = bpy.data.materials[
                material_name]  # Assign a material to the slot

            # NOTE: we are setting texture aliases only first time to avoid duplicates etc.
            # So we assume that pieces which are using same material will also have same uv aliases alignment
            used_material = bpy.data.materials[material_name]
            if "scs_tex_aliases" not in used_material:

                alias_mapping = {}
                for uv_lay in mesh_uv_aliases[used_mat_idx]:

                    for alias in mesh_uv_aliases[used_mat_idx][uv_lay]:
                        # the trailing number in the alias identifies the
                        # texture slot it maps to the current UV layer
                        numbers = re.findall(r"\d+", alias)
                        alias_mapping[numbers[-1]] = uv_lay

                used_material["scs_tex_aliases"] = alias_mapping

    mesh = obj.data
    context.window_manager.progress_update(0.9)

    # APPLY MATERIAL SLOT INDICES TO FACES
    # guarded so an empty materials data list doesn't raise KeyError
    if mat_index_to_mat_slot_map:
        for face_i, face in enumerate(mesh.polygons):
            face.material_index = mat_index_to_mat_slot_map[
                mesh_face_materials[face_i]]
    context.window_manager.progress_update(1.0)

    return obj
Code example #20 (score: 0)
def load(filepath, armature, get_only=False):
    """Load bones from a SCS PIS (skeleton) file and apply them to an armature.

    Parses the PIS container from given file path and either returns the
    parsed bones data only (used when importing PIA animations from panel)
    or rebuilds bone parenting, head/tail/roll transformations and initial
    bone scales on the given armature in edit mode.

    :param filepath: path to the PIS file to load
    :type filepath: str
    :param armature: Blender armature object to apply bones onto; required
        unless "get_only" is set
    :param get_only: when True only parsed bones data is returned and no
        armature is touched
    :type get_only: bool
    :return: bones dictionary (bone name -> [parent name, matrix, (children)])
        or ({'CANCELLED'}, None) when no armature was provided
    """
    scs_globals = _get_scs_globals()
    import_scale = scs_globals.import_scale
    bone_import_scale = scs_globals.import_bone_scale
    connected_bones = scs_globals.import_connected_bones

    print("\n************************************")
    print("**      SCS PIS Importer          **")
    print("**      (c)2014 SCS Software      **")
    print("************************************\n")

    # scene = context.scene
    ind = '    '
    pis_container = _pix_container.get_data_from_file(filepath, ind)

    # TEST PRINTOUTS
    # ind = '  '
    # for section in pis_container:
    # print('SEC.: "%s"' % section.type)
    # for prop in section.props:
    # print('%sProp: %s' % (ind, prop))
    # for data in section.data:
    # print('%sdata: %s' % (ind, data))
    # for sec in section.sections:
    # print_section(sec, ind)
    # print('\nTEST - Source: "%s"' % pis_container[0].props[1][1])
    # print('')

    # TEST EXPORT
    # path, file = os.path.splitext(filepath)
    # export_filepath = str(path + '_reex' + file)
    # result = pix_write.write_data(pis_container, export_filepath, ind)
    # if result == {'FINISHED'}:
    # Print(dump_level, '\nI Test export succesful! The new file:\n  "%s"', export_filepath)
    # else:
    # Print(dump_level, '\nE Test export failed! File:\n  "%s"', export_filepath)

    # LOAD HEADER
    '''
    NOTE: skipped for now as no data needs to be readed
    format_version, source, f_type, f_name, source_filename, author = _get_header(pis_container)
    '''

    # LOAD GLOBALS
    '''
    NOTE: skipped for now as no data needs to be readed
    # bone_count = _get_global(pis_container)
    '''

    # LOAD BONES
    # bones: name -> [parent bone name ("" for roots), SCS matrix]
    bones = _get_bones(pis_container)

    if get_only:  # only return bones (used when importing PIA from panel)
        return bones

    # PROVIDE AN ARMATURE
    if not armature:
        lprint('\nE No Armature for file "%s"!',
               (os.path.basename(filepath), ))
        return {'CANCELLED'}, None

    # bones can only be created/edited while the armature is in edit mode
    bpy.context.view_layer.objects.active = armature
    bpy.ops.object.mode_set(mode='EDIT')

    # CONNECTED BONES - Add information about all children...
    # appends a third entry (children names list) to every bones[...] value
    if connected_bones:
        for bone in bones:
            # print('  bone: %r - %r\n%s\n' % (bone, bones[bone][0], str(bones[bone][1])))
            children = []
            for item in bones:
                if bone == bones[item][0]:
                    children.append(item)
            bones[bone].append(children)
            # print('  bone: %r - %r\n%s\n' % (bone, bones[bone][0], str(bones[bone][2])))

    for bone_i, bone in enumerate(armature.data.bones):
        # print('----- bone: %r ------------------------------' % bone.name)

        # SET PARENT
        if bones[bone.name][0] != "":  # if bone has parent...
            # print('  %r --> %r' % (bone.name, bones[bone.name][0]))
            # armature.data.edit_bones[bone.name].use_connect = False
            armature.data.edit_bones[
                bone.name].parent = armature.data.edit_bones[bones[bone.name]
                                                             [0]]
            # else:
            # print('  %r - NO parent' % bone.name)

        # COMPUTE BONE TRANSFORMATION
        # PIS stores the matrix transposed; convert it into Blender space
        matrix = bones[bone.name][1]
        bone_matrix = _convert_utils.scs_to_blend_matrix() @ matrix.transposed(
        )
        axis, angle = _convert_utils.mat3_to_vec_roll(bone_matrix)
        # print(' * %r - angle: %s' % (bone.name, angle))

        # SET BONE TRANSFORMATION
        # tail is placed "bone_import_scale" away from head along bone axis
        armature.data.edit_bones[bone.name].head = bone_matrix.to_translation(
        ).to_3d() * import_scale
        armature.data.edit_bones[bone.name].tail = (
            armature.data.edit_bones[bone.name].head +
            Vector(axis).normalized() * bone_import_scale * import_scale)
        armature.data.edit_bones[bone.name].roll = angle

        # save initial bone scaling to use it in calculation when importing PIA animations
        # NOTE: bones after import always have scale of 1:
        # 1. because edit bones don't have scale, just tail and head
        # 2. because any scaling in pose bones will be overwritten by animation itself
        armature.pose.bones[bone.name][
            _BONE_consts.init_scale_key] = bone_matrix.to_scale()

        # CONNECTED BONES
        # NOTE: Doesn't work as expected! Disabled for now in UI.
        # Child bones gets position offset and there is also a problem when translation
        # is animated, for which connected bones doesn't allow.
        if connected_bones:
            if len(bones[bone.name][2]) == 1:
                matrix = bones[bones[bone.name][2][0]][1]
                bone_matrix = _convert_utils.scs_to_blend_matrix(
                ) @ matrix.transposed()
                armature.data.edit_bones[
                    bone.name].tail = bone_matrix.to_translation().to_3d(
                    ) * import_scale
                armature.data.edit_bones[bones[bone.name][2]
                                         [0]].use_connect = True

    bpy.ops.object.mode_set(mode='OBJECT')
    armature.data.show_axes = True
    armature.display_type = 'WIRE'

    # WARNING PRINTOUTS
    # if piece_count < 0: Print(dump_level, '\nW More Pieces found than were declared!')
    # if piece_count > 0: Print(dump_level, '\nW Some Pieces not found, but were declared!')
    # if dump_level > 1: print('')

    print("************************************")
    return bones
Code example #21 (score: 0)
def _create_piece(context,
                  preview_model,
                  name,
                  ob_material,
                  mesh_vertices,
                  mesh_normals,
                  mesh_tangents,
                  mesh_rgb,
                  mesh_rgba,
                  mesh_scalars,
                  object_skinning,
                  mesh_uv,
                  mesh_tuv,
                  mesh_triangles,
                  materials_data,
                  points_to_weld_list,
                  terrain_points_trans,
                  ignore_backfaces=False):
    """Create a Blender object for one PIM version 5 piece.

    Builds the mesh (vertices, triangles, UV and vertex color layers),
    applies imported split normals when enabled, links the object to active
    collection, fills terrain point and skinning vertex groups, removes
    orphan vertices left from smoothing reconstruction and assigns a
    material with texture alias data. If back-face triangles were detected
    a second piece is recursively created and joined into the original.

    :param context: Blender context used for progress reporting and object linking
    :param preview_model: True when importing a preview model (no material assigned)
    :param name: name for the new mesh/object
    :param ob_material: material index into materials_data for this piece
    :param mesh_vertices: vertex positions in SCS coordinates
    :param mesh_normals: per vertex normals; read only when "import_use_normals" is enabled
    :param mesh_tangents: unused
    :param mesh_rgb: vertex color layers (RGB), keyed by layer name
    :param mesh_rgba: vertex color layers (RGBA); preferred over RGB when present
    :param mesh_scalars: unused
    :param object_skinning: skinning data dictionary keyed by piece name
    :param mesh_uv: UV layers dictionary with "data" and optional "aliases" entries
    :param mesh_tuv: unused
    :param mesh_triangles: triangles as lists of vertex indices
    :param materials_data: materials data list indexed by material index
    :param points_to_weld_list: mapping of duplicated vertex indices to original ones
    :param terrain_points_trans: terrain points transitional structure
    :param ignore_backfaces: when True back-face triangles are not processed
        (used by the recursive call to avoid infinite recursion)
    :return: the created and linked Blender object
    """
    import re  # hoisted out of the per-alias loop below

    handle_unused_arg(__file__, _create_piece.__name__, "mesh_tangents",
                      mesh_tangents)
    handle_unused_arg(__file__, _create_piece.__name__, "mesh_scalars",
                      mesh_scalars)
    handle_unused_arg(__file__, _create_piece.__name__, "mesh_tuv", mesh_tuv)

    context.window_manager.progress_begin(0.0, 1.0)
    context.window_manager.progress_update(0)

    import_scale = _get_scs_globals().import_scale

    mesh = bpy.data.meshes.new(name)

    # COORDINATES TRANSFORMATION
    transformed_mesh_vertices = [
        _convert_utils.change_to_scs_xyz_coordinates(vec, import_scale)
        for vec in mesh_vertices
    ]

    context.window_manager.progress_update(0.1)

    # VISUALISE IMPORTED NORMALS (DEBUG)
    # visualise_normals(name, transformed_mesh_vertices, mesh_normals, import_scale)

    # MESH CREATION
    bm = bmesh.new()

    # VERTICES
    _mesh_utils.bm_make_vertices(bm, transformed_mesh_vertices)
    context.window_manager.progress_update(0.2)

    # FACES
    mesh_triangles, back_triangles = _mesh_utils.bm_make_faces(
        bm, mesh_triangles, points_to_weld_list)
    context.window_manager.progress_update(0.3)

    # UV LAYERS
    if mesh_uv:
        for uv_layer_name in mesh_uv:
            _mesh_utils.bm_make_uv_layer(5, bm, mesh_triangles, uv_layer_name,
                                         mesh_uv[uv_layer_name]["data"])
    context.window_manager.progress_update(0.4)

    # VERTEX COLOR
    # RGBA layers take precedence over plain RGB ones
    if mesh_rgba:
        mesh_rgb_final = mesh_rgba
    elif mesh_rgb:
        mesh_rgb_final = mesh_rgb
    else:
        mesh_rgb_final = []

    for vc_layer_name in mesh_rgb_final:
        # colors are stored doubled, thus values are halved while searching
        # for the multiplier that normalizes them back into 0..1 range
        max_value = mesh_rgb_final[vc_layer_name][0][0] / 2

        for vc_entry in mesh_rgb_final[vc_layer_name]:
            for i, value in enumerate(vc_entry):
                if max_value < value / 2:
                    max_value = value / 2

        if max_value > mesh.scs_props.vertex_color_multiplier:
            mesh.scs_props.vertex_color_multiplier = max_value

        _mesh_utils.bm_make_vc_layer(5, bm, vc_layer_name,
                                     mesh_rgb_final[vc_layer_name],
                                     mesh.scs_props.vertex_color_multiplier)

    context.window_manager.progress_update(0.5)

    bm.to_mesh(mesh)
    mesh.update()
    bm.free()

    # NORMALS - has to be applied after bmesh creation as they are set directly to mesh
    if _get_scs_globals().import_use_normals:

        mesh.create_normals_split()

        # first set normals directly to loops
        for loop in mesh.loops:
            curr_n = _convert_utils.scs_to_blend_matrix() @ Vector(
                mesh_normals[loop.vertex_index])
            loop.normal[:] = curr_n

        # then we have to go trough very important step they say,
        # as without validation we get wrong result for some normals
        mesh.validate(clean_customdata=False
                      )  # *Very* important to not remove lnors here!

        # set polygons to use smooth representation
        mesh.polygons.foreach_set("use_smooth", [True] * len(mesh.polygons))

        # finally fill clnors from loops normals and apply them (taken from official Blenders scripts)
        clnors = array.array('f', [0.0] * (len(mesh.loops) * 3))
        mesh.loops.foreach_get("normal", clnors)
        mesh.normals_split_custom_set(tuple(zip(*(iter(clnors), ) * 3)))
        mesh.use_auto_smooth = True

        mesh.free_normals_split()
    else:
        # set polygons to use smooth representation only
        mesh.polygons.foreach_set("use_smooth", [True] * len(mesh.polygons))

    context.window_manager.progress_update(0.6)

    # Create object out of mesh and link it to active layer collection.
    obj = bpy.data.objects.new(mesh.name, mesh)
    obj.scs_props.object_identity = obj.name
    obj.location = (0.0, 0.0, 0.0)
    context.view_layer.active_layer_collection.collection.objects.link(obj)

    obj.select_set(True)
    bpy.context.view_layer.objects.active = obj

    context.window_manager.progress_update(0.7)

    context.window_manager.progress_update(0.8)

    # TERRAIN POINTS (VERTEX GROUPS)
    for vertex_i, vertex_pos in enumerate(mesh_vertices):

        tp_entries = terrain_points_trans.get(vertex_pos)

        # add current vertex to all combinations of variants/nodes
        # from found terrain points transitional structures
        for tp_entry in tp_entries:

            # first 6 chars in vertex group name will represent variant index
            # this way we will be able to identify variant during vertex groups
            # cleanup if this vertex will be set to multiple variants
            vg_name = str(tp_entry.variant_i).zfill(
                6) + _OP_consts.TerrainPoints.vg_name_prefix + str(
                    tp_entry.node_i)

            if vg_name not in obj.vertex_groups:
                obj.vertex_groups.new(name=vg_name)

            vertex_group = obj.vertex_groups[vg_name]
            vertex_group.add([vertex_i], 1.0, "REPLACE")

    # SKINNING (VERTEX GROUPS)
    if object_skinning:
        if name in object_skinning:
            for vertex_group_name in object_skinning[name]:
                vertex_group = obj.vertex_groups.new(name=vertex_group_name)
                for vertex_i, vertex in enumerate(
                        object_skinning[name][vertex_group_name]):
                    weight = object_skinning[name][vertex_group_name][vertex]
                    if weight != 0.0:
                        # redirect welded duplicates to their original vertex
                        if vertex in points_to_weld_list:
                            vertex = points_to_weld_list[vertex]
                        vertex_group.add([vertex], weight, "ADD")
        else:
            lprint('\nE Missing skin group %r! Skipping...', name)

    context.window_manager.progress_update(0.9)

    # DELETE ORPHAN VERTICES (LEFT IN THE GEOMETRY FROM SMOOTHING RECONSTRUCTION)
    if points_to_weld_list:
        bm = bmesh.new()
        bm.from_mesh(mesh)

        bm.verts.ensure_lookup_table()
        for vert_i in points_to_weld_list.keys():
            bm.verts[vert_i].select_set(True)

        verts = [v for v in bm.verts if v.select]
        if verts:
            bmesh.ops.delete(bm, geom=verts, context='VERTS')

        # APPLYING BMESH TO MESH
        bm.to_mesh(mesh)
        bm.free()

    context.window_manager.progress_update(1.0)

    # MATERIAL
    if len(materials_data) > 0 and not preview_model:
        # Assign a material to the last slot
        used_material = bpy.data.materials[materials_data[ob_material][0]]
        obj.data.materials.append(used_material)

        # NOTE: we are setting texture aliases only first time to avoid duplicates etc.
        # So we assume that pieces which are using same material will also have same uv aliases alignment
        if "scs_tex_aliases" not in used_material:

            alias_mapping = {}
            for uv_lay in mesh_uv:
                if "aliases" in mesh_uv[uv_lay]:

                    for alias in mesh_uv[uv_lay]["aliases"]:
                        # the trailing number in the alias identifies the
                        # texture slot it maps to the current UV layer
                        numbers = re.findall(r"\d+", alias)
                        alias_mapping[numbers[-1]] = uv_lay

            used_material["scs_tex_aliases"] = alias_mapping

    context.window_manager.progress_end()

    # if back triangles are present, then create new object with
    # back triangles and merge it to original
    if len(back_triangles) > 0 and not ignore_backfaces:

        back_obj = _create_piece(context,
                                 preview_model,
                                 "back_" + name,
                                 ob_material,
                                 mesh_vertices,
                                 mesh_normals,
                                 mesh_tangents,
                                 mesh_rgb,
                                 mesh_rgba,
                                 mesh_scalars,
                                 object_skinning,
                                 mesh_uv,
                                 mesh_tuv,
                                 back_triangles,
                                 materials_data,
                                 points_to_weld_list,
                                 terrain_points_trans,
                                 ignore_backfaces=True)

        lprint(
            "W Found %s back face(s) without it's own vertices on object %r, additional vertices were added!",
            (len(back_obj.data.polygons), obj.name))

        # creation of back face object used all original vertices
        # for proper index accessing during binding all of the data blocks to vertices.
        # Because of that we have to remove vertices which are not really used
        # in back faces mesh, so called "loose" vertices
        back_obj.data = _mesh_utils.bm_delete_loose(back_obj.data)

        # finally join back object with original
        override = context.copy()
        override["active_object"] = obj
        override["selected_editable_objects"] = (obj, back_obj)
        bpy.ops.object.join(override)

    return obj
Code example #22 (score: 0)
def _fill_piece_sections_7(root_object, object_list, bone_list, scene, vg_list,
                           used_materials, offset_matrix, scs_globals,
                           output_type):
    """
    Fills up "Piece" sections for file version 7 (exchange format).
    :param object_list:
    :param bone_list:
    :param scene:
    :param vg_list:
    :param used_materials:
    :param offset_matrix:
    :return:
    """
    piece_sections = []  # container for all "Pieces"
    global_vertex_count = 0
    global_face_count = 0
    global_edge_count = 0
    piece_index_obj = {}
    skin_list = []
    skin_weights_cnt = 0
    skin_clones_cnt = 0
    for piece_index, obj in enumerate(object_list):
        mat_world = obj.matrix_world
        piece_index_obj[piece_index] = obj
        object_materials = _object_utils.get_object_materials(
            obj)  # get object materials

        # Get Object's Mesh data and list of temporarily disabled "Edge Split" Modifiers...
        mesh = _object_utils.get_mesh(obj)

        # VERTICES
        # TEMPORAL VERTEX STREAM DATA FORMAT:
        # example: ('_POSITION', [(0.0, 0.0, 0.0), (0.0, 0.0, 0.0), ...])
        # example: ('_SCALAR', [(0.0), (0.0), ...])

        stream_pos = ('_POSITION', [])
        # stream_nor = ('_NORMAL', [])
        # if scs_globals.export_vertex_groups:
        vg_layers_for_export, streams_vg = _object_utils.get_stream_vgs(
            obj)  # get Vertex Group layers (SCALARs)

        vertex_stream_count = 1
        vertex_streams = []
        stream_vg_container = []
        # print('bone_list: %s' % str(bone_list.keys))
        for vert_i, vert in enumerate(mesh.vertices):
            position = offset_matrix.inverted() * mesh.vertices[vert_i].co
            # scs_position = io_utils.change_to_scs_xyz_coordinates(mat_world * position, scs_globals.export_scale) ## POSITION
            scs_position = Matrix.Scale(
                scs_globals.export_scale,
                4) * _convert_utils.scs_to_blend_matrix().inverted(
                ) * mat_world * position  # POSITION
            stream_pos[1].append(scs_position)
            # stream_nor[1].append(io_utils.get_vertex_normal(mesh, vert_i))              # NORMAL
            if scs_globals.export_vertex_groups:
                if streams_vg:
                    vg_i = 0
                    for vg in vg_layers_for_export:  # weights (even unused) all vertices become 0.0
                        if vg.name in vg_list:
                            vg_weight = (_object_utils.get_vertex_group(
                                vg, vert_i), )  # SCALARs
                            key = str("_SCALAR" + str(vg_i))
                            if vg_i == len(stream_vg_container) and len(
                                    stream_vg_container) != len(
                                        vg_layers_for_export):
                                stream_vg_container.append(
                                    (vg.name, key, [vg_weight]))
                            else:
                                stream_vg_container[vg_i][2].append(vg_weight)
                            vg_i += 1

                            # SKINNING (OLD STYLE FOR PIM VER. 7)
            # if scs_globals.export_anim_file == 'anim':
            if root_object.scs_props.scs_root_animated == 'anim':
                skin_vector = scs_position  # NOTE: Vertex position - from Maya scaled *10 (old & unused in game engine)
                skin_weights = []
                for group in vert.groups:
                    for vg in vg_layers_for_export:
                        if vg.index == group.group:
                            for bone_i, bone in enumerate(bone_list):
                                if vg.name == bone.name:
                                    skin_weights.append((bone_i, group.weight))
                                    skin_weights_cnt += 1
                                    # print('vert: %i - group: %r (%i) - %s' % (vert_i, vg.name, bone_i, str(group.weight)))
                                    break
                            break
                skin_clones = ((piece_index, vert_i), )
                skin_clones_cnt += 1
                skin_list.append((skin_vector, skin_weights, skin_clones))

                # ##
        vertex_streams.append(stream_pos)
        # print('\nstream_pos:\n  %s' % str(stream_pos))
        # vertex_streams.append(stream_nor)
        # print('\nstream_nor:\n  %s' % str(stream_nor))
        for vg_stream in stream_vg_container:
            vertex_stream_count += 1
            vertex_streams.append(vg_stream)
            # print('\nvg_stream:\n  %s' % str(vg_stream))
        # FACES
        # TEMPORAL FACE STREAM DATA FORMAT:
        # faces = [face_data, face_data, ...]
        # face_data = (material, [vertex indices], [face-vertex streams])
        # face_streams = [('_UV0', [(0.0, 0.0), (0.0, 0.0), ...]), ...]
        # example: [(0, [0, 1, 2], [('_UV0', [(0.0, 0.0), (0.0, 0.0)]), ('_UV0', [(0.0, 0.0), (0.0, 0.0)])]), (), ...]

        faces = []
        face_cnt = 0
        uv_layers_exists = 1
        rgb_layers_exists = 1
        # print('used_materials: %s' % str(used_materials))
        for face_i, face in enumerate(mesh.polygons):
            face_cnt += 1
            streams_uv = None
            streams_vcolor = None
            if uv_layers_exists:
                requested_uv_layers, streams_uv = _mesh_utils.get_stream_uvs(
                    mesh, scs_globals.active_uv_only)  # get UV layers (UVs)
                if not streams_uv:
                    uv_layers_exists = 0
            if rgb_layers_exists and scs_globals.export_vertex_color:
                if scs_globals.export_vertex_color_type_7 == 'rgb':
                    rgb_all_layers, streams_vcolor = _mesh_utils.get_stream_rgb(
                        mesh, output_type,
                        False)  # get Vertex Color layers (RGB)
                elif scs_globals.export_vertex_color_type_7 == 'rgbda':
                    rgb_all_layers, streams_vcolor = _mesh_utils.get_stream_rgb(
                        mesh, output_type, True)  # get Vertex Color layers (
                    # RGBdA)
                else:
                    streams_vcolor = None  # TODO: Alpha from another layer
                if not streams_vcolor:
                    rgb_layers_exists = 0
            mat_index = used_materials.index(
                object_materials[face.material_index])
            # print('face-mat_index: %s; object_materials[f-m_i]: %s; used_materials.index(o_m[f-m_i]): %s' % (face.material_index,
            # object_materials[face.material_index], used_materials.index(object_materials[face.material_index])))
            face_verts = []
            for vert in face.vertices:
                face_verts.append(vert)
            face_verts = face_verts[::-1]  # revert vertex order in face
            # print('face_verts: %s' % str(face_verts))
            face_streams = []
            stream_fv_nor = ("_NORMAL", [])
            stream_fv_uv_container = []
            stream_fv_rgb_container = []
            stream_names = {}
            for loop_index in range(face.loop_start,
                                    face.loop_start + face.loop_total):
                # edge_index = mesh.loops[loop_index].edge_index
                vert_index = mesh.loops[loop_index].vertex_index
                # print('face i.: %s\tloop i.: %s\tedge i.: %s\tvert i.: %s' % (face_i, loop_index, edge_index, vert_index))
                # Normals
                stream_fv_nor[1].append(
                    offset_matrix.inverted() *
                    Vector(_mesh_utils.get_vertex_normal(mesh, vert_index)))
                # UV Layers
                if streams_uv:
                    for uv_i, uv_l in enumerate(requested_uv_layers):
                        uv_values = _mesh_utils.get_face_vertex_uv(
                            uv_l.data, loop_index, uv_i)
                        key = str("_UV" + str(uv_i))
                        if uv_i == len(stream_fv_uv_container
                                       ) and len(stream_fv_uv_container
                                                 ) != len(requested_uv_layers):
                            stream_fv_uv_container.append((key, [uv_values]))
                            stream_names[key] = uv_l.name
                        else:
                            stream_fv_uv_container[uv_i][1].append(uv_values)
                            # Vertex Color Layers (RGB)
                if scs_globals.export_vertex_color:
                    if streams_vcolor:
                        for rgb_i, rgb_l in enumerate(rgb_all_layers):
                            if scs_globals.export_vertex_color_type_7 == 'rgb':
                                rgb_values = _mesh_utils.get_face_vertex_color(
                                    rgb_l.data, loop_index, False, rgb_i)
                                key = str("_RGB" + str(rgb_i))
                            elif scs_globals.export_vertex_color_type_7 == 'rgbda':
                                rgb_values = _mesh_utils.get_face_vertex_color(
                                    rgb_l.data, loop_index, True, rgb_i)
                                key = str("_RGBA" + str(rgb_i))
                            else:
                                streams_vcolor = None  # TODO: Alpha from another layer
                            if rgb_i == len(stream_fv_rgb_container
                                            ) and len(stream_fv_rgb_container
                                                      ) != len(rgb_all_layers):
                                stream_fv_rgb_container.append(
                                    (key, [rgb_values]))
                                stream_names[key] = rgb_l.name
                            else:
                                stream_fv_rgb_container[rgb_i][1].append(
                                    rgb_values)
                                # Data Assembling
            face_streams.append(stream_fv_nor)
            for stream in stream_fv_uv_container:
                face_streams.append(stream)
            for stream in stream_fv_rgb_container:
                face_streams.append(stream)
            face_data = (mat_index, face_verts, face_streams)
            faces.append(face_data)

        # SHARP EDGES
        sharp_edges = []
        for edge in mesh.edges:
            if edge.use_edge_sharp:
                sharp_edges.append(edge.vertices[:])

        # BUILD FACE SECTION
        faces_container = _SectionData("Faces")
        faces_container.props.append(("StreamCount", len(faces[0][2])))
        for face_i, face_data in enumerate(faces):
            face_container = _SectionData("Face")
            face_container.props.append(("Index", face_i))
            face_container.props.append(("Material", face_data[0]))
            face_container.props.append(("Indices", face_data[1]))
            for stream in face_data[2]:
                if stream[0] in stream_names:
                    face_container.sections.append(
                        _pix_container.make_vertex_stream(
                            stream, stream_names[stream[0]]))
                else:
                    face_container.sections.append(
                        _pix_container.make_vertex_stream(stream))
            faces_container.sections.append(face_container)

        # BUILD PIECE SECTION
        piece_section = _SectionData("Piece")
        piece_section.props.append(("Index", piece_index))
        global_vertex_count += len(stream_pos[1])
        piece_section.props.append(("VertexCount", len(stream_pos[1])))
        global_face_count += face_cnt
        piece_section.props.append(("FaceCount", face_cnt))
        global_edge_count += len(sharp_edges)
        piece_section.props.append(("EdgeCount", len(sharp_edges)))
        piece_section.props.append(("StreamCount", vertex_stream_count))
        piece_section.props.append(("", ""))
        # vertex streams...
        for stream in vertex_streams:
            if len(stream) == 3:
                # print('\nstream:\n  %s' % str(stream))
                piece_section.sections.append(
                    _pix_container.make_vertex_stream(stream[1:], stream[0]))
            else:
                piece_section.sections.append(
                    _pix_container.make_vertex_stream(stream))
                # faces...
        piece_section.sections.append(faces_container)

        # BUILD AND STORE EDGE SECTION
        if sharp_edges:
            edges_container = _SectionData("Edges")
            for edge in sharp_edges:
                edges_container.data.append(edge)
            piece_section.sections.append(edges_container)

        # STORE PIECE SECTION
        piece_sections.append(piece_section)  # add a piece
    return piece_sections, global_vertex_count, global_face_count, global_edge_count, piece_index_obj, skin_list, skin_weights_cnt, skin_clones_cnt
コード例 #23
0
def _get_bone_channels(scs_root_obj, armature, scs_animation, action,
                       export_scale):
    """Take an armature and an action and return per-bone animation channels.

    For every bone the action animates, the location/rotation/scale F-Curves
    are sampled at each export step between the animation's start and end
    frames and combined into one SCS-space matrix per sampled frame.

    NOTE: uses the Blender pre-2.80 mathutils API where '*' is the matrix
    product; multiplication order throughout this function is significant.

    :param scs_root_obj: SCS root object; its world matrix defines the export space
    :param armature: armature object owning the data bones and pose bones
    :param scs_animation: SCS animation settings (provides anim_start, anim_end, length)
    :param action: Blender action whose F-Curves are sampled
    :param export_scale: global export scale factor
    :return: bone channels list, or an empty list if invalid data (negative
        scale) were encountered; structure example:
        [("Bone", [("_TIME", [0.1, 0.1, 0.1, ...]), ("_MATRIX", [...])])]
    """
    bone_channels = []
    frame_start = scs_animation.anim_start
    frame_end = scs_animation.anim_end
    anim_export_step = action.scs_props.anim_export_step
    # number of exported samples; used below to emit a constant time delta per sample
    total_frames = (frame_end - frame_start) / anim_export_step

    # armature matrix stores transformation of armature object against scs root
    # and has to be added to all bones as they hold only armature-space transformations
    armature_mat = scs_root_obj.matrix_world.inverted() * armature.matrix_world

    invalid_data = False  # flag to indicate invalid data state
    curves_per_bone = OrderedDict(
    )  # store all the curves we are interested in per bone names

    # collect curves: bone name -> {curve type -> {array index -> fcurve}}
    for bone in armature.data.bones:
        for fcurve in action.fcurves:

            # check if curve belongs to bone (data path carries the quoted bone name)
            if '["' + bone.name + '"]' in fcurve.data_path:

                data_path = fcurve.data_path
                array_index = fcurve.array_index

                # classify the curve by its data path suffix
                if data_path.endswith("location"):
                    curve_type = "location"
                elif data_path.endswith("rotation_euler"):
                    curve_type = "euler_rotation"
                elif data_path.endswith("rotation_quaternion"):
                    curve_type = "quat_rotation"
                elif data_path.endswith("scale"):
                    curve_type = "scale"
                else:
                    curve_type = None

                # write only recognized curves
                if curve_type is not None:
                    if bone.name not in curves_per_bone:
                        curves_per_bone[bone.name] = {
                            "location": {},
                            "euler_rotation": {},
                            "quat_rotation": {},
                            "scale": {}
                        }

                    curves_per_bone[
                        bone.name][curve_type][array_index] = fcurve

    for bone_name, bone_curves in curves_per_bone.items():

        bone = armature.data.bones[bone_name]
        pose_bone = armature.pose.bones[bone_name]
        loc_curves = bone_curves["location"]
        euler_rot_curves = bone_curves["euler_rotation"]
        quat_rot_curves = bone_curves["quat_rotation"]
        sca_curves = bone_curves["scale"]

        # rest pose of this bone expressed in SCS root space
        bone_rest_mat = armature_mat * bone.matrix_local
        if bone.parent:
            parent_bone_rest_mat = (
                Matrix.Scale(export_scale, 4) *
                _convert_utils.scs_to_blend_matrix().inverted() *
                armature_mat * bone.parent.matrix_local)
        else:
            parent_bone_rest_mat = Matrix()

        # GO THROUGH FRAMES
        actual_frame = frame_start
        timings_stream = []
        matrices_stream = []
        while actual_frame <= frame_end:
            # identity defaults; replaced only when matching curves exist
            mat_loc = Matrix()
            mat_rot = Matrix()
            mat_sca = Matrix()

            # LOCATION MATRIX
            # indices without a curve keep Vector()'s default 0.0 component
            if len(loc_curves) > 0:
                location = Vector()
                for index in range(3):
                    if index in loc_curves:
                        location[index] = loc_curves[index].evaluate(
                            actual_frame)
                mat_loc = Matrix.Translation(location)

            # ROTATION MATRIX (euler curves take precedence over quaternion ones)
            if len(euler_rot_curves) > 0:
                rotation = Euler()
                for index in range(3):
                    if index in euler_rot_curves:
                        rotation[index] = euler_rot_curves[index].evaluate(
                            actual_frame)
                mat_rot = Euler(rotation, pose_bone.rotation_mode).to_matrix(
                ).to_4x4()  # calc rotation by pose rotation mode

            elif len(quat_rot_curves) > 0:
                rotation = Quaternion()
                for index in range(4):
                    if index in quat_rot_curves:
                        rotation[index] = quat_rot_curves[index].evaluate(
                            actual_frame)
                mat_rot = rotation.to_matrix().to_4x4()

            # SCALE MATRIX
            if len(sca_curves) > 0:
                scale = Vector((1.0, 1.0, 1.0))
                for index in range(3):
                    if index in sca_curves:
                        scale[index] = sca_curves[index].evaluate(actual_frame)

                        # negative scale can not be exported; report and
                        # invalidate the whole result ("E " marks an error in lprint)
                        if scale[index] < 0:
                            lprint(
                                str("E Negative scale detected on bone %r:\n\t   "
                                    "(Action: %r, keyframe no.: %s, SCS Animation: %r)."
                                    ), (bone_name, action.name, actual_frame,
                                        scs_animation.name))
                            invalid_data = True

                # NOTE(review): Y and Z scale components are swapped here,
                # presumably to match the SCS<->Blender axis conversion — confirm
                mat_sca = Matrix()
                mat_sca[0] = (scale[0], 0, 0, 0)
                mat_sca[1] = (0, scale[2], 0, 0)
                mat_sca[2] = (0, 0, scale[1], 0)
                mat_sca[3] = (0, 0, 0, 1)

            # BLENDER FRAME MATRIX
            mat = mat_loc * mat_rot * mat_sca

            # SCALE REMOVAL MATRIX
            # (inverse of the bone's rest scale multiplied by the export scale)
            rest_location, rest_rotation, rest_scale = bone_rest_mat.decompose(
            )
            # print(' BONES rest_scale: %s' % str(rest_scale))
            rest_scale = rest_scale * export_scale
            scale_removal_matrix = Matrix()
            scale_removal_matrix[0] = (1.0 / rest_scale[0], 0, 0, 0)
            scale_removal_matrix[1] = (0, 1.0 / rest_scale[1], 0, 0)
            scale_removal_matrix[2] = (0, 0, 1.0 / rest_scale[2], 0)
            scale_removal_matrix[3] = (0, 0, 0, 1)

            # SCALE MATRIX
            scale_matrix = Matrix.Scale(export_scale, 4)

            # COMPUTE SCS FRAME MATRIX (multiplication order is significant)
            frame_matrix = (parent_bone_rest_mat.inverted() *
                            _convert_utils.scs_to_blend_matrix().inverted() *
                            scale_matrix.inverted() * bone_rest_mat * mat *
                            scale_removal_matrix.inverted())

            # print('          actual_frame: %s - value: %s' % (actual_frame, frame_matrix))
            # constant time delta per sample; matrices are paired 1:1 with timings
            timings_stream.append(
                ("__time__", scs_animation.length / total_frames), )
            matrices_stream.append(("__matrix__", frame_matrix.transposed()), )
            actual_frame += anim_export_step

        anim_timing = ("_TIME", timings_stream)
        anim_matrices = ("_MATRIX", matrices_stream)
        bone_anim = (anim_timing, anim_matrices)
        bone_data = (bone_name, bone_anim)
        bone_channels.append(bone_data)

    # return empty bone channels if data are invalid
    if invalid_data:
        return []

    return bone_channels
コード例 #24
0
ファイル: pim.py プロジェクト: P-casper1/BlenderTools
def _fill_piece_sections_7(root_object, object_list, bone_list, scene, vg_list, used_materials, offset_matrix, scs_globals, output_type):
    """
    Fills up "Piece" sections for file version 7 (exchange format).

    Builds one "Piece" section per object: vertex streams (positions, optional
    vertex-group scalars), per-face streams (normals, UVs, vertex colors),
    sharp-edge lists and, for animated roots, old-style skinning data.

    :param root_object: SCS root object; its scs_root_animated property decides
        whether old-style skinning data are collected
    :param object_list: mesh objects to export, one "Piece" per object
    :param bone_list: bones used to resolve vertex-group based skin weights
    :param scene: Blender scene (not referenced in this function's body)
    :param vg_list: names of vertex groups to export as scalar streams
    :param used_materials: materials referenced by exported objects; face
        material indices are mapped through this list
    :param offset_matrix: matrix used to offset geometry into SCS root space
    :param scs_globals: global export settings
    :param output_type: output format type forwarded to vertex color readers
    :return: tuple (piece_sections, global_vertex_count, global_face_count,
        global_edge_count, piece_index_obj, skin_list, skin_weights_cnt,
        skin_clones_cnt)
    """
    piece_sections = []  # container for all "Pieces"
    global_vertex_count = 0
    global_face_count = 0
    global_edge_count = 0
    piece_index_obj = {}  # maps piece index -> source object
    skin_list = []
    skin_weights_cnt = 0
    skin_clones_cnt = 0
    for piece_index, obj in enumerate(object_list):
        mat_world = obj.matrix_world
        piece_index_obj[piece_index] = obj
        object_materials = _object_utils.get_object_materials(obj)  # get object materials

        # Get Object's Mesh data and list of temporarily disabled "Edge Split" Modifiers...
        mesh = _object_utils.get_mesh(obj)

        # VERTICES
        # TEMPORAL VERTEX STREAM DATA FORMAT:
        # example: ('_POSITION', [(0.0, 0.0, 0.0), (0.0, 0.0, 0.0), ...])
        # example: ('_SCALAR', [(0.0), (0.0), ...])

        stream_pos = ('_POSITION', [])
        # stream_nor = ('_NORMAL', [])
        # if scs_globals.export_vertex_groups:
        vg_layers_for_export, streams_vg = _object_utils.get_stream_vgs(obj)  # get Vertex Group layers (SCALARs)

        vertex_stream_count = 1
        vertex_streams = []
        stream_vg_container = []
        # print('bone_list: %s' % str(bone_list.keys))
        for vert_i, vert in enumerate(mesh.vertices):
            position = offset_matrix.inverted() * mesh.vertices[vert_i].co
            # scs_position = io_utils.change_to_scs_xyz_coordinates(mat_world * position, scs_globals.export_scale) ## POSITION
            # convert vertex into scaled SCS coordinate space (pre-2.80 '*' matrix product)
            scs_position = Matrix.Scale(scs_globals.export_scale,
                                        4) * _convert_utils.scs_to_blend_matrix().inverted() * mat_world * position  # POSITION
            stream_pos[1].append(scs_position)
            # stream_nor[1].append(io_utils.get_vertex_normal(mesh, vert_i))              # NORMAL
            if scs_globals.export_vertex_groups:
                if streams_vg:
                    vg_i = 0
                    for vg in vg_layers_for_export:  # weights (even unused) all vertices become 0.0
                        if vg.name in vg_list:
                            vg_weight = (_object_utils.get_vertex_group(vg, vert_i),)  # SCALARs
                            key = str("_SCALAR" + str(vg_i))
                            # first vertex creates the stream entry, later vertices append to it
                            if vg_i == len(stream_vg_container) and len(stream_vg_container) != len(vg_layers_for_export):
                                stream_vg_container.append((vg.name, key, [vg_weight]))
                            else:
                                stream_vg_container[vg_i][2].append(vg_weight)
                            vg_i += 1

                            # SKINNING (OLD STYLE FOR PIM VER. 7)
            # if scs_globals.export_anim_file == 'anim':
            if root_object.scs_props.scs_root_animated == 'anim':
                skin_vector = scs_position  # NOTE: Vertex position - from Maya scaled *10 (old & unused in game engine)
                skin_weights = []
                # resolve (bone index, weight) pairs by matching vertex group names to bone names
                for group in vert.groups:
                    for vg in vg_layers_for_export:
                        if vg.index == group.group:
                            for bone_i, bone in enumerate(bone_list):
                                if vg.name == bone.name:
                                    skin_weights.append((bone_i, group.weight))
                                    skin_weights_cnt += 1
                                    # print('vert: %i - group: %r (%i) - %s' % (vert_i, vg.name, bone_i, str(group.weight)))
                                    break
                            break
                skin_clones = ((piece_index, vert_i), )
                skin_clones_cnt += 1
                skin_list.append((skin_vector, skin_weights, skin_clones))

                # ##
        vertex_streams.append(stream_pos)
        # print('\nstream_pos:\n  %s' % str(stream_pos))
        # vertex_streams.append(stream_nor)
        # print('\nstream_nor:\n  %s' % str(stream_nor))
        for vg_stream in stream_vg_container:
            vertex_stream_count += 1
            vertex_streams.append(vg_stream)
            # print('\nvg_stream:\n  %s' % str(vg_stream))
        # FACES
        # TEMPORAL FACE STREAM DATA FORMAT:
        # faces = [face_data, face_data, ...]
        # face_data = (material, [vertex indices], [face-vertex streams])
        # face_streams = [('_UV0', [(0.0, 0.0), (0.0, 0.0), ...]), ...]
        # example: [(0, [0, 1, 2], [('_UV0', [(0.0, 0.0), (0.0, 0.0)]), ('_UV0', [(0.0, 0.0), (0.0, 0.0)])]), (), ...]

        faces = []
        face_cnt = 0
        # flags collapse to 0 after the first face where no streams were found
        uv_layers_exists = 1
        rgb_layers_exists = 1
        # print('used_materials: %s' % str(used_materials))
        for face_i, face in enumerate(mesh.polygons):
            face_cnt += 1
            streams_uv = None
            streams_vcolor = None
            if uv_layers_exists:
                # NOTE(review): UV streams are re-fetched for every face of the mesh — looks redundant; confirm
                requested_uv_layers, streams_uv = _mesh_utils.get_stream_uvs(mesh, scs_globals.active_uv_only)  # get UV layers (UVs)
                if not streams_uv:
                    uv_layers_exists = 0
            if rgb_layers_exists and scs_globals.export_vertex_color:
                if scs_globals.export_vertex_color_type_7 == 'rgb':
                    rgb_all_layers, streams_vcolor = _mesh_utils.get_stream_rgb(mesh, output_type, False)  # get Vertex Color layers (RGB)
                elif scs_globals.export_vertex_color_type_7 == 'rgbda':
                    rgb_all_layers, streams_vcolor = _mesh_utils.get_stream_rgb(mesh, output_type, True)  # get Vertex Color layers (
                    # RGBdA)
                else:
                    streams_vcolor = None  # TODO: Alpha from another layer
                if not streams_vcolor:
                    rgb_layers_exists = 0
            # remap object-local material index to index within used_materials
            mat_index = used_materials.index(object_materials[face.material_index])
            # print('face-mat_index: %s; object_materials[f-m_i]: %s; used_materials.index(o_m[f-m_i]): %s' % (face.material_index,
            # object_materials[face.material_index], used_materials.index(object_materials[face.material_index])))
            face_verts = []
            for vert in face.vertices:
                face_verts.append(vert)
            face_verts = face_verts[::-1]  # revert vertex order in face
            # print('face_verts: %s' % str(face_verts))
            face_streams = []
            stream_fv_nor = ("_NORMAL", [])
            stream_fv_uv_container = []
            stream_fv_rgb_container = []
            stream_names = {}  # maps stream key (e.g. "_UV0") -> source layer name
            for loop_index in range(face.loop_start, face.loop_start + face.loop_total):
                # edge_index = mesh.loops[loop_index].edge_index
                vert_index = mesh.loops[loop_index].vertex_index
                # print('face i.: %s\tloop i.: %s\tedge i.: %s\tvert i.: %s' % (face_i, loop_index, edge_index, vert_index))
                # Normals
                stream_fv_nor[1].append(offset_matrix.inverted() * Vector(_mesh_utils.get_vertex_normal(mesh, vert_index)))
                # UV Layers
                if streams_uv:
                    for uv_i, uv_l in enumerate(requested_uv_layers):
                        uv_values = _mesh_utils.get_face_vertex_uv(uv_l.data, loop_index, uv_i)
                        key = str("_UV" + str(uv_i))
                        # first loop creates the stream entry, later loops append to it
                        if uv_i == len(stream_fv_uv_container) and len(stream_fv_uv_container) != len(requested_uv_layers):
                            stream_fv_uv_container.append((key, [uv_values]))
                            stream_names[key] = uv_l.name
                        else:
                            stream_fv_uv_container[uv_i][1].append(uv_values)
                            # Vertex Color Layers (RGB)
                if scs_globals.export_vertex_color:
                    if streams_vcolor:
                        for rgb_i, rgb_l in enumerate(rgb_all_layers):
                            if scs_globals.export_vertex_color_type_7 == 'rgb':
                                rgb_values = _mesh_utils.get_face_vertex_color(rgb_l.data, loop_index, False, rgb_i)
                                key = str("_RGB" + str(rgb_i))
                            elif scs_globals.export_vertex_color_type_7 == 'rgbda':
                                rgb_values = _mesh_utils.get_face_vertex_color(rgb_l.data, loop_index, True, rgb_i)
                                key = str("_RGBA" + str(rgb_i))
                            else:
                                streams_vcolor = None  # TODO: Alpha from another layer
                            if rgb_i == len(stream_fv_rgb_container) and len(stream_fv_rgb_container) != len(rgb_all_layers):
                                stream_fv_rgb_container.append((key, [rgb_values]))
                                stream_names[key] = rgb_l.name
                            else:
                                stream_fv_rgb_container[rgb_i][1].append(rgb_values)
                                # Data Assembling
            face_streams.append(stream_fv_nor)
            for stream in stream_fv_uv_container:
                face_streams.append(stream)
            for stream in stream_fv_rgb_container:
                face_streams.append(stream)
            face_data = (mat_index, face_verts, face_streams)
            faces.append(face_data)

        # SHARP EDGES (edges explicitly marked sharp in the mesh)
        sharp_edges = []
        for edge in mesh.edges:
            if edge.use_edge_sharp:
                sharp_edges.append(edge.vertices[:])

        # BUILD FACE SECTION
        # NOTE(review): raises IndexError when the mesh has no polygons — confirm callers never pass face-less meshes
        faces_container = _SectionData("Faces")
        faces_container.props.append(("StreamCount", len(faces[0][2])))
        for face_i, face_data in enumerate(faces):
            face_container = _SectionData("Face")
            face_container.props.append(("Index", face_i))
            face_container.props.append(("Material", face_data[0]))
            face_container.props.append(("Indices", face_data[1]))
            for stream in face_data[2]:
                if stream[0] in stream_names:
                    face_container.sections.append(_pix_container.make_vertex_stream(stream, stream_names[stream[0]]))
                else:
                    face_container.sections.append(_pix_container.make_vertex_stream(stream))
            faces_container.sections.append(face_container)

        # BUILD PIECE SECTION (global counters are accumulated alongside)
        piece_section = _SectionData("Piece")
        piece_section.props.append(("Index", piece_index))
        global_vertex_count += len(stream_pos[1])
        piece_section.props.append(("VertexCount", len(stream_pos[1])))
        global_face_count += face_cnt
        piece_section.props.append(("FaceCount", face_cnt))
        global_edge_count += len(sharp_edges)
        piece_section.props.append(("EdgeCount", len(sharp_edges)))
        piece_section.props.append(("StreamCount", vertex_stream_count))
        piece_section.props.append(("", ""))
        # vertex streams... (3-tuples carry (name, key, data); 2-tuples just (key, data))
        for stream in vertex_streams:
            if len(stream) == 3:
                # print('\nstream:\n  %s' % str(stream))
                piece_section.sections.append(_pix_container.make_vertex_stream(stream[1:], stream[0]))
            else:
                piece_section.sections.append(_pix_container.make_vertex_stream(stream))
                # faces...
        piece_section.sections.append(faces_container)

        # BUILD AND STORE EDGE SECTION
        if sharp_edges:
            edges_container = _SectionData("Edges")
            for edge in sharp_edges:
                edges_container.data.append(edge)
            piece_section.sections.append(edges_container)

        # STORE PIECE SECTION
        piece_sections.append(piece_section)  # add a piece
    return piece_sections, global_vertex_count, global_face_count, global_edge_count, piece_index_obj, skin_list, skin_weights_cnt, skin_clones_cnt