def create_locator_empty(name, loc, rot=(0, 0, 0), scale=(1, 1, 1), size=1.0, data_type='Prefab', hookup=None, blend_coords=False):
    """Creates an empty object for a Locator.

    :param name: name for the new locator object
    :param loc: location, in SCS coordinates unless blend_coords is True
    :param rot: rotation as an Euler XYZ triple, or a quaternion if 4 components are given
    :param scale: scale of the locator object
    :param size: display size of the locator empty
    :param data_type: locator type (e.g. 'Prefab', 'Model' or 'Collision')
    :param hookup: hookup string for model locators
    :param blend_coords: True if given coordinates are already in Blender space
    :return: created locator empty object
    """
    rot_quaternion = None
    if len(rot) == 4:
        rot_quaternion = rot
        rot = (0, 0, 0)

    if blend_coords:
        location = loc
    else:
        location = _convert.change_to_scs_xyz_coordinates(loc, _get_scs_globals().import_scale)

    unique_name = _name.get_unique(name, bpy.data.objects, sep=".")
    locator = bpy.data.objects.new(unique_name, None)
    locator.empty_display_type = 'PLAIN_AXES'
    locator.scs_props.object_identity = locator.name

    # link to active layer and scene and make it active and selected
    bpy.context.view_layer.active_layer_collection.collection.objects.link(locator)
    bpy.context.view_layer.objects.active = locator
    locator.select_set(True)

    # fix scene objects count to avoid callback of new object
    bpy.context.scene.scs_cached_num_objects = len(bpy.context.scene.objects)

    locator.location = location
    if rot_quaternion:
        locator.rotation_mode = 'QUATERNION'
        if blend_coords:
            locator.rotation_quaternion = rot_quaternion
        else:
            locator.rotation_quaternion = _convert.change_to_blender_quaternion_coordinates(rot_quaternion)
    else:
        locator.rotation_mode = 'XYZ'
        locator.rotation_euler = rot
    locator.scale = scale

    locator.scs_props.empty_object_type = 'Locator'
    locator.scs_props.locator_type = data_type
    if data_type == "Prefab":
        locator.scs_props.scs_part = ""

    if hookup:
        locator.scs_props.locator_model_hookup = hookup

    return locator

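# Minimal usage sketch, assuming this runs inside Blender with the SCS Blender
# Tools add-on registered (so scs_props and the helper modules exist). The
# name, coordinates and hookup string below are invented example values.
def _example_create_locator():
    locator = create_locator_empty(
        "example_locator",            # hypothetical name
        (1.0, 0.0, 2.5),              # position, converted from SCS space by default
        rot=(0.0, 0.0, 0.0, 1.0),     # 4 components are treated as a quaternion
        data_type='Model',
        hookup="example_hookup",      # hypothetical hookup string
    )
    print(locator.name, locator.scs_props.locator_type)
    return locator
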
def create_locator_empty(name, loc, rot=(0, 0, 0), scale=(1, 1, 1), size=1.0, data_type='Prefab', hookup=None):
    """Creates an empty object for a Locator.

    :param name:
    :param loc:
    :param rot:
    :param scale:
    :param size:
    :param data_type:
    :param hookup:
    :return:
    """
    rot_quaternion = None
    if len(rot) == 4:
        rot_quaternion = rot
        rot = (0, 0, 0)

    bpy.ops.object.empty_add(
        type='PLAIN_AXES',
        view_align=False,
        location=_convert.change_to_scs_xyz_coordinates(loc, _get_scs_globals().import_scale),
        rotation=rot,
    )

    bpy.context.active_object.name = _name.get_unique(name, bpy.data.objects, sep=".")
    locator = bpy.context.active_object
    locator.scs_props.object_identity = locator.name

    if rot_quaternion:
        locator.rotation_mode = 'QUATERNION'
        locator.rotation_quaternion = _convert.change_to_blender_quaternion_coordinates(rot_quaternion)

    locator.scale = scale
    locator.scs_props.empty_object_type = 'Locator'
    locator.scs_props.locator_type = data_type
    if data_type == "Prefab":
        locator.scs_props.scs_part = ""

    if hookup:
        locator.scs_props.locator_model_hookup = hookup

    return locator

def create_locator_empty(name, loc, rot=(0, 0, 0), scale=(1, 1, 1), size=1.0, data_type='Prefab', hookup=None):
    """Creates an empty object for a Locator.

    :param name:
    :param loc:
    :param rot:
    :param scale:
    :param size:
    :param data_type:
    :param hookup:
    :return:
    """
    rot_quaternion = None
    if len(rot) == 4:
        rot_quaternion = rot
        rot = (0, 0, 0)

    bpy.ops.object.empty_add(
        type='PLAIN_AXES',
        view_align=False,
        location=_convert.change_to_scs_xyz_coordinates(loc, _get_scs_globals().import_scale),
        rotation=rot,
    )

    name = _name.make_unique_name(object, name)
    bpy.context.active_object.name = name
    locator = bpy.data.objects.get(name)
    locator.scs_props.object_identity = locator.name

    if rot_quaternion:
        locator.rotation_mode = 'QUATERNION'
        locator.rotation_quaternion = _convert.change_to_blender_quaternion_coordinates(rot_quaternion)

    locator.scale = scale
    locator.scs_props.empty_object_type = 'Locator'
    locator.scs_props.locator_type = data_type
    if data_type == "Prefab":
        locator.scs_props.scs_part = ""

    if hookup:
        locator.scs_props.locator_model_hookup = hookup

    return locator

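# Side-by-side sketch of the two creation styles above: the current version
# uses the data API (bpy.data.objects.new), while the two legacy versions go
# through bpy.ops.object.empty_add ('view_align' existed only in Blender 2.7x).
# The data API avoids the operator's context and selection overhead, which
# matters when importing many locators. "demo_empty" is an arbitrary name.
def _example_empty_creation_styles():
    import bpy

    # data API (current style): no operator context needed
    empty = bpy.data.objects.new("demo_empty", None)
    empty.empty_display_type = 'PLAIN_AXES'
    bpy.context.view_layer.active_layer_collection.collection.objects.link(empty)

    # legacy operator style (Blender 2.7x; 'view_align' was later replaced by 'align'):
    # bpy.ops.object.empty_add(type='PLAIN_AXES', view_align=False, location=(0, 0, 0))
    return empty
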
def _create_piece(
        context,
        name,
        mesh_vertices,
        mesh_normals,
        mesh_tangents,
        mesh_rgb,
        mesh_rgba,
        mesh_scalars,
        object_skinning,
        mesh_uv,
        mesh_uv_aliases,
        mesh_tuv,
        mesh_faces,
        mesh_face_materials,
        mesh_edges,
        terrain_points_trans,
        materials_data,
):
    handle_unused_arg(__file__, _create_piece.__name__, "mesh_normals", mesh_normals)
    handle_unused_arg(__file__, _create_piece.__name__, "mesh_tangents", mesh_tangents)
    handle_unused_arg(__file__, _create_piece.__name__, "mesh_tuv", mesh_tuv)

    context.window_manager.progress_begin(0.0, 1.0)
    context.window_manager.progress_update(0)

    import_scale = _get_scs_globals().import_scale
    mesh = bpy.data.meshes.new(name)

    # COORDINATES TRANSFORMATION
    transformed_mesh_vertices = [_convert_utils.change_to_scs_xyz_coordinates(vec, import_scale) for vec in mesh_vertices]

    context.window_manager.progress_update(0.1)

    # VISUALISE IMPORTED NORMALS (DEBUG)
    # NOTE: NOT functional for PIM version 7 since mesh normals are not provided in per vertex fashion!
    # visualise_normals(name, transformed_mesh_vertices, mesh_normals, import_scale)

    bm = bmesh.new()

    # VERTICES
    _mesh_utils.bm_make_vertices(bm, transformed_mesh_vertices)
    context.window_manager.progress_update(0.2)

    # FACES
    # for fac_i, fac in enumerate(mesh_faces): print(' face[%i]: %s' % (fac_i, str(fac)))
    mesh_faces, back_faces = _mesh_utils.bm_make_faces(bm, mesh_faces, [])
    context.window_manager.progress_update(0.3)

    # SHARP EDGES
    # print('mesh_edges: %s' % str(mesh_edges))
    for edge in bm.edges:
        edge_verts = [edge.verts[0].index, edge.verts[1].index]
        edge_verts_inv = [edge.verts[1].index, edge.verts[0].index]
        if edge_verts in mesh_edges or edge_verts_inv in mesh_edges:
            # print('edge: %s' % str(edge_verts))
            edge.smooth = False
    context.window_manager.progress_update(0.4)

    # UV LAYERS
    if mesh_uv:
        for uv_layer_name in mesh_uv:
            _mesh_utils.bm_make_uv_layer(7, bm, mesh_faces, uv_layer_name, mesh_uv[uv_layer_name])
    context.window_manager.progress_update(0.5)

    # VERTEX COLOR
    mesh_rgb_final = {}
    if mesh_rgba:
        mesh_rgb_final.update(mesh_rgba)
    if mesh_rgb:
        mesh_rgb_final.update(mesh_rgb)

    for vc_layer_name in mesh_rgb_final:
        max_value = mesh_rgb_final[vc_layer_name][0][0][0] / 2

        for vc_entry in mesh_rgb_final[vc_layer_name]:
            for v_i in vc_entry:
                for i, value in enumerate(v_i):
                    if max_value < value / 2:
                        max_value = value / 2

        if max_value > mesh.scs_props.vertex_color_multiplier:
            mesh.scs_props.vertex_color_multiplier = max_value

        _mesh_utils.bm_make_vc_layer(7, bm, vc_layer_name, mesh_rgb_final[vc_layer_name], mesh.scs_props.vertex_color_multiplier)

    bm.to_mesh(mesh)
    mesh.update()
    bm.free()

    # NORMALS - has to be applied after bmesh creation as they are set directly to mesh
    if _get_scs_globals().import_use_normals:

        mesh.create_normals_split()

        # first set normals directly to loops
        for poly_i, poly in enumerate(mesh.polygons):
            for poly_loop_i, loop_i in enumerate(poly.loop_indices):
                curr_n = _convert_utils.scs_to_blend_matrix() @ Vector(mesh_normals[poly_i][poly_loop_i])
                mesh.loops[loop_i].normal[:] = curr_n

        # then we have to go through a very important validation step,
        # as without it we get wrong results for some normals
        mesh.validate(clean_customdata=False)  # *Very* important to not remove lnors here!

        # set polygons to use smooth representation
        mesh.polygons.foreach_set("use_smooth", [True] * len(mesh.polygons))

        # finally fill clnors from loops normals and apply them (taken from official Blender's scripts)
        clnors = array.array('f', [0.0] * (len(mesh.loops) * 3))
        mesh.loops.foreach_get("normal", clnors)
        mesh.normals_split_custom_set(tuple(zip(*(iter(clnors),) * 3)))
        mesh.use_auto_smooth = True

        mesh.free_normals_split()
    else:
        # set polygons to use smooth representation only
        mesh.polygons.foreach_set("use_smooth", [True] * len(mesh.polygons))

    context.window_manager.progress_update(0.6)

    # Create object out of mesh and link it to active layer collection.
    obj = bpy.data.objects.new(mesh.name, mesh)
    obj.scs_props.object_identity = obj.name
    obj.location = (0.0, 0.0, 0.0)
    context.view_layer.active_layer_collection.collection.objects.link(obj)
    obj.select_set(True)
    bpy.context.view_layer.objects.active = obj

    # SCALAR LAYERS
    if mesh_scalars:
        for sca_layer_name in mesh_scalars:
            vertex_group = obj.vertex_groups.new(name=sca_layer_name)
            for val_i, val in enumerate(mesh_scalars[sca_layer_name]):
                val = float(val[0])
                if val != 0.0:
                    vertex_group.add([val_i], val, "ADD")
    context.window_manager.progress_update(0.7)

    # TERRAIN POINTS (VERTEX GROUPS)
    for vertex_i, vertex_pos in enumerate(mesh_vertices):

        tp_entries = terrain_points_trans.get(vertex_pos)

        # add current vertex to all combinations of variants/nodes
        # from found terrain points transitional structures
        for tp_entry in tp_entries:

            # first 6 chars in vertex group name will represent variant index
            # this way we will be able to identify variant during vertex groups
            # cleanup if this vertex will be set to multiple variants
            vg_name = str(tp_entry.variant_i).zfill(6) + _OP_consts.TerrainPoints.vg_name_prefix + str(tp_entry.node_i)

            if vg_name not in obj.vertex_groups:
                obj.vertex_groups.new(name=vg_name)

            vertex_group = obj.vertex_groups[vg_name]
            vertex_group.add([vertex_i], 1.0, "REPLACE")

    # SKINNING (VERTEX GROUPS)
    if object_skinning:
        if name in object_skinning:
            for vertex_group_name in object_skinning[name]:
                vertex_group = obj.vertex_groups.new(name=vertex_group_name)
                for vertex_i, vertex in enumerate(object_skinning[name][vertex_group_name]):
                    weight = object_skinning[name][vertex_group_name][vertex]
                    if weight != 0.0:
                        vertex_group.add([vertex], weight, "ADD")
        else:
            lprint('\nE Missing skin group %r! Skipping...', (name,))

    # ADD EDGE SPLIT MODIFIER
    bpy.ops.object.shade_smooth()
    bpy.ops.object.modifier_add(type='EDGE_SPLIT')
    bpy.context.object.modifiers["EdgeSplit"].use_edge_angle = False
    bpy.context.object.modifiers["EdgeSplit"].name = "ES_" + name

    # MATERIALS
    used_mat_indices = set()
    # print('\n mesh_face_materials:\n%s' % str(mesh_face_materials))
    for mat_index in mesh_face_materials:
        used_mat_indices.add(mat_index)
    # print(' used_mats:\n%s' % str(used_mats))
    context.window_manager.progress_update(0.8)

    # ADD MATERIALS TO SLOTS
    # print(' materials_data:\n%s' % str(materials_data))
    mat_index_to_mat_slot_map = {}
    if len(materials_data) > 0:
        for used_mat_idx in used_mat_indices:
            material_name = materials_data[used_mat_idx][0]
            bpy.ops.object.material_slot_add()  # Add a material slot
            last_slot = len(obj.material_slots) - 1

            # now as we created the slot and know its index, write down material slot
            # indices to a dictionary for later usage when assigning faces to proper slots
            mat_index_to_mat_slot_map[used_mat_idx] = last_slot

            # print(' used_mat: %s (%i) => %s : %s' % (str(used_mat), mat_i, str(last_slot), str(material)))
            obj.material_slots[last_slot].material = bpy.data.materials[material_name]  # Assign a material to the slot

            # NOTE: we are setting texture aliases only the first time to avoid duplicates etc.
            # So we assume that pieces which are using the same material will also have the same uv aliases alignment
            used_material = bpy.data.materials[material_name]
            if "scs_tex_aliases" not in used_material:
                import re

                alias_mapping = {}
                for uv_lay in mesh_uv_aliases[used_mat_idx]:
                    for alias in mesh_uv_aliases[used_mat_idx][uv_lay]:
                        numbers = re.findall(r"\d+", alias)
                        number = numbers[len(numbers) - 1]
                        alias_mapping[number] = uv_lay

                used_material["scs_tex_aliases"] = alias_mapping

    mesh = obj.data
    context.window_manager.progress_update(0.9)

    # APPLY MATERIAL SLOT INDICES TO FACES
    for face_i, face in enumerate(mesh.polygons):
        face.material_index = mat_index_to_mat_slot_map[mesh_face_materials[face_i]]

    context.window_manager.progress_update(1.0)

    return obj

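# Minimal standalone sketch of the custom split normals step used above, on a
# single triangle (assumes Blender 2.8x/2.9x, where create_normals_split(),
# use_auto_smooth and free_normals_split() still exist). The normal values are
# arbitrary example data.
def _example_custom_split_normals():
    import array
    import bpy

    mesh = bpy.data.meshes.new("normals_demo")
    mesh.from_pydata([(0, 0, 0), (1, 0, 0), (0, 1, 0)], [], [(0, 1, 2)])

    mesh.create_normals_split()
    for loop in mesh.loops:
        loop.normal[:] = (0.0, 0.0, 1.0)  # pretend this came from the PIM file

    mesh.validate(clean_customdata=False)  # keep the loop normals ("lnors")
    mesh.polygons.foreach_set("use_smooth", [True] * len(mesh.polygons))

    # pack loop normals into a flat float array, then re-group them into triples
    clnors = array.array('f', [0.0] * (len(mesh.loops) * 3))
    mesh.loops.foreach_get("normal", clnors)
    mesh.normals_split_custom_set(tuple(zip(*(iter(clnors),) * 3)))
    mesh.use_auto_smooth = True
    return mesh
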
def _create_piece(context, preview_model, name, ob_material, mesh_vertices, mesh_normals, mesh_tangents, mesh_rgb, mesh_rgba,
                  mesh_scalars, object_skinning, mesh_uv, mesh_tuv, mesh_triangles, materials_data, points_to_weld_list,
                  terrain_points_trans, ignore_backfaces=False):
    handle_unused_arg(__file__, _create_piece.__name__, "mesh_tangents", mesh_tangents)
    handle_unused_arg(__file__, _create_piece.__name__, "mesh_scalars", mesh_scalars)
    handle_unused_arg(__file__, _create_piece.__name__, "mesh_tuv", mesh_tuv)

    context.window_manager.progress_begin(0.0, 1.0)
    context.window_manager.progress_update(0)

    import_scale = _get_scs_globals().import_scale
    mesh = bpy.data.meshes.new(name)

    # COORDINATES TRANSFORMATION
    transformed_mesh_vertices = [_convert_utils.change_to_scs_xyz_coordinates(vec, import_scale) for vec in mesh_vertices]

    context.window_manager.progress_update(0.1)

    # VISUALISE IMPORTED NORMALS (DEBUG)
    # visualise_normals(name, transformed_mesh_vertices, mesh_normals, import_scale)

    # MESH CREATION
    bm = bmesh.new()

    # VERTICES
    _mesh_utils.bm_make_vertices(bm, transformed_mesh_vertices)
    context.window_manager.progress_update(0.2)

    # FACES
    mesh_triangles, back_triangles = _mesh_utils.bm_make_faces(bm, mesh_triangles, points_to_weld_list)
    context.window_manager.progress_update(0.3)

    # UV LAYERS
    if mesh_uv:
        for uv_layer_name in mesh_uv:
            _mesh_utils.bm_make_uv_layer(5, bm, mesh_triangles, uv_layer_name, mesh_uv[uv_layer_name]["data"])
    context.window_manager.progress_update(0.4)

    # VERTEX COLOR
    if mesh_rgba:
        mesh_rgb_final = mesh_rgba
    elif mesh_rgb:
        mesh_rgb_final = mesh_rgb
    else:
        mesh_rgb_final = []

    for vc_layer_name in mesh_rgb_final:
        max_value = mesh_rgb_final[vc_layer_name][0][0] / 2

        for vc_entry in mesh_rgb_final[vc_layer_name]:
            for i, value in enumerate(vc_entry):
                if max_value < value / 2:
                    max_value = value / 2

        if max_value > mesh.scs_props.vertex_color_multiplier:
            mesh.scs_props.vertex_color_multiplier = max_value

        _mesh_utils.bm_make_vc_layer(5, bm, vc_layer_name, mesh_rgb_final[vc_layer_name], mesh.scs_props.vertex_color_multiplier)

    context.window_manager.progress_update(0.5)

    bm.to_mesh(mesh)
    mesh.update()
    bm.free()

    # NORMALS - has to be applied after bmesh creation as they are set directly to mesh
    if _get_scs_globals().import_use_normals:

        mesh.create_normals_split()

        # first set normals directly to loops
        for loop in mesh.loops:
            curr_n = _convert_utils.scs_to_blend_matrix() @ Vector(mesh_normals[loop.vertex_index])
            loop.normal[:] = curr_n

        # then we have to go through a very important validation step,
        # as without it we get wrong results for some normals
        mesh.validate(clean_customdata=False)  # *Very* important to not remove lnors here!

        # set polygons to use smooth representation
        mesh.polygons.foreach_set("use_smooth", [True] * len(mesh.polygons))

        # finally fill clnors from loops normals and apply them (taken from official Blender's scripts)
        clnors = array.array('f', [0.0] * (len(mesh.loops) * 3))
        mesh.loops.foreach_get("normal", clnors)
        mesh.normals_split_custom_set(tuple(zip(*(iter(clnors),) * 3)))
        mesh.use_auto_smooth = True

        mesh.free_normals_split()
    else:
        # set polygons to use smooth representation only
        mesh.polygons.foreach_set("use_smooth", [True] * len(mesh.polygons))

    context.window_manager.progress_update(0.6)

    # Create object out of mesh and link it to active layer collection.
    obj = bpy.data.objects.new(mesh.name, mesh)
    obj.scs_props.object_identity = obj.name
    obj.location = (0.0, 0.0, 0.0)
    context.view_layer.active_layer_collection.collection.objects.link(obj)
    obj.select_set(True)
    bpy.context.view_layer.objects.active = obj

    context.window_manager.progress_update(0.7)

    context.window_manager.progress_update(0.8)

    # TERRAIN POINTS (VERTEX GROUPS)
    for vertex_i, vertex_pos in enumerate(mesh_vertices):

        tp_entries = terrain_points_trans.get(vertex_pos)

        # add current vertex to all combinations of variants/nodes
        # from found terrain points transitional structures
        for tp_entry in tp_entries:

            # first 6 chars in vertex group name will represent variant index
            # this way we will be able to identify variant during vertex groups
            # cleanup if this vertex will be set to multiple variants
            vg_name = str(tp_entry.variant_i).zfill(6) + _OP_consts.TerrainPoints.vg_name_prefix + str(tp_entry.node_i)

            if vg_name not in obj.vertex_groups:
                obj.vertex_groups.new(name=vg_name)

            vertex_group = obj.vertex_groups[vg_name]
            vertex_group.add([vertex_i], 1.0, "REPLACE")

    # SKINNING (VERTEX GROUPS)
    if object_skinning:
        if name in object_skinning:
            for vertex_group_name in object_skinning[name]:
                vertex_group = obj.vertex_groups.new(name=vertex_group_name)
                for vertex_i, vertex in enumerate(object_skinning[name][vertex_group_name]):
                    weight = object_skinning[name][vertex_group_name][vertex]
                    if weight != 0.0:
                        if vertex in points_to_weld_list:
                            vertex = points_to_weld_list[vertex]
                        vertex_group.add([vertex], weight, "ADD")
        else:
            lprint('\nE Missing skin group %r! Skipping...', (name,))

    context.window_manager.progress_update(0.9)

    # DELETE ORPHAN VERTICES (LEFT IN THE GEOMETRY FROM SMOOTHING RECONSTRUCTION)
    if points_to_weld_list:
        bm = bmesh.new()
        bm.from_mesh(mesh)
        bm.verts.ensure_lookup_table()

        for vert_i in points_to_weld_list.keys():
            bm.verts[vert_i].select_set(True)

        verts = [v for v in bm.verts if v.select]
        if verts:
            bmesh.ops.delete(bm, geom=verts, context='VERTS')

        # APPLYING BMESH TO MESH
        bm.to_mesh(mesh)
        bm.free()

    context.window_manager.progress_update(1.0)

    # MATERIAL
    if len(materials_data) > 0 and not preview_model:
        # Assign a material to the last slot
        used_material = bpy.data.materials[materials_data[ob_material][0]]
        obj.data.materials.append(used_material)

        # NOTE: we are setting texture aliases only the first time to avoid duplicates etc.
        # So we assume that pieces which are using the same material will also have the same uv aliases alignment
        if "scs_tex_aliases" not in used_material:
            import re

            alias_mapping = {}
            for uv_lay in mesh_uv:
                if "aliases" in mesh_uv[uv_lay]:
                    for alias in mesh_uv[uv_lay]["aliases"]:
                        numbers = re.findall(r"\d+", alias)
                        number = numbers[len(numbers) - 1]
                        alias_mapping[number] = uv_lay

            used_material["scs_tex_aliases"] = alias_mapping

    context.window_manager.progress_end()

    # if back triangles are present, then create new object with
    # back triangles and merge it to original
    if len(back_triangles) > 0 and not ignore_backfaces:
        back_obj = _create_piece(context, preview_model, "back_" + name, ob_material, mesh_vertices, mesh_normals, mesh_tangents,
                                 mesh_rgb, mesh_rgba, mesh_scalars, object_skinning, mesh_uv, mesh_tuv, back_triangles,
                                 materials_data, points_to_weld_list, terrain_points_trans, ignore_backfaces=True)

        lprint("W Found %s back face(s) without its own vertices on object %r, additional vertices were added!",
               (len(back_obj.data.polygons), obj.name))

        # creation of back face object used all original vertices
        # for proper index accessing during binding all of the data blocks to vertices.
        # Because of that we have to remove vertices which are not really used
        # in the back faces mesh, so called "loose" vertices
        back_obj.data = _mesh_utils.bm_delete_loose(back_obj.data)

        # finally join back object with original
        override = context.copy()
        override["active_object"] = obj
        override["selected_editable_objects"] = (obj, back_obj)
        bpy.ops.object.join(override)

    return obj

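# Minimal sketch of the legacy context-override join used above. This dict
# override style works in Blender 2.8x up to 3.1; from 3.2 on, the supported
# pattern is context.temp_override(). Both objects are assumed to be mesh
# objects already linked to the scene.
def _example_join_pair(context, obj_a, obj_b):
    import bpy

    override = context.copy()
    override["active_object"] = obj_a
    override["selected_editable_objects"] = (obj_a, obj_b)
    bpy.ops.object.join(override)  # obj_b is merged into obj_a and removed
    return obj_a
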
def load(filepath):
    scs_globals = _get_scs_globals()

    print("\n************************************")
    print("** SCS PIC Importer **")
    print("** (c)2014 SCS Software **")
    print("************************************\n")

    ind = '    '
    pic_container = _pix_container.get_data_from_file(filepath, ind)

    # TEST PRINTOUTS
    # ind = '  '
    # for section in pic_container:
    #     print('SEC.: "%s"' % section.type)
    #     for prop in section.props:
    #         print('%sProp: %s' % (ind, prop))
    #     for data in section.data:
    #         print('%sdata: %s' % (ind, data))
    #     for sec in section.sections:
    #         print_section(sec, ind)
    # print('\nTEST - Source: "%s"' % pic_container[0].props[1][1])
    # print('')

    # TEST EXPORT
    # path, file = os.path.splitext(filepath)
    # export_filepath = str(path + '_reex' + file)
    # result = pix_write.write_data(pic_container, export_filepath, ind)
    # if result == {'FINISHED'}:
    #     Print(dump_level, '\nI Test export successful! The new file:\n  "%s"', export_filepath)
    # else:
    #     Print(dump_level, '\nE Test export failed! File:\n  "%s"', export_filepath)

    # LOAD HEADER
    ''' NOTE: skipped for now as no data needs to be read
    format_version, source, f_type, f_name, source_filename, author = _get_header(pic_container)
    '''

    # LOAD GLOBALS
    ''' NOTE: skipped for now as no data needs to be read
    vertex_count, triangle_count, material_count, piece_count, part_count, locator_count = _get_global(pic_container)
    '''

    # LOAD MATERIALS
    if 0:  # NOTE: NO MATERIALS USED FOR COLLIDERS AT THE MOMENT!
        loaded_materials = []
        for section in pic_container:
            if section.type == 'Material':
                material_alias, material_effect = _get_material(section)
                lprint('I Adding a Material Alias: "%s"', material_alias)
                loaded_materials.append(material_alias)

                # PRINT "MATERIAL SETTINGS" TO CONSOLE...
                if 0:
                    import pprint
                    pp = pprint.PrettyPrinter(indent=1)
                    print("=== MATERIAL SETTINGS ==========================")
                    pp.pprint(material_effect)
                    print("==================================================")

    # LOAD PARTS
    parts = []
    for section in pic_container:
        if section.type == "Part":
            (name, pieces, locators) = _get_part(section)
            parts.append({"name": name, "pieces": pieces, "locators": locators})

    # LOAD (CONVEX) PIECES
    pieces = []
    for section in pic_container:
        if section.type == 'Piece':
            pieces.append(_get_piece(section))

    # LOAD AND CREATE LOCATORS
    import_scale = scs_globals.import_scale
    locators = []
    for section in pic_container:
        if section.type == 'Locator':
            (locator_name, locator_index, locator_position, locator_rotation, locator_alias,
             locator_weight, locator_type, locator_parameters, locator_convex_piece) = _get_locator(section)
            lprint('I Adding a Locator: "%s"', locator_name)
            locator = _object_utils.create_locator_empty(locator_name, locator_position, locator_rotation, (1, 1, 1), 1.0, 'Collision')
            locator.scs_props.scs_part = _get_locator_part(parts, locator_index)
            locator.scs_props.locator_collider_centered = True
            locator.scs_props.locator_collider_mass = locator_weight
            locator.scs_props.locator_collider_type = locator_type
            if locator_type == 'Box':
                locator.scs_props.locator_collider_box_x = locator_parameters[0] * import_scale
                locator.scs_props.locator_collider_box_y = locator_parameters[2] * import_scale
                locator.scs_props.locator_collider_box_z = locator_parameters[1] * import_scale
            elif locator_type in ('Sphere', 'Capsule', 'Cylinder'):
                locator.scs_props.locator_collider_dia = locator_parameters[0] * 2 * import_scale
                locator.scs_props.locator_collider_len = locator_parameters[1] * import_scale
            elif locator_type == 'Convex':
                piece_index, piece_material, verts, faces = pieces[locator_convex_piece]
                if verts and faces:

                    # BOUNDING BOX DATA CREATION AND SPACE CONVERSION
                    min_val = [None, None, None]
                    max_val = [None, None, None]
                    scs_verts = []
                    for vert in verts:
                        scs_vert = _convert_utils.change_to_scs_xyz_coordinates(vert, import_scale)
                        scs_verts.append(scs_vert)
                        min_val, max_val = _math_utils.evaluate_minmax(scs_vert, min_val, max_val)
                    bbox, bbcenter = _math_utils.get_bb(min_val, max_val)

                    # FACE FLIPPING
                    flipped_faces = _mesh_utils.flip_faceverts(faces)

                    # COLLIDER CREATION
                    geom_data = (scs_verts, flipped_faces, bbox, bbcenter)
                    _object_utils.add_collider_convex_locator(geom_data, {}, locator)

            locators.append(locator)

    # DATA BUILDING

    # WARNING PRINTOUTS
    # if piece_count < 0: Print(dump_level, '\nW More Pieces found than were declared!')
    # if piece_count > 0: Print(dump_level, '\nW Some Pieces not found, but were declared!')
    # if dump_level > 1: print('')

    print("************************************")
    return {'FINISHED'}, locators

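# Tiny sketch of the box-collider parameter mapping above: SCS files are Y-up
# while Blender is Z-up, so the Y and Z extents swap on import. The parameter
# values and import_scale are example assumptions.
def _example_box_collider_mapping():
    import_scale = 1.0
    locator_parameters = (2.0, 4.0, 1.0)  # SCS (x, y, z) box extents, example data

    box_x = locator_parameters[0] * import_scale
    box_y = locator_parameters[2] * import_scale  # SCS z becomes Blender y
    box_z = locator_parameters[1] * import_scale  # SCS y becomes Blender z
    return box_x, box_y, box_z
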
def load(root_object, pia_files, armature, pis_filepath=None, bones=None):
    scs_globals = _get_scs_globals()

    print("\n************************************")
    print("** SCS PIA Importer **")
    print("** (c)2014 SCS Software **")
    print("************************************\n")

    import_scale = scs_globals.import_scale
    ind = '    '
    imported_count = 0
    for pia_filepath in pia_files:
        # Check if PIA file is for the actual skeleton...
        if pis_filepath and bones:
            skeleton_match = _pix_container.fast_check_for_pia_skeleton(pia_filepath, pis_filepath)
        else:
            skeleton_match, pia_skeleton = _pix_container.utter_check_for_pia_skeleton(pia_filepath, armature)

            if skeleton_match:
                path = os.path.split(pia_filepath)[0]
                pia_skeleton = os.path.join(path, pia_skeleton)
                if os.path.isfile(pia_skeleton):
                    bones = _pis.load(pia_skeleton, armature, get_only=True)
                else:
                    lprint("\nE The filepath %r doesn't exist!", (_path_utils.readable_norm(pia_skeleton),))
            else:
                lprint(str("E Animation doesn't match the skeleton. Animation won't be loaded!\n\t "
                           "Animation file: %r"), (pia_filepath,))

        if skeleton_match:
            lprint('I ++ "%s" IMPORTING animation data...', (os.path.basename(pia_filepath),))
            pia_container = _pix_container.get_data_from_file(pia_filepath, ind)
            if not pia_container:
                lprint('\nE File "%s" is empty!', (_path_utils.readable_norm(pia_filepath),))
                continue

            # TEST PRINTOUTS
            # ind = '  '
            # for section in pia_container:
            #     print('SEC.: "%s"' % section.type)
            #     for prop in section.props:
            #         print('%sProp: %s' % (ind, prop))
            #     for data in section.data:
            #         print('%sdata: %s' % (ind, data))
            #     for sec in section.sections:
            #         print_section(sec, ind)
            # print('\nTEST - Source: "%s"' % pia_container[0].props[1][1])
            # print('')

            # TEST EXPORT
            # path, file = os.path.splitext(pia_filepath)
            # export_filepath = str(path + '_reex' + file)
            # result = pix_write.write_data(pia_container, export_filepath, ind)
            # if result == {'FINISHED'}:
            #     Print(dump_level, '\nI Test export successful! The new file:\n  "%s"', export_filepath)
            # else:
            #     Print(dump_level, '\nE Test export failed! File:\n  "%s"', export_filepath)

            # LOAD HEADER
            format_version, source, f_type, animation_name, source_filename, author = _get_header(pia_container)
            if format_version != 3 or f_type != "Animation":
                continue

            # LOAD GLOBALS
            skeleton, total_time, bone_channel_count, custom_channel_count = _get_globals(pia_container)

            # CREATE ANIMATION ACTIONS
            anim_action = bpy.data.actions.new(animation_name + "_action")
            anim_action.use_fake_user = True
            anim_data = armature.animation_data if armature.animation_data else armature.animation_data_create()
            anim_data.action = anim_action

            # LOAD BONE CHANNELS
            bone_channels = _get_anim_channels(pia_container, section_name="BoneChannel")
            if len(bone_channels) > 0:

                for bone_name in bone_channels:
                    if bone_name in armature.data.bones:
                        ''' NOTE: skipped for now as no data needs to be read
                        stream_count = bone_channels[bone_name][0]
                        keyframe_count = bone_channels[bone_name][1]
                        '''
                        streams = bone_channels[bone_name][2]

                        # CREATE ANIMATION GROUP
                        anim_group = anim_action.groups.new(bone_name)
                        armature.pose.bones[bone_name].rotation_mode = 'XYZ'  # Set rotation mode.

                        # use pose bone scale set on PIS import
                        init_scale = Vector((1, 1, 1))
                        if _BONE_consts.init_scale_key in armature.pose.bones[bone_name]:
                            init_scale = armature.pose.bones[bone_name][_BONE_consts.init_scale_key]

                        # CREATE FCURVES
                        (pos_fcurves,
                         rot_fcurves,
                         sca_fcurves) = _create_fcurves(anim_action, anim_group, str('pose.bones["' + bone_name + '"]'), rot_euler=True)

                        # GET BONE REST POSITION MATRIX
                        bone_rest_matrix_scs = bones[bone_name][1].transposed()
                        parent_bone_name = bones[bone_name][0]
                        if parent_bone_name in bones:
                            parent_bone_rest_matrix_scs = bones[parent_bone_name][1].transposed()
                        else:
                            parent_bone_rest_matrix_scs = Matrix()
                            parent_bone_rest_matrix_scs.identity()

                        for key_time_i, key_time in enumerate(streams[0]):
                            keyframe = key_time_i + 1

                            # GET BONE ANIMATION MATRIX
                            bone_animation_matrix_scs = streams[1][key_time_i].transposed()

                            # CREATE DELTA MATRIX
                            delta_matrix = _get_delta_matrix(bone_rest_matrix_scs, parent_bone_rest_matrix_scs,
                                                             bone_animation_matrix_scs, import_scale)

                            # DECOMPOSE ANIMATION MATRIX
                            location, rotation, scale = delta_matrix.decompose()

                            # CALCULATE CURRENT SCALE - subtract difference between initial bone scale and current scale from 1
                            # NOTE: if imported PIS had initial bone scale different than 1,
                            # initial scale was saved into pose bones custom properties and
                            # has to be used here as bones after import in Blender always have scale of 1
                            scale = Vector((1 + scale[0] - init_scale[0],
                                            1 + scale[1] - init_scale[1],
                                            1 + scale[2] - init_scale[2]))

                            # NOTE: this scaling rotation switch came from UK variants which had scale -1
                            loc, rot, sca = bone_rest_matrix_scs.decompose()
                            if sca.y < 0:
                                rotation.y *= -1
                            if sca.z < 0:
                                rotation.z *= -1

                            rotation = rotation.to_euler('XYZ')

                            # BUILD TRANSFORMATION CURVES
                            for i in range(0, 3):
                                pos_fcurves[i].keyframe_points.insert(frame=float(keyframe), value=location[i], options={'FAST'})
                                rot_fcurves[i].keyframe_points.insert(frame=float(keyframe), value=rotation[i], options={'FAST'})
                                sca_fcurves[i].keyframe_points.insert(frame=float(keyframe), value=scale[i], options={'FAST'})

                        # SET LINEAR INTERPOLATION FOR ALL CURVES
                        color_mode = 'AUTO_RAINBOW'  # Or better 'AUTO_RGB'?
                        for curve in pos_fcurves:
                            curve.color_mode = color_mode
                            for keyframe in curve.keyframe_points:
                                keyframe.interpolation = 'LINEAR'
                        for curve in rot_fcurves:
                            curve.color_mode = color_mode
                            for keyframe in curve.keyframe_points:
                                keyframe.interpolation = 'LINEAR'
                        for curve in sca_fcurves:
                            curve.color_mode = color_mode
                            for keyframe in curve.keyframe_points:
                                keyframe.interpolation = 'LINEAR'

                        for curve in rot_fcurves:
                            _animation_utils.apply_euler_filter(curve)

            # LOAD CUSTOM CHANNELS (ARMATURE OFFSET ANIMATION)
            custom_channels = _get_anim_channels(pia_container, section_name="CustomChannel")
            if len(custom_channels) > 0:
                for channel_name in custom_channels:
                    # print(' >>> channel %r - %s' % (channel_name, str(custom_channels[channel_name])))
                    if channel_name == 'Prism Movement':
                        ''' NOTE: skipped for now as no data needs to be read
                        stream_count = custom_channels[channel_name][0]
                        keyframe_count = custom_channels[channel_name][1]
                        '''
                        streams = custom_channels[channel_name][2]
                        # print(' channel %r - streams %s - keyframes %s' % (channel_name, stream_count, keyframe_count))

                        # CREATE ANIMATION GROUP
                        # anim_group = anim_action.groups.new(channel_name)
                        anim_group = anim_action.groups.new('Location')
                        # armature.[channel_name].rotation_mode = 'XYZ'  # Set rotation mode.
                        # active_bone = armature.data.bones[channel_name]
                        # parent_bone = active_bone.parent

                        # CREATE FCURVES
                        # pos_fcurves, rot_fcurves, sca_fcurves = _create_fcurves(anim_action, anim_group, anim_curve, rot_euler=True,
                        #                                                         types='LocRotSca')
                        # pos_fcurves, rot_fcurves, sca_fcurves = _create_fcurves(anim_action, anim_group, anim_curve, types='Loc')
                        fcurve_pos_x = anim_action.fcurves.new('location', 0)
                        fcurve_pos_y = anim_action.fcurves.new('location', 1)
                        fcurve_pos_z = anim_action.fcurves.new('location', 2)
                        fcurve_pos_x.group = anim_group
                        fcurve_pos_y.group = anim_group
                        fcurve_pos_z.group = anim_group
                        pos_fcurves = (fcurve_pos_x, fcurve_pos_y, fcurve_pos_z)

                        location = None
                        for key_time_i, key_time in enumerate(streams[0]):
                            # print(' key_time: %s' % str(key_time[0]))
                            # keyframe = key_time_i * (key_time[0] * 10)  # TODO: Do proper timing...
                            keyframe = key_time_i + 1
                            scs_offset = _convert_utils.change_to_scs_xyz_coordinates(
                                custom_channels[channel_name][2][1][key_time_i], import_scale)
                            offset = Vector(scs_offset)
                            if location is None:
                                location = offset
                            else:
                                location = location + offset
                            # print(' > location: %s' % str(location))

                            # BUILD TRANSLATION CURVES
                            pos_fcurves[0].keyframe_points.insert(frame=float(keyframe), value=location[0], options={'FAST'})
                            pos_fcurves[1].keyframe_points.insert(frame=float(keyframe), value=location[1], options={'FAST'})
                            pos_fcurves[2].keyframe_points.insert(frame=float(keyframe), value=location[2], options={'FAST'})

                        # SET LINEAR INTERPOLATION FOR ALL CURVES
                        for curve in pos_fcurves:
                            for keyframe in curve.keyframe_points:
                                keyframe.interpolation = 'LINEAR'
                    else:
                        lprint('W Unknown channel %r in "%s" file.', (channel_name, os.path.basename(pia_filepath)))

            # CREATE SCS ANIMATION
            animation = _animation_utils.add_animation_to_root(root_object, animation_name)
            animation.export = True
            animation.action = anim_action.name
            animation.anim_start = anim_action.frame_range[0]
            animation.anim_end = anim_action.frame_range[1]

            if total_time:
                animation.length = total_time

            # WARNING PRINTOUTS
            # if piece_count < 0: Print(dump_level, '\nW More Pieces found than were declared!')
            # if piece_count > 0: Print(dump_level, '\nW Some Pieces not found, but were declared!')
            # if dump_level > 1: print('')

            imported_count += 1
        else:
            lprint('I "%s" file REJECTED', (os.path.basename(pia_filepath),))

    # at the end of batch import make sure to select last animation always
    if imported_count > 0:
        root_object.scs_props.active_scs_animation = len(root_object.scs_object_animation_inventory) - 1

    print("************************************")
    return imported_count

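# Minimal sketch of the f-curve pattern used for the 'Prism Movement' channel:
# three 'location' curves in one group, keyframes inserted with the 'FAST'
# option and switched to LINEAR interpolation in a second pass. The armature
# object name "Armature" and the keyframe values are example assumptions.
def _example_location_fcurves():
    import bpy

    arm = bpy.data.objects["Armature"]
    anim_action = bpy.data.actions.new("demo_action")
    arm.animation_data_create().action = anim_action

    anim_group = anim_action.groups.new('Location')
    pos_fcurves = tuple(anim_action.fcurves.new('location', index=i) for i in range(3))
    for curve in pos_fcurves:
        curve.group = anim_group

    for keyframe, value in enumerate(((0.0, 0.0, 0.0), (0.5, 0.0, 0.0), (1.0, 0.0, 0.0)), start=1):
        for i in range(3):
            pos_fcurves[i].keyframe_points.insert(frame=float(keyframe), value=value[i], options={'FAST'})

    # 'FAST' skips curve updates on every insert, so interpolation is fixed afterwards
    for curve in pos_fcurves:
        for kp in curve.keyframe_points:
            kp.interpolation = 'LINEAR'
    return anim_action
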
def load(root_object, pia_files, armature, skeleton=None, bones=None):
    if not bones:
        bones = {}

    scs_globals = _get_scs_globals()

    print("\n************************************")
    print("** SCS PIA Importer **")
    print("** (c)2014 SCS Software **")
    print("************************************\n")

    import_scale = scs_globals.import_scale
    ind = '    '
    for pia_filepath in pia_files:
        # Check if PIA file is for the actual skeleton...
        if skeleton:
            skeleton_match = _fast_check_for_pia_skeleton(pia_filepath, skeleton)
        else:
            skeleton_match, skeleton = _utter_check_for_pia_skeleton(pia_filepath, armature)
            # print('%r - %s' % (os.path.basename(pia_filepath), skeleton_match))
            # print(' skeleton: %r' % skeleton)
            if skeleton_match:
                path = os.path.split(pia_filepath)[0]
                pis_filepath = os.path.join(path, skeleton)
                if os.path.isfile(pis_filepath):
                    # print(' pis_filepath: %r' % pis_filepath)
                    bones = _pis.load(pis_filepath, armature)
                else:
                    lprint("""\nE The filepath "%s" doesn't exist!""", (pis_filepath.replace("\\", "/"),))

        if skeleton_match:
            lprint('I ++ "%s" IMPORTING animation data...', (os.path.basename(pia_filepath),))
            pia_container, state = _pix_parser.read_data(pia_filepath, ind)
            if not pia_container:
                lprint('\nE File "%s" is empty!', (pia_filepath.replace("\\", "/"),))
                return {'CANCELLED'}
            if state == 'ERR':
                lprint('\nE File "%s" is not SCS Animation file!', (pia_filepath.replace("\\", "/"),))
                return {'CANCELLED'}

            # TEST PRINTOUTS
            # ind = '  '
            # for section in pia_container:
            #     print('SEC.: "%s"' % section.type)
            #     for prop in section.props:
            #         print('%sProp: %s' % (ind, prop))
            #     for data in section.data:
            #         print('%sdata: %s' % (ind, data))
            #     for sec in section.sections:
            #         print_section(sec, ind)
            # print('\nTEST - Source: "%s"' % pia_container[0].props[1][1])
            # print('')

            # TEST EXPORT
            # path, file = os.path.splitext(pia_filepath)
            # export_filepath = str(path + '_reex' + file)
            # result = pix_write.write_data(pia_container, export_filepath, ind)
            # if result == {'FINISHED'}:
            #     Print(dump_level, '\nI Test export successful! The new file:\n  "%s"', export_filepath)
            # else:
            #     Print(dump_level, '\nE Test export failed! File:\n  "%s"', export_filepath)

            # LOAD HEADER
            format_version, source, f_type, animation_name, source_filename, author = _get_header(pia_container)
            if format_version != 3 or f_type != "Animation":
                return {'CANCELLED'}

            # LOAD GLOBALS
            skeleton, total_time, bone_channel_count, custom_channel_count = _get_globals(pia_container)

            # CREATE ANIMATION ACTIONS
            anim_action = bpy.data.actions.new(animation_name)
            anim_action.use_fake_user = True
            if total_time:
                anim_action.scs_props.action_length = total_time
            anim_data = armature.animation_data_create()
            anim_data.action = anim_action

            # LOAD BONE CHANNELS
            # print(' * armature: %r' % armature.name)
            if bone_channel_count > 0:
                bone_channels = _get_anim_channels(pia_container, section_name="BoneChannel")
                # ...
                for bone_name in bone_channels:
                    # bone_name = channel[0]
                    if bone_name in armature.data.bones:
                        # print('%r is in armature %r' % (bone_name, armature.name))
                        ''' NOTE: skipped for now as no data needs to be read
                        stream_count = bone_channels[bone_name][0]
                        keyframe_count = bone_channels[bone_name][1]
                        '''
                        streams = bone_channels[bone_name][2]
                        # print(' channel %r - streams %s - keyframes %s' % (bone_name, stream_count, keyframe_count))

                        # CREATE ANIMATION GROUP
                        anim_group = anim_action.groups.new(bone_name)
                        armature.pose.bones[bone_name].rotation_mode = 'XYZ'  # Set rotation mode.
                        active_bone = armature.data.bones[bone_name]
                        # parent_bone = active_bone.parent

                        # CREATE FCURVES
                        (pos_fcurves,
                         rot_fcurves,
                         sca_fcurves) = _create_fcurves(anim_action, anim_group, str('pose.bones["' + bone_name + '"]'))

                        # GET BONE REST POSITION MATRIX
                        bone_rest_matrix = active_bone.matrix_local
                        bone_rest_matrix_scs = bones[bone_name][1].transposed()
                        parent_bone_name = bones[bone_name][0]
                        if parent_bone_name in bones:
                            parent_bone_rest_matrix_scs = bones[parent_bone_name][1].transposed()
                        else:
                            parent_bone_rest_matrix_scs = Matrix()
                            parent_bone_rest_matrix_scs.identity()

                        # if bone_name in ('LeftHand1', 'LeftHand'):
                        #     print('\n %r - bone_rest_matrix_scs:\n%s' % (bone_name, bone_rest_matrix_scs))
                        #     print(' %r - bone_rest_matrix:\n%s' % (bone_name, bone_rest_matrix))
                        #     print(' %r - parent_bone_rest_matrix_scs:\n%s' % (bone_name, parent_bone_rest_matrix_scs))

                        for key_time_i, key_time in enumerate(streams[0]):
                            # print(' key_time: %s' % str(key_time[0]))
                            # keyframe = key_time_i * (key_time[0] * 10)  # TODO: Do proper timing...
                            keyframe = key_time_i + 1

                            # GET BONE ANIMATION MATRIX
                            bone_animation_matrix_scs = streams[1][key_time_i].transposed()
                            # if bone_name in ('LeftHand1', 'LeftHand') and key_time_i == 0:
                            #     print(' %r - bone_animation_matrix_scs (%i):\n%s' % (bone_name, key_time_i, bone_animation_matrix_scs))

                            # CREATE DELTA MATRIX
                            delta_matrix = _get_delta_matrix(bone_rest_matrix, bone_rest_matrix_scs,
                                                             parent_bone_rest_matrix_scs, bone_animation_matrix_scs, import_scale)

                            # DECOMPOSE ANIMATION MATRIX
                            location, rotation, scale = delta_matrix.decompose()
                            # if bone_name in ('left_leg', 'root') and key_time_i == 0: print(' location:\n%s' % str(location))
                            rotation = rotation.to_euler('XYZ')

                            # BUILD TRANSLATION CURVES
                            pos_fcurves[0].keyframe_points.insert(frame=float(keyframe), value=location[0], options={'FAST'})
                            pos_fcurves[1].keyframe_points.insert(frame=float(keyframe), value=location[1], options={'FAST'})
                            pos_fcurves[2].keyframe_points.insert(frame=float(keyframe), value=location[2], options={'FAST'})

                            # BUILD ROTATION CURVES
                            rot_fcurves[0].keyframe_points.insert(frame=float(keyframe), value=rotation[0], options={'FAST'})
                            rot_fcurves[1].keyframe_points.insert(frame=float(keyframe), value=rotation[1], options={'FAST'})
                            rot_fcurves[2].keyframe_points.insert(frame=float(keyframe), value=rotation[2], options={'FAST'})

                            # BUILD SCALE CURVES
                            sca_fcurves[0].keyframe_points.insert(frame=float(keyframe), value=scale[0], options={'FAST'})
                            sca_fcurves[1].keyframe_points.insert(frame=float(keyframe), value=scale[1], options={'FAST'})
                            sca_fcurves[2].keyframe_points.insert(frame=float(keyframe), value=scale[2], options={'FAST'})

                        # SET LINEAR INTERPOLATION FOR ALL CURVES
                        color_mode = 'AUTO_RAINBOW'  # Or better 'AUTO_RGB'?
                        for curve in pos_fcurves:
                            curve.color_mode = color_mode
                            for keyframe in curve.keyframe_points:
                                keyframe.interpolation = 'LINEAR'
                        for curve in rot_fcurves:
                            curve.color_mode = color_mode
                            for keyframe in curve.keyframe_points:
                                keyframe.interpolation = 'LINEAR'
                        for curve in sca_fcurves:
                            curve.color_mode = color_mode
                            for keyframe in curve.keyframe_points:
                                keyframe.interpolation = 'LINEAR'

            # LOAD CUSTOM CHANNELS (ARMATURE OFFSET ANIMATION)
            # if custom_channel_count > 0:  # NOTE: Can't be used because exporter from Maya always saves 0 even if there are Custom Channels.
            custom_channels = _get_anim_channels(pia_container, section_name="CustomChannel")
            if len(custom_channels) > 0:
                for channel_name in custom_channels:
                    # print(' >>> channel %r - %s' % (channel_name, str(custom_channels[channel_name])))
                    if channel_name == 'Prism Movement':
                        ''' NOTE: skipped for now as no data needs to be read
                        stream_count = custom_channels[channel_name][0]
                        keyframe_count = custom_channels[channel_name][1]
                        '''
                        streams = custom_channels[channel_name][2]
                        # print(' channel %r - streams %s - keyframes %s' % (channel_name, stream_count, keyframe_count))

                        # CREATE ANIMATION GROUP
                        # anim_group = anim_action.groups.new(channel_name)
                        anim_group = anim_action.groups.new('Location')
                        # armature.[channel_name].rotation_mode = 'XYZ'  # Set rotation mode.
                        # active_bone = armature.data.bones[channel_name]
                        # parent_bone = active_bone.parent

                        # CREATE FCURVES
                        # pos_fcurves, rot_fcurves, sca_fcurves = _create_fcurves(anim_action, anim_group, anim_curve, rot_euler=True,
                        #                                                         types='LocRotSca')
                        # pos_fcurves, rot_fcurves, sca_fcurves = _create_fcurves(anim_action, anim_group, anim_curve, types='Loc')
                        fcurve_pos_x = anim_action.fcurves.new('location', 0)
                        fcurve_pos_y = anim_action.fcurves.new('location', 1)
                        fcurve_pos_z = anim_action.fcurves.new('location', 2)
                        fcurve_pos_x.group = anim_group
                        fcurve_pos_y.group = anim_group
                        fcurve_pos_z.group = anim_group
                        pos_fcurves = (fcurve_pos_x, fcurve_pos_y, fcurve_pos_z)

                        location = None
                        for key_time_i, key_time in enumerate(streams[0]):
                            # print(' key_time: %s' % str(key_time[0]))
                            # keyframe = key_time_i * (key_time[0] * 10)  # TODO: Do proper timing...
                            keyframe = key_time_i + 1
                            scs_offset = _convert_utils.change_to_scs_xyz_coordinates(
                                custom_channels[channel_name][2][1][key_time_i], import_scale)
                            offset = Vector(scs_offset)
                            if location is None:
                                location = offset
                            else:
                                location = location + offset
                            # print(' > location: %s' % str(location))

                            # BUILD TRANSLATION CURVES
                            pos_fcurves[0].keyframe_points.insert(frame=float(keyframe), value=location[0], options={'FAST'})
                            pos_fcurves[1].keyframe_points.insert(frame=float(keyframe), value=location[1], options={'FAST'})
                            pos_fcurves[2].keyframe_points.insert(frame=float(keyframe), value=location[2], options={'FAST'})

                        # SET LINEAR INTERPOLATION FOR ALL CURVES
                        for curve in pos_fcurves:
                            for keyframe in curve.keyframe_points:
                                keyframe.interpolation = 'LINEAR'
                    else:
                        lprint('W Unknown channel %r in "%s" file.', (channel_name, os.path.basename(pia_filepath)))

            # CREATE SCS ANIMATION
            animation = _animation_utils.add_animation_to_root(root_object, animation_name)
            animation.export = True
            animation.action = anim_action.name
            animation.anim_start = anim_action.frame_range[0]
            animation.anim_end = anim_action.frame_range[1]
            # animation.anim_export_step =
            # animation.anim_export_filepath =
            if total_time:
                animation.length = total_time

            # WARNING PRINTOUTS
            # if piece_count < 0: Print(dump_level, '\nW More Pieces found than were declared!')
            # if piece_count > 0: Print(dump_level, '\nW Some Pieces not found, but were declared!')
            # if dump_level > 1: print('')
        else:
            lprint('I "%s" file REJECTED', (os.path.basename(pia_filepath),))

    print("************************************")
    return {'FINISHED'}

def load(root_object, pia_files, armature, skeleton=None, bones=None): if not bones: bones = {} scs_globals = _get_scs_globals() print("\n************************************") print("** SCS PIA Importer **") print("** (c)2014 SCS Software **") print("************************************\n") import_scale = scs_globals.import_scale ind = ' ' for pia_filepath in pia_files: # Check if PIA file is for the actual skeleton... if skeleton: skeleton_match = _fast_check_for_pia_skeleton( pia_filepath, skeleton) else: skeleton_match, skeleton = _utter_check_for_pia_skeleton( pia_filepath, armature) # print('%r - %s' %(os.path.basename(pia_filepath), skeleton_match)) # print(' skeleton: %r' % skeleton) if skeleton_match: path = os.path.split(pia_filepath)[0] pis_filepath = os.path.join(path, skeleton) if os.path.isfile(pis_filepath): # print(' pis_filepath: %r' % pis_filepath) bones = _pis.load(pis_filepath, armature) else: lprint("""\nE The filepath "%s" doesn't exist!""", (pis_filepath.replace("\\", "/"), )) if skeleton_match: lprint('I ++ "%s" IMPORTING animation data...', (os.path.basename(pia_filepath), )) pia_container, state = _pix_parser.read_data(pia_filepath, ind) if not pia_container: lprint('\nE File "%s" is empty!', (pia_filepath.replace("\\", "/"), )) return {'CANCELLED'} if state == 'ERR': lprint('\nE File "%s" is not SCS Animation file!', (pia_filepath.replace("\\", "/"), )) return {'CANCELLED'} # TEST PRINTOUTS # ind = ' ' # for section in pia_container: # print('SEC.: "%s"' % section.type) # for prop in section.props: # print('%sProp: %s' % (ind, prop)) # for data in section.data: # print('%sdata: %s' % (ind, data)) # for sec in section.sections: # print_section(sec, ind) # print('\nTEST - Source: "%s"' % pia_container[0].props[1][1]) # print('') # TEST EXPORT # path, file = os.path.splitext(pia_filepath) # export_filepath = str(path + '_reex' + file) # result = pix_write.write_data(pia_container, export_filepath, ind) # if result == {'FINISHED'}: # Print(dump_level, '\nI Test export succesful! The new file:\n "%s"', export_filepath) # else: # Print(dump_level, '\nE Test export failed! File:\n "%s"', export_filepath) # LOAD HEADER format_version, source, f_type, animation_name, source_filename, author = _get_header( pia_container) if format_version != 3 or f_type != "Animation": return {'CANCELLED'} # LOAD GLOBALS skeleton, total_time, bone_channel_count, custom_channel_count = _get_globals( pia_container) # CREATE ANIMATION ACTIONS anim_action = bpy.data.actions.new(animation_name) anim_action.use_fake_user = True if total_time: anim_action.scs_props.action_length = total_time anim_data = armature.animation_data_create() anim_data.action = anim_action # LOAD BONE CHANNELS # print(' * armature: %r' % armature.name) if bone_channel_count > 0: bone_channels = _get_anim_channels(pia_container, section_name="BoneChannel") # ... for bone_name in bone_channels: # bone_name = channel[0] if bone_name in armature.data.bones: # print('%r is in armature %r' % (bone_name, armature.name)) ''' NOTE: skipped for now as no data needs to be readed stream_count = bone_channels[bone_name][0] keyframe_count = bone_channels[bone_name][1] ''' streams = bone_channels[bone_name][2] # print(' channel %r - streams %s - keyframes %s' % (bone_name, stream_count, keyframe_count)) # CREATE ANIMATION GROUP anim_group = anim_action.groups.new(bone_name) armature.pose.bones[ bone_name].rotation_mode = 'XYZ' # Set rotation mode. 
active_bone = armature.data.bones[bone_name] # parent_bone = active_bone.parent # CREATE FCURVES (pos_fcurves, rot_fcurves, sca_fcurves) = _create_fcurves( anim_action, anim_group, str('pose.bones["' + bone_name + '"]')) # GET BONE REST POSITION MATRIX bone_rest_matrix = active_bone.matrix_local bone_rest_matrix_scs = bones[bone_name][1].transposed() parent_bone_name = bones[bone_name][0] if parent_bone_name in bones: parent_bone_rest_matrix_scs = bones[ parent_bone_name][1].transposed() else: parent_bone_rest_matrix_scs = Matrix() parent_bone_rest_matrix_scs.identity() # if bone_name in ('LeftHand1', 'LeftHand'): # print('\n %r - bone_rest_matrix_scs:\n%s' % (bone_name, bone_rest_matrix_scs)) # print(' %r - bone_rest_matrix:\n%s' % (bone_name, bone_rest_matrix)) # print(' %r - parent_bone_rest_matrix_scs:\n%s' % (bone_name, parent_bone_rest_matrix_scs)) for key_time_i, key_time in enumerate(streams[0]): # print(' key_time: %s' % str(key_time[0])) # keyframe = key_time_i * (key_time[0] * 10) ## TODO: Do proper timing... keyframe = key_time_i + 1 # GET BONE ANIMATION MATRIX bone_animation_matrix_scs = streams[1][ key_time_i].transposed() # if bone_name in ('LeftHand1', 'LeftHand') and key_time_i == 0: print(' %r - bone_animation_matrix_scs (%i):\n%s' % ( # bone_name, key_time_i, bone_animation_matrix_scs)) # CREATE DELTA MATRIX delta_matrix = _get_delta_matrix( bone_rest_matrix, bone_rest_matrix_scs, parent_bone_rest_matrix_scs, bone_animation_matrix_scs, import_scale) # DECOMPOSE ANIMATION MATRIX location, rotation, scale = delta_matrix.decompose( ) # if bone_name in ('left_leg', 'root') and key_time_i == 0: print(' location:\n%s' % str(location)) rotation = rotation.to_euler('XYZ') # BUILD TRANSLATION CURVES pos_fcurves[0].keyframe_points.insert( frame=float(keyframe), value=location[0], options={'FAST'}) pos_fcurves[1].keyframe_points.insert( frame=float(keyframe), value=location[1], options={'FAST'}) pos_fcurves[2].keyframe_points.insert( frame=float(keyframe), value=location[2], options={'FAST'}) # BUILD ROTATION CURVES rot_fcurves[0].keyframe_points.insert( frame=float(keyframe), value=rotation[0], options={'FAST'}) rot_fcurves[1].keyframe_points.insert( frame=float(keyframe), value=rotation[1], options={'FAST'}) rot_fcurves[2].keyframe_points.insert( frame=float(keyframe), value=rotation[2], options={'FAST'}) # BUILD SCALE CURVES sca_fcurves[0].keyframe_points.insert( frame=float(keyframe), value=scale[0], options={'FAST'}) sca_fcurves[1].keyframe_points.insert( frame=float(keyframe), value=scale[1], options={'FAST'}) sca_fcurves[2].keyframe_points.insert( frame=float(keyframe), value=scale[2], options={'FAST'}) # SET LINEAR INTERPOLATION FOR ALL CURVES color_mode = 'AUTO_RAINBOW' # Or better 'AUTO_RGB'? for curve in pos_fcurves: curve.color_mode = color_mode for keyframe in curve.keyframe_points: keyframe.interpolation = 'LINEAR' for curve in rot_fcurves: curve.color_mode = color_mode for keyframe in curve.keyframe_points: keyframe.interpolation = 'LINEAR' for curve in sca_fcurves: curve.color_mode = color_mode for keyframe in curve.keyframe_points: keyframe.interpolation = 'LINEAR' # LOAD CUSTOM CHANNELS (ARMATURE OFFSET ANIMATION) # if custom_channel_count > 0: ## NOTE: Can't be used because exporter from Maya saves always 0 even if there are Custom Channels. 
            custom_channels = _get_anim_channels(pia_container, section_name="CustomChannel")
            if len(custom_channels) > 0:
                for channel_name in custom_channels:
                    # print(' >>> channel %r - %s' % (channel_name, str(custom_channels[channel_name])))
                    if channel_name == 'Prism Movement':
                        '''
                        NOTE: skipped for now as no data needs to be read
                        stream_count = custom_channels[channel_name][0]
                        keyframe_count = custom_channels[channel_name][1]
                        '''
                        streams = custom_channels[channel_name][2]
                        # print(' channel %r - streams %s - keyframes %s' % (channel_name, stream_count, keyframe_count))

                        # CREATE ANIMATION GROUP
                        # anim_group = anim_action.groups.new(channel_name)
                        anim_group = anim_action.groups.new('Location')
                        # armature.[channel_name].rotation_mode = 'XYZ'  # set rotation mode
                        # active_bone = armature.data.bones[channel_name]
                        # parent_bone = active_bone.parent

                        # CREATE FCURVES
                        # pos_fcurves, rot_fcurves, sca_fcurves = _create_fcurves(anim_action, anim_group, anim_curve, rot_euler=True, types='LocRotSca')
                        # pos_fcurves, rot_fcurves, sca_fcurves = _create_fcurves(anim_action, anim_group, anim_curve, types='Loc')
                        fcurve_pos_x = anim_action.fcurves.new('location', 0)
                        fcurve_pos_y = anim_action.fcurves.new('location', 1)
                        fcurve_pos_z = anim_action.fcurves.new('location', 2)
                        fcurve_pos_x.group = anim_group
                        fcurve_pos_y.group = anim_group
                        fcurve_pos_z.group = anim_group
                        pos_fcurves = (fcurve_pos_x, fcurve_pos_y, fcurve_pos_z)

                        location = None
                        for key_time_i, key_time in enumerate(streams[0]):
                            # print(' key_time: %s' % str(key_time[0]))
                            # keyframe = key_time_i * (key_time[0] * 10)  # TODO: Do proper timing...
                            keyframe = key_time_i + 1
                            scs_offset = _convert_utils.change_to_scs_xyz_coordinates(custom_channels[channel_name][2][1][key_time_i], import_scale)
                            offset = Vector(scs_offset)
                            if location is None:
                                location = offset
                            else:
                                location = location + offset
                            # print(' > location: %s' % str(location))

                            # BUILD TRANSLATION CURVES
                            pos_fcurves[0].keyframe_points.insert(frame=float(keyframe), value=location[0], options={'FAST'})
                            pos_fcurves[1].keyframe_points.insert(frame=float(keyframe), value=location[1], options={'FAST'})
                            pos_fcurves[2].keyframe_points.insert(frame=float(keyframe), value=location[2], options={'FAST'})

                        # SET LINEAR INTERPOLATION FOR ALL CURVES
                        for curve in pos_fcurves:
                            for keyframe in curve.keyframe_points:
                                keyframe.interpolation = 'LINEAR'
                    else:
                        lprint('W Unknown channel %r in "%s" file.', (channel_name, os.path.basename(pia_filepath)))

            # CREATE SCS ANIMATION
            animation = _animation_utils.add_animation_to_root(root_object, animation_name)
            animation.export = True
            animation.action = anim_action.name
            animation.anim_start = anim_action.frame_range[0]
            animation.anim_end = anim_action.frame_range[1]
            # animation.anim_export_step =
            # animation.anim_export_filepath =
            if total_time:
                animation.length = total_time

            # WARNING PRINTOUTS
            # if piece_count < 0: Print(dump_level, '\nW More Pieces found than were declared!')
            # if piece_count > 0: Print(dump_level, '\nW Some Pieces not found, but were declared!')
            # if dump_level > 1: print('')
        else:
            lprint('I "%s" file REJECTED', (os.path.basename(pia_filepath),))

    print("************************************")
    return {'FINISHED'}
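# Example usage (a hedged sketch, not part of the original sources): driving the
# PIA loader above from an import operator. "root_obj" and "armature_obj" stand
# for hypothetical, already-imported scene objects, and the file paths are
# illustrative only; when the "skeleton" argument is omitted, the skeleton name
# is resolved from the PIA files themselves.
#
#     pia_files = ["/models/character/walk.pia", "/models/character/run.pia"]
#     result = load(root_obj, pia_files, armature_obj, skeleton="character.pis")
#     if result == {'FINISHED'}:
#         print("Animations imported and linked to", root_obj.name)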
def load(filepath):
    scs_globals = _get_scs_globals()

    print("\n************************************")
    print("** SCS PIC Importer **")
    print("** (c)2014 SCS Software **")
    print("************************************\n")

    ind = ' '
    pic_container = _pix_container.get_data_from_file(filepath, ind)

    # TEST PRINTOUTS
    # ind = ' '
    # for section in pic_container:
    #     print('SEC.: "%s"' % section.type)
    #     for prop in section.props:
    #         print('%sProp: %s' % (ind, prop))
    #     for data in section.data:
    #         print('%sdata: %s' % (ind, data))
    #     for sec in section.sections:
    #         print_section(sec, ind)
    # print('\nTEST - Source: "%s"' % pic_container[0].props[1][1])
    # print('')

    # TEST EXPORT
    # path, file = os.path.splitext(filepath)
    # export_filepath = str(path + '_reex' + file)
    # result = pix_write.write_data(pic_container, export_filepath, ind)
    # if result == {'FINISHED'}:
    #     Print(dump_level, '\nI Test export successful! The new file:\n "%s"', export_filepath)
    # else:
    #     Print(dump_level, '\nE Test export failed! File:\n "%s"', export_filepath)

    # LOAD HEADER
    '''
    NOTE: skipped for now as no data needs to be read
    format_version, source, f_type, f_name, source_filename, author = _get_header(pic_container)
    '''

    # LOAD GLOBALS
    '''
    NOTE: skipped for now as no data needs to be read
    vertex_count, triangle_count, material_count, piece_count, part_count, locator_count = _get_global(pic_container)
    '''

    # LOAD MATERIALS
    if 0:  # NOTE: NO MATERIALS ARE USED FOR COLLIDERS AT THE MOMENT!
        loaded_materials = []
        for section in pic_container:
            if section.type == 'Material':
                material_alias, material_effect = _get_material(section)
                lprint('I Adding a Material Alias: "%s"', material_alias)
                loaded_materials.append(material_alias)

                # PRINT "MATERIAL SETTINGS" TO CONSOLE...
                if 0:
                    import pprint
                    pp = pprint.PrettyPrinter(indent=1)
                    print("=== MATERIAL SETTINGS ==========================")
                    pp.pprint(material_effect)
                    print("==================================================")

    # LOAD PARTS
    parts = []
    for section in pic_container:
        if section.type == "Part":
            (name, pieces, locators) = _get_part(section)
            parts.append({"name": name, "pieces": pieces, "locators": locators})

    # LOAD (CONVEX) PIECES
    pieces = []
    for section in pic_container:
        if section.type == 'Piece':
            pieces.append(_get_piece(section))

    # LOAD AND CREATE LOCATORS
    import_scale = scs_globals.import_scale
    locators = []
    for section in pic_container:
        if section.type == 'Locator':
            (locator_name,
             locator_index,
             locator_position,
             locator_rotation,
             locator_alias,
             locator_weight,
             locator_type,
             locator_parameters,
             locator_convex_piece) = _get_locator(section)
            lprint('I Adding a Locator: "%s"', locator_name)
            locator = _object_utils.create_locator_empty(locator_name, locator_position, locator_rotation, (1, 1, 1), 1.0, 'Collision')
            locator.scs_props.scs_part = _get_locator_part(parts, locator_index)
            locator.scs_props.locator_collider_centered = True
            locator.scs_props.locator_collider_mass = locator_weight
            locator.scs_props.locator_collider_type = locator_type
            if locator_type == 'Box':
                locator.scs_props.locator_collider_box_x = locator_parameters[0] * import_scale
                locator.scs_props.locator_collider_box_y = locator_parameters[2] * import_scale
                locator.scs_props.locator_collider_box_z = locator_parameters[1] * import_scale
            elif locator_type in ('Sphere', 'Capsule', 'Cylinder'):
                locator.scs_props.locator_collider_dia = locator_parameters[0] * 2 * import_scale
                locator.scs_props.locator_collider_len = locator_parameters[1] * import_scale
            elif locator_type == 'Convex':
                piece_index, piece_material, verts, faces = pieces[locator_convex_piece]
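                # Convex collider geometry is rebuilt below: vertices are converted to
                # SCS coordinates while a bounding box is accumulated, face winding is
                # flipped, and the result is attached to the locator as a convex collider.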
                if verts and faces:
                    # BOUNDING BOX DATA CREATION AND SPACE CONVERSION
                    min_val = [None, None, None]
                    max_val = [None, None, None]
                    scs_verts = []
                    for vert in verts:
                        scs_vert = _convert_utils.change_to_scs_xyz_coordinates(vert, import_scale)
                        scs_verts.append(scs_vert)
                        min_val, max_val = _math_utils.evaluate_minmax(scs_vert, min_val, max_val)
                    bbox, bbcenter = _math_utils.get_bb(min_val, max_val)

                    # FACE FLIPPING
                    flipped_faces = _mesh_utils.flip_faceverts(faces)

                    # COLLIDER CREATION
                    geom_data = (scs_verts, flipped_faces, bbox, bbcenter)
                    _object_utils.add_collider_convex_locator(geom_data, {}, locator)

            locators.append(locator)

    # DATA BUILDING

    # WARNING PRINTOUTS
    # if piece_count < 0: Print(dump_level, '\nW More Pieces found than were declared!')
    # if piece_count > 0: Print(dump_level, '\nW Some Pieces not found, but were declared!')
    # if dump_level > 1: print('')

    print("************************************")
    return {'FINISHED'}, locators
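# Example usage (a hedged sketch, not part of the original sources): importing a
# collision (.pic) file returns the operator status together with the created
# collision locator objects. The path below is illustrative only.
#
#     result, collision_locators = load("/models/truck/truck.pic")
#     if result == {'FINISHED'}:
#         print("Created %i collision locator(s)" % len(collision_locators))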