def create_vert_material(mesh, vertMat):
    """Build a node-based Blender material from a W3D vertex material.

    The material is named "<meshName>.<vmName>".  Shading values that have
    no Principled BSDF slot are preserved as custom properties so they can
    be round-tripped on export.
    """
    mat_name = mesh.header.meshName + "." + vertMat.vmName
    mat = bpy.data.materials.new(mat_name)
    mat.use_nodes = True
    #mat.blend_method = 'BLEND'
    wrapper = PrincipledBSDFWrapper(mat, is_readonly=False)
    info = vertMat.vmInfo
    wrapper.base_color = rgb_to_vector(info.diffuse)
    wrapper.alpha = info.opacity
    # Keep the raw vertex-material values as custom properties.
    mat["Shininess"] = info.shininess
    mat["Specular"] = rgb_to_vector(info.specular)
    mat["Emission"] = rgb_to_vector(info.emissive)
    mat["Diffuse"] = rgb_to_vector(info.diffuse)
    mat["Translucency"] = info.translucency
    return mat
def create_new_bmat(self, bmat_name, rgba, func_data):
    """Create a node-based material from an RGBA color.

    Colors with alpha below 1.0 are set up for alpha blending.  When the
    "sharemats" config option is on, the material is cached in
    func_data["matdatabase"] keyed by its RGBA tuple for reuse.
    """
    bmat = bpy.data.materials.new(name=bmat_name)
    bmat.use_nodes = True
    # link bmat to PrincipledBSDFWrapper
    wrapper = PrincipledBSDFWrapper(bmat, is_readonly=False)
    wrapper.base_color = rgba[:3]
    # check for alpha
    alpha = rgba[3]
    if alpha < 1.0:
        bmat.diffuse_color = rgba
        wrapper.alpha = alpha
        bmat.blend_method = "BLEND"
    if self.config["sharemats"]:
        func_data["matdatabase"][rgba] = bmat
    return bmat
def createBubbleMaterial(name, baseColor):
    """Create a glassy, semi-transparent "bubble" material.

    :param name: name for the new material datablock.
    :param baseColor: RGB color used as the Principled base color.
    :return: the newly created material (bpy.types.Material).
    """
    mat = bpy.data.materials.new(name)
    mat.use_nodes = True
    mat.blend_method = 'BLEND'
    principled = PrincipledBSDFWrapper(mat, is_readonly=False)
    principled.base_color = baseColor
    principled.metallic = 0.5
    principled.specular = 0.2
    principled.roughness = 0.05
    #principled.IOR = 1.1
    principled.alpha = 0.3
    # principled.specular_texture.image = load_image("/path/to/image.png")
    # NOTE: the original read-only "export" pass here re-wrapped the material
    # and read base_color / specular_texture into unused locals; it had no
    # effect and has been removed.
    # Fix: the material was created but never returned to the caller.
    return mat
def read_pmx_data(context, filepath="", adjust_bone_position=False, bone_transfer=False, ):
    """Import a PMX (or PMD, auto-detected) model file into the scene.

    Builds, in order: an armature with pose-bone locks/constraints/IK,
    optionally adjusted bone positions, a mesh with vertex groups and
    skin weights, faces, textures, materials, UVs and vertex-morph
    shape keys.

    :param context: Blender context used for scene/view-layer access.
    :param filepath: path of the .pmx/.pmd file to read.
    :param adjust_bone_position: if True, reposition helper bones
        (master, eyes, twist, auto/leg-D) after creation.
    :param bone_transfer: if True, stop after armature+mesh skinning and
        return (arm_obj, obj_mesh); otherwise returns None.
    """
    prefs = context.preferences.addons[GV.FolderName].preferences
    use_japanese_name = prefs.use_japanese_name
    use_custom_shape = prefs.use_custom_shape
    xml_save_versions = prefs.saveVersions
    GV.SetStartTime()

    # Make sure we are in object mode with nothing selected before building.
    if bpy.ops.object.mode_set.poll():
        bpy.ops.object.mode_set(mode='OBJECT')
    if bpy.ops.object.select_all.poll():
        bpy.ops.object.select_all(action='DESELECT')

    with open(filepath, "rb") as f:
        from . import pmx
        pmx_data = pmx.Model()
        pmx_data.Load(f)
        # Magic == 0 means the file is not PMX; retry as legacy PMD and
        # convert to the in-memory PMX representation.
        if pmx_data.Status.Magic == 0:
            # Echo("Loading Pmd ")
            from . import pmd
            from . import pmd2pmx
            f.seek(0)
            d_pmd = pmd.Model()
            d_pmd.Load(f)
            pmx_data = pmd2pmx.Convert(d_pmd)

    scene = context.scene
    base_path = os.path.dirname(filepath)

    for ob in scene.objects:
        ob.select_set(False)

    # Armature datablock + object, drawn in front as sticks for rigging work.
    tmp_name = Get_JP_or_EN_Name(pmx_data.Name, pmx_data.Name_E, use_japanese_name)
    arm_dat = bpy.data.armatures.new(tmp_name + "_Arm")
    arm_obj = bpy.data.objects.new(tmp_name + "_Arm", arm_dat)
    arm_obj.show_in_front = True
    arm_dat.display_type = "STICK"
    bpy.context.collection.objects.link(arm_obj)
    bpy.context.view_layer.objects.active = arm_obj
    bpy.context.view_layer.update()

    # Make XML
    blender_bone_list = make_xml(pmx_data, filepath, use_japanese_name, xml_save_versions)

    arm_obj.select_set(True)
    bone_id = {}

    # Set Bone Position (returns bone name -> armature bone id mapping)
    bone_id = Set_Bone_Position(pmx_data, arm_dat, blender_bone_list)

    bpy.ops.object.mode_set(mode="POSE", toggle=False)

    # Set Bone Status: per-bone locks, copy-rotation/location constraints and IK.
    for (bone_index, data_bone) in enumerate(pmx_data.Bones):
        bone_name = blender_bone_list[bone_index]
        pb = arm_obj.pose.bones.get(bone_name)
        if pb is None:
            continue

        # Find name (True or False) — classify the bone by naming convention.
        find_master = Search_Master(bone_name)
        find_eyes = Search_Eyes(bone_name)
        find_twist_m = Search_Twist_Master(bone_name)
        find_twist_n = Search_Twist_Num(bone_name)
        find_auto = Search_Auto_Bone(bone_name)

        # Twist segment bones may only rotate around their own axis (Y).
        if find_twist_n:
            pb.lock_rotation = [True, False, True]
        if data_bone.Rotatable == 0:
            pb.lock_rotation = [True, True, True]
        if data_bone.Movable == 0:
            pb.lock_location = [True, True, True]
        if data_bone.Operational == 0:
            pb.lock_rotation = [True, True, True]
            pb.lock_location = [True, True, True]

        # PMX "additional rotation": driven by another bone; a negative
        # power inverts all axes and the influence uses its magnitude.
        if data_bone.AdditionalRotation == 1:
            const = pb.constraints.new('COPY_ROTATION')
            const.target = arm_obj
            const.subtarget = blender_bone_list[data_bone.AdditionalBoneIndex]
            const.target_space = 'LOCAL'
            const.owner_space = 'LOCAL'
            const.influence = abs(data_bone.AdditionalPower)
            if data_bone.AdditionalPower < 0:
                const.invert_x = True
                const.invert_y = True
                const.invert_z = True

        # Same scheme for "additional movement" (location copy).
        if data_bone.AdditionalMovement == 1:
            const = pb.constraints.new('COPY_LOCATION')
            const.target = arm_obj
            const.subtarget = blender_bone_list[data_bone.AdditionalBoneIndex]
            const.target_space = 'LOCAL'
            const.owner_space = 'LOCAL'
            const.influence = abs(data_bone.AdditionalPower)
            if data_bone.AdditionalPower < 0:
                const.invert_x = True
                const.invert_y = True
                const.invert_z = True

        # Fixed-axis bones: restrict rotation to the local Y axis.
        if data_bone.UseFixedAxis == 1:
            const = pb.constraints.new('LIMIT_ROTATION')
            const.use_limit_x = True
            const.use_limit_z = True
            const.owner_space = 'LOCAL'
            pb.lock_rotation = [True, False, True]

        # The following PMX flags are currently not mapped to anything.
        if data_bone.UseLocalAxis == 0:
            pass
        if data_bone.AfterPhysical == 0:
            pass
        if data_bone.ExternalBone == 0:
            pass

        # Set Custom Shape (only for recognized helper bones; twist/auto
        # bones additionally require at least one constraint).
        if use_custom_shape:
            len_const = len(pb.constraints)
            if find_master:
                add_function.set_custom_shape(context, pb, shape=GV.ShapeMaster)
            elif find_eyes:
                add_function.set_custom_shape(context, pb, shape=GV.ShapeEyes)
            elif find_twist_m and len_const:
                add_function.set_custom_shape(context, pb, shape=GV.ShapeTwist1)
            elif find_twist_n and len_const:
                add_function.set_custom_shape(context, pb, shape=GV.ShapeTwist2)
            elif find_auto and len_const:
                add_function.set_custom_shape(context, pb, shape=GV.ShapeAuto)

        # Set IK: the IK constraint is placed on the first chain member,
        # targeting this bone; loop/limit values are kept as custom props.
        if data_bone.UseIK != 0:
            pb["IKLoops"] = data_bone.IK.Loops
            pb["IKLimit"] = data_bone.IK.Limit

            if len(data_bone.IK.Member) > 0:
                ik_name = blender_bone_list[data_bone.IK.Member[0].Index]
                new_ik = arm_obj.pose.bones[ik_name].constraints.new("IK")
                new_ik.target = arm_obj
                new_ik.subtarget = blender_bone_list[bone_index]
                new_ik.chain_count = len(data_bone.IK.Member)

            # Per-member rotation limits; equal upper/lower limits mean the
            # axis is fully locked.
            for ik_member in data_bone.IK.Member:
                if ik_member.UseLimit == 1:
                    member_name = blender_bone_list[ik_member.Index]
                    pose_member = arm_obj.pose.bones[member_name]

                    if ik_member.UpperLimit.x == ik_member.LowerLimit.x:
                        pose_member.lock_ik_x = True
                    else:
                        pose_member.use_ik_limit_x = True
                        pose_member.ik_min_x = ik_member.LowerLimit.x
                        pose_member.ik_max_x = ik_member.UpperLimit.x

                    if ik_member.UpperLimit.y == ik_member.LowerLimit.y:
                        pose_member.lock_ik_y = True
                    else:
                        pose_member.use_ik_limit_y = True
                        pose_member.ik_min_y = ik_member.LowerLimit.y
                        pose_member.ik_max_y = ik_member.UpperLimit.y

                    if ik_member.UpperLimit.z == ik_member.LowerLimit.z:
                        pose_member.lock_ik_z = True
                    else:
                        pose_member.use_ik_limit_z = True
                        pose_member.ik_min_z = ik_member.LowerLimit.z
                        pose_member.ik_max_z = ik_member.UpperLimit.z

    bpy.ops.object.mode_set(mode="EDIT", toggle=False)

    # Adjust Bone Position
    if adjust_bone_position:
        # Get_Adjust_Data(edit_bones, jp_name, en_name)
        arm_L, vec_arm_L, axis_arm_L, len_arm_L = Get_Adjust_Data(arm_dat.edit_bones, "腕_L", "arm_L")
        arm_R, vec_arm_R, axis_arm_R, len_arm_R = Get_Adjust_Data(arm_dat.edit_bones, "腕_R", "arm_R")
        elb_L, vec_elb_L, axis_elb_L, len_elb_L = Get_Adjust_Data(arm_dat.edit_bones, "ひじ_L", "elbow_L")
        elb_R, vec_elb_R, axis_elb_R, len_elb_R = Get_Adjust_Data(arm_dat.edit_bones, "ひじ_R", "elbow_R")

        for eb in arm_dat.edit_bones:
            # Find name (True or False)
            find_master = Search_Master(eb.name)
            find_eyes = Search_Eyes(eb.name)
            find_twist_m = Search_Twist_Master(eb.name)
            find_twist_n = Search_Twist_Num(eb.name)
            find_auto = Search_Auto_Bone(eb.name)
            find_leg_d = Search_Leg_Dummy(eb.name)

            # Master: root the master bone at the origin, pointing at center.
            if find_master:
                eb_center = Get_Edit_Bone(arm_dat.edit_bones, "センター", "center")
                if eb_center is not None:
                    eb.head = [0.0, 0.0, 0.0]
                    eb.tail = eb_center.head

            # Eyes: place the eyes controller on the center line, slightly
            # above the left eye (factor 1.16 is the add-on's convention).
            elif find_eyes:
                eb_eye = Get_Edit_Bone(arm_dat.edit_bones, "目_L", "eye_L")
                if eb_eye is not None:
                    eb.head.x = 0.0
                    eb.head.y = 0.0
                    eb.head.z = eb.tail.z = eb_eye.head.z * 1.16
                    eb.tail.x = 0.0
                    eb.tail.y = -0.25

            # Auto Bone (Sub Bone), Leg_D Bone: align with the constraint
            # subtarget bone (shortened to 30% for auto bones).
            elif find_auto or find_leg_d:
                pb = arm_obj.pose.bones[eb.name]
                for const in pb.constraints:
                    if hasattr(const, "subtarget"):
                        eb.use_connect = False
                        for child in eb.children:
                            child.use_connect = False
                        eb_sub = arm_dat.edit_bones[const.subtarget]
                        multi = 0.3 if find_auto else 1.0
                        axis = (eb_sub.tail - eb_sub.head) * multi
                        eb.head = eb_sub.head
                        eb.tail = eb_sub.head + axis
                        break

            # Twist: re-seat twist bones along the arm or elbow axis.
            elif find_twist_m or find_twist_n:
                eb.use_connect = False
                for child in eb.children:
                    child.use_connect = False

                # Set_Adjust_Data(active, eb, vec, axis, length)
                # \u8155 is the kanji "arm"; everything else falls through
                # to the elbow ("手"/"wrist") branch.
                if re.search(r'^(\u8155|arm)', eb.name) is not None:
                    if eb.name.endswith("_L") and arm_L is not None:
                        Set_Adjust_Data(eb, arm_L, vec_arm_L, axis_arm_L, len_arm_L)
                    elif eb.name.endswith("_R") and arm_R is not None:
                        Set_Adjust_Data(eb, arm_R, vec_arm_R, axis_arm_R, len_arm_R)
                else:
                    # "手" or "wrist"
                    if eb.name.endswith("_L") and elb_L is not None:
                        Set_Adjust_Data(eb, elb_L, vec_elb_L, axis_elb_L, len_elb_L)
                    elif eb.name.endswith("_R") and elb_R is not None:
                        Set_Adjust_Data(eb, elb_R, vec_elb_R, axis_elb_R, len_elb_R)

    # BoneItem Direction: recalculate bone rolls over the whole armature.
    bpy.ops.armature.select_all(action='SELECT')
    bpy.ops.b2pmxe.calculate_roll()
    bpy.ops.armature.select_all(action='DESELECT')

    bpy.ops.object.mode_set(mode='OBJECT')

    # Create Mesh
    mesh = bpy.data.meshes.new(tmp_name)
    obj_mesh = bpy.data.objects.new(mesh.name, mesh)
    bpy.context.collection.objects.link(obj_mesh)

    # Link Parent: skin the mesh to the armature via an Armature modifier.
    mod = obj_mesh.modifiers.new('RigModif', 'ARMATURE')
    mod.object = arm_obj
    mod.use_bone_envelopes = False
    mod.use_vertex_groups = True

    # Add Vertex Group: one group per deform bone, mapped by bone index.
    vert_group = {}
    vert_group_index = {}
    for bone_index, bone_data in enumerate(pmx_data.Bones):
        bone_name = blender_bone_list[bone_index]
        target_name = arm_dat.bones[bone_id[bone_name]].name
        vert_group_index[bone_index] = target_name

        if target_name not in vert_group.keys():
            vert_group[target_name] = obj_mesh.vertex_groups.new(name=target_name)

    mesh.update()

    # Add Vertex: positions/normals transformed by GlobalMatrix, then
    # skin weights per PMX deform type.
    mesh.vertices.add(len(pmx_data.Vertices))
    for vert_index, vert_data in enumerate(pmx_data.Vertices):
        mesh.vertices[vert_index].co = GT(vert_data.Position, GlobalMatrix)
        # NOTE(review): writing .normal directly may be ignored by newer
        # Blender versions (custom normals API) — confirm target version.
        mesh.vertices[vert_index].normal = GT_normal(vert_data.Normal, GlobalMatrix)
        # mesh.vertices[vert_index].uv = pmx_data.Vertices[vert_index].UV

        # BDEF1
        if vert_data.Type == 0:
            vert_group[vert_group_index[vert_data.Bones[0]]].add([vert_index], 1.0, 'REPLACE')

        # BDEF2
        elif vert_data.Type == 1:
            vert_group[vert_group_index[vert_data.Bones[0]]].add([vert_index], vert_data.Weights[0], 'ADD')
            vert_group[vert_group_index[vert_data.Bones[1]]].add([vert_index], 1.0 - vert_data.Weights[0], 'ADD')

        # BDEF4
        elif vert_data.Type == 2:
            vert_group[vert_group_index[vert_data.Bones[0]]].add([vert_index], vert_data.Weights[0], 'ADD')
            vert_group[vert_group_index[vert_data.Bones[1]]].add([vert_index], vert_data.Weights[1], 'ADD')
            vert_group[vert_group_index[vert_data.Bones[2]]].add([vert_index], vert_data.Weights[2], 'ADD')
            vert_group[vert_group_index[vert_data.Bones[3]]].add([vert_index], vert_data.Weights[3], 'ADD')

        # SDEF (imported as plain BDEF2 weights)
        elif vert_data.Type == 3:
            vert_group[vert_group_index[vert_data.Bones[0]]].add([vert_index], vert_data.Weights[0], 'ADD')
            vert_group[vert_group_index[vert_data.Bones[1]]].add([vert_index], 1.0 - vert_data.Weights[0], 'ADD')
            # Todo? SDEF

        # QDEF (imported as plain BDEF4 weights)
        elif vert_data.Type == 4:
            vert_group[vert_group_index[vert_data.Bones[0]]].add([vert_index], vert_data.Weights[0], 'ADD')
            vert_group[vert_group_index[vert_data.Bones[1]]].add([vert_index], vert_data.Weights[1], 'ADD')
            vert_group[vert_group_index[vert_data.Bones[2]]].add([vert_index], vert_data.Weights[2], 'ADD')
            vert_group[vert_group_index[vert_data.Bones[3]]].add([vert_index], vert_data.Weights[3], 'ADD')
            # Todo? QDEF

    mesh.update()

    # Add Face: all triangles; loop winding swaps vertices 2 and 3 to flip
    # from PMX to Blender face orientation.
    poly_count = len(pmx_data.Faces) // 3
    mesh.polygons.add(poly_count)
    mesh.polygons.foreach_set("loop_start", range(0, poly_count * 3, 3))
    mesh.polygons.foreach_set("loop_total", (3,) * poly_count)
    mesh.polygons.foreach_set("use_smooth", (True,) * poly_count)
    mesh.loops.add(len(pmx_data.Faces))
    # mesh.loops.foreach_set("vertex_index" ,pmx_data.Faces)
    for faceIndex in range(poly_count):
        mesh.loops[faceIndex * 3].vertex_index = pmx_data.Faces[faceIndex * 3]
        mesh.loops[faceIndex * 3 + 1].vertex_index = pmx_data.Faces[faceIndex * 3 + 2]
        mesh.loops[faceIndex * 3 + 2].vertex_index = pmx_data.Faces[faceIndex * 3 + 1]

    mesh.update()

    # Bone-transfer mode stops here: armature + skinned mesh only.
    if bone_transfer:
        context.view_layer.update()
        return arm_obj, obj_mesh

    # Add Textures (paths are relative to the model file's directory)
    # image_dic = {}
    textures_dic = {}
    NG_tex_list = []

    for (tex_index, tex_data) in enumerate(pmx_data.Textures):
        tex_path = os.path.join(base_path, tex_data.Path)
        try:
            bpy.ops.image.open(filepath=tex_path)
            # image_dic[tex_index] = bpy.data.images[len(bpy.data.images)-1]
            textures_dic[tex_index] = bpy.data.textures.new(os.path.basename(tex_path), type='IMAGE')
            textures_dic[tex_index].image = bpy.data.images[os.path.basename(tex_path)]

            # Use Alpha
            textures_dic[tex_index].image.alpha_mode = 'PREMUL'
        # NOTE(review): bare except deliberately treats any load failure as
        # "texture missing" and collects it for the warning below.
        except:
            NG_tex_list.append(tex_data.Path)

    # print NG_tex_list — report any textures that failed to load.
    if len(NG_tex_list):
        bpy.ops.b2pmxe.message('INVOKE_DEFAULT', type='INFO',
                               line1="Some Texture file not found.",
                               use_console=True)
        for data in NG_tex_list:
            print(" --> %s" % data)

    mesh.update()

    # Add Material: one Principled material per PMX material; mat_status
    # records (material slot index, number of face indices) for UV/material
    # assignment below.
    mat_status = []
    for (mat_index, mat_data) in enumerate(pmx_data.Materials):
        blender_mat_name = Get_JP_or_EN_Name(mat_data.Name, mat_data.Name_E, use_japanese_name)
        temp_mattrial = bpy.data.materials.new(blender_mat_name)
        temp_mattrial.use_nodes = True
        temp_principled = PrincipledBSDFWrapper(temp_mattrial, is_readonly=False)
        temp_principled.base_color = mat_data.Deffuse.xyz
        temp_principled.alpha = mat_data.Deffuse.w

        mat_status.append((len(mat_status), mat_data.FaceLength))

        mesh.materials.append(temp_mattrial)

        # Flags
        # self.Both = 0
        # self.GroundShadow = 1
        # self.DropShadow = 1
        # self.OnShadow = 1
        # self.OnEdge = 1
        #
        # Edge
        # self.EdgeColor = mathutils.Vector((0,0,0,1))
        # self.EdgeSize = 1.0

        # Texture
        if mat_data.TextureIndex != -1:
            temp_tex = textures_dic[mat_data.TextureIndex]
            temp_principled.base_color_texture.image = temp_tex.image
            temp_principled.base_color_texture.use_alpha = True
            temp_principled.base_color_texture.texcoords = "UV"

    mesh.update()

    # Set Material & UV
    # Set UV Layer
    if mesh.uv_layers.active_index < 0:
        mesh.uv_layers.new(name="UV_Data")
        mesh.uv_layers.active_index = 0

    uv_data = mesh.uv_layers.active.data[:]
    # uvtex = mesh.uv_textures.new("UV_Data")
    # uv_data = uvtex.data

    # Walk the polygons in material order; each material owns FaceLength//3
    # consecutive triangles.
    index = 0
    for dat in mat_status:
        for i in range(dat[1] // 3):
            # Set Material
            mesh.polygons[index].material_index = dat[0]

            # Set Texture
            # if pmx_data.Materials[dat[0]].TextureIndex < len(bpy.data.images) and pmx_data.Materials[dat[0]].TextureIndex >= 0:
            # if textures_dic.get(pmx_data.Materials[dat[0]].TextureIndex, None) is not None:
            # mesh.uv_layers[0].data[index].image = textures_dic[pmx_data.Materials[dat[0]].TextureIndex].image

            # Set UV
            poly_vert_index = mesh.polygons[index].loop_start
            uv_data[poly_vert_index + 0].uv = pmx_data.Vertices[mesh.polygons[index].vertices[0]].UV
            uv_data[poly_vert_index + 1].uv = pmx_data.Vertices[mesh.polygons[index].vertices[1]].UV
            uv_data[poly_vert_index + 2].uv = pmx_data.Vertices[mesh.polygons[index].vertices[2]].UV

            # Inv UV V (PMX V axis is flipped relative to Blender's)
            uv_data[poly_vert_index + 0].uv[1] = 1 - uv_data[poly_vert_index + 0].uv[1]
            uv_data[poly_vert_index + 1].uv[1] = 1 - uv_data[poly_vert_index + 1].uv[1]
            uv_data[poly_vert_index + 2].uv[1] = 1 - uv_data[poly_vert_index + 2].uv[1]

            # TwoSide 2.6 not use?
            # todo set parameter
            # uv_data[index].use_twoside = True

            index = index + 1

    mesh.update()

    # Add Shape Key (PMX vertex morphs only; other morph types are skipped)
    if len(pmx_data.Morphs) > 0:
        # Add Basis key
        if mesh.shape_keys is None:
            obj_mesh.shape_key_add(name="Basis", from_mix=False)
            mesh.update()

        for data in pmx_data.Morphs:
            # Vertex Morph
            if data.Type == 1:
                blender_morph_name = Get_JP_or_EN_Name(data.Name, data.Name_E, use_japanese_name)
                temp_key = obj_mesh.shape_key_add(name=blender_morph_name, from_mix=False)

                for v in data.Offsets:
                    temp_key.data[v.Index].co += GT(v.Move, GlobalMatrix)

                mesh.update()

        # To activate "Basis" shape
        obj_mesh.active_shape_key_index = 0

    bpy.context.view_layer.update()
    GV.SetVertCount(len(pmx_data.Vertices))
    GV.PrintTime(filepath, type='import')
    return
def process_next_chunk(context, file, previous_chunk, importedObjects, IMAGE_SEARCH): from bpy_extras.image_utils import load_image #print previous_chunk.bytes_read, 'BYTES READ' contextObName = None contextLamp = [None, None] # object, Data contextMaterial = None contextMaterialWrapper = None contextMatrix_rot = None # Blender.mathutils.Matrix(); contextMatrix.identity() #contextMatrix_tx = None # Blender.mathutils.Matrix(); contextMatrix.identity() contextMesh_vertls = None # flat array: (verts * 3) contextMesh_facels = None contextMeshMaterials = [] # (matname, [face_idxs]) contextMeshUV = None # flat array (verts * 2) TEXTURE_DICT = {} MATDICT = {} # TEXMODE = Mesh.FaceModes['TEX'] # Localspace variable names, faster. STRUCT_SIZE_FLOAT = struct.calcsize('f') STRUCT_SIZE_2FLOAT = struct.calcsize('2f') STRUCT_SIZE_3FLOAT = struct.calcsize('3f') STRUCT_SIZE_4FLOAT = struct.calcsize('4f') STRUCT_SIZE_UNSIGNED_SHORT = struct.calcsize('H') STRUCT_SIZE_4UNSIGNED_SHORT = struct.calcsize('4H') STRUCT_SIZE_4x3MAT = struct.calcsize('ffffffffffff') # STRUCT_SIZE_4x3MAT = calcsize('ffffffffffff') # print STRUCT_SIZE_4x3MAT, ' STRUCT_SIZE_4x3MAT' # only init once object_list = [] # for hierarchy object_parent = [] # index of parent in hierarchy, 0xFFFF = no parent pivot_list = [] # pivots with hierarchy handling def putContextMesh(context, myContextMesh_vertls, myContextMesh_facels, myContextMeshMaterials): bmesh = bpy.data.meshes.new(contextObName) if myContextMesh_facels is None: myContextMesh_facels = [] if myContextMesh_vertls: bmesh.vertices.add(len(myContextMesh_vertls) // 3) bmesh.vertices.foreach_set("co", myContextMesh_vertls) nbr_faces = len(myContextMesh_facels) bmesh.polygons.add(nbr_faces) bmesh.loops.add(nbr_faces * 3) eekadoodle_faces = [] for v1, v2, v3 in myContextMesh_facels: eekadoodle_faces.extend((v3, v1, v2) if v3 == 0 else (v1, v2, v3)) bmesh.polygons.foreach_set("loop_start", range(0, nbr_faces * 3, 3)) bmesh.polygons.foreach_set("loop_total", (3, ) * 
nbr_faces) bmesh.loops.foreach_set("vertex_index", eekadoodle_faces) if bmesh.polygons and contextMeshUV: bmesh.uv_layers.new() uv_faces = bmesh.uv_layers.active.data[:] else: uv_faces = None for mat_idx, (matName, faces) in enumerate(myContextMeshMaterials): if matName is None: bmat = None else: bmat = MATDICT.get(matName) # in rare cases no materials defined. if bmat: img = TEXTURE_DICT.get(bmat.name) else: print(" warning: material %r not defined!" % matName) bmat = MATDICT[matName] = bpy.data.materials.new( matName) img = None bmesh.materials.append(bmat) # can be None if uv_faces and img: for fidx in faces: bmesh.polygons[fidx].material_index = mat_idx # TODO: How to restore this? # uv_faces[fidx].image = img else: for fidx in faces: bmesh.polygons[fidx].material_index = mat_idx if uv_faces: uvl = bmesh.uv_layers.active.data[:] for fidx, pl in enumerate(bmesh.polygons): face = myContextMesh_facels[fidx] v1, v2, v3 = face # eekadoodle if v3 == 0: v1, v2, v3 = v3, v1, v2 uvl[pl.loop_start].uv = contextMeshUV[v1 * 2:(v1 * 2) + 2] uvl[pl.loop_start + 1].uv = contextMeshUV[v2 * 2:(v2 * 2) + 2] uvl[pl.loop_start + 2].uv = contextMeshUV[v3 * 2:(v3 * 2) + 2] # always a tri bmesh.validate() bmesh.update() ob = bpy.data.objects.new(contextObName, bmesh) object_dictionary[contextObName] = ob context.view_layer.active_layer_collection.collection.objects.link(ob) importedObjects.append(ob) if contextMatrix_rot: ob.matrix_local = contextMatrix_rot object_matrix[ob] = contextMatrix_rot.copy() #a spare chunk new_chunk = Chunk() temp_chunk = Chunk() CreateBlenderObject = False def read_float_color(temp_chunk): temp_data = file.read(STRUCT_SIZE_3FLOAT) temp_chunk.bytes_read += STRUCT_SIZE_3FLOAT return [float(col) for col in struct.unpack('<3f', temp_data)] def read_float(temp_chunk): temp_data = file.read(STRUCT_SIZE_FLOAT) temp_chunk.bytes_read += STRUCT_SIZE_FLOAT return struct.unpack('<f', temp_data)[0] def read_short(temp_chunk): temp_data = 
file.read(STRUCT_SIZE_UNSIGNED_SHORT) temp_chunk.bytes_read += STRUCT_SIZE_UNSIGNED_SHORT return struct.unpack('<H', temp_data)[0] def read_byte_color(temp_chunk): temp_data = file.read(struct.calcsize('3B')) temp_chunk.bytes_read += 3 return [float(col) / 255 for col in struct.unpack('<3B', temp_data) ] # data [0,1,2] == rgb def read_texture(new_chunk, temp_chunk, name, mapto): # new_texture = bpy.data.textures.new(name, type='IMAGE') u_scale, v_scale, u_offset, v_offset = 1.0, 1.0, 0.0, 0.0 mirror = False extension = 'wrap' while (new_chunk.bytes_read < new_chunk.length): #print 'MAT_TEXTURE_MAP..while', new_chunk.bytes_read, new_chunk.length read_chunk(file, temp_chunk) if temp_chunk.ID == MAT_MAP_FILEPATH: texture_name, read_str_len = read_string(file) img = TEXTURE_DICT[contextMaterial.name] = load_image( texture_name, dirname, recursive=IMAGE_SEARCH) temp_chunk.bytes_read += read_str_len # plus one for the null character that gets removed elif temp_chunk.ID == MAT_MAP_USCALE: u_scale = read_float(temp_chunk) elif temp_chunk.ID == MAT_MAP_VSCALE: v_scale = read_float(temp_chunk) elif temp_chunk.ID == MAT_MAP_UOFFSET: u_offset = read_float(temp_chunk) elif temp_chunk.ID == MAT_MAP_VOFFSET: v_offset = read_float(temp_chunk) elif temp_chunk.ID == MAT_MAP_TILING: tiling = read_short(temp_chunk) if tiling & 0x2: extension = 'mirror' elif tiling & 0x10: extension = 'decal' elif temp_chunk.ID == MAT_MAP_ANG: print("\nwarning: ignoring UV rotation") skip_to_end(file, temp_chunk) new_chunk.bytes_read += temp_chunk.bytes_read # add the map to the material in the right channel if img: add_texture_to_material(img, (u_scale, v_scale, 1), (u_offset, v_offset, 0), extension, contextMaterialWrapper, mapto) dirname = os.path.dirname(file.name) #loop through all the data for this chunk (previous chunk) and see what it is while (previous_chunk.bytes_read < previous_chunk.length): #print '\t', previous_chunk.bytes_read, 'keep going' #read the next chunk #print 'reading a chunk' 
read_chunk(file, new_chunk) #is it a Version chunk? if new_chunk.ID == VERSION: #print 'if new_chunk.ID == VERSION:' #print 'found a VERSION chunk' #read in the version of the file #it's an unsigned short (H) temp_data = file.read(struct.calcsize('I')) version = struct.unpack('<I', temp_data)[0] new_chunk.bytes_read += 4 # read the 4 bytes for the version number #this loader works with version 3 and below, but may not with 4 and above if version > 3: print( '\tNon-Fatal Error: Version greater than 3, may not load correctly: ', version) #is it an object info chunk? elif new_chunk.ID == OBJECTINFO: #print 'elif new_chunk.ID == OBJECTINFO:' # print 'found an OBJECTINFO chunk' process_next_chunk(context, file, new_chunk, importedObjects, IMAGE_SEARCH) #keep track of how much we read in the main chunk new_chunk.bytes_read += temp_chunk.bytes_read #is it an object chunk? elif new_chunk.ID == OBJECT: if CreateBlenderObject: putContextMesh(context, contextMesh_vertls, contextMesh_facels, contextMeshMaterials) contextMesh_vertls = [] contextMesh_facels = [] ## preparando para receber o proximo objeto contextMeshMaterials = [] # matname:[face_idxs] contextMeshUV = None # Reset matrix contextMatrix_rot = None #contextMatrix_tx = None CreateBlenderObject = True contextObName, read_str_len = read_string(file) new_chunk.bytes_read += read_str_len #is it a material chunk? 
elif new_chunk.ID == MATERIAL: # print("read material") #print 'elif new_chunk.ID == MATERIAL:' contextMaterial = bpy.data.materials.new('Material') contextMaterialWrapper = PrincipledBSDFWrapper(contextMaterial, is_readonly=False, use_nodes=True) elif new_chunk.ID == MAT_NAME: #print 'elif new_chunk.ID == MAT_NAME:' material_name, read_str_len = read_string(file) # print("material name", material_name) #plus one for the null character that ended the string new_chunk.bytes_read += read_str_len contextMaterial.name = material_name.rstrip( ) # remove trailing whitespace MATDICT[material_name] = contextMaterial elif new_chunk.ID == MAT_AMBIENT: #print 'elif new_chunk.ID == MAT_AMBIENT:' read_chunk(file, temp_chunk) # TODO: consider ambient term somehow. maybe add to color # if temp_chunk.ID == MAT_FLOAT_COLOR: # contextMaterial.mirror_color = read_float_color(temp_chunk) # temp_data = file.read(struct.calcsize('3f')) # temp_chunk.bytes_read += 12 # contextMaterial.mirCol = [float(col) for col in struct.unpack('<3f', temp_data)] # elif temp_chunk.ID == MAT_24BIT_COLOR: # contextMaterial.mirror_color = read_byte_color(temp_chunk) # temp_data = file.read(struct.calcsize('3B')) # temp_chunk.bytes_read += 3 # contextMaterial.mirCol = [float(col)/255 for col in struct.unpack('<3B', temp_data)] # data [0,1,2] == rgb # else: skip_to_end(file, temp_chunk) new_chunk.bytes_read += temp_chunk.bytes_read elif new_chunk.ID == MAT_DIFFUSE: #print 'elif new_chunk.ID == MAT_DIFFUSE:' read_chunk(file, temp_chunk) if temp_chunk.ID == MAT_FLOAT_COLOR: contextMaterialWrapper.base_color = read_float_color( temp_chunk) # temp_data = file.read(struct.calcsize('3f')) # temp_chunk.bytes_read += 12 # contextMaterial.rgbCol = [float(col) for col in struct.unpack('<3f', temp_data)] elif temp_chunk.ID == MAT_24BIT_COLOR: contextMaterialWrapper.base_color = read_byte_color(temp_chunk) # temp_data = file.read(struct.calcsize('3B')) # temp_chunk.bytes_read += 3 # contextMaterial.rgbCol = 
[float(col)/255 for col in struct.unpack('<3B', temp_data)] # data [0,1,2] == rgb else: skip_to_end(file, temp_chunk) # print("read material diffuse color", contextMaterial.diffuse_color) new_chunk.bytes_read += temp_chunk.bytes_read elif new_chunk.ID == MAT_SPECULAR: #print 'elif new_chunk.ID == MAT_SPECULAR:' read_chunk(file, temp_chunk) # TODO: consider using specular term somehow # if temp_chunk.ID == MAT_FLOAT_COLOR: # contextMaterial.specular_color = read_float_color(temp_chunk) # temp_data = file.read(struct.calcsize('3f')) # temp_chunk.bytes_read += 12 # contextMaterial.mirCol = [float(col) for col in struct.unpack('<3f', temp_data)] # elif temp_chunk.ID == MAT_24BIT_COLOR: # contextMaterial.specular_color = read_byte_color(temp_chunk) # temp_data = file.read(struct.calcsize('3B')) # temp_chunk.bytes_read += 3 # contextMaterial.mirCol = [float(col)/255 for col in struct.unpack('<3B', temp_data)] # data [0,1,2] == rgb # else: skip_to_end(file, temp_chunk) new_chunk.bytes_read += temp_chunk.bytes_read elif new_chunk.ID == MAT_TEXTURE_MAP: read_texture(new_chunk, temp_chunk, "Diffuse", "COLOR") elif new_chunk.ID == MAT_SPECULAR_MAP: read_texture(new_chunk, temp_chunk, "Specular", "SPECULARITY") elif new_chunk.ID == MAT_OPACITY_MAP: read_texture(new_chunk, temp_chunk, "Opacity", "ALPHA") elif new_chunk.ID == MAT_BUMP_MAP: read_texture(new_chunk, temp_chunk, "Bump", "NORMAL") elif new_chunk.ID == MAT_TRANSPARENCY: #print 'elif new_chunk.ID == MAT_TRANSPARENCY:' read_chunk(file, temp_chunk) if temp_chunk.ID == PERCENTAGE_SHORT: temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT) temp_chunk.bytes_read += STRUCT_SIZE_UNSIGNED_SHORT contextMaterialWrapper.alpha = 1 - ( float(struct.unpack('<H', temp_data)[0]) / 100) elif temp_chunk.ID == PERCENTAGE_FLOAT: temp_data = file.read(STRUCT_SIZE_FLOAT) temp_chunk.bytes_read += STRUCT_SIZE_FLOAT contextMaterialWrapper.alpha = 1 - float( struct.unpack('f', temp_data)[0]) else: print("Cannot read material transparency") 
new_chunk.bytes_read += temp_chunk.bytes_read elif new_chunk.ID == OBJECT_LIGHT: # Basic lamp support. temp_data = file.read(STRUCT_SIZE_3FLOAT) x, y, z = struct.unpack('<3f', temp_data) new_chunk.bytes_read += STRUCT_SIZE_3FLOAT # no lamp in dict that would be confusing contextLamp[1] = bpy.data.lights.new("Lamp", 'POINT') contextLamp[0] = ob = bpy.data.objects.new("Lamp", contextLamp[1]) context.view_layer.active_layer_collection.collection.objects.link( ob) importedObjects.append(contextLamp[0]) #print 'number of faces: ', num_faces #print x,y,z contextLamp[0].location = x, y, z # Reset matrix contextMatrix_rot = None #contextMatrix_tx = None #print contextLamp.name, elif new_chunk.ID == OBJECT_MESH: # print 'Found an OBJECT_MESH chunk' pass elif new_chunk.ID == OBJECT_VERTICES: """ Worldspace vertex locations """ # print 'elif new_chunk.ID == OBJECT_VERTICES:' temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT) num_verts = struct.unpack('<H', temp_data)[0] new_chunk.bytes_read += 2 # print 'number of verts: ', num_verts contextMesh_vertls = struct.unpack( '<%df' % (num_verts * 3), file.read(STRUCT_SIZE_3FLOAT * num_verts)) new_chunk.bytes_read += STRUCT_SIZE_3FLOAT * num_verts # dummyvert is not used atm! 
#print 'object verts: bytes read: ', new_chunk.bytes_read elif new_chunk.ID == OBJECT_FACES: # print 'elif new_chunk.ID == OBJECT_FACES:' temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT) num_faces = struct.unpack('<H', temp_data)[0] new_chunk.bytes_read += 2 #print 'number of faces: ', num_faces # print '\ngetting a face' temp_data = file.read(STRUCT_SIZE_4UNSIGNED_SHORT * num_faces) new_chunk.bytes_read += STRUCT_SIZE_4UNSIGNED_SHORT * num_faces # 4 short ints x 2 bytes each contextMesh_facels = struct.unpack('<%dH' % (num_faces * 4), temp_data) contextMesh_facels = [ contextMesh_facels[i - 3:i] for i in range(3, (num_faces * 4) + 3, 4) ] elif new_chunk.ID == OBJECT_MATERIAL: # print 'elif new_chunk.ID == OBJECT_MATERIAL:' material_name, read_str_len = read_string(file) new_chunk.bytes_read += read_str_len # remove 1 null character. temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT) num_faces_using_mat = struct.unpack('<H', temp_data)[0] new_chunk.bytes_read += STRUCT_SIZE_UNSIGNED_SHORT temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT * num_faces_using_mat) new_chunk.bytes_read += STRUCT_SIZE_UNSIGNED_SHORT * num_faces_using_mat temp_data = struct.unpack("<%dH" % (num_faces_using_mat), temp_data) contextMeshMaterials.append((material_name, temp_data)) #look up the material in all the materials elif new_chunk.ID == OBJECT_UV: temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT) num_uv = struct.unpack('<H', temp_data)[0] new_chunk.bytes_read += 2 temp_data = file.read(STRUCT_SIZE_2FLOAT * num_uv) new_chunk.bytes_read += STRUCT_SIZE_2FLOAT * num_uv contextMeshUV = struct.unpack('<%df' % (num_uv * 2), temp_data) elif new_chunk.ID == OBJECT_TRANS_MATRIX: # How do we know the matrix size? 
54 == 4x4 48 == 4x3 temp_data = file.read(STRUCT_SIZE_4x3MAT) data = list(struct.unpack('<ffffffffffff', temp_data)) new_chunk.bytes_read += STRUCT_SIZE_4x3MAT contextMatrix_rot = mathutils.Matrix(( data[:3] + [0], data[3:6] + [0], data[6:9] + [0], data[9:] + [1], )).transposed() elif (new_chunk.ID == MAT_MAP_FILEPATH): texture_name, read_str_len = read_string(file) if contextMaterial.name not in TEXTURE_DICT: TEXTURE_DICT[contextMaterial.name] = load_image( texture_name, dirname, place_holder=False, recursive=IMAGE_SEARCH) new_chunk.bytes_read += read_str_len # plus one for the null character that gets removed elif new_chunk.ID == EDITKEYFRAME: pass # including these here means their EK_OB_NODE_HEADER are scanned elif new_chunk.ID in { ED_KEY_AMBIENT_NODE, ED_KEY_OBJECT_NODE, ED_KEY_CAMERA_NODE, ED_KEY_TARGET_NODE, ED_KEY_LIGHT_NODE, ED_KEY_L_TARGET_NODE, ED_KEY_SPOTLIGHT_NODE }: # another object is being processed child = None elif new_chunk.ID == EK_OB_NODE_HEADER: object_name, read_str_len = read_string(file) new_chunk.bytes_read += read_str_len temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT * 2) new_chunk.bytes_read += 4 temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT) hierarchy = struct.unpack('<H', temp_data)[0] new_chunk.bytes_read += 2 child = object_dictionary.get(object_name) if child is None: child = bpy.data.objects.new(object_name, None) # create an empty object context.view_layer.active_layer_collection.collection.objects.link( child) importedObjects.append(child) object_list.append(child) object_parent.append(hierarchy) pivot_list.append(mathutils.Vector((0.0, 0.0, 0.0))) elif new_chunk.ID == EK_OB_INSTANCE_NAME: object_name, read_str_len = read_string(file) # child.name = object_name child.name += "." 
+ object_name object_dictionary[object_name] = child new_chunk.bytes_read += read_str_len # print("new instance object:", object_name) elif new_chunk.ID == EK_OB_PIVOT: # translation temp_data = file.read(STRUCT_SIZE_3FLOAT) pivot = struct.unpack('<3f', temp_data) new_chunk.bytes_read += STRUCT_SIZE_3FLOAT pivot_list[len(pivot_list) - 1] = mathutils.Vector(pivot) elif new_chunk.ID == EK_OB_POSITION_TRACK: # translation new_chunk.bytes_read += STRUCT_SIZE_UNSIGNED_SHORT * 5 temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT * 5) temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT) nkeys = struct.unpack('<H', temp_data)[0] temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT) new_chunk.bytes_read += STRUCT_SIZE_UNSIGNED_SHORT * 2 for i in range(nkeys): temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT) nframe = struct.unpack('<H', temp_data)[0] new_chunk.bytes_read += STRUCT_SIZE_UNSIGNED_SHORT temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT * 2) new_chunk.bytes_read += STRUCT_SIZE_UNSIGNED_SHORT * 2 temp_data = file.read(STRUCT_SIZE_3FLOAT) loc = struct.unpack('<3f', temp_data) new_chunk.bytes_read += STRUCT_SIZE_3FLOAT if nframe == 0: child.location = loc elif new_chunk.ID == EK_OB_ROTATION_TRACK: # rotation new_chunk.bytes_read += STRUCT_SIZE_UNSIGNED_SHORT * 5 temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT * 5) temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT) nkeys = struct.unpack('<H', temp_data)[0] temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT) new_chunk.bytes_read += STRUCT_SIZE_UNSIGNED_SHORT * 2 for i in range(nkeys): temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT) nframe = struct.unpack('<H', temp_data)[0] new_chunk.bytes_read += STRUCT_SIZE_UNSIGNED_SHORT temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT * 2) new_chunk.bytes_read += STRUCT_SIZE_UNSIGNED_SHORT * 2 temp_data = file.read(STRUCT_SIZE_4FLOAT) rad, axis_x, axis_y, axis_z = struct.unpack("<4f", temp_data) new_chunk.bytes_read += STRUCT_SIZE_4FLOAT if nframe == 0: child.rotation_euler = 
mathutils.Quaternion( (axis_x, axis_y, axis_z), -rad).to_euler() # why negative? elif new_chunk.ID == EK_OB_SCALE_TRACK: # translation new_chunk.bytes_read += STRUCT_SIZE_UNSIGNED_SHORT * 5 temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT * 5) temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT) nkeys = struct.unpack('<H', temp_data)[0] temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT) new_chunk.bytes_read += STRUCT_SIZE_UNSIGNED_SHORT * 2 for i in range(nkeys): temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT) nframe = struct.unpack('<H', temp_data)[0] new_chunk.bytes_read += STRUCT_SIZE_UNSIGNED_SHORT temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT * 2) new_chunk.bytes_read += STRUCT_SIZE_UNSIGNED_SHORT * 2 temp_data = file.read(STRUCT_SIZE_3FLOAT) sca = struct.unpack('<3f', temp_data) new_chunk.bytes_read += STRUCT_SIZE_3FLOAT if nframe == 0: child.scale = sca else: # (new_chunk.ID!=VERSION or new_chunk.ID!=OBJECTINFO or new_chunk.ID!=OBJECT or new_chunk.ID!=MATERIAL): # print 'skipping to end of this chunk' #print("unknown chunk: "+hex(new_chunk.ID)) buffer_size = new_chunk.length - new_chunk.bytes_read binary_format = "%ic" % buffer_size temp_data = file.read(struct.calcsize(binary_format)) new_chunk.bytes_read += buffer_size #update the previous chunk bytes read # print 'previous_chunk.bytes_read += new_chunk.bytes_read' # print previous_chunk.bytes_read, new_chunk.bytes_read previous_chunk.bytes_read += new_chunk.bytes_read ## print 'Bytes left in this chunk: ', previous_chunk.length - previous_chunk.bytes_read # FINISHED LOOP # There will be a number of objects still not added if CreateBlenderObject: putContextMesh(context, contextMesh_vertls, contextMesh_facels, contextMeshMaterials) # Assign parents to objects # check _if_ we need to assign first because doing so recalcs the depsgraph for ind, ob in enumerate(object_list): parent = object_parent[ind] if parent == ROOT_OBJECT: if ob.parent is not None: ob.parent = None else: if ob.parent != 
object_list[parent]: if ob == object_list[parent]: print(' warning: Cannot assign self to parent ', ob) else: ob.parent = object_list[parent] # pivot_list[ind] += pivot_list[parent] # XXX, not sure this is correct, should parent space matrix be applied before combining? # fix pivots for ind, ob in enumerate(object_list): if ob.type == 'MESH': pivot = pivot_list[ind] pivot_matrix = object_matrix.get( ob, mathutils.Matrix()) # unlikely to fail pivot_matrix = mathutils.Matrix.Translation( pivot_matrix.to_3x3() @ -pivot) ob.data.transform(pivot_matrix)