def init_cube_material(x, y):
    """Return a per-cell copy of the shared "mat1" material.

    The copy is renamed with the (x, y) grid coordinates; its base color
    is reset to the first entry of the module-level ``colors`` list.
    """
    template = bpy.data.materials["mat1"]
    mat = template.copy()
    mat.name = "mat1_%d_%d" % (x, y)
    wrapper = PrincipledBSDFWrapper(mat, is_readonly=False)
    wrapper.base_color = colors[0]
    return mat
def initMaterial(self):
    """Create an empty node-based material built around a Principled BSDF core."""
    mat = bpy.data.materials.new(name=self.name)
    mat.use_nodes = True
    wrapper = PrincipledBSDFWrapper(mat, is_readonly=False)
    wrapper.roughness = 1.0
    # Keep handles to the core shader node and the material output node.
    self.material = mat
    self.principled_node = wrapper.node_principled_bsdf
    self.mat_output = wrapper.node_out
def create_vert_material(mesh, vertMat):
    """Build a node material named '<meshName>.<vmName>' from a vertex-material record.

    Diffuse color and opacity go to the Principled wrapper; the remaining
    raw values are kept as custom properties on the material for export.
    """
    info = vertMat.vmInfo
    mat = bpy.data.materials.new(mesh.header.meshName + "." + vertMat.vmName)
    mat.use_nodes = True
    #mat.blend_method = 'BLEND'
    wrapper = PrincipledBSDFWrapper(mat, is_readonly=False)
    wrapper.base_color = rgb_to_vector(info.diffuse)
    wrapper.alpha = info.opacity
    # Stash values that have no Principled equivalent as custom properties.
    mat["Shininess"] = info.shininess
    mat["Specular"] = rgb_to_vector(info.specular)
    mat["Emission"] = rgb_to_vector(info.emissive)
    mat["Diffuse"] = rgb_to_vector(info.diffuse)
    mat["Translucency"] = info.translucency
    return mat
def create_new_bmat(self, bmat_name, rgba, func_data):
    """Create a node material colored with *rgba*; register it for sharing.

    Alpha < 1.0 switches the material to alpha blending. When the
    "sharemats" config flag is on, the material is cached in
    ``func_data["matdatabase"]`` keyed by the rgba tuple.
    """
    bmat = bpy.data.materials.new(name=bmat_name)
    bmat.use_nodes = True
    wrapper = PrincipledBSDFWrapper(bmat, is_readonly=False)
    wrapper.base_color = rgba[:3]
    alpha = rgba[3]
    if alpha < 1.0:
        # Transparent material: mirror color on the viewport diffuse too.
        bmat.diffuse_color = rgba
        wrapper.alpha = alpha
        bmat.blend_method = "BLEND"
    if self.config["sharemats"]:
        func_data["matdatabase"][rgba] = bmat
    return bmat
def createMaterial(self):
    """Create a node material from ``self.maps`` (baseColor, normal, opacity, ...).

    Maps suffixed "_back" are wired into a second Principled BSDF mixed in
    by the Backfacing geometry output. Returns the new material.
    """
    mat = bpy.data.materials.new(name=self.name)
    mat.use_nodes = True
    nodes = mat.node_tree.nodes
    links = mat.node_tree.links
    principled_mat = PrincipledBSDFWrapper(mat, is_readonly=False)
    principled = principled_mat.node_principled_bsdf
    back_principled = None
    mat_output = principled_mat.node_out
    principled_mat.roughness = 1.0
    for map_name, img in self.maps.items():
        if img is None or map_name.split("_")[0] not in __class__.input_tr:
            continue
        current_principled = principled
        if map_name.endswith("_back"):
            if back_principled is None:
                # Create backface principled and mix front/back on Backfacing
                back_principled = nodes.new(type="ShaderNodeBsdfPrincipled")
                geometry_node = nodes.new(type="ShaderNodeNewGeometry")
                mix_node = nodes.new(type="ShaderNodeMixShader")
                back_principled.inputs["Roughness"].default_value = 1.0
                links.new(geometry_node.outputs["Backfacing"], mix_node.inputs[0])
                links.new(principled.outputs[0], mix_node.inputs[1])
                links.new(back_principled.outputs[0], mix_node.inputs[2])
                links.new(mix_node.outputs[0], mat_output.inputs[0])
            current_principled = back_principled
            map_name = map_name[:-5]  # remove "_back"
        texture_node = nodes.new(type="ShaderNodeTexImage")
        texture_node.image = getCyclesImage(img)
        texture_node.image.colorspace_settings.name = "sRGB" if map_name == "baseColor" else "Non-Color"
        # BUGFIX: this legacy-attribute branch used to be chained with
        # `elif` to the linking code below, so whenever `color_space`
        # existed (older Blender builds) the texture node was created but
        # never connected to the shader. The two checks are independent.
        if hasattr(texture_node, "color_space"):
            texture_node.color_space = "COLOR" if map_name == "baseColor" else "NONE"
        if map_name == "normal":
            # Normal maps go through a Normal Map node, not straight in.
            normal_node = nodes.new(type="ShaderNodeNormalMap")
            links.new(texture_node.outputs["Color"], normal_node.inputs["Color"])
            links.new(normal_node.outputs["Normal"], current_principled.inputs["Normal"])
        else:
            links.new(texture_node.outputs["Color"], current_principled.inputs[__class__.input_tr[map_name]])
        if map_name == "opacity":
            mat.blend_method = 'BLEND'
    autoAlignNodes(mat_output)
    return mat
def ensure_cube(cubesize=8):
    """Ensure a 'Cube' object exists; create and dress it on first call.

    The cube is sunk below the origin by *cubesize* and gets a
    'CubeMaterial' with a random base color.
    """
    cube = bpy.context.scene.objects.get('Cube')
    if cube is None:
        bpy.ops.mesh.primitive_cube_add()
        cube = bpy.context.active_object
        cube.location = (0, 0, -cubesize)
        cube.scale = (cubesize, cubesize, cubesize)
        cube.color = (0, 0, 0, 1)
        material = bpy.data.materials.get('CubeMaterial')
        if material is None:
            material = bpy.data.materials.new('CubeMaterial')
            material.use_nodes = True
            wrapper = PrincipledBSDFWrapper(material, is_readonly=False)
            wrapper.base_color = (rand(), rand(), rand())
        cube.data.materials.append(material)
def createMaterial(name, baseColor, removeShader=False):
    """Create a node material with *baseColor*; optionally delete its Principled node.

    Also demonstrates re-reading the material through a read-only wrapper
    (the export path); the values read there are not otherwise used.
    """
    mat = bpy.data.materials.new(name)
    mat.use_nodes = True
    wrapper = PrincipledBSDFWrapper(mat, is_readonly=False)
    wrapper.base_color = baseColor
    # wrapper.specular_texture.image = load_image("/path/to/image.png")

    # Export: inspect the same material via a read-only wrapper.
    reader = PrincipledBSDFWrapper(mat, is_readonly=True)
    base_color = reader.base_color
    specular_texture = reader.specular_texture
    if specular_texture and specular_texture.image:
        specular_texture_filepath = reader.specular_texture.image.filepath

    if removeShader:
        # Look the Principled node up by its default name and remove it.
        node_to_delete = mat.node_tree.nodes['Principled BSDF']
        mat.node_tree.nodes.remove(node_to_delete)
def __init__(self, material):
    """Wrap *material*; on Blender 2.80+ attach a writable Principled wrapper."""
    self.material = material
    if bpy.app.version < (2, 80, 0):
        # Pre-2.80 Blender has no node_shader_utils wrapper.
        self.principled = None
        return
    from bpy_extras.node_shader_utils import PrincipledBSDFWrapper
    self.principled = PrincipledBSDFWrapper(self.material, is_readonly=False)
def ensure_paper(paperGen, paper_resolution=20):
    """Replace any existing 'Paper' object with a freshly generated sheet.

    Returns the minimum height reported by the paper generator.
    """
    existing = bpy.context.scene.objects.get('Paper')
    if existing is not None:
        # Delete only the old paper object.
        bpy.ops.object.select_all(action='DESELECT')
        existing.select_set(True)
        bpy.ops.object.delete()
    minHeight, paper_mesh = paper.generate(paper_resolution, paper_resolution, paperGen)
    paper_mesh.color = (1, 1, 1, 1)
    material = bpy.data.materials.get('PaperMaterial')
    if material is None:
        material = bpy.data.materials.new('PaperMaterial')
        material.use_nodes = True
        wrapper = PrincipledBSDFWrapper(material, is_readonly=False)
        wrapper.base_color = (rand(), rand(), rand())
    paper_mesh.data.materials.append(material)
    return minHeight
def handle_layers(context, model, toplayer, layerids, materials):
    """
    Read the Rhino layers from *model*, updating the *layerids* dictionary
    passed in, and add a material per layer color to *materials*.
    """
    # Pass 1: one collection and one material per Rhino layer, keyed by GUID.
    for lid, layer in enumerate(model.Layers):
        lcol = get_iddata(context.blend_data.collections, layer.Id, layer.Name, None)
        layerids[str(layer.Id)] = (lid, lcol)
        tag_data(layerids[str(layer.Id)][1], layer.Id, layer.Name)
        matname = layer.Name + "+" + str(layer.Id)
        if matname not in materials:
            laymat = get_iddata(context.blend_data.materials, layer.Id, layer.Name, None)
            laymat.use_nodes = True
            r, g, b, a = layer.Color
            wrapper = PrincipledBSDFWrapper(laymat, is_readonly=False)
            wrapper.base_color = (r / 255.0, g / 255.0, b / 255.0)
            materials[matname] = laymat
    # Pass 2: link each layer collection under its parent, falling back to
    # the top collection when no parent layer was found.
    for lid, layer in enumerate(model.Layers):
        if str(layer.ParentLayerId) in layerids:
            target = layerids[str(layer.ParentLayerId)][1]
        else:
            target = toplayer
        try:
            target.children.link(layerids[str(layer.Id)][1])
        except Exception:
            pass
def create_shader_materials(self, m, mesh):
    """Create one Blender material per shader material of *m* and append to *mesh*.

    Known texture/scalar properties drive the Principled wrapper; type-5
    (color) properties and anything unrecognized are stored as custom
    properties on the material.
    """
    for material in m.shaderMaterials:
        mat = bpy.data.materials.new(m.header.meshName + ".ShaderMaterial")
        mat.use_nodes = True
        principled = PrincipledBSDFWrapper(mat, is_readonly=False)
        for prop in material.properties:
            if prop.name == "DiffuseTexture":
                tex = load_texture(self, prop.value)
                if tex is not None:  # was `!= None`; identity test is the idiom
                    principled.base_color_texture.image = tex
            elif prop.name == "NormalMap":
                tex = load_texture(self, prop.value)
                if tex is not None:
                    principled.normalmap_texture.image = tex
            elif prop.name == "BumpScale":
                principled.normalmap_strength = prop.value
            elif prop.type == 5:
                # Color type
                mat[prop.name] = rgba_to_vector(prop)
            else:
                mat[prop.name] = prop.value
        mesh.materials.append(mat)
def createBubbleMaterial(name, baseColor):
    """Create a translucent, glossy "soap bubble" material tinted *baseColor*."""
    mat = bpy.data.materials.new(name)
    mat.use_nodes = True
    mat.blend_method = 'BLEND'
    wrapper = PrincipledBSDFWrapper(mat, is_readonly=False)
    wrapper.base_color = baseColor
    # Shiny, slightly metallic and mostly transparent surface.
    wrapper.metallic = 0.5
    wrapper.specular = 0.2
    wrapper.roughness = 0.05
    #wrapper.IOR = 1.1
    wrapper.alpha = 0.3
    # wrapper.specular_texture.image = load_image("/path/to/image.png")

    # Export: inspect the material again through a read-only wrapper.
    reader = PrincipledBSDFWrapper(mat, is_readonly=True)
    base_color = reader.base_color
    specular_texture = reader.specular_texture
    if specular_texture and specular_texture.image:
        specular_texture_filepath = reader.specular_texture.image.filepath
def generateModels(csv, output, color_type):
    """For each (name, room) row of the CSV at *csv*, build an extruded,
    colored text mesh and export it as an embedded glTF into *output*."""
    with open(csv, 'r') as read_obj:
        for row in reader(read_obj):
            resident_name = row[0]
            room_number = row[1]
            # Drop the previous object, then add a text object for this row.
            bpy.ops.object.delete()
            bpy.ops.object.text_add(location=(0, 0, 0), rotation=(0, 0, 0))
            obj = bpy.context.object
            print(resident_name)
            obj.data.body = resident_name + '\nRoom ' + room_number
            obj.data.extrude = 0.2
            # Bring the text out of the plane and center it.
            bpy.ops.transform.translate(value=(-1.7, 0, 0.25), orient_type='GLOBAL')
            # Convert to mesh and apply a randomly colored material.
            bpy.ops.object.convert(target="MESH")
            mat = bpy.data.materials.new("Text")
            mat.use_nodes = True
            wrapper = PrincipledBSDFWrapper(mat, is_readonly=False)
            wrapper.base_color = get_random_color(color_type)
            mesh = obj.data
            if not mesh.materials:
                mesh.materials.append(mat)
            else:
                mesh.materials[0] = mat
            # Export as glTF, e.g. "jane-doe-101".
            filename = (resident_name.lower() + " " + room_number).replace(" ", "-")
            bpy.ops.export_scene.gltf(export_format='GLTF_EMBEDDED', filepath=os.path.join(output, filename))
def handle_materials(context, model, materials, update):
    """Create a Blender material for each Rhino material in *model*.

    New materials are registered in *materials* keyed by material_name();
    node setup only happens when *update* is true.
    """
    for m in model.Materials:
        matname = material_name(m)
        if matname in materials:
            continue
        blmat = utils.get_iddata(context.blend_data.materials, None, m.Name, None)
        if update:
            blmat.use_nodes = True
            refl = m.Reflectivity
            transp = m.Transparency
            ior = m.IndexOfRefraction
            roughness = m.ReflectionGlossiness
            transrough = m.RefractionGlossiness
            spec = m.Shine / 255.0
            # A pure black diffuse means the visible color comes from the
            # reflection or transparency color instead.
            if m.DiffuseColor == _black and m.Reflectivity > 0.0 and m.Transparency == 0.0:
                r, g, b, _ = m.ReflectionColor
            elif m.DiffuseColor == _black and m.Reflectivity == 0.0 and m.Transparency > 0.0:
                r, g, b, _ = m.TransparentColor
                refl = 0.0
            elif m.DiffuseColor == _black and m.Reflectivity > 0.0 and m.Transparency > 0.0:
                r, g, b, _ = m.TransparentColor
                refl = 0.0
            else:
                r, g, b, _ = m.DiffuseColor
            if refl > 0.0 and transp > 0.0:
                refl = 0.0
            principled = PrincipledBSDFWrapper(blmat, is_readonly=False)
            principled.base_color = (r / 255.0, g / 255.0, b / 255.0)
            principled.metallic = refl
            principled.transmission = transp
            principled.ior = ior
            principled.roughness = roughness
            principled.specular = spec
            # Transmission Roughness has no wrapper attribute; set the node
            # socket directly (index 16 on this Blender's Principled BSDF).
            principled.node_principled_bsdf.inputs[16].default_value = transrough
        materials[matname] = blmat
def read_pmx_data(context, filepath="", adjust_bone_position=False, bone_transfer=False, ):
    """Import an MMD PMX (or PMD, auto-converted) model file into the scene.

    Builds an armature from the file's bones (constraints, IK, custom
    shapes), then a mesh with vertex groups, faces, textures, materials,
    UVs and shape keys.

    :param context: Blender context.
    :param filepath: path of the .pmx/.pmd file to load.
    :param adjust_bone_position: tweak helper/twist/eye bone placement.
    :param bone_transfer: when True, stop early and return
        (armature object, mesh object) right after geometry is built.
    :return: (arm_obj, obj_mesh) when bone_transfer is True, else None.
    """
    # Add-on preferences drive naming and optional bone widgets.
    prefs = context.preferences.addons[GV.FolderName].preferences
    use_japanese_name = prefs.use_japanese_name
    use_custom_shape = prefs.use_custom_shape
    xml_save_versions = prefs.saveVersions

    GV.SetStartTime()

    # Make sure we start from object mode with nothing selected.
    if bpy.ops.object.mode_set.poll():
        bpy.ops.object.mode_set(mode='OBJECT')
    if bpy.ops.object.select_all.poll():
        bpy.ops.object.select_all(action='DESELECT')

    # Load the PMX file; fall back to PMD and convert when the magic says so.
    with open(filepath, "rb") as f:
        from . import pmx
        pmx_data = pmx.Model()
        pmx_data.Load(f)

        if pmx_data.Status.Magic == 0:
            # Echo("Loading Pmd ")
            from . import pmd
            from . import pmd2pmx
            f.seek(0)
            d_pmd = pmd.Model()
            d_pmd.Load(f)
            pmx_data = pmd2pmx.Convert(d_pmd)

    scene = context.scene
    base_path = os.path.dirname(filepath)

    for ob in scene.objects:
        ob.select_set(False)

    # Create and activate the armature object.
    tmp_name = Get_JP_or_EN_Name(pmx_data.Name, pmx_data.Name_E, use_japanese_name)
    arm_dat = bpy.data.armatures.new(tmp_name + "_Arm")
    arm_obj = bpy.data.objects.new(tmp_name + "_Arm", arm_dat)
    arm_obj.show_in_front = True
    arm_dat.display_type = "STICK"
    bpy.context.collection.objects.link(arm_obj)
    bpy.context.view_layer.objects.active = arm_obj
    bpy.context.view_layer.update()

    # Make XML (sidecar file holding data Blender cannot represent).
    blender_bone_list = make_xml(pmx_data, filepath, use_japanese_name, xml_save_versions)

    arm_obj.select_set(True)
    bone_id = {}

    # Set Bone Position (maps bone name -> armature bone index).
    bone_id = Set_Bone_Position(pmx_data, arm_dat, blender_bone_list)

    bpy.ops.object.mode_set(mode="POSE", toggle=False)

    # Set Bone Status: locks, constraints, custom shapes and IK per bone.
    for (bone_index, data_bone) in enumerate(pmx_data.Bones):
        bone_name = blender_bone_list[bone_index]
        pb = arm_obj.pose.bones.get(bone_name)
        if pb is None:
            continue

        # Classify the bone by naming convention (True or False each).
        find_master = Search_Master(bone_name)
        find_eyes = Search_Eyes(bone_name)
        find_twist_m = Search_Twist_Master(bone_name)
        find_twist_n = Search_Twist_Num(bone_name)
        find_auto = Search_Auto_Bone(bone_name)

        # Twist segment bones only rotate around their own Y axis.
        if find_twist_n:
            pb.lock_rotation = [True, False, True]
        if data_bone.Rotatable == 0:
            pb.lock_rotation = [True, True, True]
        if data_bone.Movable == 0:
            pb.lock_location = [True, True, True]
        if data_bone.Operational == 0:
            pb.lock_rotation = [True, True, True]
            pb.lock_location = [True, True, True]

        # PMX "additional rotation": copy rotation from a driver bone;
        # negative power inverts all axes.
        if data_bone.AdditionalRotation == 1:
            const = pb.constraints.new('COPY_ROTATION')
            const.target = arm_obj
            const.subtarget = blender_bone_list[data_bone.AdditionalBoneIndex]
            const.target_space = 'LOCAL'
            const.owner_space = 'LOCAL'
            const.influence = abs(data_bone.AdditionalPower)
            if data_bone.AdditionalPower < 0:
                const.invert_x = True
                const.invert_y = True
                const.invert_z = True

        # PMX "additional movement": same idea with location.
        if data_bone.AdditionalMovement == 1:
            const = pb.constraints.new('COPY_LOCATION')
            const.target = arm_obj
            const.subtarget = blender_bone_list[data_bone.AdditionalBoneIndex]
            const.target_space = 'LOCAL'
            const.owner_space = 'LOCAL'
            const.influence = abs(data_bone.AdditionalPower)
            if data_bone.AdditionalPower < 0:
                const.invert_x = True
                const.invert_y = True
                const.invert_z = True

        # Fixed-axis bones: constrain to rotation around local Y only.
        if data_bone.UseFixedAxis == 1:
            const = pb.constraints.new('LIMIT_ROTATION')
            const.use_limit_x = True
            const.use_limit_z = True
            const.owner_space = 'LOCAL'
            pb.lock_rotation = [True, False, True]

        # These PMX flags are currently not mapped to anything in Blender.
        if data_bone.UseLocalAxis == 0:
            pass
        if data_bone.AfterPhysical == 0:
            pass
        if data_bone.ExternalBone == 0:
            pass

        # Set Custom Shape widgets by bone category.
        if use_custom_shape:
            len_const = len(pb.constraints)

            if find_master:
                add_function.set_custom_shape(context, pb, shape=GV.ShapeMaster)
            elif find_eyes:
                add_function.set_custom_shape(context, pb, shape=GV.ShapeEyes)
            elif find_twist_m and len_const:
                add_function.set_custom_shape(context, pb, shape=GV.ShapeTwist1)
            elif find_twist_n and len_const:
                add_function.set_custom_shape(context, pb, shape=GV.ShapeTwist2)
            elif find_auto and len_const:
                add_function.set_custom_shape(context, pb, shape=GV.ShapeAuto)

        # Set IK: the constraint lives on the first chain member; loop/limit
        # values are kept as custom properties on the controller bone.
        if data_bone.UseIK != 0:
            pb["IKLoops"] = data_bone.IK.Loops
            pb["IKLimit"] = data_bone.IK.Limit

            if len(data_bone.IK.Member) > 0:
                ik_name = blender_bone_list[data_bone.IK.Member[0].Index]
                new_ik = arm_obj.pose.bones[ik_name].constraints.new("IK")
                new_ik.target = arm_obj
                new_ik.subtarget = blender_bone_list[bone_index]
                new_ik.chain_count = len(data_bone.IK.Member)

            # Per-member angle limits; equal bounds mean the axis is locked.
            for ik_member in data_bone.IK.Member:
                if ik_member.UseLimit == 1:
                    member_name = blender_bone_list[ik_member.Index]
                    pose_member = arm_obj.pose.bones[member_name]

                    if ik_member.UpperLimit.x == ik_member.LowerLimit.x:
                        pose_member.lock_ik_x = True
                    else:
                        pose_member.use_ik_limit_x = True
                        pose_member.ik_min_x = ik_member.LowerLimit.x
                        pose_member.ik_max_x = ik_member.UpperLimit.x

                    if ik_member.UpperLimit.y == ik_member.LowerLimit.y:
                        pose_member.lock_ik_y = True
                    else:
                        pose_member.use_ik_limit_y = True
                        pose_member.ik_min_y = ik_member.LowerLimit.y
                        pose_member.ik_max_y = ik_member.UpperLimit.y

                    if ik_member.UpperLimit.z == ik_member.LowerLimit.z:
                        pose_member.lock_ik_z = True
                    else:
                        pose_member.use_ik_limit_z = True
                        pose_member.ik_min_z = ik_member.LowerLimit.z
                        pose_member.ik_max_z = ik_member.UpperLimit.z

    bpy.ops.object.mode_set(mode="EDIT", toggle=False)

    # Adjust Bone Position (optional cleanup of helper bone placement).
    if adjust_bone_position:
        # Get_Adjust_Data(edit_bones, jp_name, en_name)
        arm_L, vec_arm_L, axis_arm_L, len_arm_L = Get_Adjust_Data(arm_dat.edit_bones, "腕_L", "arm_L")
        arm_R, vec_arm_R, axis_arm_R, len_arm_R = Get_Adjust_Data(arm_dat.edit_bones, "腕_R", "arm_R")
        elb_L, vec_elb_L, axis_elb_L, len_elb_L = Get_Adjust_Data(arm_dat.edit_bones, "ひじ_L", "elbow_L")
        elb_R, vec_elb_R, axis_elb_R, len_elb_R = Get_Adjust_Data(arm_dat.edit_bones, "ひじ_R", "elbow_R")

        for eb in arm_dat.edit_bones:
            # Classify the bone by naming convention (True or False each).
            find_master = Search_Master(eb.name)
            find_eyes = Search_Eyes(eb.name)
            find_twist_m = Search_Twist_Master(eb.name)
            find_twist_n = Search_Twist_Num(eb.name)
            find_auto = Search_Auto_Bone(eb.name)
            find_leg_d = Search_Leg_Dummy(eb.name)

            # Master: span from the origin to the center bone's head.
            if find_master:
                eb_center = Get_Edit_Bone(arm_dat.edit_bones, "センター", "center")

                if eb_center is not None:
                    eb.head = [0.0, 0.0, 0.0]
                    eb.tail = eb_center.head

            # Eyes: place the eye controller above the head on the midline.
            elif find_eyes:
                eb_eye = Get_Edit_Bone(arm_dat.edit_bones, "目_L", "eye_L")

                if eb_eye is not None:
                    eb.head.x = 0.0
                    eb.head.y = 0.0
                    eb.head.z = eb.tail.z = eb_eye.head.z * 1.16
                    eb.tail.x = 0.0
                    eb.tail.y = -0.25

            # Auto Bone (Sub Bone), Leg_D Bone: snap onto the bone that the
            # first subtarget-bearing constraint points at (scaled for autos).
            elif find_auto or find_leg_d:
                pb = arm_obj.pose.bones[eb.name]

                for const in pb.constraints:
                    if hasattr(const, "subtarget"):
                        eb.use_connect = False
                        for child in eb.children:
                            child.use_connect = False

                        eb_sub = arm_dat.edit_bones[const.subtarget]
                        multi = 0.3 if find_auto else 1.0
                        axis = (eb_sub.tail - eb_sub.head) * multi
                        eb.head = eb_sub.head
                        eb.tail = eb_sub.head + axis
                        break

            # Twist: detach, then align with the arm/elbow reference data.
            elif find_twist_m or find_twist_n:
                eb.use_connect = False
                for child in eb.children:
                    child.use_connect = False

                # Set_Adjust_Data(active, eb, vec, axis, length)
                # \u8155 is the kanji for "arm".
                if re.search(r'^(\u8155|arm)', eb.name) is not None:
                    if eb.name.endswith("_L") and arm_L is not None:
                        Set_Adjust_Data(eb, arm_L, vec_arm_L, axis_arm_L, len_arm_L)
                    elif eb.name.endswith("_R") and arm_R is not None:
                        Set_Adjust_Data(eb, arm_R, vec_arm_R, axis_arm_R, len_arm_R)
                else:
                    # "手" (hand) or "wrist" twists use the elbow reference.
                    if eb.name.endswith("_L") and elb_L is not None:
                        Set_Adjust_Data(eb, elb_L, vec_elb_L, axis_elb_L, len_elb_L)
                    elif eb.name.endswith("_R") and elb_R is not None:
                        Set_Adjust_Data(eb, elb_R, vec_elb_R, axis_elb_R, len_elb_R)

        # BoneItem Direction: recompute bone rolls for the whole armature.
        bpy.ops.armature.select_all(action='SELECT')
        bpy.ops.b2pmxe.calculate_roll()
        bpy.ops.armature.select_all(action='DESELECT')

    bpy.ops.object.mode_set(mode='OBJECT')

    # Create Mesh object, driven by the armature via a modifier.
    mesh = bpy.data.meshes.new(tmp_name)
    obj_mesh = bpy.data.objects.new(mesh.name, mesh)
    bpy.context.collection.objects.link(obj_mesh)

    # Link Parent
    mod = obj_mesh.modifiers.new('RigModif', 'ARMATURE')
    mod.object = arm_obj
    mod.use_bone_envelopes = False
    mod.use_vertex_groups = True

    # Add Vertex Group: one per deform bone, mapped from PMX bone index.
    vert_group = {}
    vert_group_index = {}
    for bone_index, bone_data in enumerate(pmx_data.Bones):
        bone_name = blender_bone_list[bone_index]
        target_name = arm_dat.bones[bone_id[bone_name]].name
        vert_group_index[bone_index] = target_name

        if target_name not in vert_group.keys():
            vert_group[target_name] = obj_mesh.vertex_groups.new(name=target_name)

    mesh.update()

    # Add Vertex positions/normals and skin weights by deform type.
    mesh.vertices.add(len(pmx_data.Vertices))

    for vert_index, vert_data in enumerate(pmx_data.Vertices):
        mesh.vertices[vert_index].co = GT(vert_data.Position, GlobalMatrix)
        mesh.vertices[vert_index].normal = GT_normal(vert_data.Normal, GlobalMatrix)
        # mesh.vertices[vert_index].uv = pmx_data.Vertices[vert_index].UV

        # BDEF1: single bone, full weight.
        if vert_data.Type == 0:
            vert_group[vert_group_index[vert_data.Bones[0]]].add([vert_index], 1.0, 'REPLACE')

        # BDEF2: two bones, second weight is the complement.
        elif vert_data.Type == 1:
            vert_group[vert_group_index[vert_data.Bones[0]]].add([vert_index], vert_data.Weights[0], 'ADD')
            vert_group[vert_group_index[vert_data.Bones[1]]].add([vert_index], 1.0 - vert_data.Weights[0], 'ADD')

        # BDEF4: four explicit bone/weight pairs.
        elif vert_data.Type == 2:
            vert_group[vert_group_index[vert_data.Bones[0]]].add([vert_index], vert_data.Weights[0], 'ADD')
            vert_group[vert_group_index[vert_data.Bones[1]]].add([vert_index], vert_data.Weights[1], 'ADD')
            vert_group[vert_group_index[vert_data.Bones[2]]].add([vert_index], vert_data.Weights[2], 'ADD')
            vert_group[vert_group_index[vert_data.Bones[3]]].add([vert_index], vert_data.Weights[3], 'ADD')

        # SDEF: approximated as BDEF2 here.
        elif vert_data.Type == 3:
            vert_group[vert_group_index[vert_data.Bones[0]]].add([vert_index], vert_data.Weights[0], 'ADD')
            vert_group[vert_group_index[vert_data.Bones[1]]].add([vert_index], 1.0 - vert_data.Weights[0], 'ADD')
            # Todo? SDEF

        # QDEF: approximated as BDEF4 here.
        elif vert_data.Type == 4:
            vert_group[vert_group_index[vert_data.Bones[0]]].add([vert_index], vert_data.Weights[0], 'ADD')
            vert_group[vert_group_index[vert_data.Bones[1]]].add([vert_index], vert_data.Weights[1], 'ADD')
            vert_group[vert_group_index[vert_data.Bones[2]]].add([vert_index], vert_data.Weights[2], 'ADD')
            vert_group[vert_group_index[vert_data.Bones[3]]].add([vert_index], vert_data.Weights[3], 'ADD')
            # Todo? QDEF

    mesh.update()

    # Add Face: PMX stores flat triangle indices; winding of the 2nd/3rd
    # loop is swapped below to flip face orientation for Blender.
    poly_count = len(pmx_data.Faces) // 3
    mesh.polygons.add(poly_count)
    mesh.polygons.foreach_set("loop_start", range(0, poly_count * 3, 3))
    mesh.polygons.foreach_set("loop_total", (3,) * poly_count)
    mesh.polygons.foreach_set("use_smooth", (True,) * poly_count)
    mesh.loops.add(len(pmx_data.Faces))
    # mesh.loops.foreach_set("vertex_index" ,pmx_data.Faces)

    for faceIndex in range(poly_count):
        mesh.loops[faceIndex * 3].vertex_index = pmx_data.Faces[faceIndex * 3]
        mesh.loops[faceIndex * 3 + 1].vertex_index = pmx_data.Faces[faceIndex * 3 + 2]
        mesh.loops[faceIndex * 3 + 2].vertex_index = pmx_data.Faces[faceIndex * 3 + 1]

    mesh.update()

    if bone_transfer:
        context.view_layer.update()
        return arm_obj, obj_mesh

    # Add Textures: load every referenced image; failures are collected
    # and reported once instead of aborting the import.
    # image_dic = {}
    textures_dic = {}
    NG_tex_list = []

    for (tex_index, tex_data) in enumerate(pmx_data.Textures):
        tex_path = os.path.join(base_path, tex_data.Path)
        try:
            bpy.ops.image.open(filepath=tex_path)
            # image_dic[tex_index] = bpy.data.images[len(bpy.data.images)-1]
            textures_dic[tex_index] = bpy.data.textures.new(os.path.basename(tex_path), type='IMAGE')
            textures_dic[tex_index].image = bpy.data.images[os.path.basename(tex_path)]

            # Use Alpha
            textures_dic[tex_index].image.alpha_mode = 'PREMUL'
        except:
            NG_tex_list.append(tex_data.Path)

    # Report textures that could not be loaded.
    if len(NG_tex_list):
        bpy.ops.b2pmxe.message('INVOKE_DEFAULT', type='INFO', line1="Some Texture file not found.", use_console=True)
        for data in NG_tex_list:
            print(" --> %s" % data)

    mesh.update()

    # Add Material: one Principled material per PMX material; mat_status
    # remembers (material slot, face count) for the UV pass below.
    mat_status = []
    for (mat_index, mat_data) in enumerate(pmx_data.Materials):
        blender_mat_name = Get_JP_or_EN_Name(mat_data.Name, mat_data.Name_E, use_japanese_name)
        temp_mattrial = bpy.data.materials.new(blender_mat_name)
        temp_mattrial.use_nodes = True
        temp_principled = PrincipledBSDFWrapper(temp_mattrial, is_readonly=False)
        temp_principled.base_color = mat_data.Deffuse.xyz
        temp_principled.alpha = mat_data.Deffuse.w
        mat_status.append((len(mat_status), mat_data.FaceLength))
        mesh.materials.append(temp_mattrial)

        # Flags
        # self.Both = 0
        # self.GroundShadow = 1
        # self.DropShadow = 1
        # self.OnShadow = 1
        # self.OnEdge = 1
        #
        # Edge
        # self.EdgeColor = mathutils.Vector((0,0,0,1))
        # self.EdgeSize = 1.0

        # Texture
        if mat_data.TextureIndex != -1:
            temp_tex = textures_dic[mat_data.TextureIndex]
            temp_principled.base_color_texture.image = temp_tex.image
            temp_principled.base_color_texture.use_alpha = True
            temp_principled.base_color_texture.texcoords = "UV"

    mesh.update()

    # Set Material & UV
    # Set UV Layer
    if mesh.uv_layers.active_index < 0:
        mesh.uv_layers.new(name="UV_Data")
        mesh.uv_layers.active_index = 0

    uv_data = mesh.uv_layers.active.data[:]
    # uvtex = mesh.uv_textures.new("UV_Data")
    # uv_data = uvtex.data

    # Faces are stored grouped by material, FaceLength faces per material.
    index = 0
    for dat in mat_status:
        for i in range(dat[1] // 3):
            # Set Material
            mesh.polygons[index].material_index = dat[0]

            # Set Texture
            # if pmx_data.Materials[dat[0]].TextureIndex < len(bpy.data.images) and pmx_data.Materials[dat[0]].TextureIndex >= 0:
            #     if textures_dic.get(pmx_data.Materials[dat[0]].TextureIndex, None) is not None:
            #         mesh.uv_layers[0].data[index].image = textures_dic[pmx_data.Materials[dat[0]].TextureIndex].image

            # Set UV
            poly_vert_index = mesh.polygons[index].loop_start
            uv_data[poly_vert_index + 0].uv = pmx_data.Vertices[mesh.polygons[index].vertices[0]].UV
            uv_data[poly_vert_index + 1].uv = pmx_data.Vertices[mesh.polygons[index].vertices[1]].UV
            uv_data[poly_vert_index + 2].uv = pmx_data.Vertices[mesh.polygons[index].vertices[2]].UV

            # Invert V: PMX uses a top-left UV origin, Blender bottom-left.
            uv_data[poly_vert_index + 0].uv[1] = 1 - uv_data[poly_vert_index + 0].uv[1]
            uv_data[poly_vert_index + 1].uv[1] = 1 - uv_data[poly_vert_index + 1].uv[1]
            uv_data[poly_vert_index + 2].uv[1] = 1 - uv_data[poly_vert_index + 2].uv[1]

            # TwoSide 2.6 not use?
            # todo set parameter
            # uv_data[index].use_twoside = True

            index = index + 1

    mesh.update()

    # Add Shape Key from vertex morphs (other morph types are skipped).
    if len(pmx_data.Morphs) > 0:
        # Add Basis key
        if mesh.shape_keys is None:
            obj_mesh.shape_key_add(name="Basis", from_mix=False)
            mesh.update()

        for data in pmx_data.Morphs:
            # Vertex Morph
            if data.Type == 1:
                blender_morph_name = Get_JP_or_EN_Name(data.Name, data.Name_E, use_japanese_name)
                temp_key = obj_mesh.shape_key_add(name=blender_morph_name, from_mix=False)

                for v in data.Offsets:
                    temp_key.data[v.Index].co += GT(v.Move, GlobalMatrix)

                mesh.update()

        # To activate "Basis" shape
        obj_mesh.active_shape_key_index = 0

    bpy.context.view_layer.update()

    GV.SetVertCount(len(pmx_data.Vertices))
    GV.PrintTime(filepath, type='import')
    return
def importWoWOBJ(objectFile, givenParent=None):
    """Import a WoW-exported OBJ (plus .mtl and placement CSV) into the scene.

    Parses the OBJ/MTL by hand, builds the mesh with bmesh, then recursively
    imports doodads/WMOs listed in the sibling
    ``*_ModelPlacementInformation.csv`` file, if present.

    :param objectFile: path to the .obj file.
    :param givenParent: parent object for WMO doodads (created when absent).
    :return: the newly created Blender object.
    """
    baseDir, fileName = os.path.split(objectFile)

    print('Parsing OBJ: ' + fileName)
    ### OBJ wide
    material_libs = set()  # NOTE(review): populated nowhere — appears unused.
    mtlfile = ""  # NOTE(review): stays str if no 'mtllib' line; .decode() below would then raise — assumes well-formed exports.
    verts = []
    normals = []
    uv = []
    meshes = []

    ### Per group
    class OBJMesh:
        def __init__(self):
            self.usemtl = ""
            self.name = ""
            self.faces = []

    curMesh = OBJMesh()  # NOTE(review): unused; groups are created on 'g' lines.
    meshIndex = -1

    # Pass over the OBJ in binary mode; tags are compared as bytes.
    with open(objectFile, 'rb') as f:
        for line in f:
            line_split = line.split()

            if not line_split:
                continue

            line_start = line_split[0]

            if line_start == b'mtllib':
                mtlfile = line_split[1]
            elif line_start == b'v':
                verts.append([float(v) for v in line_split[1:]])
            elif line_start == b'vn':
                normals.append([float(v) for v in line_split[1:]])
            elif line_start == b'vt':
                uv.append([float(v) for v in line_split[1:]])
            elif line_start == b'f':
                # Only the vertex index of each "v/vt/vn" triple is kept.
                line_split = line_split[1:]
                meshes[meshIndex].faces.append(
                    (int(line_split[0].split(b'/')[0]),
                     int(line_split[1].split(b'/')[0]),
                     int(line_split[2].split(b'/')[0])))
            elif line_start == b'g':
                meshIndex += 1
                meshes.append(OBJMesh())
                meshes[meshIndex].name = line_split[1].decode("utf-8")
            elif line_start == b'usemtl':
                meshes[meshIndex].usemtl = line_split[1].decode("utf-8")

    ## Materials file (.mtl): map material name -> diffuse texture path.
    materials = dict()
    matname = ""
    matfile = ""

    with open(os.path.join(baseDir, mtlfile.decode("utf-8")), 'r') as f:
        for line in f:
            line_split = line.split()

            if not line_split:
                continue

            line_start = line_split[0]

            if line_start == 'newmtl':
                matname = line_split[1]
            elif line_start == 'map_Kd':
                matfile = line_split[1]
                materials[matname] = os.path.join(baseDir, matfile)

    if bpy.ops.object.select_all.poll():
        bpy.ops.object.select_all(action='DESELECT')

    # TODO: Better handling for dupes?
    objname = os.path.basename(objectFile)

    if objname in bpy.data.objects:
        objindex = 1
        newname = objname
        while (newname in bpy.data.objects):
            newname = objname + '.' + str(objindex).rjust(3, '0')
            objindex += 1
    # NOTE(review): `newname` is computed but never used — the mesh/object
    # below are still created with `objname`; Blender will auto-suffix.

    newmesh = bpy.data.meshes.new(objname)
    obj = bpy.data.objects.new(objname, newmesh)

    ## Textures
    # TODO: Must be a better way to do this!
    materialmapping = dict()

    for matname, texturelocation in materials.items():
        # Import material only once
        if (matname not in bpy.data.materials):
            mat = bpy.data.materials.new(name=matname)
            mat.use_nodes = True
            mat.blend_method = 'CLIP'
            principled = PrincipledBSDFWrapper(mat, is_readonly=False)
            principled.specular = 0.0
            principled.base_color_texture.image = load_image(texturelocation)
            # Hook the image alpha output (1) into the Principled alpha
            # socket (index 18 on this Blender version's node).
            mat.node_tree.links.new(
                mat.node_tree.nodes['Image Texture'].outputs[1],
                mat.node_tree.nodes['Principled BSDF'].inputs[18])

        obj.data.materials.append(bpy.data.materials[matname])

        # TODO: Must be a better way to do this!
        materialmapping[matname] = len(obj.data.materials) - 1

    ## Meshes: build all vertices once, then faces per OBJ group.
    bm = bmesh.new()

    i = 0
    for v in verts:
        vert = bm.verts.new(v)
        vert.normal = normals[i]
        i = i + 1

    bm.verts.ensure_lookup_table()
    bm.verts.index_update()

    for mesh in meshes:
        exampleFaceSet = False
        for face in mesh.faces:
            try:
                ## TODO: Must be a better way to do this, this is already much faster than doing material every face, but still.
                if exampleFaceSet == False:
                    # First face of the group: set material/smooth by hand,
                    # then reuse it as a template for the rest.
                    bm.faces.new((
                        bm.verts[face[0] - 1],
                        bm.verts[face[1] - 1],
                        bm.verts[face[2] - 1]))
                    bm.faces.ensure_lookup_table()
                    bm.faces[-1].material_index = materialmapping[mesh.usemtl]
                    bm.faces[-1].smooth = True
                    exampleFace = bm.faces[-1]
                    exampleFaceSet = True
                else:
                    ## Use example face if set to speed up material copy!
                    bm.faces.new((
                        bm.verts[face[0] - 1],
                        bm.verts[face[1] - 1],
                        bm.verts[face[2] - 1]), exampleFace)
            except ValueError:
                ## TODO: Duplicate faces happen for some reason
                pass

    # UVs are indexed per-vertex (one UV per OBJ vertex) in these exports.
    uv_layer = bm.loops.layers.uv.new()
    for face in bm.faces:
        for loop in face.loops:
            loop[uv_layer].uv = uv[loop.vert.index]

    bm.to_mesh(newmesh)
    bm.free()

    ## Rotate object the right way (WoW is Z-up relative to the OBJ axes).
    obj.rotation_euler = [0, 0, 0]
    obj.rotation_euler.x = radians(90)

    bpy.context.scene.collection.objects.link(obj)
    obj.select_set(True)

    ## WoW coordinate system constants.
    max_size = 51200 / 3
    map_size = max_size * 2
    adt_size = map_size / 64  # NOTE(review): unused below.

    ## Import doodads and/or WMOs from the placement CSV, when present.
    csvPath = objectFile.replace('.obj', '_ModelPlacementInformation.csv')

    if os.path.exists(csvPath):
        with open(csvPath) as csvFile:
            reader = csv.DictReader(csvFile, delimiter=';')
            # A 'Type' column distinguishes ADT tiles from WMO interiors.
            if 'Type' in reader.fieldnames:
                importType = 'ADT'

                # Empty parents keeping WMOs and doodads grouped under the tile.
                wmoparent = bpy.data.objects.new("WMOs", None)
                wmoparent.parent = obj
                wmoparent.name = "WMOs"
                wmoparent.rotation_euler = [0, 0, 0]
                wmoparent.rotation_euler.x = radians(-90)
                bpy.context.scene.collection.objects.link(wmoparent)

                doodadparent = bpy.data.objects.new("Doodads", None)
                doodadparent.parent = obj
                doodadparent.name = "Doodads"
                doodadparent.rotation_euler = [0, 0, 0]
                doodadparent.rotation_euler.x = radians(-90)
                bpy.context.scene.collection.objects.link(doodadparent)
            else:
                importType = 'WMO'
                if not givenParent:
                    print('WMO import without given parent, creating..')
                    givenParent = bpy.data.objects.new("WMO parent", None)
                    givenParent.parent = obj
                    givenParent.name = "Doodads"
                    givenParent.rotation_euler = [0, 0, 0]
                    givenParent.rotation_euler.x = radians(-90)
                    bpy.context.scene.collection.objects.link(givenParent)

            for row in reader:
                if importType == 'ADT':
                    # Track already-imported model IDs on the scene so
                    # repeated tiles don't duplicate placements.
                    if 'importedModelIDs' in bpy.context.scene:
                        tempModelIDList = bpy.context.scene['importedModelIDs']
                    else:
                        tempModelIDList = []

                    if row['ModelId'] in tempModelIDList:
                        print('Skipping already imported model ' + row['ModelId'])
                        continue
                    else:
                        tempModelIDList.append(row['ModelId'])

                    # ADT CSV
                    if row['Type'] == 'wmo':
                        print('ADT WMO import: ' + row['ModelFile'])

                        # Make WMO parent that holds WMO and doodads
                        parent = bpy.data.objects.new(row['ModelFile'] + " parent", None)
                        parent.parent = wmoparent
                        # Map WoW world coordinates onto Blender axes.
                        parent.location = (17066 - float(row['PositionX']), (17066 - float(row['PositionZ'])) * -1, float(row['PositionY']))
                        parent.rotation_euler = [0, 0, 0]
                        parent.rotation_euler.x += radians(float(row['RotationZ']))
                        parent.rotation_euler.y += radians(float(row['RotationX']))
                        parent.rotation_euler.z = radians((-90 + float(row['RotationY'])))

                        if row['ScaleFactor']:
                            parent.scale = (float(row['ScaleFactor']), float(row['ScaleFactor']), float(row['ScaleFactor']))

                        bpy.context.scene.collection.objects.link(parent)

                        ## Only import OBJ if model is not yet in scene, otherwise copy existing
                        if row['ModelFile'] not in bpy.data.objects:
                            importedFile = importWoWOBJ(os.path.join(baseDir, row['ModelFile']), parent)
                        else:
                            ## Don't copy WMOs with doodads!
                            if os.path.exists(os.path.join(baseDir, row['ModelFile'].replace('.obj', '_ModelPlacementInformation.csv'))):
                                importedFile = importWoWOBJ(os.path.join(baseDir, row['ModelFile']), parent)
                            else:
                                originalObject = bpy.data.objects[row['ModelFile']]
                                importedFile = originalObject.copy()
                                importedFile.data = originalObject.data.copy()
                                bpy.context.scene.collection.objects.link(importedFile)

                        importedFile.parent = parent
                    elif row['Type'] == 'm2':
                        print('ADT M2 import: ' + row['ModelFile'])

                        ## Only import OBJ if model is not yet in scene, otherwise copy existing
                        if row['ModelFile'] not in bpy.data.objects:
                            importedFile = importWoWOBJ(os.path.join(baseDir, row['ModelFile']))
                        else:
                            originalObject = bpy.data.objects[row['ModelFile']]
                            importedFile = originalObject.copy()
                            importedFile.rotation_euler = [0, 0, 0]
                            importedFile.rotation_euler.x = radians(90)
                            bpy.context.scene.collection.objects.link(importedFile)

                        importedFile.parent = doodadparent
                        importedFile.location.x = (17066 - float(row['PositionX']))
                        importedFile.location.y = (17066 - float(row['PositionZ'])) * -1
                        importedFile.location.z = float(row['PositionY'])
                        importedFile.rotation_euler.x += radians(float(row['RotationZ']))
                        importedFile.rotation_euler.y += radians(float(row['RotationX']))
                        importedFile.rotation_euler.z = radians(90 + float(row['RotationY']))

                        if row['ScaleFactor']:
                            importedFile.scale = (float(row['ScaleFactor']), float(row['ScaleFactor']), float(row['ScaleFactor']))

                    bpy.context.scene['importedModelIDs'] = tempModelIDList
                else:
                    # WMO CSV: doodads placed by position + quaternion.
                    print('WMO M2 import: ' + row['ModelFile'])
                    if row['ModelFile'] not in bpy.data.objects:
                        importedFile = importWoWOBJ(os.path.join(baseDir, row['ModelFile']))
                    else:
                        originalObject = bpy.data.objects[row['ModelFile']]
                        importedFile = originalObject.copy()
                        bpy.context.scene.collection.objects.link(importedFile)

                    importedFile.location = (float(row['PositionX']) * -1, float(row['PositionY']) * -1, float(row['PositionZ']))

                    importedFile.rotation_euler = [0, 0, 0]
                    rotQuat = Quaternion((float(row['RotationW']), float(row['RotationX']), float(row['RotationY']), float(row['RotationZ'])))
                    rotEul = rotQuat.to_euler()
                    rotEul.x += radians(90)
                    rotEul.z += radians(180)
                    importedFile.rotation_euler = rotEul
                    importedFile.parent = givenParent or obj

                    if row['ScaleFactor']:
                        importedFile.scale = (float(row['ScaleFactor']), float(row['ScaleFactor']), float(row['ScaleFactor']))

    return obj


#objectFile = "D:\\models\\world\\maps\\azeroth\\azeroth_32_32.obj"
#objectFile = "D:\\models\\world\\maps\\kultiras\\kultiras_32_29.obj"
#objectFile = "D:\\models\\world\\wmo\\kultiras\\human\\8hu_kultiras_seabattlement01.obj"
#importWoWOBJ(objectFile)
def load_texture_to_mat(self, tex_name, mat):
    """Plug the texture named *tex_name* into *mat*'s base-color slot.

    The image is resolved through load_texture() and assigned to the
    material's Principled BSDF base-color texture via the wrapper.
    """
    image = load_texture(self, tex_name)
    wrapper = PrincipledBSDFWrapper(mat, is_readonly=False)
    wrapper.base_color_texture.image = image
def process_next_chunk(context, file, previous_chunk, importedObjects, IMAGE_SEARCH):
    """Recursively parse the sub-chunks of one 3DS chunk from *file*.

    Walks the binary sub-chunks of *previous_chunk*, creating Blender
    meshes, materials, lamps and the keyframe object hierarchy as it goes.
    Created objects are appended to *importedObjects* (mutated in place).
    *IMAGE_SEARCH* enables recursive image lookup when loading textures.

    Relies on module-level helpers and constants defined elsewhere in this
    file: read_chunk, read_string, skip_to_end, Chunk, the chunk-ID
    constants (VERSION, OBJECTINFO, MAT_*, OBJECT_*, EK_*, ED_*),
    object_dictionary, object_matrix, ROOT_OBJECT and
    add_texture_to_material.
    """
    from bpy_extras.image_utils import load_image

    #print previous_chunk.bytes_read, 'BYTES READ'

    # Parser state shared across the sub-chunks of the current object /
    # material being assembled.
    contextObName = None
    contextLamp = [None, None]  # object, Data
    contextMaterial = None
    contextMaterialWrapper = None
    contextMatrix_rot = None  # Blender.mathutils.Matrix(); contextMatrix.identity()
    #contextMatrix_tx = None # Blender.mathutils.Matrix(); contextMatrix.identity()
    contextMesh_vertls = None  # flat array: (verts * 3)
    contextMesh_facels = None
    contextMeshMaterials = []  # (matname, [face_idxs])
    contextMeshUV = None  # flat array (verts * 2)

    TEXTURE_DICT = {}  # material name -> loaded image (or None)
    MATDICT = {}  # material name -> Blender material
    # TEXMODE = Mesh.FaceModes['TEX']

    # Localspace variable names, faster.
    STRUCT_SIZE_FLOAT = struct.calcsize('f')
    STRUCT_SIZE_2FLOAT = struct.calcsize('2f')
    STRUCT_SIZE_3FLOAT = struct.calcsize('3f')
    STRUCT_SIZE_4FLOAT = struct.calcsize('4f')
    STRUCT_SIZE_UNSIGNED_SHORT = struct.calcsize('H')
    STRUCT_SIZE_4UNSIGNED_SHORT = struct.calcsize('4H')
    STRUCT_SIZE_4x3MAT = struct.calcsize('ffffffffffff')
    # STRUCT_SIZE_4x3MAT = calcsize('ffffffffffff')
    # print STRUCT_SIZE_4x3MAT, ' STRUCT_SIZE_4x3MAT'

    # only init once
    object_list = []  # for hierarchy
    object_parent = []  # index of parent in hierarchy, 0xFFFF = no parent
    pivot_list = []  # pivots with hierarchy handling

    def putContextMesh(context, myContextMesh_vertls, myContextMesh_facels, myContextMeshMaterials):
        """Flush the accumulated vertex/face/UV/material state into a new
        Blender mesh object and link it into the active collection."""
        bmesh = bpy.data.meshes.new(contextObName)

        if myContextMesh_facels is None:
            myContextMesh_facels = []

        if myContextMesh_vertls:

            bmesh.vertices.add(len(myContextMesh_vertls) // 3)
            bmesh.vertices.foreach_set("co", myContextMesh_vertls)

            nbr_faces = len(myContextMesh_facels)
            bmesh.polygons.add(nbr_faces)
            bmesh.loops.add(nbr_faces * 3)
            eekadoodle_faces = []
            # Rotate any triangle whose 3rd index is 0: Blender treats a
            # trailing zero loop index specially ("eekadoodle" fix).
            for v1, v2, v3 in myContextMesh_facels:
                eekadoodle_faces.extend((v3, v1, v2) if v3 == 0 else (v1, v2, v3))
            bmesh.polygons.foreach_set("loop_start", range(0, nbr_faces * 3, 3))
            bmesh.polygons.foreach_set("loop_total", (3, ) * nbr_faces)
            bmesh.loops.foreach_set("vertex_index", eekadoodle_faces)

            if bmesh.polygons and contextMeshUV:
                bmesh.uv_layers.new()
                uv_faces = bmesh.uv_layers.active.data[:]
            else:
                uv_faces = None

            for mat_idx, (matName, faces) in enumerate(myContextMeshMaterials):
                if matName is None:
                    bmat = None
                else:
                    bmat = MATDICT.get(matName)
                    # in rare cases no materials defined.
                    if bmat:
                        img = TEXTURE_DICT.get(bmat.name)
                    else:
                        print("    warning: material %r not defined!" % matName)
                        bmat = MATDICT[matName] = bpy.data.materials.new(matName)
                        img = None

                bmesh.materials.append(bmat)  # can be None

                if uv_faces and img:
                    for fidx in faces:
                        bmesh.polygons[fidx].material_index = mat_idx
                        # TODO: How to restore this?
                        # uv_faces[fidx].image = img
                else:
                    for fidx in faces:
                        bmesh.polygons[fidx].material_index = mat_idx

            if uv_faces:
                uvl = bmesh.uv_layers.active.data[:]
                for fidx, pl in enumerate(bmesh.polygons):
                    face = myContextMesh_facels[fidx]
                    v1, v2, v3 = face

                    # eekadoodle: undo the rotation applied above so UVs
                    # line up with the stored loop order.
                    if v3 == 0:
                        v1, v2, v3 = v3, v1, v2

                    uvl[pl.loop_start].uv = contextMeshUV[v1 * 2:(v1 * 2) + 2]
                    uvl[pl.loop_start + 1].uv = contextMeshUV[v2 * 2:(v2 * 2) + 2]
                    uvl[pl.loop_start + 2].uv = contextMeshUV[v3 * 2:(v3 * 2) + 2]
                    # always a tri

        bmesh.validate()
        bmesh.update()

        ob = bpy.data.objects.new(contextObName, bmesh)
        object_dictionary[contextObName] = ob
        context.view_layer.active_layer_collection.collection.objects.link(ob)
        importedObjects.append(ob)

        if contextMatrix_rot:
            ob.matrix_local = contextMatrix_rot
            object_matrix[ob] = contextMatrix_rot.copy()

    #a spare chunk
    new_chunk = Chunk()
    temp_chunk = Chunk()

    CreateBlenderObject = False

    def read_float_color(temp_chunk):
        # Three little-endian floats -> [r, g, b].
        temp_data = file.read(STRUCT_SIZE_3FLOAT)
        temp_chunk.bytes_read += STRUCT_SIZE_3FLOAT
        return [float(col) for col in struct.unpack('<3f', temp_data)]

    def read_float(temp_chunk):
        temp_data = file.read(STRUCT_SIZE_FLOAT)
        temp_chunk.bytes_read += STRUCT_SIZE_FLOAT
        return struct.unpack('<f', temp_data)[0]

    def read_short(temp_chunk):
        temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT)
        temp_chunk.bytes_read += STRUCT_SIZE_UNSIGNED_SHORT
        return struct.unpack('<H', temp_data)[0]

    def read_byte_color(temp_chunk):
        temp_data = file.read(struct.calcsize('3B'))
        temp_chunk.bytes_read += 3
        return [float(col) / 255 for col in struct.unpack('<3B', temp_data)]  # data [0,1,2] == rgb

    def read_texture(new_chunk, temp_chunk, name, mapto):
        """Read a texture-map chunk (file path, scale/offset/tiling) and
        attach the image to the current material's *mapto* channel."""
        # new_texture = bpy.data.textures.new(name, type='IMAGE')

        u_scale, v_scale, u_offset, v_offset = 1.0, 1.0, 0.0, 0.0
        mirror = False
        extension = 'wrap'
        while (new_chunk.bytes_read < new_chunk.length):
            #print 'MAT_TEXTURE_MAP..while', new_chunk.bytes_read, new_chunk.length
            read_chunk(file, temp_chunk)

            if temp_chunk.ID == MAT_MAP_FILEPATH:
                texture_name, read_str_len = read_string(file)

                img = TEXTURE_DICT[contextMaterial.name] = load_image(texture_name, dirname, recursive=IMAGE_SEARCH)
                temp_chunk.bytes_read += read_str_len  # plus one for the null character that gets removed

            elif temp_chunk.ID == MAT_MAP_USCALE:
                u_scale = read_float(temp_chunk)
            elif temp_chunk.ID == MAT_MAP_VSCALE:
                v_scale = read_float(temp_chunk)

            elif temp_chunk.ID == MAT_MAP_UOFFSET:
                u_offset = read_float(temp_chunk)
            elif temp_chunk.ID == MAT_MAP_VOFFSET:
                v_offset = read_float(temp_chunk)

            elif temp_chunk.ID == MAT_MAP_TILING:
                tiling = read_short(temp_chunk)
                if tiling & 0x2:
                    extension = 'mirror'
                elif tiling & 0x10:
                    extension = 'decal'

            elif temp_chunk.ID == MAT_MAP_ANG:
                print("\nwarning: ignoring UV rotation")

            skip_to_end(file, temp_chunk)
            new_chunk.bytes_read += temp_chunk.bytes_read

        # add the map to the material in the right channel
        # NOTE(review): if no MAT_MAP_FILEPATH sub-chunk occurred, `img` is
        # never bound here and this would raise NameError — presumably the
        # file format always provides a path sub-chunk; confirm.
        if img:
            add_texture_to_material(img, (u_scale, v_scale, 1), (u_offset, v_offset, 0), extension, contextMaterialWrapper, mapto)

    dirname = os.path.dirname(file.name)

    #loop through all the data for this chunk (previous chunk) and see what it is
    while (previous_chunk.bytes_read < previous_chunk.length):
        #print '\t', previous_chunk.bytes_read, 'keep going'
        #read the next chunk
        #print 'reading a chunk'
        read_chunk(file, new_chunk)

        #is it a Version chunk?
        if new_chunk.ID == VERSION:
            #print 'if new_chunk.ID == VERSION:'
            #print 'found a VERSION chunk'
            #read in the version of the file
            #it's an unsigned short (H)
            temp_data = file.read(struct.calcsize('I'))
            version = struct.unpack('<I', temp_data)[0]
            new_chunk.bytes_read += 4  # read the 4 bytes for the version number

            #this loader works with version 3 and below, but may not with 4 and above
            if version > 3:
                print('\tNon-Fatal Error: Version greater than 3, may not load correctly: ', version)

        #is it an object info chunk?
        elif new_chunk.ID == OBJECTINFO:
            #print 'elif new_chunk.ID == OBJECTINFO:'
            # print 'found an OBJECTINFO chunk'
            # Recurse: an OBJECTINFO chunk is a container of sub-chunks.
            process_next_chunk(context, file, new_chunk, importedObjects, IMAGE_SEARCH)

            #keep track of how much we read in the main chunk
            new_chunk.bytes_read += temp_chunk.bytes_read

        #is it an object chunk?
        elif new_chunk.ID == OBJECT:

            if CreateBlenderObject:
                # Flush the previous object before starting a new one.
                putContextMesh(context, contextMesh_vertls, contextMesh_facels, contextMeshMaterials)
                contextMesh_vertls = []
                contextMesh_facels = []  ## preparing to receive the next object
                contextMeshMaterials = []  # matname:[face_idxs]
                contextMeshUV = None
                # Reset matrix
                contextMatrix_rot = None
                #contextMatrix_tx = None

            CreateBlenderObject = True
            contextObName, read_str_len = read_string(file)
            new_chunk.bytes_read += read_str_len

        #is it a material chunk?
        elif new_chunk.ID == MATERIAL:
            # print("read material")
            #print 'elif new_chunk.ID == MATERIAL:'
            contextMaterial = bpy.data.materials.new('Material')
            contextMaterialWrapper = PrincipledBSDFWrapper(contextMaterial, is_readonly=False, use_nodes=True)

        elif new_chunk.ID == MAT_NAME:
            #print 'elif new_chunk.ID == MAT_NAME:'
            material_name, read_str_len = read_string(file)
            # print("material name", material_name)
            #plus one for the null character that ended the string
            new_chunk.bytes_read += read_str_len
            contextMaterial.name = material_name.rstrip()  # remove trailing whitespace
            MATDICT[material_name] = contextMaterial

        elif new_chunk.ID == MAT_AMBIENT:
            #print 'elif new_chunk.ID == MAT_AMBIENT:'
            read_chunk(file, temp_chunk)
            # TODO: consider ambient term somehow. maybe add to color
            # if temp_chunk.ID == MAT_FLOAT_COLOR:
            #     contextMaterial.mirror_color = read_float_color(temp_chunk)
            #     temp_data = file.read(struct.calcsize('3f'))
            #     temp_chunk.bytes_read += 12
            #     contextMaterial.mirCol = [float(col) for col in struct.unpack('<3f', temp_data)]
            # elif temp_chunk.ID == MAT_24BIT_COLOR:
            #     contextMaterial.mirror_color = read_byte_color(temp_chunk)
            #     temp_data = file.read(struct.calcsize('3B'))
            #     temp_chunk.bytes_read += 3
            #     contextMaterial.mirCol = [float(col)/255 for col in struct.unpack('<3B', temp_data)]  # data [0,1,2] == rgb
            # else:
            skip_to_end(file, temp_chunk)
            new_chunk.bytes_read += temp_chunk.bytes_read

        elif new_chunk.ID == MAT_DIFFUSE:
            #print 'elif new_chunk.ID == MAT_DIFFUSE:'
            read_chunk(file, temp_chunk)
            if temp_chunk.ID == MAT_FLOAT_COLOR:
                contextMaterialWrapper.base_color = read_float_color(temp_chunk)
                # temp_data = file.read(struct.calcsize('3f'))
                # temp_chunk.bytes_read += 12
                # contextMaterial.rgbCol = [float(col) for col in struct.unpack('<3f', temp_data)]
            elif temp_chunk.ID == MAT_24BIT_COLOR:
                contextMaterialWrapper.base_color = read_byte_color(temp_chunk)
                # temp_data = file.read(struct.calcsize('3B'))
                # temp_chunk.bytes_read += 3
                # contextMaterial.rgbCol = [float(col)/255 for col in struct.unpack('<3B', temp_data)]  # data [0,1,2] == rgb
            else:
                skip_to_end(file, temp_chunk)
            # print("read material diffuse color", contextMaterial.diffuse_color)
            new_chunk.bytes_read += temp_chunk.bytes_read

        elif new_chunk.ID == MAT_SPECULAR:
            #print 'elif new_chunk.ID == MAT_SPECULAR:'
            read_chunk(file, temp_chunk)
            # TODO: consider using specular term somehow
            # if temp_chunk.ID == MAT_FLOAT_COLOR:
            #     contextMaterial.specular_color = read_float_color(temp_chunk)
            #     temp_data = file.read(struct.calcsize('3f'))
            #     temp_chunk.bytes_read += 12
            #     contextMaterial.mirCol = [float(col) for col in struct.unpack('<3f', temp_data)]
            # elif temp_chunk.ID == MAT_24BIT_COLOR:
            #     contextMaterial.specular_color = read_byte_color(temp_chunk)
            #     temp_data = file.read(struct.calcsize('3B'))
            #     temp_chunk.bytes_read += 3
            #     contextMaterial.mirCol = [float(col)/255 for col in struct.unpack('<3B', temp_data)]  # data [0,1,2] == rgb
            # else:
            skip_to_end(file, temp_chunk)
            new_chunk.bytes_read += temp_chunk.bytes_read

        elif new_chunk.ID == MAT_TEXTURE_MAP:
            read_texture(new_chunk, temp_chunk, "Diffuse", "COLOR")

        elif new_chunk.ID == MAT_SPECULAR_MAP:
            read_texture(new_chunk, temp_chunk, "Specular", "SPECULARITY")

        elif new_chunk.ID == MAT_OPACITY_MAP:
            read_texture(new_chunk, temp_chunk, "Opacity", "ALPHA")

        elif new_chunk.ID == MAT_BUMP_MAP:
            read_texture(new_chunk, temp_chunk, "Bump", "NORMAL")

        elif new_chunk.ID == MAT_TRANSPARENCY:
            #print 'elif new_chunk.ID == MAT_TRANSPARENCY:'
            read_chunk(file, temp_chunk)

            # 3DS stores transparency as a percentage; alpha = 1 - t.
            if temp_chunk.ID == PERCENTAGE_SHORT:
                temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT)
                temp_chunk.bytes_read += STRUCT_SIZE_UNSIGNED_SHORT
                contextMaterialWrapper.alpha = 1 - (float(struct.unpack('<H', temp_data)[0]) / 100)
            elif temp_chunk.ID == PERCENTAGE_FLOAT:
                temp_data = file.read(STRUCT_SIZE_FLOAT)
                temp_chunk.bytes_read += STRUCT_SIZE_FLOAT
                contextMaterialWrapper.alpha = 1 - float(struct.unpack('f', temp_data)[0])
            else:
                print("Cannot read material transparency")

            new_chunk.bytes_read += temp_chunk.bytes_read

        elif new_chunk.ID == OBJECT_LIGHT:  # Basic lamp support.

            temp_data = file.read(STRUCT_SIZE_3FLOAT)

            x, y, z = struct.unpack('<3f', temp_data)
            new_chunk.bytes_read += STRUCT_SIZE_3FLOAT

            # no lamp in dict that would be confusing
            contextLamp[1] = bpy.data.lights.new("Lamp", 'POINT')
            contextLamp[0] = ob = bpy.data.objects.new("Lamp", contextLamp[1])

            context.view_layer.active_layer_collection.collection.objects.link(ob)
            importedObjects.append(contextLamp[0])

            #print 'number of faces: ', num_faces
            #print x,y,z
            contextLamp[0].location = x, y, z

            # Reset matrix
            contextMatrix_rot = None
            #contextMatrix_tx = None
            #print contextLamp.name,

        elif new_chunk.ID == OBJECT_MESH:
            # print 'Found an OBJECT_MESH chunk'
            pass
        elif new_chunk.ID == OBJECT_VERTICES:
            """
            Worldspace vertex locations
            """
            # print 'elif new_chunk.ID == OBJECT_VERTICES:'
            temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT)
            num_verts = struct.unpack('<H', temp_data)[0]
            new_chunk.bytes_read += 2

            # print 'number of verts: ', num_verts
            contextMesh_vertls = struct.unpack('<%df' % (num_verts * 3), file.read(STRUCT_SIZE_3FLOAT * num_verts))
            new_chunk.bytes_read += STRUCT_SIZE_3FLOAT * num_verts
            # dummyvert is not used atm!

            #print 'object verts: bytes read: ', new_chunk.bytes_read

        elif new_chunk.ID == OBJECT_FACES:
            # print 'elif new_chunk.ID == OBJECT_FACES:'
            temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT)
            num_faces = struct.unpack('<H', temp_data)[0]
            new_chunk.bytes_read += 2
            #print 'number of faces: ', num_faces

            # print '\ngetting a face'
            temp_data = file.read(STRUCT_SIZE_4UNSIGNED_SHORT * num_faces)
            new_chunk.bytes_read += STRUCT_SIZE_4UNSIGNED_SHORT * num_faces  # 4 short ints x 2 bytes each

            contextMesh_facels = struct.unpack('<%dH' % (num_faces * 4), temp_data)
            # Keep only the 3 vertex indices of each record; the 4th short
            # is the 3DS face-flags word, which this loader ignores.
            contextMesh_facels = [contextMesh_facels[i - 3:i] for i in range(3, (num_faces * 4) + 3, 4)]

        elif new_chunk.ID == OBJECT_MATERIAL:
            # print 'elif new_chunk.ID == OBJECT_MATERIAL:'
            material_name, read_str_len = read_string(file)
            new_chunk.bytes_read += read_str_len  # remove 1 null character.

            temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT)
            num_faces_using_mat = struct.unpack('<H', temp_data)[0]
            new_chunk.bytes_read += STRUCT_SIZE_UNSIGNED_SHORT

            temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT * num_faces_using_mat)
            new_chunk.bytes_read += STRUCT_SIZE_UNSIGNED_SHORT * num_faces_using_mat

            temp_data = struct.unpack("<%dH" % (num_faces_using_mat), temp_data)

            contextMeshMaterials.append((material_name, temp_data))

            #look up the material in all the materials

        elif new_chunk.ID == OBJECT_UV:
            temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT)
            num_uv = struct.unpack('<H', temp_data)[0]
            new_chunk.bytes_read += 2

            temp_data = file.read(STRUCT_SIZE_2FLOAT * num_uv)
            new_chunk.bytes_read += STRUCT_SIZE_2FLOAT * num_uv
            contextMeshUV = struct.unpack('<%df' % (num_uv * 2), temp_data)

        elif new_chunk.ID == OBJECT_TRANS_MATRIX:
            # How do we know the matrix size? 54 == 4x4 48 == 4x3
            temp_data = file.read(STRUCT_SIZE_4x3MAT)
            data = list(struct.unpack('<ffffffffffff', temp_data))
            new_chunk.bytes_read += STRUCT_SIZE_4x3MAT

            # 4x3 row-major on disk -> 4x4 column-style Blender matrix.
            contextMatrix_rot = mathutils.Matrix((
                data[:3] + [0],
                data[3:6] + [0],
                data[6:9] + [0],
                data[9:] + [1],
            )).transposed()

        elif (new_chunk.ID == MAT_MAP_FILEPATH):
            texture_name, read_str_len = read_string(file)
            if contextMaterial.name not in TEXTURE_DICT:
                TEXTURE_DICT[contextMaterial.name] = load_image(texture_name, dirname, place_holder=False, recursive=IMAGE_SEARCH)

            new_chunk.bytes_read += read_str_len  # plus one for the null character that gets removed
        elif new_chunk.ID == EDITKEYFRAME:
            pass

        # including these here means their EK_OB_NODE_HEADER are scanned
        elif new_chunk.ID in {ED_KEY_AMBIENT_NODE, ED_KEY_OBJECT_NODE, ED_KEY_CAMERA_NODE, ED_KEY_TARGET_NODE, ED_KEY_LIGHT_NODE, ED_KEY_L_TARGET_NODE, ED_KEY_SPOTLIGHT_NODE}:
            # another object is being processed
            child = None

        elif new_chunk.ID == EK_OB_NODE_HEADER:
            object_name, read_str_len = read_string(file)
            new_chunk.bytes_read += read_str_len
            temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT * 2)
            new_chunk.bytes_read += 4
            temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT)
            hierarchy = struct.unpack('<H', temp_data)[0]
            new_chunk.bytes_read += 2

            child = object_dictionary.get(object_name)

            if child is None:
                child = bpy.data.objects.new(object_name, None)  # create an empty object
                context.view_layer.active_layer_collection.collection.objects.link(child)
                importedObjects.append(child)

            object_list.append(child)
            object_parent.append(hierarchy)
            pivot_list.append(mathutils.Vector((0.0, 0.0, 0.0)))

        elif new_chunk.ID == EK_OB_INSTANCE_NAME:
            object_name, read_str_len = read_string(file)
            # child.name = object_name
            child.name += "." + object_name
            object_dictionary[object_name] = child
            new_chunk.bytes_read += read_str_len
            # print("new instance object:", object_name)

        elif new_chunk.ID == EK_OB_PIVOT:  # translation
            temp_data = file.read(STRUCT_SIZE_3FLOAT)
            pivot = struct.unpack('<3f', temp_data)
            new_chunk.bytes_read += STRUCT_SIZE_3FLOAT
            pivot_list[len(pivot_list) - 1] = mathutils.Vector(pivot)

        elif new_chunk.ID == EK_OB_POSITION_TRACK:  # translation
            # Skip the 5-short track header, then read the key count.
            new_chunk.bytes_read += STRUCT_SIZE_UNSIGNED_SHORT * 5
            temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT * 5)
            temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT)
            nkeys = struct.unpack('<H', temp_data)[0]
            temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT)
            new_chunk.bytes_read += STRUCT_SIZE_UNSIGNED_SHORT * 2
            for i in range(nkeys):
                temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT)
                nframe = struct.unpack('<H', temp_data)[0]
                new_chunk.bytes_read += STRUCT_SIZE_UNSIGNED_SHORT
                temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT * 2)
                new_chunk.bytes_read += STRUCT_SIZE_UNSIGNED_SHORT * 2
                temp_data = file.read(STRUCT_SIZE_3FLOAT)
                loc = struct.unpack('<3f', temp_data)
                new_chunk.bytes_read += STRUCT_SIZE_3FLOAT
                # Only the rest-pose key (frame 0) is applied; animation
                # keys are read past but not imported.
                if nframe == 0:
                    child.location = loc

        elif new_chunk.ID == EK_OB_ROTATION_TRACK:  # rotation
            new_chunk.bytes_read += STRUCT_SIZE_UNSIGNED_SHORT * 5
            temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT * 5)
            temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT)
            nkeys = struct.unpack('<H', temp_data)[0]
            temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT)
            new_chunk.bytes_read += STRUCT_SIZE_UNSIGNED_SHORT * 2
            for i in range(nkeys):
                temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT)
                nframe = struct.unpack('<H', temp_data)[0]
                new_chunk.bytes_read += STRUCT_SIZE_UNSIGNED_SHORT
                temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT * 2)
                new_chunk.bytes_read += STRUCT_SIZE_UNSIGNED_SHORT * 2
                temp_data = file.read(STRUCT_SIZE_4FLOAT)
                rad, axis_x, axis_y, axis_z = struct.unpack("<4f", temp_data)
                new_chunk.bytes_read += STRUCT_SIZE_4FLOAT
                if nframe == 0:
                    child.rotation_euler = mathutils.Quaternion((axis_x, axis_y, axis_z), -rad).to_euler()  # why negative?

        elif new_chunk.ID == EK_OB_SCALE_TRACK:  # scale
            new_chunk.bytes_read += STRUCT_SIZE_UNSIGNED_SHORT * 5
            temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT * 5)
            temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT)
            nkeys = struct.unpack('<H', temp_data)[0]
            temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT)
            new_chunk.bytes_read += STRUCT_SIZE_UNSIGNED_SHORT * 2
            for i in range(nkeys):
                temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT)
                nframe = struct.unpack('<H', temp_data)[0]
                new_chunk.bytes_read += STRUCT_SIZE_UNSIGNED_SHORT
                temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT * 2)
                new_chunk.bytes_read += STRUCT_SIZE_UNSIGNED_SHORT * 2
                temp_data = file.read(STRUCT_SIZE_3FLOAT)
                sca = struct.unpack('<3f', temp_data)
                new_chunk.bytes_read += STRUCT_SIZE_3FLOAT
                if nframe == 0:
                    child.scale = sca

        else:
            # (new_chunk.ID!=VERSION or new_chunk.ID!=OBJECTINFO or new_chunk.ID!=OBJECT or new_chunk.ID!=MATERIAL):
            # print 'skipping to end of this chunk'
            #print("unknown chunk: "+hex(new_chunk.ID))
            buffer_size = new_chunk.length - new_chunk.bytes_read
            binary_format = "%ic" % buffer_size
            temp_data = file.read(struct.calcsize(binary_format))
            new_chunk.bytes_read += buffer_size

        #update the previous chunk bytes read
        # print 'previous_chunk.bytes_read += new_chunk.bytes_read'
        # print previous_chunk.bytes_read, new_chunk.bytes_read
        previous_chunk.bytes_read += new_chunk.bytes_read
        ## print 'Bytes left in this chunk: ', previous_chunk.length - previous_chunk.bytes_read

    # FINISHED LOOP
    # There will be a number of objects still not added
    if CreateBlenderObject:
        putContextMesh(context, contextMesh_vertls, contextMesh_facels, contextMeshMaterials)

    # Assign parents to objects
    # check _if_ we need to assign first because doing so recalcs the depsgraph
    for ind, ob in enumerate(object_list):
        parent = object_parent[ind]
        if parent == ROOT_OBJECT:
            if ob.parent is not None:
                ob.parent = None
        else:
            if ob.parent != object_list[parent]:
                if ob == object_list[parent]:
                    print('   warning: Cannot assign self to parent ', ob)
                else:
                    ob.parent = object_list[parent]

            # pivot_list[ind] += pivot_list[parent]  # XXX, not sure this is correct, should parent space matrix be applied before combining?
    # fix pivots
    for ind, ob in enumerate(object_list):
        if ob.type == 'MESH':
            pivot = pivot_list[ind]
            pivot_matrix = object_matrix.get(ob, mathutils.Matrix())  # unlikely to fail
            pivot_matrix = mathutils.Matrix.Translation(pivot_matrix.to_3x3() @ -pivot)
            ob.data.transform(pivot_matrix)
# --- scene reset -------------------------------------------------------
# Remove every object in the 'cubes' collection.  Iterate a snapshot
# ([:]) because removal mutates the collection while we loop.
for ob in bpy.data.collections['cubes'].objects[:]:
    bpy.data.objects.remove(ob, do_unlink=True)

# deleting materials
# FIX: iterate a snapshot here too — removing entries from
# bpy.data.materials while iterating it directly can skip materials,
# leaving stale "mat1.*" copies behind (same idiom as the loop above).
for m in bpy.data.materials[:]:
    if m.name.startswith("mat1."):
        print("deleting: " + m.name)
        bpy.data.materials.remove(m)

# setup materials: one copy of the base material "mat1" per entry in
# `colors` (defined elsewhere in this file), named "mat1.<index>".
materials = []
for i, color in enumerate(colors):
    orig = bpy.data.materials["mat1"]
    m1 = orig.copy()
    m1.name = "mat1.%d" % i
    p = PrincipledBSDFWrapper(m1, is_readonly=False)
    p.base_color = color
    # print(m1.name, m1.diffuse_color[0], color, bpy.data.materials[new_name].diffuse_color[0])
    materials.append(m1)
# materials.reverse()

import csv

path = '/home/pboone/workspace/pm-blender/bins_alldof.csv'
# path = 'C:/Users/Paul Boone/Documents/Cover Art/bins_alldof.csv'
with open(path) as csvfile:
    rows = csv.reader(csvfile)
    for i, row in enumerate(rows):
        for j, num_materials in enumerate(row):
            # Only the top-left 20x20 grid of the CSV is considered.
            if i < 20 and j < 20:
                weight = float(num_materials)
def create_PMMaterial(mat: Material, xml_mat_list, tex_dic: Dict[str, int]) -> pmx.PMMaterial:
    """Build a pmx.PMMaterial from a Blender material.

    Reads the material's Principled BSDF (read-only wrapper) for
    diffuse/alpha and base-color texture, then overlays any PMX-specific
    attributes found for this material in *xml_mat_list* (an XML element
    map keyed by material name).  New texture paths are registered in
    *tex_dic* (path -> index), which is mutated in place.
    """
    principled = PrincipledBSDFWrapper(mat, is_readonly=True)
    pmx_mat = pmx.PMMaterial()
    pmx_mat.Name = mat.name
    pmx_mat.Name_E = mat.name
    xml_deffuse = None
    xml_specular = None
    xml_ambient = None

    # Load XML Status: per-material overrides saved alongside the model.
    if pmx_mat.Name in xml_mat_list.keys():
        temp_mat = xml_mat_list[pmx_mat.Name]
        pmx_mat.Name = temp_mat.get("name", mat.name)
        pmx_mat.Name_E = temp_mat.get("name_e", pmx_mat.Name)

        pmx_mat.UseSystemToon = int(temp_mat.get("use_systemtoon", "1"))
        if pmx_mat.UseSystemToon == 1:
            pmx_mat.ToonIndex = int(temp_mat.get("toon", "0"))
        else:
            tex_path = temp_mat.get("toon", "toon01.bmp")
            # "" or "-1" means "no toon texture".
            if tex_path == "" or tex_path == "-1":
                pmx_mat.ToonIndex = -1
            else:
                # setdefault registers the path with the next free index.
                pmx_mat.ToonIndex = tex_dic.setdefault(tex_path, len(tex_dic))

        pmx_mat.Both = int(temp_mat.get("both", "0"))
        pmx_mat.GroundShadow = int(temp_mat.get("ground_shadow", "0"))
        pmx_mat.DropShadow = int(temp_mat.get("drop_shadow", "0"))
        pmx_mat.OnShadow = int(temp_mat.get("on_shadow", "0"))

        pmx_mat.OnEdge = int(temp_mat.get("on_edge", "0"))
        pmx_mat.EdgeSize = float(temp_mat.get("edge_size", "1.0"))
        edge_c = temp_mat.find("edge_color")
        pmx_mat.EdgeColor = Math.Vector((float(edge_c.get("r", "0.0")),
                                         float(edge_c.get("g", "0.0")),
                                         float(edge_c.get("b", "0.0")),
                                         float(edge_c.get("a", "1.0"))))

        # NOTE: the XML tag is spelled "deffuse" in the on-disk format;
        # keep the misspelling.
        deffuse_elm = temp_mat.find("deffuse")
        if deffuse_elm != None:
            # Missing channels fall back to the Blender material's values.
            c = (float(deffuse_elm.get("r", principled.base_color.r)),
                 float(deffuse_elm.get("g", principled.base_color.g)),
                 float(deffuse_elm.get("b", principled.base_color.b)),
                 float(deffuse_elm.get("a", principled.alpha)))
            xml_deffuse = Math.Vector(c)

        specular_elm = temp_mat.find("specular")
        if specular_elm != None:
            xml_specular = Math.Vector((float(specular_elm.get("r", "0.0")),
                                        float(specular_elm.get("g", "0.0")),
                                        float(specular_elm.get("b", "0.0"))))

        ambient_elm = temp_mat.find("ambient")
        if ambient_elm != None:
            xml_ambient = Math.Vector((float(ambient_elm.get("r", "0.0")),
                                       float(ambient_elm.get("g", "0.0")),
                                       float(ambient_elm.get("b", "0.0"))))

        pmx_mat.Power = float(temp_mat.get("power", "1"))

        sphere_elm = temp_mat.find("sphere")
        if sphere_elm != None:
            path = sphere_elm.get("path")
            pmx_mat.SphereIndex = tex_dic.setdefault(path, len(tex_dic))
            pmx_mat.SphereType = int(sphere_elm.get("type", "0"))

    # XML overrides win; otherwise derive from the Principled BSDF.
    r, g, b = principled.base_color
    a = principled.alpha
    pmx_mat.Deffuse = xml_deffuse if xml_deffuse != None else Math.Vector((r, g, b, a))
    pmx_mat.Specular = xml_specular if xml_specular != None else Math.Vector((0.0, 0.0, 0.0))
    # Default ambient is a dimmed copy of the diffuse color.
    pmx_mat.Ambient = xml_ambient if xml_ambient != None else pmx_mat.Deffuse.xyz * 0.4
    pmx_mat.FaceLength = 0

    tex_base_path = bpy.path.abspath("//")
    if tex_base_path == "":
        # NOTE(review): `filepath` is not assigned in this function before
        # this point — presumably a module-level global (the export target
        # path) set by the caller; confirm, otherwise this raises.
        tex_base_path = os.path.dirname(filepath)

    # Register the base-color texture (if any) relative to the blend/export
    # directory.
    texture = principled.base_color_texture
    if texture and texture.image:
        filepath = texture.image.filepath
        tex_abs_path = bpy.path.abspath(filepath)
        tex_path = bpy.path.relpath(tex_abs_path, tex_base_path)
        tex_path = tex_path.replace("//", "", 1)  # strip Blender's leading "//"
        pmx_mat.TextureIndex = tex_dic.setdefault(tex_path, len(tex_dic))

    return pmx_mat
def createMaterial(self):
    """Create a node-based Blender material from self.maps.

    Each entry of self.maps is a (map_name, image) pair; names ending in
    "_back" feed a backface variant.  Texture nodes are wired into a
    Principled BSDF via the __class__.input_tr translation table, with
    special handling for glossiness (inverted to roughness), diffuse,
    height (displacement), normal and normalInvertedY.  When both front
    and back textures exist for a map, they are blended per-face with a
    MixRGB driven by the geometry node's Backfacing output.
    Returns the new material.
    """
    mat = bpy.data.materials.new(name=self.name)
    mat.use_nodes = True
    nodes = mat.node_tree.nodes
    links = mat.node_tree.links
    principled_mat = PrincipledBSDFWrapper(mat, is_readonly=False)
    principled = principled_mat.node_principled_bsdf
    mat_output = principled_mat.node_out
    principled_mat.roughness = 1.0
    front = {}  # map_name -> front-face texture node
    back = {}   # map_name -> back-face texture node
    # Create all of the texture nodes
    for map_name, img in self.maps.items():
        if img is None or map_name.split("_")[0] not in __class__.input_tr:
            continue
        texture_node = nodes.new(type="ShaderNodeTexImage")
        if map_name.endswith("_back"):
            map_name = map_name[:-5]  # remove "_back"
            back[map_name] = texture_node
        else:
            front[map_name] = texture_node
        texture_node.image = getCyclesImage(img)
        # Only color-carrying maps use sRGB; data maps must stay Non-Color.
        texture_node.image.colorspace_settings.name = "sRGB" if map_name == "baseColor" or map_name == "diffuse" else "Non-Color"
        # Legacy (pre-2.8x) image nodes expose color_space directly.
        if hasattr(texture_node, "color_space"):
            texture_node.color_space = "COLOR" if map_name == "baseColor" or map_name == "diffuse" else "NONE"
        if map_name == "opacity":
            mat.blend_method = 'BLEND'
        if map_name == "height":
            mat.cycles.displacement_method = 'BOTH'
    if not front:
        # In case just the backside texture was chosen
        front = back
        back = {}

    def setup(name, node):
        """Wire *node*'s Color output to the BSDF input matching *name*."""
        if __class__.input_tr.get(name):
            links.new(node.outputs["Color"], principled.inputs[__class__.input_tr[name]])
        else:
            # Maps without a direct translation need adapter nodes.
            if name == "glossiness":
                # Roughness is the inverse of glossiness.
                invert_node = nodes.new(type="ShaderNodeInvert")
                links.new(node.outputs["Color"], invert_node.inputs["Color"])
                links.new(invert_node.outputs["Color"], principled.inputs["Roughness"])
            if name == "diffuse":
                # Don't overwrite an already-connected base color.
                if not principled.inputs["Base Color"].is_linked:
                    links.new(node.outputs["Color"], principled.inputs["Base Color"])
            elif name == "height":
                displacement_node = nodes.new(type="ShaderNodeDisplacement")
                displacement_node.inputs[2].default_value = .2  # scale
                links.new(node.outputs["Color"], displacement_node.inputs["Height"])
                links.new(displacement_node.outputs["Displacement"], mat_output.inputs[2])
            elif name == "normal":
                normal_node = nodes.new(type="ShaderNodeNormalMap")
                links.new(node.outputs["Color"], normal_node.inputs["Color"])
                links.new(normal_node.outputs["Normal"], principled.inputs["Normal"])
            elif name == "normalInvertedY":
                # DirectX-style normal map: flip G via G' = -G + 1.
                normal_node = nodes.new(type="ShaderNodeNormalMap")
                separate_node = nodes.new(type="ShaderNodeSeparateRGB")
                combine_node = nodes.new(type="ShaderNodeCombineRGB")
                math_node = nodes.new(type="ShaderNodeMath")
                math_node.operation = "MULTIPLY_ADD"
                math_node.inputs[1].default_value = -1
                math_node.inputs[2].default_value = 1
                links.new(node.outputs["Color"], separate_node.inputs["Image"])
                links.new(separate_node.outputs["R"], combine_node.inputs[0])
                links.new(separate_node.outputs["G"], math_node.inputs[0])
                links.new(math_node.outputs["Value"], combine_node.inputs[1])
                links.new(separate_node.outputs["B"], combine_node.inputs[2])
                links.new(combine_node.outputs["Image"], normal_node.inputs["Color"])
                links.new(normal_node.outputs["Normal"], principled.inputs["Normal"])

    if not back:
        # If there is no item in the back dictionary
        [setup(name, node) for name, node in front.items()]
    else:
        geometry_node = nodes.new("ShaderNodeNewGeometry")

        def pre_setup(name, front, back, mix):
            # Blend front/back textures by the Backfacing factor, then
            # feed the mix into the normal setup path.
            links.new(geometry_node.outputs["Backfacing"], mix.inputs[0])
            links.new(front.outputs["Color"], mix.inputs[1])
            links.new(back.outputs["Color"], mix.inputs[2])
            setup(name, mix)

        for name, node in front.items():
            # NOTE(review): front maps without a matching back map are not
            # connected at all in this branch — presumably intentional,
            # but worth confirming.
            if back.get(name):
                pre_setup(name, node, back[name], nodes.new(type="ShaderNodeMixRGB"))
    autoAlignNodes(mat_output)
    return mat