def load(self, context, filepath=""):
    ob = bpy.context.object
    if ob.type != 'ARMATURE':
        return "An armature must be selected!"

    path = os.path.dirname(filepath)
    path = os.path.normpath(path)

    # Ensure the armature has animation data to attach actions to
    if ob.animation_data is None:
        ob.animation_data_create()

    with ProgressReport(context.window_manager) as progress:
        # Begin the progress counter with 1 step for each file
        progress.enter_substeps(len(self.files))

        # Force all bones to use quaternion rotation
        # (Must be included or bone.rotation_quaternion won't update
        #  properly when setting the matrix directly)
        for bone in ob.pose.bones:
            bone.rotation_mode = 'QUATERNION'

        for f in self.files:
            progress.enter_substeps(1, f.name)
            try:
                anim_path = os.path.normpath(os.path.join(path, f.name))
                load_seanim(self, context, progress, anim_path)
            except Exception as e:
                progress.leave_substeps("ERROR: " + repr(e))
            else:
                progress.leave_substeps()

        # Print when all files have been imported
        progress.leave_substeps("Finished!")
def save(
    context,
    filepath,
    *,
    use_mesh_modifiers=True,
    use_selection=True,
    use_vertex_colors=True,
    global_matrix=None,
):
    with ProgressReport(context.window_manager) as progress:
        # Exit edit mode so current object states are exported properly
        if bpy.ops.object.mode_set.poll():
            bpy.ops.object.mode_set(mode='OBJECT')

        depsgraph = context.evaluated_depsgraph_get()
        scene = context.scene

        if use_selection:
            objects = context.selected_objects
        else:
            objects = scene.objects

        progress.enter_substeps(1)
        write_file(
            filepath, objects, depsgraph, scene,
            use_mesh_modifiers=use_mesh_modifiers,
            use_vertex_colors=use_vertex_colors,
            global_matrix=global_matrix,
            progress=progress,
        )
        progress.leave_substeps()

    return {'FINISHED'}
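# A minimal usage sketch (hypothetical operator, not part of this module):
# how `save` above would typically be wired into an export operator. The
# class name, bl_idname and properties here are illustrative only.
class EXPORT_OT_model_sketch(bpy.types.Operator):
    """Hypothetical operator showing how save() is invoked"""
    bl_idname = "export_scene.model_sketch"
    bl_label = "Export Model (sketch)"

    filepath: bpy.props.StringProperty(subtype='FILE_PATH')
    use_selection: bpy.props.BoolProperty(default=True)

    def execute(self, context):
        # Forward the operator settings to the module-level save()
        return save(context, self.filepath, use_selection=self.use_selection)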
def export(context, keywords):
    filepath = keywords['filepath']
    with ProgressReport(context.window_manager) as progress:
        with ProgressReportSubstep(progress, 2, "Export path: %r" % filepath,
                                   "Export Finished") as subprogress1:
            with open(filepath, "w", encoding="utf8", newline="\n") as f:
                fw = f.write
                objects = context.scene.objects
                fw("<model info=\"Blender exporter\">")
                for i, ob_main in enumerate(objects):
                    if ob_main.type == 'MESH':
                        mesh = ob_main.to_mesh()
                        mesh_triangulate(mesh)
                        fw("<mesh name=\"" + ob_main.name + "\">")
                        fw("<v>")
                        for v in mesh.vertices[:]:
                            fw('%.6f %.6f %.6f ' % v.co[:])
                        fw("</v>")
                        fw("<i>")
                        for poly in mesh.polygons:
                            fw(str(poly.vertices[0]) + " " +
                               str(poly.vertices[1]) + " " +
                               str(poly.vertices[2]) + " ")
                        fw("</i>")
                        fw("</mesh>")
                        # Free the temporary mesh created by to_mesh()
                        ob_main.to_mesh_clear()
                    else:
                        print(ob_main.type)
                fw("</model>")
    return {'FINISHED'}
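# `mesh_triangulate` is used by several exporters in this section but not
# defined here. A minimal sketch of the usual bmesh-based helper it is
# assumed to be (mirroring the stock io_scene_obj implementation):
import bmesh

def mesh_triangulate(me):
    # Triangulate `me` in place via a temporary BMesh
    bm = bmesh.new()
    bm.from_mesh(me)
    bmesh.ops.triangulate(bm, faces=bm.faces[:])
    bm.to_mesh(me)
    bm.free()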
def load(context, filepath):
    """
    Called by the user interface or another script.
    load(path) - should give acceptable results.
    This function passes the file and sends the data off
        to be split into objects and then converted into mesh objects
    """
    with ProgressReport(context.window_manager) as progress:
        progress.enter_substeps(1, "Importing MT5 %r..." % filepath)

        progress.enter_substeps(2, "Reading MT5...")
        mt5 = MT5()
        mt5.read(filepath)
        progress.leave_substeps("Done.")

        progress.enter_substeps(2, "Creating animation rig...")

        # create armature
        armature = bpy.data.armatures.new('ShenmueRig')
        obj = bpy.data.objects.new('ShenmueRig', armature)
        bpy.context.scene.collection.objects.link(obj)
        bpy.context.view_layer.objects.active = obj
        bpy.context.view_layer.update()
        bpy.ops.object.mode_set(mode='EDIT')

        # create root
        # (note: a zero-length bone like this is discarded by Blender when
        #  leaving edit mode; give the tail a small offset if the root bone
        #  must survive)
        root = armature.edit_bones.new('Root')
        root.head[:] = 0.0, 0.0, 0.0
        root.tail[:] = 0.0, 0.0, 0.0
        root.roll = 0.0
        root.use_connect = True

        # create bones from nodes
        nodes = mt5.root_node.get_all_nodes()[1:]
        node_bone_dict = {mt5.root_node: root}
        for node in nodes:
            bone = armature.edit_bones.new(str(node.get_bone_id()))
            node_bone_dict[node] = bone
            parent_pos = node.parent.get_global_position()
            pos = node.get_global_position()
            bone.head[:] = parent_pos.x, parent_pos.y, parent_pos.z
            bone.tail[:] = pos.x, pos.y, pos.z
            bone.roll = 0.0
            bone.use_connect = True
            bone.parent = node_bone_dict[node.parent]

        # create IK bones and constraints

        progress.leave_substeps("Done.")
        progress.leave_substeps("Finished importing: %r" % filepath)

    return {'FINISHED'}
def load_skm(filepath, context, IMAGE_SEARCH=True):
    global SCN, ToEE_data_dir, progress

    time1 = time.perf_counter()  # for timing the import duration
    with ProgressReport(context.window_manager) as progress:
        print("importing SKA: %r..." % (filepath), end="")
        skm_filepath = filepath

        if bpy.ops.object.select_all.poll():
            bpy.ops.object.select_all(action='DESELECT')

        ToEE_data_dir = get_ToEE_data_dir(filepath)
        print("Data dir: %s" % ToEE_data_dir)

        # Read data into an intermediate SkmFile object
        skm_data = SkmFile()  # SKM file
        progress.enter_substeps(1, "Reading SKM File %r..." % skm_filepath)
        with open(skm_filepath, 'rb') as file:
            print('Opened file: ', skm_filepath)
            skm_data.read(file)

        # fixme, make unglobal, clear in case
        object_dictionary.clear()
        object_matrix.clear()

        scn = context.scene
        SCN = scn

        importedObjects = []  # Fill this list with objects
        progress.enter_substeps(3, "Converting SKM to Blender model...")
        skm_to_blender(skm_data, importedObjects, IMAGE_SEARCH)
        progress.leave_substeps("Finished SKM conversion.")
        progress.step()

        # In the Blender 2.80 API new objects must be linked not to the scene,
        # but to the scene collections:
        view_layer = context.view_layer
        view_layer.update()

        for ob in importedObjects:
            ob.select_set(True)

        # fixme, make unglobal
        object_dictionary.clear()
        object_matrix.clear()

        print(" done in %.4f sec." % (time.perf_counter() - time1))

    return
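# `get_ToEE_data_dir` is referenced above but not defined in this section.
# A hypothetical sketch of what it is assumed to do - walk up from the model
# path until the ToEE 'data' directory is found; the real helper may differ:
import os

def get_ToEE_data_dir(filepath):
    path = os.path.normpath(os.path.dirname(filepath))
    while path and os.path.basename(path).lower() != 'data':
        parent = os.path.dirname(path)
        if parent == path:  # reached the filesystem root without a match
            break
        path = parent
    return path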
def load(operator, context, filepath=""):
    with ProgressReport(context.window_manager) as progress:
        try:
            progress.enter_substeps(3, "Importing '%s' ..." % filepath)
            mainLoader = CLOLoader(filepath)

            progress.step("Parsing file ...", 1)
            mainLoader.parse(operator)

            progress.step("Done, building ...", 2)
            mainLoader.build(operator)

            progress.leave_substeps("Done, finished importing: '%s'" % filepath)
        except RuntimeError:
            return {'CANCELLED'}  # Blender expects the double-L spelling

    return {'FINISHED'}
def _write_skm(context, filepath, EXPORT_ANIMATION=False, WRITE_MDF=False,
               global_matrix=None):
    global progress
    with ProgressReport(context.window_manager) as progress:
        # Exit edit mode before exporting, so current object states are exported properly.
        if bpy.ops.object.mode_set.poll():
            bpy.ops.object.mode_set(mode='OBJECT')

        mesh = bpy.context.scene.objects['ToEE Model']
        rig = bpy.context.scene.objects['ToEE Rig']
        skm_data = blender_to_skm(mesh, rig, WRITE_MDF)

        skm_filepath = os.path.splitext(filepath)[0] + '.skm'
        with open(skm_filepath, 'wb') as skm_file:
            skm_data.write(skm_file)
    return
def execute(self):
    with ProgressReport(self.context.window_manager) as progress:
        progress.enter_substeps(
            1, "Importing OBJ {}...".format(self.filepath))

        data = self.load_lua_data(self.filepath)
        for lod_index in data["lods"]:
            self.current_collection = bpy.data.collections.new(
                "lod{}".format(lod_index - 1))
            bpy.context.scene.collection.children.link(
                self.current_collection)
            lod_data = data["lods"][lod_index]
            self.parse_node(lod_data["node"], None)

        progress.leave_substeps("Finished importing: {}".format(
            self.filepath))
    return {'FINISHED'}
def load(operator, context, filepath=""):
    with ProgressReport(context.window_manager) as progress:
        progress.enter_substeps(3, "Importing '%s' ..." % filepath)
        mainLoader = GR2Loader(filepath)

        progress.step("Parsing file ...", 1)
        mainLoader.parse(operator)

        progress.step("Done, building ...", 2)
        if bpy.ops.object.mode_set.poll():
            bpy.ops.object.mode_set(mode='OBJECT', toggle=False)
        mainLoader.build(operator.import_collision)

        progress.leave_substeps("Done, finished importing: '%s'" % filepath)

    return {'FINISHED'}
def save(self, context):
    ob = bpy.context.object
    if ob.type != 'ARMATURE':
        return "An armature must be selected!"

    prefix = self.prefix  # os.path.basename(self.filepath)
    suffix = self.suffix

    path = os.path.dirname(self.filepath)
    path = os.path.normpath(path)

    # Gets automatically updated per-action if self.use_actions is true,
    # otherwise it stays the same
    filepath = self.filepath

    with ProgressReport(context.window_manager) as progress:
        if self.use_actions:
            actions = bpy.data.actions
        else:
            actions = [bpy.context.object.animation_data.action]

        progress.enter_substeps(len(actions))

        for action in actions:
            if self.use_actions:
                filename = prefix + action.name + suffix + ".seanim"
                filepath = os.path.normpath(os.path.join(path, filename))

            progress.enter_substeps(1, action.name)
            try:
                export_action(self, context, progress, action, filepath)
            except Exception as e:
                progress.leave_substeps("ERROR: " + repr(e))
            else:
                progress.leave_substeps()

        progress.leave_substeps("Finished!")
def _write(
    operator,
    context,
    filepath,
    EXPORT_GLOBAL_MATRIX,
    EXPORT_HAS_CLO,
):
    with ProgressReport(context.window_manager) as progress:
        base_name, ext = os.path.splitext(filepath)
        # Base name, scene name, extension
        context_name = [base_name, '', ext]

        depsgraph = context.evaluated_depsgraph_get()
        scene = context.scene

        # Exit edit mode before exporting, so current object states are exported properly.
        if bpy.ops.object.mode_set.poll():
            bpy.ops.object.mode_set(mode='OBJECT')

        objects = context.selected_objects

        full_path = ''.join(context_name)

        # EXPORT THE FILE.
        progress.enter_substeps(1)
        write_file(
            operator,
            full_path,
            objects,
            depsgraph,
            scene,
            EXPORT_GLOBAL_MATRIX,
            EXPORT_HAS_CLO,
            progress,
        )
        progress.leave_substeps()
def _write_ska(context, filepath, EXPORT_ANIMATION=False, WRITE_MDF=False,
               global_matrix=None):
    """Save the Blender scene animation to a ToEE format SKA file.
    This contains bones and keyframe animations.
    """
    global progress
    from bpy_extras.io_utils import create_derived_objects, free_derived_objects

    with ProgressReport(context.window_manager) as progress:
        base_name, ext = os.path.splitext(filepath)
        # Base name, scene name, frame number, extension
        context_name = [base_name, '', '', ext]

        depsgraph = context.evaluated_depsgraph_get()
        scene = context.scene

        # Exit edit mode before exporting, so current object states are exported properly.
        if bpy.ops.object.mode_set.poll():
            bpy.ops.object.mode_set(mode='OBJECT')

        orig_frame = scene.frame_current

        # Export an animation?
        if EXPORT_ANIMATION:
            # Up to and including the end frame.
            scene_frames = range(scene.frame_start, scene.frame_end + 1)
        else:
            scene_frames = [orig_frame]  # Don't export an animation.

        # Write the companion SKM file first
        mesh = bpy.context.scene.objects['ToEE Model']
        rig = bpy.context.scene.objects['ToEE Rig']
        skm_data = blender_to_skm(mesh, rig, WRITE_MDF)
        skm_filepath = os.path.splitext(filepath)[0] + '.skm'
        with open(skm_filepath, 'wb') as skm_file:
            skm_data.write(skm_file)

        print("\n*** Exporting SKA ***")
        # Time the export
        time1 = time.perf_counter()
        # Blender.Window.WaitCursor(1)

        if global_matrix is None:
            global_matrix = mathutils.Matrix()

        scene = context.scene
        objects = (ob for ob in scene.objects if ob.visible_get())

        # for ob in objects:
        #     # get derived objects
        #     print("object: " + str(ob))
        #     free, derived = create_derived_objects(scene, ob)
        #     if derived is None:
        #         continue
        #     print("derived obj: " + str(derived) + "\n")
        #     for ob_derived, mat in derived:
        #         if ob.type not in {'MESH', 'CURVE', 'SURFACE', 'FONT', 'META'}:
        #             continue
        #         try:
        #             data = ob_derived.to_mesh(scene, True, 'PREVIEW')
        #         except:
        #             data = None
        #         if data:
        #             matrix = global_matrix @ mat
        #             data.transform(matrix)
        #             # todo
        #     if free:
        #         free_derived_objects(ob)

        # # Open the file for writing:
        # file = open(filepath, 'wb')
        # # Recursively write the chunks to file:
        # # primary.write(file)
        # # Close the file:
        # file.close()

        # Debugging only: report the exporting time:
        # Blender.Window.WaitCursor(0)
        print("SKA export time: %.2f" % (time.perf_counter() - time1))
    return
def write_file(filepath, objects, depsgraph, scene,
               EXPORT_TRI=False,
               EXPORT_EDGES=False,
               EXPORT_SMOOTH_GROUPS=False,
               EXPORT_SMOOTH_GROUPS_BITFLAGS=False,
               EXPORT_NORMALS=False,
               EXPORT_UV=True,
               EXPORT_MTL=True,
               EXPORT_APPLY_MODIFIERS=True,
               EXPORT_APPLY_MODIFIERS_RENDER=False,
               EXPORT_BLEN_OBS=True,
               EXPORT_GROUP_BY_OB=False,
               EXPORT_GROUP_BY_MAT=False,
               EXPORT_KEEP_VERT_ORDER=False,
               EXPORT_POLYGROUPS=False,
               EXPORT_CURVE_AS_NURBS=True,
               EXPORT_GLOBAL_MATRIX=None,
               EXPORT_PATH_MODE='AUTO',
               progress=ProgressReport(),
               ):
    """
    Basic write function. The context and options must be already set.
    This can be accessed externally, e.g.:
    write('c:\\test\\foobar.obj', Blender.Object.GetSelected())  # Using default options.
    """
    if EXPORT_GLOBAL_MATRIX is None:
        EXPORT_GLOBAL_MATRIX = Matrix()

    def veckey3d(v):
        return round(v.x, 4), round(v.y, 4), round(v.z, 4)

    def veckey2d(v):
        return round(v[0], 4), round(v[1], 4)

    def findVertexGroupName(face, vWeightMap):
        """
        Searches the vertexDict to see which group is assigned to a given face.
        We use a frequency system in order to sort out the name because a given
        vertex can belong to two or more groups at the same time. To find the
        right name for the face we list all the possible vertex group names
        with their frequency and then sort by frequency in descending order.
        The top element is the name shared by the highest number of vertices in
        the face's group.
        """
        weightDict = {}
        for vert_index in face.vertices:
            vWeights = vWeightMap[vert_index]
            for vGroupName, weight in vWeights:
                weightDict[vGroupName] = weightDict.get(vGroupName, 0.0) + weight

        if weightDict:
            return max((weight, vGroupName) for vGroupName, weight in weightDict.items())[1]
        else:
            return '(null)'

    with ProgressReportSubstep(progress, 2, "OBJ Export path: %r" % filepath,
                               "OBJ Export Finished") as subprogress1:
        with open(filepath, "w", encoding="utf8", newline="\n") as f:
            fw = f.write

            # Write Header
            fw('# Blender v%s OBJ File: %r\n' % (bpy.app.version_string, os.path.basename(bpy.data.filepath)))
            fw('# www.blender.org\n')

            # Tell the obj file what material file to use.
            if EXPORT_MTL:
                mtlfilepath = os.path.splitext(filepath)[0] + ".mtl"
                # filepath can contain non utf8 chars, use repr
                fw('mtllib %s\n' % repr(os.path.basename(mtlfilepath))[1:-1])

            # Initialize totals, these are updated each object
            totverts = totuvco = totno = 1

            face_vert_index = 1

            # A Dict of Materials
            # (material.name, image.name):matname_imagename # matname_imagename has gaps removed.
            mtl_dict = {}
            # Used to reduce the usage of matname_texname materials, which can become annoying in case of
            # repeated exports/imports, yet keeping unique mat names per keys!
            # mtl_name: (material.name, image.name)
            mtl_rev_dict = {}

            copy_set = set()

            # Get all meshes
            subprogress1.enter_substeps(len(objects))
            for i, ob_main in enumerate(objects):
                # ignore dupli children
                if ob_main.parent and ob_main.parent.instance_type in {'VERTS', 'FACES'}:
                    subprogress1.step("Ignoring %s, dupli child..." % ob_main.name)
                    continue

                obs = [(ob_main, ob_main.matrix_world)]
                if ob_main.is_instancer:
                    obs += [(dup.instance_object.original, dup.matrix_world.copy())
                            for dup in depsgraph.object_instances
                            if dup.parent and dup.parent.original == ob_main]
                    # ~ print(ob_main.name, 'has', len(obs) - 1, 'dupli children')

                subprogress1.enter_substeps(len(obs))
                for ob, ob_mat in obs:
                    with ProgressReportSubstep(subprogress1, 6) as subprogress2:
                        uv_unique_count = no_unique_count = 0

                        # Nurbs curve support
                        if EXPORT_CURVE_AS_NURBS and test_nurbs_compat(ob):
                            ob_mat = EXPORT_GLOBAL_MATRIX @ ob_mat
                            totverts += write_nurb(fw, ob, ob_mat)
                            continue
                        # END NURBS

                        ob_for_convert = ob.evaluated_get(depsgraph) if EXPORT_APPLY_MODIFIERS else ob.original

                        try:
                            me = ob_for_convert.to_mesh()
                        except RuntimeError:
                            me = None

                        if me is None:
                            continue

                        # _must_ do this before applying transformation, else tessellation may differ
                        if EXPORT_TRI:
                            # _must_ do this first since it re-allocs arrays
                            mesh_triangulate(me)

                        me.transform(EXPORT_GLOBAL_MATRIX @ ob_mat)
                        # If negative scaling, we have to invert the normals...
                        if ob_mat.determinant() < 0.0:
                            me.flip_normals()

                        if EXPORT_UV:
                            faceuv = len(me.uv_layers) > 0
                            if faceuv:
                                uv_layer = me.uv_layers.active.data[:]
                        else:
                            faceuv = False

                        me_verts = me.vertices[:]

                        # Make our own list so it can be sorted to reduce context switching
                        face_index_pairs = [(face, index) for index, face in enumerate(me.polygons)]

                        if EXPORT_EDGES:
                            edges = me.edges
                        else:
                            edges = []

                        if not (len(face_index_pairs) + len(edges) + len(me.vertices)):  # Make sure there is something to write
                            # clean up
                            ob_for_convert.to_mesh_clear()
                            continue  # dont bother with this mesh.

                        if EXPORT_NORMALS and face_index_pairs:
                            me.calc_normals_split()
                            # No need to call me.free_normals_split later, as this mesh is deleted anyway!

                        loops = me.loops

                        if (EXPORT_SMOOTH_GROUPS or EXPORT_SMOOTH_GROUPS_BITFLAGS) and face_index_pairs:
                            smooth_groups, smooth_groups_tot = me.calc_smooth_groups(use_bitflags=EXPORT_SMOOTH_GROUPS_BITFLAGS)
                            if smooth_groups_tot <= 1:
                                smooth_groups, smooth_groups_tot = (), 0
                        else:
                            smooth_groups, smooth_groups_tot = (), 0

                        materials = me.materials[:]
                        material_names = [m.name if m else None for m in materials]

                        # avoid bad index errors
                        if not materials:
                            materials = [None]
                            material_names = [name_compat(None)]

                        # Sort by Material, then images
                        # so we dont over context switch in the obj file.
                        if EXPORT_KEEP_VERT_ORDER:
                            pass
                        else:
                            if len(materials) > 1:
                                if smooth_groups:
                                    sort_func = lambda a: (a[0].material_index,
                                                           smooth_groups[a[1]] if a[0].use_smooth else False)
                                else:
                                    sort_func = lambda a: (a[0].material_index,
                                                           a[0].use_smooth)
                            else:
                                # no materials
                                if smooth_groups:
                                    sort_func = lambda a: smooth_groups[a[1] if a[0].use_smooth else False]
                                else:
                                    sort_func = lambda a: a[0].use_smooth

                            face_index_pairs.sort(key=sort_func)

                            del sort_func

                        # Set the default mat to no material and no image.
                        contextMat = 0, 0  # Can never be this, so we will label a new material the first chance we get.
                        contextSmooth = None  # Will either be true or false, set bad to force initialization switch.

                        if EXPORT_BLEN_OBS or EXPORT_GROUP_BY_OB:
                            name1 = ob.name
                            name2 = ob.data.name
                            if name1 == name2:
                                obnamestring = name_compat(name1)
                            else:
                                obnamestring = '%s_%s' % (name_compat(name1), name_compat(name2))

                            if EXPORT_BLEN_OBS:
                                fw('o %s\n' % obnamestring)  # Write Object name
                            else:  # if EXPORT_GROUP_BY_OB:
                                fw('g %s\n' % obnamestring)

                        subprogress2.step()

                        # Vert
                        for v in me_verts:
                            fw('v %.6f %.6f %.6f\n' % v.co[:])

                        subprogress2.step()

                        # UV
                        if faceuv:
                            # in case removing some of these dont get defined.
                            uv = f_index = uv_index = uv_key = uv_val = uv_ls = None

                            uv_face_mapping = [None] * len(face_index_pairs)

                            uv_dict = {}
                            uv_get = uv_dict.get
                            for f, f_index in face_index_pairs:
                                uv_ls = uv_face_mapping[f_index] = []
                                for uv_index, l_index in enumerate(f.loop_indices):
                                    uv = uv_layer[l_index].uv
                                    # include the vertex index in the key so we don't share UV's between vertices,
                                    # allowed by the OBJ spec but can cause issues for other importers, see: T47010.

                                    # this works too, shared UV's for all verts
                                    #~ uv_key = veckey2d(uv)
                                    uv_key = loops[l_index].vertex_index, veckey2d(uv)

                                    uv_val = uv_get(uv_key)
                                    if uv_val is None:
                                        uv_val = uv_dict[uv_key] = uv_unique_count
                                        fw('vt %.6f %.6f\n' % uv[:])
                                        uv_unique_count += 1
                                    uv_ls.append(uv_val)

                            del uv_dict, uv, f_index, uv_index, uv_ls, uv_get, uv_key, uv_val
                            # Only need uv_unique_count and uv_face_mapping

                        subprogress2.step()

                        # NORMAL, Smooth/Non smoothed.
                        if EXPORT_NORMALS:
                            no_key = no_val = None
                            normals_to_idx = {}
                            no_get = normals_to_idx.get
                            loops_to_normals = [0] * len(loops)
                            for f, f_index in face_index_pairs:
                                for l_idx in f.loop_indices:
                                    no_key = veckey3d(loops[l_idx].normal)
                                    no_val = no_get(no_key)
                                    if no_val is None:
                                        no_val = normals_to_idx[no_key] = no_unique_count
                                        fw('vn %.4f %.4f %.4f\n' % no_key)
                                        no_unique_count += 1
                                    loops_to_normals[l_idx] = no_val
                            del normals_to_idx, no_get, no_key, no_val
                        else:
                            loops_to_normals = []

                        subprogress2.step()

                        # XXX
                        if EXPORT_POLYGROUPS:
                            # Retrieve the list of vertex groups
                            vertGroupNames = ob.vertex_groups.keys()
                            if vertGroupNames:
                                currentVGroup = ''
                                # Create a dictionary keyed by face id and listing, for each vertex, the vertex groups it belongs to
                                vgroupsMap = [[] for _i in range(len(me_verts))]
                                for v_idx, v_ls in enumerate(vgroupsMap):
                                    v_ls[:] = [(vertGroupNames[g.group], g.weight) for g in me_verts[v_idx].groups]

                        for f, f_index in face_index_pairs:
                            f_smooth = f.use_smooth
                            if f_smooth and smooth_groups:
                                f_smooth = smooth_groups[f_index]
                            f_mat = min(f.material_index, len(materials) - 1)

                            # MAKE KEY
                            key = material_names[f_mat], None  # No image, use None instead.

                            # Write the vertex group
                            if EXPORT_POLYGROUPS:
                                if vertGroupNames:
                                    # find what vertex group the face belongs to
                                    vgroup_of_face = findVertexGroupName(f, vgroupsMap)
                                    if vgroup_of_face != currentVGroup:
                                        currentVGroup = vgroup_of_face
                                        fw('g %s\n' % vgroup_of_face)

                            # CHECK FOR CONTEXT SWITCH
                            if key == contextMat:
                                pass  # Context already switched, dont do anything
                            else:
                                if key[0] is None and key[1] is None:
                                    # Write a null material, since we know the context has changed.
                                    if EXPORT_GROUP_BY_MAT:
                                        # can be mat_image or (null)
                                        fw("g %s_%s\n" % (name_compat(ob.name), name_compat(ob.data.name)))
                                    if EXPORT_MTL:
                                        fw("usemtl (null)\n")  # mat, image

                                else:
                                    mat_data = mtl_dict.get(key)
                                    if not mat_data:
                                        # First add to global dict so we can export to mtl
                                        # Then write mtl

                                        # Make a new names from the mat and image name,
                                        # converting any spaces to underscores with name_compat.

                                        # If none image dont bother adding it to the name
                                        # Try to avoid as much as possible adding texname (or other things)
                                        # to the mtl name (see [#32102])...
                                        mtl_name = "%s" % name_compat(key[0])
                                        if mtl_rev_dict.get(mtl_name, None) not in {key, None}:
                                            if key[1] is None:
                                                tmp_ext = "_NONE"
                                            else:
                                                tmp_ext = "_%s" % name_compat(key[1])
                                            i = 0
                                            while mtl_rev_dict.get(mtl_name + tmp_ext, None) not in {key, None}:
                                                i += 1
                                                tmp_ext = "_%3d" % i
                                            mtl_name += tmp_ext
                                        mat_data = mtl_dict[key] = mtl_name, materials[f_mat]
                                        mtl_rev_dict[mtl_name] = key

                                    if EXPORT_GROUP_BY_MAT:
                                        # can be mat_image or (null)
                                        fw("g %s_%s_%s\n" % (name_compat(ob.name), name_compat(ob.data.name), mat_data[0]))
                                    if EXPORT_MTL:
                                        fw("usemtl %s\n" % mat_data[0])  # can be mat_image or (null)

                            contextMat = key
                            if f_smooth != contextSmooth:
                                if f_smooth:  # on now off
                                    if smooth_groups:
                                        f_smooth = smooth_groups[f_index]
                                        fw('s %d\n' % f_smooth)
                                    else:
                                        fw('s 1\n')
                                else:  # was off now on
                                    fw('s off\n')
                                contextSmooth = f_smooth

                            f_v = [(vi, me_verts[v_idx], l_idx)
                                   for vi, (v_idx, l_idx) in enumerate(zip(f.vertices, f.loop_indices))]

                            fw('f')
                            if faceuv:
                                if EXPORT_NORMALS:
                                    for vi, v, li in f_v:
                                        fw(" %d/%d/%d" % (totverts + v.index,
                                                          totuvco + uv_face_mapping[f_index][vi],
                                                          totno + loops_to_normals[li],
                                                          ))  # vert, uv, normal
                                else:  # No Normals
                                    for vi, v, li in f_v:
                                        fw(" %d/%d" % (totverts + v.index,
                                                       totuvco + uv_face_mapping[f_index][vi],
                                                       ))  # vert, uv

                                face_vert_index += len(f_v)

                            else:  # No UV's
                                if EXPORT_NORMALS:
                                    for vi, v, li in f_v:
                                        fw(" %d//%d" % (totverts + v.index, totno + loops_to_normals[li]))
                                else:  # No Normals
                                    for vi, v, li in f_v:
                                        fw(" %d" % (totverts + v.index))

                            fw('\n')

                        subprogress2.step()

                        # Write edges.
                        if EXPORT_EDGES:
                            for ed in edges:
                                if ed.is_loose:
                                    fw('l %d %d\n' % (totverts + ed.vertices[0], totverts + ed.vertices[1]))

                        # Make the indices global rather than per mesh
                        totverts += len(me_verts)
                        totuvco += uv_unique_count
                        totno += no_unique_count

                        # clean up
                        ob_for_convert.to_mesh_clear()

                subprogress1.leave_substeps("Finished writing geometry of '%s'." % ob_main.name)
            subprogress1.leave_substeps()

        subprogress1.step("Finished exporting geometry, now exporting materials")

        # Now we have all our materials, save them
        if EXPORT_MTL:
            write_mtl(scene, mtlfilepath, EXPORT_PATH_MODE, copy_set, mtl_dict)

        # copy all collected files.
        io_utils.path_reference_copy(copy_set)
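# `name_compat` is used throughout these exporters but not defined in this
# section. A minimal sketch of the helper as it exists in stock io_scene_obj
# (OBJ-style names cannot contain spaces):
def name_compat(name):
    if name is None:
        return 'None'
    else:
        return name.replace(' ', '_')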
def write_file(
    filepath,
    objects,
    depsgraph,
    scene,
    use_mesh_modifiers=True,
    use_vertex_colors=True,
    global_matrix=None,
    progress=ProgressReport(),
):
    if global_matrix is None:
        global_matrix = Matrix()

    with ProgressReportSubstep(progress, 1, "ERM Export path: %r" % filepath,
                               "ERM Export Finished") as obj_progress:
        with open(filepath, "wb") as fhnd:
            fw = fhnd.write

            # Initialize totals
            totverts = totmeshes = 0

            objs = [obj for obj in objects if obj.type == 'MESH']

            # write header
            fw(get_binary_u64(len(objs)))

            # Get all meshes
            obj_progress.enter_substeps(len(objs))
            for obj in objs:
                with ProgressReportSubstep(obj_progress, 5) as mesh_progress:
                    object_pos = fhnd.tell()

                    # Write placeholder Object Header
                    ## vertex count
                    fw(get_binary_u64(0))
                    ## index count
                    fw(get_binary_u64(0))
                    ## flags
                    flags = 0
                    if use_vertex_colors:
                        flags |= 1 << 0
                    fw(get_binary_u64(flags))

                    try:
                        mesh = (obj.evaluated_get(depsgraph)
                                if use_mesh_modifiers else obj.original).to_mesh()
                    except RuntimeError:
                        continue

                    mesh_triangulate(mesh)

                    obj_matrix = obj.matrix_world
                    mesh.transform(global_matrix @ obj_matrix)
                    if obj_matrix.determinant() < 0.0:
                        mesh.flip_normals()

                    mesh_verts = mesh.vertices[:]
                    mesh_colors = mesh.vertex_colors[:]
                    mesh_uvs = mesh.uv_layers.active.data

                    if len(mesh.polygons) + len(mesh_verts) <= 0:
                        bpy.data.meshes.remove(mesh)
                        continue

                    # cleanup done
                    mesh_progress.step()

                    # Write verts
                    for v in mesh_verts:
                        fw(get_binary_f64(v.co[0]))
                        fw(get_binary_f64(v.co[1]))
                        fw(get_binary_f64(v.co[2]))

                    # vertices done
                    mesh_progress.step()

                    # Write vertex colors
                    for col_layer in mesh_colors:
                        for col in col_layer.data:
                            fw(get_binary_f64(col.color[0]))
                            fw(get_binary_f64(col.color[1]))
                            fw(get_binary_f64(col.color[2]))
                            fw(get_binary_f64(col.color[3]))

                    # vertex colors done
                    mesh_progress.step()

                    # Write vertex indices
                    obj_indices = 0
                    for face in mesh.polygons:
                        for loop_index in face.loop_indices:
                            fw(get_binary_u64(totverts + mesh.loops[loop_index].vertex_index))
                            obj_indices += 1

                    # Write UVs
                    for face in mesh.polygons:
                        for loop_index in face.loop_indices:
                            uv = mesh_uvs[loop_index].uv
                            fw(get_binary_f64(uv[0]))
                            fw(get_binary_f64(uv[1]))

                    # indices done
                    mesh_progress.step()

                    # Make the indices global rather than per mesh
                    totverts += len(mesh_verts)

                    # Go back and fill in the real object header values
                    end_pos = fhnd.tell()
                    fhnd.seek(object_pos)
                    fw(get_binary_u64(len(mesh_verts)))
                    fw(get_binary_u64(obj_indices))
                    fhnd.seek(end_pos)

            obj_progress.leave_substeps()
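# `get_binary_u64` / `get_binary_f64` are not defined in this section. A
# minimal sketch of what they are assumed to do; little-endian byte order is
# an assumption here and should be checked against the actual ERM spec:
import struct

def get_binary_u64(value):
    # 8-byte unsigned integer
    return struct.pack('<Q', value)

def get_binary_f64(value):
    # 8-byte IEEE 754 double
    return struct.pack('<d', value)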
def load(context,
         filepath,
         *,
         global_clight_size=0.0,
         use_smooth_groups=True,
         use_edges=True,
         use_split_objects=True,
         use_split_groups=False,
         use_image_search=True,
         use_groups_as_vgroups=False,
         relpath=None,
         global_matrix=None
         ):
    """
    Called by the user interface or another script.
    load(path) - should give acceptable results.
    This function passes the file and sends the data off
        to be split into objects and then converted into mesh objects
    """
    def unique_name(existing_names, name_orig):
        i = 0
        name = name_orig
        while name in existing_names:
            name = b"%s.%03d" % (name_orig, i)
            i += 1
        existing_names.add(name)
        return name

    def handle_vec(line_start, context_multi_line, line_split, tag, data, vec, vec_len):
        ret_context_multi_line = tag if strip_slash(line_split) else b''
        if line_start == tag:
            vec[:] = [float_func(v) for v in line_split[1:]]
        elif context_multi_line == tag:
            vec += [float_func(v) for v in line_split]
        if not ret_context_multi_line:
            data.append(tuple(vec[:vec_len]))
        return ret_context_multi_line

    def create_face(context_smooth_group, context_object_key):
        face_vert_loc_indices = []
        face_vert_nor_indices = []
        face_vert_tex_indices = []
        return (
            face_vert_loc_indices,
            face_vert_nor_indices,
            face_vert_tex_indices,
            context_smooth_group,
            context_object_key,
            [],  # If non-empty, that face is a Blender-invalid ngon (holes...), need a mutable object for that...
        )

    with ProgressReport(context.window_manager) as progress:
        progress.enter_substeps(1, "Importing BRK %r..." % filepath)

        if global_matrix is None:
            global_matrix = mathutils.Matrix()

        if use_split_objects or use_split_groups:
            use_groups_as_vgroups = False

        time_main = time.time()

        verts_loc = []
        verts_nor = []
        verts_tex = []
        faces = []  # tuples of the faces
        vertex_groups = {}  # when use_groups_as_vgroups is true

        # Get the string to float conversion func for this file- is 'float' for almost all files.
        float_func = get_float_func(filepath)

        # Context variables
        context_smooth_group = None
        context_object_key = None
        context_object_obpart = None
        context_vgroup = None

        objects_names = set()

        # Until we can use sets
        unique_smooth_groups = {}

        # when there are faces that end with \
        # it means they are multiline-
        # since we use xreadline we cant skip to the next line
        # so we need to know whether
        context_multi_line = b''

        # Per-face handling data.
        face_vert_loc_indices = None
        face_vert_nor_indices = None
        face_vert_tex_indices = None
        verts_loc_len = verts_nor_len = verts_tex_len = 0
        face_items_usage = set()
        face_invalid_blenpoly = None
        prev_vidx = None
        face = None
        vec = []

        childParentPairs = []  # maintains a tuple of child objects and parent names

        quick_vert_failures = 0
        skip_quick_vert = False

        progress.enter_substeps(3, "Parsing BRK file...")
        with open(filepath, 'rb') as f:
            for line in f:
                line_split = line.split()

                if not line_split:
                    continue

                line_start = line_split[0]  # we compare with this a _lot_

                # Handling vertex data are pretty similar, factorize that.
                # Also, most BRK files store all those on a single line, so try fast parsing for that first,
                # and only fallback to full multi-line parsing when needed, this gives significant speed-up
                # (~40% on affected code).
                if line_start == b'v':
                    vdata, vdata_len, do_quick_vert = verts_loc, 3, not skip_quick_vert
                elif line_start == b'vn':
                    vdata, vdata_len, do_quick_vert = verts_nor, 3, not skip_quick_vert
                elif line_start == b'vt':
                    vdata, vdata_len, do_quick_vert = verts_tex, 2, not skip_quick_vert
                elif context_multi_line == b'v':
                    vdata, vdata_len, do_quick_vert = verts_loc, 3, False
                elif context_multi_line == b'vn':
                    vdata, vdata_len, do_quick_vert = verts_nor, 3, False
                elif context_multi_line == b'vt':
                    vdata, vdata_len, do_quick_vert = verts_tex, 2, False
                else:
                    vdata_len = 0

                if vdata_len:
                    if do_quick_vert:
                        try:
                            vdata.append(tuple(map(float_func, line_split[1:vdata_len + 1])))
                        except:
                            do_quick_vert = False
                            # In case we get too many failures on quick parsing, force fallback to full multi-line one.
                            # Exception handling can become costly...
                            quick_vert_failures += 1
                            if quick_vert_failures > 10000:
                                skip_quick_vert = True
                    if not do_quick_vert:
                        context_multi_line = handle_vec(line_start, context_multi_line, line_split,
                                                        context_multi_line or line_start,
                                                        vdata, vec, vdata_len)

                elif line_start == b'f' or context_multi_line == b'f':
                    if not context_multi_line:
                        line_split = line_split[1:]
                        # Instantiate a face
                        face = create_face(context_smooth_group, context_object_key)
                        (face_vert_loc_indices, face_vert_nor_indices, face_vert_tex_indices,
                         _1, _2, face_invalid_blenpoly) = face
                        faces.append(face)
                        face_items_usage.clear()
                        verts_loc_len = len(verts_loc)
                        verts_nor_len = len(verts_nor)
                        verts_tex_len = len(verts_tex)
                    # Else, use face_vert_loc_indices and face_vert_tex_indices previously defined and used the obj_face

                    context_multi_line = b'f' if strip_slash(line_split) else b''

                    for v in line_split:
                        brk_vert = v.split(b'/')
                        idx = int(brk_vert[0])  # Note that we assume here we cannot get BRK invalid 0 index...
                        vert_loc_index = (idx + verts_loc_len) if (idx < 1) else idx - 1
                        # Add the vertex to the current group
                        # *warning*, this wont work for files that have groups defined around verts
                        if use_groups_as_vgroups and context_vgroup:
                            vertex_groups[context_vgroup].append(vert_loc_index)
                        # This a first round to quick-detect ngons that *may* use a same edge more than once.
                        # Potential candidate will be re-checked once we have done parsing the whole face.
                        if not face_invalid_blenpoly:
                            # If we use more than once a same vertex, invalid ngon is suspected.
                            if vert_loc_index in face_items_usage:
                                face_invalid_blenpoly.append(True)
                            else:
                                face_items_usage.add(vert_loc_index)
                        face_vert_loc_indices.append(vert_loc_index)

                        # formatting for faces with normals and textures is
                        # loc_index/tex_index/nor_index
                        if len(brk_vert) > 1 and brk_vert[1] and brk_vert[1] != b'0':
                            idx = int(brk_vert[1])
                            face_vert_tex_indices.append((idx + verts_tex_len) if (idx < 1) else idx - 1)
                        else:
                            face_vert_tex_indices.append(0)

                        if len(brk_vert) > 2 and brk_vert[2] and brk_vert[2] != b'0':
                            idx = int(brk_vert[2])
                            face_vert_nor_indices.append((idx + verts_nor_len) if (idx < 1) else idx - 1)
                        else:
                            face_vert_nor_indices.append(0)

                    if not context_multi_line:
                        # Means we have finished a face, we have to do final check if ngon is suspected to be blender-invalid...
                        if face_invalid_blenpoly:
                            face_invalid_blenpoly.clear()
                            face_items_usage.clear()
                            prev_vidx = face_vert_loc_indices[-1]
                            for vidx in face_vert_loc_indices:
                                edge_key = (prev_vidx, vidx) if (prev_vidx < vidx) else (vidx, prev_vidx)
                                if edge_key in face_items_usage:
                                    face_invalid_blenpoly.append(True)
                                    break
                                face_items_usage.add(edge_key)
                                prev_vidx = vidx

                elif use_edges and (line_start == b'l' or context_multi_line == b'l'):
                    # very similar to the face load function above with some parts removed
                    if not context_multi_line:
                        line_split = line_split[1:]
                        # Instantiate a face
                        face = create_face(context_smooth_group, context_object_key)
                        face_vert_loc_indices = face[0]
                        # XXX A bit hackish, we use special 'value' of face_vert_nor_indices (a single True item) to tag this
                        # as a polyline, and not a regular face...
                        face[1][:] = [True]
                        faces.append(face)
                    # Else, use face_vert_loc_indices previously defined and used the brk_face

                    context_multi_line = b'l' if strip_slash(line_split) else b''

                    for v in line_split:
                        brk_vert = v.split(b'/')
                        idx = int(brk_vert[0]) - 1
                        face_vert_loc_indices.append((idx + len(verts_loc) + 1) if (idx < 0) else idx)

                elif line_start == b's':
                    if use_smooth_groups:
                        context_smooth_group = line_value(line_split)
                        if context_smooth_group == b'off':
                            context_smooth_group = None
                        elif context_smooth_group:  # is not None
                            unique_smooth_groups[context_smooth_group] = None

                elif line_start == b'o':
                    if use_split_objects:
                        context_object_key = unique_name(objects_names, line_value(line_split))
                        context_object_obpart = context_object_key
                        # unique_objects[context_object_key]= None

                elif line_start == b'st':
                    studEmpty = bpy.data.objects.new(line_split[1].decode(), None)
                    bpy.context.collection.objects.link(studEmpty)
                    studEmpty.location = [float(line_split[2].decode()),
                                          float(line_split[3].decode()),
                                          float(line_split[4].decode())]
                    # add stud mesh
                    # print("Adding stud mesh")
                    # mesh = bpy.data.meshes.new("mesh")
                    # studObj = bpy.data.objects.new("test", None)
                    # bpy.context.collection.objects.link(studObj)
                    # studObj.location = [0, 0, 0]

                    if len(line_split) == 7:
                        # indicates that this empty has a parent object
                        parentName = line_split[6].decode()
                        # append this pair to the list for future use
                        childParentPairs.append([studEmpty, parentName])

                elif line_start == b'g':
                    if use_split_groups:
                        grppart = line_value(line_split)
                        context_object_key = (context_object_obpart, grppart) if context_object_obpart else grppart
                    elif use_groups_as_vgroups:
                        context_vgroup = line_value(line.split())
                        if context_vgroup and context_vgroup != b'(null)':
                            vertex_groups.setdefault(context_vgroup, [])
                        else:
                            context_vgroup = None  # dont assign a vgroup

        progress.step("Done, building geometries (verts:%i faces:%i smoothgroups:%i) ..."
                      % (len(verts_loc), len(faces), len(unique_smooth_groups)))

        # deselect all
        if bpy.ops.object.select_all.poll():
            bpy.ops.object.select_all(action='DESELECT')

        scene = context.scene
        new_objects = []  # put new objects here

        # Split the mesh by objects, may
        SPLIT_OB_OR_GROUP = bool(use_split_objects or use_split_groups)

        for data in split_mesh(verts_loc, faces, filepath, SPLIT_OB_OR_GROUP):
            verts_loc_split, faces_split, dataname, use_vnor, use_vtex = data
            # Create meshes from the data, warning 'vertex_groups' wont support splitting
            #~ print(dataname, use_vnor, use_vtex)
            create_mesh(new_objects,
                        use_edges,
                        verts_loc_split,
                        verts_nor if use_vnor else [],
                        verts_tex if use_vtex else [],
                        faces_split,
                        unique_smooth_groups,
                        vertex_groups,
                        dataname,
                        )

        view_layer = context.view_layer
        collection = view_layer.active_layer_collection.collection

        # Create new brk
        for brk in new_objects:
            collection.objects.link(brk)
            brk.select_set(True)

            # we could apply this anywhere before scaling.
            brk.matrix_world = global_matrix

        view_layer.update()

        axis_min = [1000000000] * 3
        axis_max = [-1000000000] * 3

        if global_clight_size:
            # Get all object bounds
            for ob in new_objects:
                for v in ob.bound_box:
                    for axis, value in enumerate(v):
                        if axis_min[axis] > value:
                            axis_min[axis] = value
                        if axis_max[axis] < value:
                            axis_max[axis] = value

            # Scale objects
            max_axis = max(axis_max[0] - axis_min[0],
                           axis_max[1] - axis_min[1],
                           axis_max[2] - axis_min[2])
            scale = 1.0

            while global_clight_size < max_axis * scale:
                scale = scale / 10.0

            for brk in new_objects:
                brk.scale = scale, scale, scale

        # set parent-child relationships
        for pair in childParentPairs:
            for obj in context.scene.objects:
                if pair[1] == obj.name:
                    pair[0].parent = obj
                    # take care to keep transform, otherwise children end up in weird places
                    pair[0].matrix_parent_inverse = obj.matrix_world.inverted()
                    break

        progress.leave_substeps("Done.")
        progress.leave_substeps("Finished importing: %r" % filepath)

    return {'FINISHED'}
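# The BRK parser above leans on several helpers (`line_value`, `strip_slash`,
# `get_float_func`) that are not defined in this section. Minimal sketches,
# modeled on the equivalents in stock io_scene_obj's import module:
def line_value(line_split):
    """Return the byte-string value after the tag, or None if there is none."""
    length = len(line_split)
    if length == 1:
        return None
    elif length == 2:
        return line_split[1]
    elif length > 2:
        return b' '.join(line_split[1:])

def strip_slash(line_split):
    """If the line ends with a continuation backslash, strip it and return True."""
    if line_split[-1][-1] == 92:  # '\' as a byte value
        if len(line_split[-1]) == 1:
            line_split.pop()
        else:
            line_split[-1] = line_split[-1][:-1]
        return True
    return False

def get_float_func(filepath):
    """Scan vertex lines once; if 'd'/'D' exponents are used (written by some
    CAD tools), return a converter that rewrites them, otherwise plain float."""
    with open(filepath, 'rb') as f:
        for line in f:
            line = line.lstrip()
            if line.startswith(b'v'):  # vn vt v
                if b'd' in line or b'D' in line:
                    return lambda s: float(s.replace(b'd', b'e').replace(b'D', b'e'))
    return float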
def write_file(filepath, objects, scene, mainUVChoiceType, uvIndex, uvName,
               EXPORT_TRI=False,
               EXPORT_EDGES=False,
               EXPORT_SMOOTH_GROUPS=False,
               EXPORT_SMOOTH_GROUPS_BITFLAGS=False,
               EXPORT_NORMALS=False,
               EXPORT_UV=True,
               EXPORT_MTL=True,
               EXPORT_APPLY_MODIFIERS=True,
               EXPORT_APPLY_MODIFIERS_RENDER=False,
               EXPORT_BLEN_OBS=True,
               EXPORT_GROUP_BY_OB=False,
               EXPORT_GROUP_BY_MAT=False,
               EXPORT_KEEP_VERT_ORDER=False,
               EXPORT_POLYGROUPS=False,
               EXPORT_CURVE_AS_NURBS=True,
               EXPORT_GLOBAL_MATRIX=None,
               EXPORT_PATH_MODE='AUTO',
               progress=ProgressReport(),
               ):
    """
    Basic write function. The context and options must be already set.
    This can be accessed externally, e.g.:
    write('c:\\test\\foobar.obj', Blender.Object.GetSelected())  # Using default options.

    Note: despite the name, `filepath` here is an already-open writable
    stream; only its .write method is used.
    """
    if EXPORT_GLOBAL_MATRIX is None:
        EXPORT_GLOBAL_MATRIX = Matrix()

    # print('-----------------')
    # print(EXPORT_GLOBAL_MATRIX)

    def veckey3d(v):
        return round(v.x, 4), round(v.y, 4), round(v.z, 4)

    def veckey2d(v):
        return round(v[0], 4), round(v[1], 4)

    def findVertexGroupName(face, vWeightMap):
        """
        Searches the vertexDict to see which group is assigned to a given face.
        We use a frequency system in order to sort out the name because a given
        vertex can belong to two or more groups at the same time. To find the
        right name for the face we list all the possible vertex group names
        with their frequency and then sort by frequency in descending order.
        The top element is the name shared by the highest number of vertices in
        the face's group.
        """
        weightDict = {}
        for vert_index in face.vertices:
            vWeights = vWeightMap[vert_index]
            for vGroupName, weight in vWeights:
                weightDict[vGroupName] = weightDict.get(vGroupName, 0.0) + weight

        if weightDict:
            return max((weight, vGroupName) for vGroupName, weight in weightDict.items())[1]
        else:
            return '(null)'

    fw = filepath.write

    # Initialize totals, these are updated each object
    totverts = totuvco = totno = 1

    face_vert_index = 1

    copy_set = set()

    # Get all meshes
    for i, ob_main in enumerate(objects):
        # ignore dupli children
        if ob_main.parent and ob_main.parent.instance_type in {'VERTS', 'FACES'}:
            continue

        obs = [(ob_main, ob_main.matrix_world)]
        for ob, ob_mat in obs:
            uv_unique_count = no_unique_count = 0

            ob_for_convert = ob.original

            try:
                me = ob_for_convert.to_mesh()
            except RuntimeError:
                me = None

            if me is None:
                continue

            # _must_ do this before applying transformation, else tessellation may differ
            if EXPORT_TRI:
                # _must_ do this first since it re-allocs arrays
                mesh_triangulate(me)

            me.transform(EXPORT_GLOBAL_MATRIX @ ob_mat)
            # If negative scaling, we have to invert the normals...
            if ob_mat.determinant() < 0.0:
                me.flip_normals()

            if EXPORT_UV:
                faceuv = len(me.uv_layers) > 0
                if faceuv:
                    # uv_layer = me.uv_layers.active.data[:]
                    objUVindex = 0
                    if mainUVChoiceType == "NAME":
                        for layer_index in range(0, len(me.uv_layers)):
                            if me.uv_layers[layer_index].name == uvName:
                                objUVindex = layer_index
                    elif mainUVChoiceType == "INDEX":
                        if uvIndex < len(me.uv_layers):
                            objUVindex = uvIndex
                    if objUVindex < len(me.uv_layers):
                        uv_layer = me.uv_layers[objUVindex].data[:]
                    else:
                        uv_layer = me.uv_layers[0].data[:]
            else:
                faceuv = False

            me_verts = me.vertices[:]

            # Make our own list so it can be sorted to reduce context switching
            face_index_pairs = [(face, index) for index, face in enumerate(me.polygons)]

            if EXPORT_EDGES:
                edges = me.edges
            else:
                edges = []

            if not (len(face_index_pairs) + len(edges) + len(me.vertices)):  # Make sure there is something to write
                # clean up
                ob_for_convert.to_mesh_clear()
                continue  # dont bother with this mesh.

            if EXPORT_NORMALS and face_index_pairs:
                me.calc_normals_split()
                # No need to call me.free_normals_split later, as this mesh is deleted anyway!

            loops = me.loops

            if (EXPORT_SMOOTH_GROUPS or EXPORT_SMOOTH_GROUPS_BITFLAGS) and face_index_pairs:
                smooth_groups, smooth_groups_tot = me.calc_smooth_groups(use_bitflags=EXPORT_SMOOTH_GROUPS_BITFLAGS)
                if smooth_groups_tot <= 1:
                    smooth_groups, smooth_groups_tot = (), 0
            else:
                smooth_groups, smooth_groups_tot = (), 0

            materials = me.materials[:]
            material_names = [m.name if m else None for m in materials]

            # avoid bad index errors
            if not materials:
                materials = [None]
                material_names = [name_compat(None)]

            # Sort by Material, then images
            # so we dont over context switch in the obj file.
            if EXPORT_KEEP_VERT_ORDER:
                pass
            else:
                if len(materials) > 1:
                    if smooth_groups:
                        sort_func = lambda a: (a[0].material_index,
                                               smooth_groups[a[1]] if a[0].use_smooth else False)
                    else:
                        sort_func = lambda a: (a[0].material_index,
                                               a[0].use_smooth)
                else:
                    # no materials
                    if smooth_groups:
                        sort_func = lambda a: smooth_groups[a[1] if a[0].use_smooth else False]
                    else:
                        sort_func = lambda a: a[0].use_smooth

                face_index_pairs.sort(key=sort_func)

                del sort_func

            # Set the default mat to no material and no image.
            contextMat = 0, 0  # Can never be this, so we will label a new material the first chance we get.
            contextSmooth = None  # Will either be true or false, set bad to force initialization switch.

            if EXPORT_BLEN_OBS or EXPORT_GROUP_BY_OB:
                name1 = ob.name
                name2 = ob.data.name
                # if name1 == name2:
                #     obnamestring = name_compat(name1)
                # else:
                #     obnamestring = '%s_%s' % (name_compat(name1), name_compat(name2))

                # assume all objects have unique names for now
                obnamestring = name_compat(name1)
                if EXPORT_BLEN_OBS:
                    fw('o %s\n' % obnamestring)  # Write Object name
                else:  # if EXPORT_GROUP_BY_OB:
                    fw('g %s\n' % obnamestring)

            # subprogress2.step()

            # Vert
            for v in me_verts:
                fw('v %.6f %.6f %.6f\n' % v.co[:])

            # subprogress2.step()

            # UV
            if faceuv:
                # in case removing some of these dont get defined.
                uv = f_index = uv_index = uv_key = uv_val = uv_ls = None

                uv_face_mapping = [None] * len(face_index_pairs)

                uv_dict = {}
                uv_get = uv_dict.get
                for f, f_index in face_index_pairs:
                    uv_ls = uv_face_mapping[f_index] = []
                    for uv_index, l_index in enumerate(f.loop_indices):
                        uv = uv_layer[l_index].uv
                        # include the vertex index in the key so we don't share UV's between vertices,
                        # allowed by the OBJ spec but can cause issues for other importers, see: T47010.

                        # this works too, shared UV's for all verts
                        #~ uv_key = veckey2d(uv)
                        uv_key = loops[l_index].vertex_index, veckey2d(uv)

                        uv_val = uv_get(uv_key)
                        if uv_val is None:
                            uv_val = uv_dict[uv_key] = uv_unique_count
                            fw('vt %.6f %.6f\n' % uv[:])
                            uv_unique_count += 1
                        uv_ls.append(uv_val)

                del uv_dict, uv, f_index, uv_index, uv_ls, uv_get, uv_key, uv_val
                # Only need uv_unique_count and uv_face_mapping

            # subprogress2.step()

            # NORMAL, Smooth/Non smoothed.
            if EXPORT_NORMALS:
                no_key = no_val = None
                normals_to_idx = {}
                no_get = normals_to_idx.get
                loops_to_normals = [0] * len(loops)
                for f, f_index in face_index_pairs:
                    for l_idx in f.loop_indices:
                        no_key = veckey3d(loops[l_idx].normal)
                        no_val = no_get(no_key)
                        if no_val is None:
                            no_val = normals_to_idx[no_key] = no_unique_count
                            fw('vn %.4f %.4f %.4f\n' % no_key)
                            no_unique_count += 1
                        loops_to_normals[l_idx] = no_val
                del normals_to_idx, no_get, no_key, no_val
            else:
                loops_to_normals = []

            # subprogress2.step()

            # XXX
            if EXPORT_POLYGROUPS:
                # Retrieve the list of vertex groups
                vertGroupNames = ob.vertex_groups.keys()
                if vertGroupNames:
                    currentVGroup = ''
                    # Create a dictionary keyed by face id and listing, for each vertex, the vertex groups it belongs to
                    vgroupsMap = [[] for _i in range(len(me_verts))]
                    for v_idx, v_ls in enumerate(vgroupsMap):
                        v_ls[:] = [(vertGroupNames[g.group], g.weight) for g in me_verts[v_idx].groups]

            for f, f_index in face_index_pairs:
                f_smooth = f.use_smooth
                if f_smooth and smooth_groups:
                    f_smooth = smooth_groups[f_index]
                f_mat = min(f.material_index, len(materials) - 1)

                # MAKE KEY
                key = material_names[f_mat], None  # No image, use None instead.

                # Write the vertex group
                if EXPORT_POLYGROUPS:
                    if vertGroupNames:
                        # find what vertex group the face belongs to
                        vgroup_of_face = findVertexGroupName(f, vgroupsMap)
                        if vgroup_of_face != currentVGroup:
                            currentVGroup = vgroup_of_face
                            fw('g %s\n' % vgroup_of_face)

                # CHECK FOR CONTEXT SWITCH
                if key == contextMat:
                    pass  # Context already switched, dont do anything
                contextMat = key

                if f_smooth != contextSmooth:
                    if f_smooth:  # on now off
                        if smooth_groups:
                            f_smooth = smooth_groups[f_index]
                            fw('s %d\n' % f_smooth)
                        else:
                            fw('s 1\n')
                    else:  # was off now on
                        fw('s off\n')
                    contextSmooth = f_smooth

                f_v = [(vi, me_verts[v_idx], l_idx)
                       for vi, (v_idx, l_idx) in enumerate(zip(f.vertices, f.loop_indices))]

                fw('f')
                if faceuv:
                    if EXPORT_NORMALS:
                        for vi, v, li in f_v:
                            fw(" %d/%d/%d" % (totverts + v.index,
                                              totuvco + uv_face_mapping[f_index][vi],
                                              totno + loops_to_normals[li],
                                              ))  # vert, uv, normal
                    else:  # No Normals
                        for vi, v, li in f_v:
                            fw(" %d/%d" % (totverts + v.index,
                                           totuvco + uv_face_mapping[f_index][vi],
                                           ))  # vert, uv

                    face_vert_index += len(f_v)

                else:  # No UV's
                    if EXPORT_NORMALS:
                        for vi, v, li in f_v:
                            fw(" %d//%d" % (totverts + v.index, totno + loops_to_normals[li]))
                    else:  # No Normals
                        for vi, v, li in f_v:
                            fw(" %d" % (totverts + v.index))

                fw('\n')

            # subprogress2.step()

            # Write edges.
            if EXPORT_EDGES:
                for ed in edges:
                    if ed.is_loose:
                        fw('l %d %d\n' % (totverts + ed.vertices[0], totverts + ed.vertices[1]))

            # Make the indices global rather than per mesh
            totverts += len(me_verts)
            totuvco += uv_unique_count
            totno += no_unique_count

            # clean up
            ob_for_convert.to_mesh_clear()
def write_file(
    filepath,
    objects,
    depsgraph,
    scene,
    EXPORT_TRI=False,
    EXPORT_EDGES=False,
    EXPORT_SMOOTH_GROUPS=False,
    EXPORT_SMOOTH_GROUPS_BITFLAGS=False,
    EXPORT_NORMALS=False,
    EXPORT_UV=True,
    EXPORT_APPLY_MODIFIERS=True,
    EXPORT_APPLY_MODIFIERS_RENDER=False,
    EXPORT_GROUP_BY_OB=False,
    EXPORT_KEEP_VERT_ORDER=False,
    EXPORT_POLYGROUPS=False,
    EXPORT_GLOBAL_MATRIX=None,
    EXPORT_PATH_MODE='AUTO',
    progress=ProgressReport(),
):
    """
    Basic write function. The context and options must be already set.
    This can be accessed externally, e.g.:
    write('c:\\test\\foobar.brk', Blender.Object.GetSelected())  # Using default options.
    """
    if EXPORT_GLOBAL_MATRIX is None:
        EXPORT_GLOBAL_MATRIX = Matrix()

    def veckey3d(v):
        return round(v.x, 4), round(v.y, 4), round(v.z, 4)

    def veckey2d(v):
        return round(v[0], 4), round(v[1], 4)

    def findVertexGroupName(face, vWeightMap):
        """
        Searches the vertexDict to see which group is assigned to a given face.
        We use a frequency system in order to sort out the name because a given
        vertex can belong to two or more groups at the same time. To find the
        right name for the face we list all the possible vertex group names
        with their frequency and then sort by frequency in descending order.
        The top element is the name shared by the highest number of vertices in
        the face's group.
        """
        weightDict = {}
        for vert_index in face.vertices:
            vWeights = vWeightMap[vert_index]
            for vGroupName, weight in vWeights:
                weightDict[vGroupName] = weightDict.get(vGroupName, 0.0) + weight

        if weightDict:
            return max((weight, vGroupName) for vGroupName, weight in weightDict.items())[1]
        else:
            return '(null)'

    with ProgressReportSubstep(progress, 2, "BRK Export path: %r" % filepath,
                               "BRK Export Finished") as subprogress1:
        with open(filepath, "w", encoding="utf8", newline="\n") as f:
            fw = f.write

            # Write Header
            fw('# BrickCAD v%s BRK File: %r\n'
               % (bpy.app.version_string, os.path.basename(bpy.data.filepath)))

            # Initialize totals, these are updated each object
            totverts = totuvco = totno = 1

            face_vert_index = 1

            copy_set = set()

            # Get all meshes
            subprogress1.enter_substeps(len(objects))
            for i, ob_main in enumerate(objects):
                # ignore dupli children
                if ob_main.parent and ob_main.parent.instance_type in {'VERTS', 'FACES'}:
                    subprogress1.step("Ignoring %s, dupli child..." % ob_main.name)
                    continue

                obs = [(ob_main, ob_main.matrix_world)]
                if ob_main.is_instancer:
                    obs += [(dup.instance_object.original, dup.matrix_world.copy())
                            for dup in depsgraph.object_instances
                            if dup.parent and dup.parent.original == ob_main]
                    # ~ print(ob_main.name, 'has', len(obs) - 1, 'dupli children')

                subprogress1.enter_substeps(len(obs))
                for ob, ob_mat in obs:
                    # print(ob.type)
                    with ProgressReportSubstep(subprogress1, 6) as subprogress2:
                        uv_unique_count = no_unique_count = 0

                        ob_for_convert = ob.evaluated_get(depsgraph) if EXPORT_APPLY_MODIFIERS else ob.original

                        try:
                            me = ob_for_convert.to_mesh()
                        except RuntimeError:
                            me = None

                        if me is None:
                            # object is an empty; it is used to indicate the location of a stud
                            obnamestring = name_compat(ob.name)
                            # check if object has a parent
                            parent = ob.parent
                            # print("Parent: " + str(parent.name))
                            if parent is None:
                                # Write Object name and location
                                fw('st %s %.6f %.6f %.6f\n'
                                   % (obnamestring,
                                      ob.matrix_world.translation[0],
                                      ob.matrix_world.translation[1],
                                      ob.matrix_world.translation[2]))
                            else:
                                # Write Object name, location, and parent name
                                fw('st %s %.6f %.6f %.6f p %s\n'
                                   % (obnamestring,
                                      ob.matrix_world.translation[0],
                                      ob.matrix_world.translation[1],
                                      ob.matrix_world.translation[2],
                                      name_compat(parent.name)))
                            continue

                        # _must_ do this before applying transformation, else tessellation may differ
                        if EXPORT_TRI:
                            # _must_ do this first since it re-allocs arrays
                            mesh_triangulate(me)

                        me.transform(EXPORT_GLOBAL_MATRIX @ ob_mat)
                        # If negative scaling, we have to invert the normals...
                        if ob_mat.determinant() < 0.0:
                            me.flip_normals()

                        if EXPORT_UV:
                            faceuv = len(me.uv_layers) > 0
                            if faceuv:
                                uv_layer = me.uv_layers.active.data[:]
                        else:
                            faceuv = False

                        me_verts = me.vertices[:]

                        # Make our own list so it can be sorted to reduce context switching
                        face_index_pairs = [(face, index) for index, face in enumerate(me.polygons)]

                        if EXPORT_EDGES:
                            edges = me.edges
                        else:
                            edges = []

                        if not (len(face_index_pairs) + len(edges) + len(me.vertices)):  # Make sure there is something to write
                            # clean up
                            bpy.data.meshes.remove(me)
                            continue  # dont bother with this mesh.

                        if EXPORT_NORMALS and face_index_pairs:
                            me.calc_normals_split()
                            # No need to call me.free_normals_split later, as this mesh is deleted anyway!

                        loops = me.loops

                        if (EXPORT_SMOOTH_GROUPS or EXPORT_SMOOTH_GROUPS_BITFLAGS) and face_index_pairs:
                            smooth_groups, smooth_groups_tot = me.calc_smooth_groups(use_bitflags=EXPORT_SMOOTH_GROUPS_BITFLAGS)
                            if smooth_groups_tot <= 1:
                                smooth_groups, smooth_groups_tot = (), 0
                        else:
                            smooth_groups, smooth_groups_tot = (), 0

                        contextSmooth = None  # Will either be true or false, set bad to force initialization switch.

                        name = ob.name
                        obnamestring = name_compat(name)
                        fw('o %s\n' % obnamestring)  # Write Object name

                        subprogress2.step()

                        # Vert
                        for v in me_verts:
                            fw('v %.6f %.6f %.6f\n' % v.co[:])

                        subprogress2.step()

                        # UV
                        if faceuv:
                            # in case removing some of these dont get defined.
                            uv = f_index = uv_index = uv_key = uv_val = uv_ls = None

                            uv_face_mapping = [None] * len(face_index_pairs)

                            uv_dict = {}
                            uv_get = uv_dict.get
                            for f, f_index in face_index_pairs:
                                uv_ls = uv_face_mapping[f_index] = []
                                for uv_index, l_index in enumerate(f.loop_indices):
                                    uv = uv_layer[l_index].uv
                                    # include the vertex index in the key so we don't share UV's between vertices,
                                    # allowed by the OBJ spec but can cause issues for other importers, see: T47010.

                                    # this works too, shared UV's for all verts
                                    #~ uv_key = veckey2d(uv)
                                    uv_key = loops[l_index].vertex_index, veckey2d(uv)

                                    uv_val = uv_get(uv_key)
                                    if uv_val is None:
                                        uv_val = uv_dict[uv_key] = uv_unique_count
                                        fw('vt %.6f %.6f\n' % uv[:])
                                        uv_unique_count += 1
                                    uv_ls.append(uv_val)

                            del uv_dict, uv, f_index, uv_index, uv_ls, uv_get, uv_key, uv_val
                            # Only need uv_unique_count and uv_face_mapping

                        subprogress2.step()

                        # NORMAL, Smooth/Non smoothed.
                        if EXPORT_NORMALS:
                            no_key = no_val = None
                            normals_to_idx = {}
                            no_get = normals_to_idx.get
                            loops_to_normals = [0] * len(loops)
                            for f, f_index in face_index_pairs:
                                for l_idx in f.loop_indices:
                                    no_key = veckey3d(loops[l_idx].normal)
                                    no_val = no_get(no_key)
                                    if no_val is None:
                                        no_val = normals_to_idx[no_key] = no_unique_count
                                        fw('vn %.4f %.4f %.4f\n' % no_key)
                                        no_unique_count += 1
                                    loops_to_normals[l_idx] = no_val
                            del normals_to_idx, no_get, no_key, no_val
                        else:
                            loops_to_normals = []

                        subprogress2.step()

                        # XXX
                        if EXPORT_POLYGROUPS:
                            # Retrieve the list of vertex groups
                            vertGroupNames = ob.vertex_groups.keys()
                            if vertGroupNames:
                                currentVGroup = ''
                                # Create a dictionary keyed by face id and listing, for each vertex, the vertex groups it belongs to
                                vgroupsMap = [[] for _i in range(len(me_verts))]
                                for v_idx, v_ls in enumerate(vgroupsMap):
                                    v_ls[:] = [(vertGroupNames[g.group], g.weight) for g in me_verts[v_idx].groups]

                        for f, f_index in face_index_pairs:
                            f_smooth = f.use_smooth
                            if f_smooth and smooth_groups:
                                f_smooth = smooth_groups[f_index]

                            # Write the vertex group
                            if EXPORT_POLYGROUPS:
                                if vertGroupNames:
                                    # find what vertex group the face belongs to
                                    vgroup_of_face = findVertexGroupName(f, vgroupsMap)
                                    if vgroup_of_face != currentVGroup:
                                        currentVGroup = vgroup_of_face
                                        fw('g %s\n' % vgroup_of_face)

                            if f_smooth != contextSmooth:
                                if f_smooth:  # on now off
                                    if smooth_groups:
                                        f_smooth = smooth_groups[f_index]
                                        fw('s %d\n' % f_smooth)
                                    else:
                                        fw('s 1\n')
                                else:  # was off now on
                                    fw('s off\n')
                                contextSmooth = f_smooth

                            f_v = [(vi, me_verts[v_idx], l_idx)
                                   for vi, (v_idx, l_idx) in enumerate(zip(f.vertices, f.loop_indices))]

                            fw('f')
                            if faceuv:
                                if EXPORT_NORMALS:
                                    for vi, v, li in f_v:
                                        fw(" %d/%d/%d" % (totverts + v.index,
                                                          totuvco + uv_face_mapping[f_index][vi],
                                                          totno + loops_to_normals[li],
                                                          ))  # vert, uv, normal
                                else:  # No Normals
                                    for vi, v, li in f_v:
                                        fw(" %d/%d" % (totverts + v.index,
                                                       totuvco + uv_face_mapping[f_index][vi],
                                                       ))  # vert, uv

                                face_vert_index += len(f_v)

                            else:  # No UV's
                                if EXPORT_NORMALS:
                                    for vi, v, li in f_v:
                                        fw(" %d//%d" % (totverts + v.index, totno + loops_to_normals[li]))
                                else:  # No Normals
                                    for vi, v, li in f_v:
                                        fw(" %d" % (totverts + v.index))

                            fw('\n')

                        subprogress2.step()

                        # Write edges.
                        if EXPORT_EDGES:
                            for ed in edges:
                                if ed.is_loose:
                                    fw('l %d %d\n' % (totverts + ed.vertices[0], totverts + ed.vertices[1]))

                        # Make the indices global rather than per mesh
                        totverts += len(me_verts)
                        totuvco += uv_unique_count
                        totno += no_unique_count

                        # clean up
                        ob_for_convert.to_mesh_clear()

                subprogress1.leave_substeps("Finished writing geometry of '%s'." % ob_main.name)
            subprogress1.leave_substeps()

        # copy all collected files.
        io_utils.path_reference_copy(copy_set)
def load(context, filepath):
    """
    Called by the user interface or another script.
    load(path) - should give acceptable results.
    This function passes the file and sends the data off
        to be split into objects and then converted into mesh objects
    """
    with ProgressReport(context.window_manager) as progress:
        progress.enter_substeps(1, "Importing MOTION.BIN %r..." % filepath)

        progress.enter_substeps(2, "Reading MOTION.BIN...")
        motn = MOTN()
        motn.read(filepath)
        progress.leave_substeps("Done.")

        # get the armature
        armature = bpy.context.scene.objects['ShenmueRig']
        bpy.context.view_layer.objects.active = armature
        armature.select_set(True)
        bpy.ops.object.mode_set(mode='POSE')

        # set rotation mode to euler angle for all bones
        for bone in armature.pose.bones:
            bone.rotation_mode = "XYZ"

        progress.enter_substeps(2, "Loading sequences to selected rig...")
        for sequence in motn.sequences:
            # debug walk selection
            if sequence.name == "A_WALK_L_02":
                print("A_WALK_L_02\n")
                for bone_data in sequence.data.bone_keyframes:
                    bone_id = str(IKBoneID(bone_data.bone_index))
                    print(bone_id)
                    if bone_id in ShenmueRigMap:
                        bone_name = ShenmueRigMap[bone_id]
                        for bone in armature.pose.bones:
                            if bone.name == bone_name:
                                print(bone_id, bone_name)
                                # note: the source data's Z keys drive Blender's
                                # Y channel and vice versa (axis conversion)
                                for pos_x in bone_data.pos_x:
                                    bone.location = [pos_x.value, 0.0, 0.0]
                                    bone.keyframe_insert(data_path="location",
                                                         index=0, frame=pos_x.frame)
                                for pos_y in bone_data.pos_z:
                                    bone.location = [0.0, pos_y.value, 0.0]
                                    bone.keyframe_insert(data_path="location",
                                                         index=1, frame=pos_y.frame)
                                for pos_z in bone_data.pos_y:
                                    bone.location = [0.0, 0.0, pos_z.value]
                                    bone.keyframe_insert(data_path="location",
                                                         index=2, frame=pos_z.frame)
                                for rot_x in bone_data.rot_x:
                                    bone.rotation_euler = [rot_x.value, 0.0, 0.0]
                                    bone.keyframe_insert(data_path="rotation_euler",
                                                         index=0, frame=rot_x.frame)
                                for rot_y in bone_data.rot_z:
                                    bone.rotation_euler = [0.0, rot_y.value, 0.0]
                                    bone.keyframe_insert(data_path="rotation_euler",
                                                         index=1, frame=rot_y.frame)
                                for rot_z in bone_data.rot_y:
                                    bone.rotation_euler = [0.0, 0.0, rot_z.value]
                                    bone.keyframe_insert(data_path="rotation_euler",
                                                         index=2, frame=rot_z.frame)
                                break
        progress.leave_substeps("Done.")

        progress.leave_substeps("Finished importing: %r" % filepath)

    return {'FINISHED'}
def load(self,
         context,
         filepath,
         *,
         import_skeleton=True,
         skeleton_auto_connect=True,
         import_animations=False,
         scale_factor=1.0,
         use_cycles=True,
         relpath=None,
         global_matrix=None):
    """
    Called by the user interface or another script.
    load_cgf(path) - should give acceptable results.
    This function passes the file and sends the data off
    to be split into objects and then converted into mesh objects
    """
    self.filepath = filepath
    self.armature_auto_connect = skeleton_auto_connect
    self.scale_factor = scale_factor

    if self.filepath.endswith('.caf'):
        self.load_animation()
        return {'FINISHED'}

    with ProgressReport(context.window_manager) as progress:
        progress.enter_substeps(1, "Importing CGF %r ... relpath: %r" % (filepath, relpath))

        if global_matrix is None:
            global_matrix = Matrix()

        time_main = time.time()
        b_mats = []

        progress.enter_substeps(1, "Parsing CGF file ...")
        with open(filepath, 'rb') as f:
            data = CgfFormat.Data()
            # check if cgf file is valid
            try:
                data.inspect_version_only(f)
            except ValueError:
                # not a cgf file
                raise
            else:
                progress.enter_substeps(2, "Reading CGF %r ..." % filepath)
                data.read(f)
                print('Project root: %s' % self.project_root)
                progress.leave_substeps("Done reading.")

        progress.enter_substeps(3, "Parsing CGF %r ..." % filepath)
        if data.game == 'Crysis':
            print('[WARNING]: Crysis import is very experimental, and is likely to fail')
        print('game: %s' % data.game)
        print('file type: 0x%08X' % data.header.type)
        print('version: 0x%08X' % data.version)
        print('user version: 0x%08X' % data.user_version)
        for i, chunk in enumerate(data.chunks):
            print('id %i: %s' % (i, chunk.__class__.__name__))

        # TODO: fix the scale correction
        scale_factor = self.get_global_scale(data)
        for chunk in data.chunks:
            chunk.apply_scale(1.0 / scale_factor)

        # import data
        progress.step("Done, making data into blender")
        progress.step("Done, loading materials and images ...")

        # TODO: create materials
        # Far Cry: iterate over all standard material chunks
        for chunk in data.chunks:
            # check chunk type
            if not isinstance(chunk, CgfFormat.MtlChunk):
                continue
            # multi material: skip
            if chunk.children or to_str(chunk.name).startswith('s_nouvmap') \
                    or chunk.type != CgfFormat.MtlType.STANDARD \
                    or self.get_material_name(chunk.name) is None:
                continue
            # single material
            b_mats.append((self.create_std_material(chunk, use_cycles=use_cycles),
                           self.is_material_nodraw(chunk.name)))

        # Deselect all
        if bpy.ops.object.select_all.poll():
            bpy.ops.object.select_all(action="DESELECT")

        scene = context.scene
        new_objects = {}  # put new objects here
        armature_chunk = None
        node_transforms = {}
        # SPLIT_OB_OR_GROUP = bool(use_split_objects or use_split_groups)

        # Create meshes from the data, warning 'vertex_groups' won't support splitting
        #~ print(dataname, user_vnor, use_vtex)

        # parse bone list first.
        for chunk in data.chunks:
            if isinstance(chunk, CgfFormat.BoneNameListChunk):
                self.parse_bone_name_list(chunk)

        # parse bone data
        for chunk in data.chunks:
            if isinstance(chunk, CgfFormat.BoneAnimChunk):
                self.build_bone_infos(chunk)
            elif isinstance(chunk, CgfFormat.BoneInitialPosChunk):
                self.process_bone_initial_position(chunk)

        for chunk in data.chunks:
            if isinstance(chunk, CgfFormat.NodeChunk) and isinstance(chunk.object, CgfFormat.MeshChunk):
                self.dataname = to_str(chunk.name)
                self.create_mesh(new_objects, chunk.object, b_mats, self.dataname)
                node_transforms[chunk.object] = Matrix(chunk.transform.as_tuple()).transposed()
                self.mapping_vertex_group_weights(new_objects)
            elif import_skeleton and isinstance(chunk, CgfFormat.BoneAnimChunk):
                self.create_armatures(chunk, new_objects, scale_factor=scale_factor)

        # create new obj
        for (chk, obj) in new_objects.items():
            if obj not in scene.objects.values():
                scene.objects.link(obj)
            # we could apply this anywhere before scaling
            node_transform = node_transforms[chk] if chk in node_transforms else None
            print('Node transform: %s' % node_transform)
            obj.matrix_world = global_matrix
            if node_transform:
                obj.matrix_world = obj.matrix_world * node_transform
            print("Apply obj %s's matrix world." % obj)

        scene.update()

        for i in scene.objects:
            i.select = False  # deselect all objects

        # Deselect all
        if bpy.ops.object.select_all.poll():
            bpy.ops.object.select_all(action="DESELECT")

        if len(new_objects):
            for obj in new_objects.values():
                if obj.type == 'ARMATURE':
                    obj.select = True
                    scene.objects.active = obj
                    break

        progress.leave_substeps("Done ...")

        if import_animations:
            progress.enter_substeps(4, "Import animations by searching Cry action list file (CAL) ...")
            self.load_animations()
            progress.leave_substeps("Done, imported animations ...")

        progress.leave_substeps("Finished importing CGF %r ..." % filepath)

    return {'FINISHED'}
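# The importer above dispatches on chunk type with chained isinstance()
# checks. A table-driven sketch of the same idea; the chunk classes and
# handler names in the usage comment are placeholders, not the CgfFormat API.
def dispatch_chunks(chunks, handlers):
    """Call the first handler whose registered class matches each chunk."""
    for chunk in chunks:
        for cls, handler in handlers.items():
            if isinstance(chunk, cls):
                handler(chunk)
                break  # one handler per chunk, first match wins

# Usage (hypothetical):
# dispatch_chunks(data.chunks, {
#     CgfFormat.BoneNameListChunk: self.parse_bone_name_list,
#     CgfFormat.BoneAnimChunk: self.build_bone_infos,
# })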
def write_file(filepath,
               objects,
               depsgraph,
               scene,
               EXPORT_TRI=False,
               EXPORT_EDGES=False,
               EXPORT_SMOOTH_GROUPS=False,
               EXPORT_SMOOTH_GROUPS_BITFLAGS=False,
               EXPORT_NORMALS=False,
               EXPORT_UV=True,
               EXPORT_MTL=True,
               EXPORT_APPLY_MODIFIERS=True,
               EXPORT_APPLY_MODIFIERS_RENDER=False,
               EXPORT_BLEN_OBS=True,
               EXPORT_GROUP_BY_OB=False,
               EXPORT_GROUP_BY_MAT=False,
               EXPORT_KEEP_VERT_ORDER=False,
               EXPORT_POLYGROUPS=False,
               EXPORT_CURVE_AS_NURBS=True,
               EXPORT_GLOBAL_MATRIX=None,
               EXPORT_PATH_MODE='AUTO',
               progress=ProgressReport(),
               EXPORT_VERTEX_COLORS=True,
               ):
    """
    Basic write function. The context and options must already be set.
    This can be accessed externally, e.g.
    write('c:\\test\\foobar.obj', Blender.Object.GetSelected())  # Using default options.
    """
    if EXPORT_GLOBAL_MATRIX is None:
        EXPORT_GLOBAL_MATRIX = Matrix()

    with ProgressReportSubstep(progress, 2, "WC1 Export path: %r" % filepath, "WC1 Export Finished") as subprogress1:
        with open(filepath, "wb") as fhnd:
            fw = fhnd.write

            # Write placeholder Header
            fw(get_binary_u64(0))

            # Initialize totals, these are updated each object
            totverts = totmeshes = 0
            face_vert_index = 1

            copy_set = set()

            # Get all meshes
            subprogress1.enter_substeps(len(objects))
            for i, ob_main in enumerate(objects):
                # ignore dupli children
                if ob_main.parent and ob_main.parent.instance_type in {'VERTS', 'FACES'}:
                    subprogress1.step("Ignoring %s, dupli child..." % ob_main.name)
                    continue

                obs = [(ob_main, ob_main.matrix_world)]
                if ob_main.is_instancer:
                    obs += [(dup.instance_object.original, dup.matrix_world.copy())
                            for dup in depsgraph.object_instances
                            if dup.parent and dup.parent.original == ob_main]
                    # ~ print(ob_main.name, 'has', len(obs) - 1, 'dupli children')

                subprogress1.enter_substeps(len(obs))

                # write Header
                end_pos = fhnd.tell()
                fhnd.seek(0)
                fw(get_binary_u64(len(obs)))
                fhnd.seek(end_pos)

                for ob, ob_mat in obs:
                    with ProgressReportSubstep(subprogress1, 5) as subprogress2:
                        object_pos = fhnd.tell()

                        # Write placeholder Object Header
                        ## vertex count
                        fw(get_binary_u64(0))
                        ## index count
                        fw(get_binary_u64(0))
                        ## flags
                        flags = 0
                        if EXPORT_VERTEX_COLORS:
                            flags |= 1 << 0
                        fw(get_binary_u64(flags))

                        ob_for_convert = ob.evaluated_get(depsgraph) if EXPORT_APPLY_MODIFIERS else ob.original

                        try:
                            me = ob_for_convert.to_mesh()
                        except RuntimeError:
                            me = None

                        if me is None:
                            continue

                        # _must_ do this before applying transformation, else tessellation may differ
                        if EXPORT_TRI:
                            # _must_ do this first since it re-allocs arrays
                            mesh_triangulate(me)

                        me.transform(EXPORT_GLOBAL_MATRIX @ ob_mat)
                        # If negative scaling, we have to invert the normals...
                        if ob_mat.determinant() < 0.0:
                            me.flip_normals()

                        me_verts = me.vertices[:]
                        me_cols = me.vertex_colors[:]

                        # Make our own list so it can be sorted to reduce context switching
                        face_index_pairs = [(face, index) for index, face in enumerate(me.polygons)]

                        if not (len(face_index_pairs) + len(me.vertices)):  # Make sure there is something to write
                            # clean up
                            bpy.data.meshes.remove(me)
                            continue  # don't bother with this mesh.

                        subprogress2.step()

                        # Vert
                        for v in me_verts:
                            fw(get_binary_f64(v.co[0]))
                            fw(get_binary_f64(-v.co[1]))
                            fw(get_binary_f64(v.co[2]))

                        subprogress2.step()

                        for col_layer in me_cols:
                            for col in col_layer.data:
                                fw(get_binary_f64(col.color[0]))
                                fw(get_binary_f64(col.color[1]))
                                fw(get_binary_f64(col.color[2]))
                                fw(get_binary_f64(col.color[3]))

                        subprogress2.step()

                        obj_indices = 0
                        for f, f_index in face_index_pairs:
                            f_v = [(vi, me_verts[v_idx], l_idx)
                                   for vi, (v_idx, l_idx) in enumerate(zip(f.vertices, f.loop_indices))]
                            for vi, v, li in f_v:
                                fw(get_binary_u64(totverts + v.index))
                                obj_indices += 1

                        subprogress2.step()

                        # Make the indices global rather than per mesh
                        totverts += len(me_verts)

                        # write object header
                        end_pos = fhnd.tell()
                        fhnd.seek(object_pos)
                        fw(get_binary_u64(len(me_verts)))
                        fw(get_binary_u64(obj_indices))
                        fhnd.seek(end_pos)

                        # clean up
                        ob_for_convert.to_mesh_clear()

                subprogress1.leave_substeps("Finished writing geometry of '%s'." % ob_main.name)
            subprogress1.leave_substeps()
            subprogress1.step("Finished exporting geometry, now exporting materials")

        # copy all collected files.
        io_utils.path_reference_copy(copy_set)
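# A self-contained sketch of the "write placeholder, patch later" pattern
# used by the binary writer above. struct is from the standard library;
# get_binary_u64 in the exporter presumably packs a little-endian u64 in a
# way similar to this struct call (an assumption, its source is not shown).
import struct

def demo_patch_header(path):
    with open(path, "wb") as f:
        header_pos = f.tell()
        f.write(struct.pack("<Q", 0))        # placeholder u64 count
        count = 0
        for value in (10, 20, 30):           # body: payload records
            f.write(struct.pack("<Q", value))
            count += 1
        end_pos = f.tell()
        f.seek(header_pos)                   # go back ...
        f.write(struct.pack("<Q", count))    # ... patch in the real count
        f.seek(end_pos)                      # restore the write position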
def load_ska_and_skm(filepath,
                     context,
                     IMPORT_CONSTRAIN_BOUNDS=10.0,
                     IMAGE_SEARCH=True,
                     APPLY_MATRIX=True,
                     USE_INHERIT_ROTATION=True,
                     USE_LOCAL_LOCATION=True,
                     APPLY_ANIMATIONS=False,
                     global_matrix=None):
    global SCN, ToEE_data_dir, progress
    # XXX
    # if BPyMessages.Error_NoFile(filepath):
    #     return

    # time.clock() was removed in Python 3.8, so use perf_counter() instead
    time1 = time.perf_counter()  # for timing the import duration

    # progress = ProgressReport(context.window_manager)
    with ProgressReport(context.window_manager) as progress:
        print("importing SKA: %r..." % (filepath), end="")
        # filepath = 'D:/GOG Games/ToEECo8/data/art/meshes/Monsters/Giants/Hill_Giants/Hill_Giant_2/Zomb_giant_2.SKA'
        ska_filepath = filepath
        skm_filepath = get_skm_filepath(ska_filepath)

        if bpy.ops.object.select_all.poll():
            bpy.ops.object.select_all(action='DESELECT')

        ToEE_data_dir = get_ToEE_data_dir(filepath)
        print("Data dir: %s" % ToEE_data_dir)

        # Read data into intermediate SkmFile and SkaFile objects
        skm_data = SkmFile()
        ska_data = SkaFile()
        progress.enter_substeps(5, "Reading SKM & SKA Files %r..." % skm_filepath)

        # SKM file
        print("Reading SKM data")
        progress.step()
        with open(skm_filepath, 'rb') as file:
            print('Opened file: ', skm_filepath)
            skm_data.read(file)

        # SKA file
        print("Reading SKA File %r..." % ska_filepath)
        progress.step()
        with open(ska_filepath, 'rb') as file:
            print('Opened file: ', ska_filepath)
            if APPLY_ANIMATIONS:
                ska_data.read(file)

        # fixme, make unglobal, clear in case
        object_dictionary.clear()
        object_matrix.clear()

        scn = context.scene  # scn = bpy.data.scenes.active
        SCN = scn
        # SCN_OBJECTS = scn.objects
        # SCN_OBJECTS.selected = []  # de select all

        importedObjects = []  # Fill this list with objects
        progress.enter_substeps(3, "Converting SKM to Blender model...")
        skm_to_blender(skm_data, importedObjects, IMAGE_SEARCH)
        # In the Blender 2.80 API new objects must be linked not to the scene, but to the scene collections:
        view_layer = context.view_layer
        view_layer.update()

        # print(importedObjects)
        if global_matrix:
            print(global_matrix)
            for ob in importedObjects:
                if True:  # ob.parent is None:
                    ob.matrix_world = global_matrix
                else:
                    ob.parent.matrix_world = ob.parent.matrix_world @ global_matrix

        # if True:
        #     for ob in importedObjects:
        #         if ob.type == 'MESH':
        #             me = ob.data
        #             me.transform(ob.matrix_local.inverted())

        for ob in importedObjects:
            ob.select_set(True)

        progress.enter_substeps(1, "Converting SKA to Blender animations...")
        if APPLY_ANIMATIONS:
            ska_to_blender(ska_data, skm_data, importedObjects,
                           USE_INHERIT_ROTATION, USE_LOCAL_LOCATION, APPLY_ANIMATIONS)

        # fixme, make unglobal
        object_dictionary.clear()
        object_matrix.clear()

        view_layer = context.view_layer
        view_layer.update()

        # Select all new objects.
        print(" done in %.4f sec." % (time.perf_counter() - time1))
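# get_skm_filepath() is not shown in this file; presumably it derives the
# companion .SKM path from the .SKA path. A minimal sketch of that kind of
# sibling-extension lookup, purely illustrative (sibling_path is not a real
# helper from this add-on):
import os

def sibling_path(filepath, new_ext):
    base, ext = os.path.splitext(filepath)
    # Mirror the case style of the original extension (.SKA -> .SKM)
    return base + (new_ext.upper() if ext.isupper() else new_ext.lower())

# sibling_path('Zomb_giant_2.SKA', '.skm') -> 'Zomb_giant_2.SKM'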
def _write(context, filepath,
           EXPORT_TRI,  # ok
           EXPORT_EDGES,
           EXPORT_SMOOTH_GROUPS,
           EXPORT_SMOOTH_GROUPS_BITFLAGS,
           EXPORT_NORMALS,  # ok
           EXPORT_UV,  # ok
           EXPORT_MTL,
           EXPORT_APPLY_MODIFIERS,  # ok
           EXPORT_APPLY_MODIFIERS_RENDER,  # ok
           EXPORT_BLEN_OBS,
           EXPORT_GROUP_BY_OB,
           EXPORT_GROUP_BY_MAT,
           EXPORT_KEEP_VERT_ORDER,
           EXPORT_POLYGROUPS,
           EXPORT_CURVE_AS_NURBS,
           EXPORT_SEL_ONLY,  # ok
           EXPORT_ANIMATION,
           EXPORT_GLOBAL_MATRIX,
           EXPORT_PATH_MODE,  # Not used
           ):
    with ProgressReport(context.window_manager) as progress:
        base_name, ext = os.path.splitext(filepath)
        context_name = [base_name, '', '', ext]  # Base name, scene name, frame number, extension

        depsgraph = context.evaluated_depsgraph_get()
        scene = context.scene

        # Exit edit mode before exporting, so current object states are exported properly.
        if bpy.ops.object.mode_set.poll():
            bpy.ops.object.mode_set(mode='OBJECT')

        orig_frame = scene.frame_current

        # Export an animation?
        if EXPORT_ANIMATION:
            scene_frames = range(scene.frame_start, scene.frame_end + 1)  # Up to and including the end frame.
        else:
            scene_frames = [orig_frame]  # Don't export an animation.

        # Loop through all frames in the scene and export.
        progress.enter_substeps(len(scene_frames))
        for frame in scene_frames:
            if EXPORT_ANIMATION:  # Add frame to the filepath.
                context_name[2] = '_%.6d' % frame

            scene.frame_set(frame, subframe=0.0)
            if EXPORT_SEL_ONLY:
                objects = context.selected_objects
            else:
                objects = scene.objects

            full_path = ''.join(context_name)

            # erm... bit of a problem here, this can overwrite files when exporting frames. not too bad.
            # EXPORT THE FILE.
            progress.enter_substeps(1)
            write_file(full_path, objects, depsgraph, scene,
                       EXPORT_TRI,
                       EXPORT_EDGES,
                       EXPORT_SMOOTH_GROUPS,
                       EXPORT_SMOOTH_GROUPS_BITFLAGS,
                       EXPORT_NORMALS,
                       EXPORT_UV,
                       EXPORT_MTL,
                       EXPORT_APPLY_MODIFIERS,
                       EXPORT_APPLY_MODIFIERS_RENDER,
                       EXPORT_BLEN_OBS,
                       EXPORT_GROUP_BY_OB,
                       EXPORT_GROUP_BY_MAT,
                       EXPORT_KEEP_VERT_ORDER,
                       EXPORT_POLYGROUPS,
                       EXPORT_CURVE_AS_NURBS,
                       EXPORT_GLOBAL_MATRIX,
                       EXPORT_PATH_MODE,
                       progress,
                       )
            progress.leave_substeps()

        scene.frame_set(orig_frame, subframe=0.0)
        progress.leave_substeps()
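# A minimal sketch of the frame-numbered filename scheme used above, where
# '_%.6d' is spliced between base name and extension when exporting an
# animation. frame_path is an illustrative name, not part of the exporter.
import os

def frame_path(filepath, frame):
    base, ext = os.path.splitext(filepath)
    return "%s_%.6d%s" % (base, frame, ext)

# frame_path("/tmp/scene.obj", 12) -> "/tmp/scene_000012.obj"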
def write_file( operator, filepath, objects, depsgraph, scene, EXPORT_GLOBAL_MATRIX=None, EXPORT_HAS_CLO=False, progress=ProgressReport(), ): if EXPORT_GLOBAL_MATRIX is None: EXPORT_GLOBAL_MATRIX = Matrix() def bx(bone, axis): blst = [ v_dict[v][axis] for v in v_dict if bone in [ v_dict[v]["B1"], v_dict[v]["B2"], v_dict[v]["B3"], v_dict[v] ["B4"] ] ] return [0] if len(blst) == 0 else blst with ProgressReportSubstep(progress, 2, "GR2 Export path: \'%s\'" % filepath, "GR2 Export Finished") as subprogress1: # Initialize totals, these are updated each object tot_v = tot_tex = tot_nor = 1 # Check the number of objects selected and if there is more than one, cancel the export if len(objects) != 1: operator.report( {'ERROR'}, ("Unable to complete export, %i objects selected. \n" "This addon only supports exporting one object at a time.") % len(objects)) return {'CANCELLED'} # Set-up dictionaries and lists to store or work with the mesh data v_dict = {} f_dict = {} # Collect the mesh data subprogress1.enter_substeps( 1, "Parsing the geometry data via Blender's Data API") for i, ob_main in enumerate(objects): obs = [(ob_main, ob_main.matrix_world)] for ob, ob_mat in obs: with ProgressReportSubstep(subprogress1, 6) as subprogress2: nor_unique_count = tan_unique_count = tex_unique_count = 0 ob_for_convert = ob.original try: me = ob_for_convert.to_mesh() except RuntimeError: me = None if me is None: continue me.transform(EXPORT_GLOBAL_MATRIX @ ob_mat) # If negative scaling, we have to invert the normals... if ob_mat.determinant() < 0.0: me.flip_normals() f_tex = len(me.uv_layers) > 0 tex_layer = me.uv_layers.active.data[:] if f_tex else None me_v = me.vertices[:] # Make our own list so it can be sorted to reduce context switching face_index_pairs = [ (face, index) for index, face in enumerate(me.polygons) ] # Make sure there is something to write if not len(face_index_pairs) + len(me.vertices): # Clean up ob_for_convert.to_mesh_clear() if face_index_pairs: me.calc_normals_split() me_lp = me.loops # Gather the materials used by this model me_mats = me.materials[:] mat_names = [m.name if m else None for m in me_mats] num_mats = len(mat_names) face_mats = {} for face in me.polygons: face_mats.setdefault(face.material_index, []).append(face.index) m_idx = {} for j in range(num_mats): m_idx[j] = len( face_mats[j]) if j in face_mats.keys() else 0 # Mesh Name obnamestring = name_compat(ob.name) # Avoid bad index errors if not me_mats: me_mats = [None] mat_names = [name_compat(None)] subprogress2.step() # 1 # VERTEX COORDS for v in me_v: v_dict[v.index] = {} v_dict[v.index]['X'] = v.co[0] v_dict[v.index]['Y'] = v.co[1] v_dict[v.index]['Z'] = v.co[2] subprogress2.step() # BONE WEIGHTS v_bon = {} boneNames = ob.vertex_groups.keys() if boneNames: # Create a dictionary keyed by face id and listing, for each vertex, and the vertex # groups it belongs to. 
vgroupsMap = [[] for _i in range(len(me_v))] for v_idx, v_ls in enumerate(vgroupsMap): v_bon[v_idx] = {} for g in me_v[v_idx].groups: v_bon[v_idx][g.group] = g.weight bon_names = {g.index: g.name for g in ob.vertex_groups} else: bon_names = {0: obnamestring} num_b = len(bon_names) subprogress2.step() # 2 # NORMALS COORDS v_nor = {} nor_key = nor_val = None normals_to_idx = {} nor_get = normals_to_idx.get loops_to_normals = [0] * len(me_lp) for f, f_index in face_index_pairs: for l_idx in f.loop_indices: nor_key = (me_lp[l_idx].normal[0], me_lp[l_idx].normal[1], me_lp[l_idx].normal[2]) nor_val = nor_get(nor_key) if nor_val is None: nor_val = normals_to_idx[ nor_key] = nor_unique_count v_nor[nor_val] = nor_key nor_unique_count += 1 loops_to_normals[l_idx] = nor_val del normals_to_idx, nor_get, nor_key, nor_val subprogress2.step() # 3 # TANGENTS COORDS ctx = bpy.context.active_object.data ctx.calc_tangents() v_tan = {} v_bts = {} tan_key = tan_val = None tangents_to_idx = {} tan_get = tangents_to_idx.get loops_to_tangents = [0] * len(ctx.loops) # Loop faces for face in ctx.polygons: # Loop over face loop for l_idx in [ctx.loops[i] for i in face.loop_indices]: tan_key = (l_idx.tangent[0], l_idx.tangent[1], l_idx.tangent[2]) bts_key = l_idx.bitangent_sign tan_val = tan_get(tan_key) if tan_val is None: tan_val = tangents_to_idx[ tan_key] = tan_unique_count v_tan[tan_val] = tan_key v_bts[tan_val] = bts_key tan_unique_count += 1 loops_to_tangents[l_idx.index] = tan_val del tangents_to_idx, tan_get, tan_key, tan_val subprogress2.step() # 4 # UV TEXTURE COORDS v_tex = {} if f_tex: # In case removing some of these dont get defined. tex = f_index = tex_index = tex_key = tex_val = tex_ls = None tex_face_mapping = [None] * len(face_index_pairs) tex_dict = {} tex_get = tex_dict.get for f, f_index in face_index_pairs: tex_ls = tex_face_mapping[f_index] = [] for tex_index, l_index in enumerate( f.loop_indices): tex = tex_layer[l_index].uv # Include the vertex index in the key so we don't share UV's between vertices tex_key = me_lp[l_index].vertex_index, (tex[0], tex[1]) tex_val = tex_get(tex_key) if tex_val is None: tex_val = tex_dict[ tex_key] = tex_unique_count v_tex[tex_val] = tex_key tex_unique_count += 1 tex_ls.append(tex_val) del tex_dict, tex, f_index, tex_index, tex_ls, tex_get, tex_key, tex_val # Only need tex_unique_count and uv_face_mapping subprogress2.step() # 5 # FACES f_lst = [] for f, f_index in face_index_pairs: f_v = [(vi, me_v[v_idx], l_idx) for vi, (v_idx, l_idx) in enumerate( zip(f.vertices, f.loop_indices))] for vi, v, li in f_v: wn = 1 if boneNames: if v.index in v_bon: for x, y in sorted(v_bon[v.index].items(), key=lambda xy: (xy[1], xy[0]), reverse=True): v_dict[v.index]["W" + str(wn)] = y wn += 1 while wn <= 4: v_dict[v.index]["W" + str(wn)] = float(0.0) wn += 1 bn = 1 if boneNames: for x, y in sorted(v_bon[v.index].items(), key=lambda xy: (xy[1], xy[0]), reverse=True): v_dict[v.index]["B" + str(bn)] = x bn += 1 while bn <= 4: v_dict[v.index]["B" + str(bn)] = v_dict[v.index]["B1"] if \ "B1" in v_dict[v.index].keys() else int(0) bn += 1 v_dict[v.index]["Nx"] = v_nor[ loops_to_normals[li]][0] v_dict[v.index]["Ny"] = v_nor[ loops_to_normals[li]][1] v_dict[v.index]["Nz"] = v_nor[ loops_to_normals[li]][2] v_dict[v.index]["Ns"] = v_bts[ loops_to_tangents[li]] v_dict[v.index]["Tx"] = v_tan[ loops_to_tangents[li]][0] v_dict[v.index]["Ty"] = v_tan[ loops_to_tangents[li]][1] v_dict[v.index]["Tz"] = v_tan[ loops_to_tangents[li]][2] v_dict[v.index]["Ts"] = int( 0 - 
v_bts[loops_to_tangents[li]]) v_dict[v.index]["U"] = v_tex[ tex_face_mapping[f_index][vi]][1][0] v_dict[v.index]["V"] = v_tex[ tex_face_mapping[f_index][vi]][1][1] f_lst.append(v.index) f_dict[f_index] = list(f_lst) f_lst.clear() del v_bon, v_nor, v_tan, v_bts, v_tex subprogress2.step() # 6 # Make the indices global rather then per mesh tot_v += len(me_v) tot_tex += tex_unique_count tot_nor += nor_unique_count # List of keys in v_dict if not boneNames: keys = [ "Nx", "Ny", "Nz", "Ns", "Tx", "Ty", "Tz", "Ts", "U", "V" ] else: keys = [ "W1", "W2", "W3", "W4", "B1", "B2", "B3", "B4", "Nx", "Ny", "Nz", "Ns", "Tx", "Ty", "Tz", "Ts", "U", "V" ] # Totals num_v = len(v_dict) num_f = len(f_dict) min_x = min([[iv for ik, iv in ov.items()][0] for ok, ov in v_dict.items()]) min_y = min([[iv for ik, iv in ov.items()][1] for ok, ov in v_dict.items()]) min_z = min([[iv for ik, iv in ov.items()][2] for ok, ov in v_dict.items()]) max_x = max([[iv for ik, iv in ov.items()][0] for ok, ov in v_dict.items()]) max_y = max([[iv for ik, iv in ov.items()][1] for ok, ov in v_dict.items()]) max_z = max([[iv for ik, iv in ov.items()][2] for ok, ov in v_dict.items()]) # clean up ob_for_convert.to_mesh_clear() # If the file doesn't exist, create it, if it does, clear it subprogress1.enter_substeps( 1, "Parsing complete, writing the geometry data to file.") open(filepath, "wb") with open(filepath, "rb+") as f: fw = f.write with ProgressReportSubstep(subprogress1, 6) as subprogress2: # Pos 0x00 # Write the MAGIC bytes fw(b'GAWB') # Write the Major Version fw(uint32(4)) # Write the Minor Version fw(uint32(3)) # Write the BNRY / LTLE offset off_0C0 = f.tell() fw(uint32(0)) # We'll populate this later subprogress2.step() # 1 # Pos 0x10 # Write the number of cached offsets off_010 = f.tell() fw(uint32(0)) # We'll populate this later # Write the type of GR2 file fw(uint32(1)) if EXPORT_HAS_CLO else fw(uint32(0)) # Write the number of meshes fw(uint16(1)) # Write the number of materials fw(uint16(num_mats)) # If this is a skeleton file, how many bones are there? fw(uint16(0)) # 0 - This isn't a skeleton file # Write the number of attachments # TODO Figure out how to handle attachments, 0 for now. fw(uint16(0)) # Write 16 x 00 bytes fw(uint32(0) + uint32(0) + uint32(0) + uint32(0)) subprogress2.step() # 2 # Pos 0x30 fw(float32(min_x)) # Min X fw(float32(min_y)) # Min Y fw(float32(min_z)) # Min Z fw(float32(1)) # Always 00 00 80 3F fw(float32(max_x)) # Max X fw(float32(max_y)) # Max Y fw(float32(max_z)) # Max Z fw(float32(1)) # Always 00 00 80 3F subprogress2.step() # 3 # Pos 0x50 off_050 = f.tell() # Write the offset of the cached offsets section fw(uint32(0)) # We'll populate this later # Write the offset of the mesh header off_054 = f.tell() fw(uint32(112)) # Write the offset of the material names offsets section off_058 = f.tell() fw(uint32(112 + 48 + (num_mats * 48))) # Write 4 x 00 fw(uint32(0)) # Write the offset of the attachments section # TODO Figure out how to handle attachments, 0 for now. 
fw(uint32(0)) # Write 00 byte until pos / 16 = int fw(zero_padding(f.tell())) subprogress2.step() # 4 # Pos 0x70 off_070 = f.tell() # Write the mesh headers # Write the offset of the mesh name fw(uint32(0)) # We'll populate this later # Write bitFlag 1, 0 unles bones = 0, then 128 fw(uint32(0)) if boneNames else fw(uint32(128)) # Write the number of pieces that make-up the object fw(uint16(num_mats)) # Write the number of bones used by this mesh fw(uint16(num_b)) # Write bitFlag 2 fw(uint8(47)) fw(uint8(1)) # Write the number of bytes used for each vertex fw(uint16(32 if boneNames else 24)) # Write the number of vertices fw(uint32(len(me_v))) # Write the number of indices (3 x number of faces) fw(uint32(num_f * 3)) # Write the offset of the mesh vertices section fw(uint32(160 + (num_mats * 48) + calc_padding(num_mats * 4))) # Write the offset of the mesh piece headers fw(uint32(160)) # Write the offset of the mesh faces section fw( uint32(160 + (num_mats * 48) + calc_padding(num_mats * 4) + (num_v * (32 if boneNames else 24)))) # Write the offset of the mesh bone section fw( uint32(160 + (num_mats * 48) + calc_padding(num_mats * 4) + (num_v * (32 if boneNames else 24)) + calc_padding(num_f * 6))) # Write 00 byte until pos / 16 = int fw(zero_padding(f.tell())) subprogress2.step() # 5 # Write the mesh piece headers based on materials off_piece = f.tell() for piece in range(num_mats): # Write the starting offset for the faces of this piece if piece == 0: fw(uint32(0)) else: i = piece i -= 1 fw(uint32(m_idx[i])) # Write the number of faces used by this piece / material fw(uint32(m_idx[piece])) # Write the id of the material used by this piece if num_mats < 0: # If this piece has no material return int32: -1 fw(uint32(4294967295)) fw(uint32(4294967295)) else: # Otherwise return the material id fw(uint32(piece)) fw(uint32(piece)) # Write the bounding box for this piece # TODO Figure out how to do this, for now use the global bounding box and hope for the best. fw(float32(min_x)) # Min X fw(float32(min_y)) # Min Y fw(float32(min_z)) # Min Z fw(float32(1)) # Always 00 00 80 3F fw(float32(max_x)) # Max X fw(float32(max_y)) # Max Y fw(float32(max_z)) # Max Z fw(float32(1)) # Always 00 00 80 3F subprogress2.step() # 6 # Write the offset for the name of each material off_mat_names = f.tell() for mat in range(num_mats): fw(uint32(0)) # We'll populate this later # Write 00 byte until pos / 16 = int fw(zero_padding(f.tell())) # Write the attachments # TODO Figure out how to handle attachments. subprogress2.step() # 7 # Write the vertices off_verts = f.tell() for v in v_dict: fw(float32(v_dict[v]["X"])) # X fw(float32(v_dict[v]["Y"])) # Y fw(float32(v_dict[v]["Z"])) # Z if all(key in v_dict[v] for key in keys): if boneNames: # Only dynamic models have bones fw(uint8(int(v_dict[v]["W1"] * 255))) fw(uint8(int(v_dict[v]["W2"] * 255))) fw(uint8(int(v_dict[v]["W3"] * 255))) fw(uint8(int(v_dict[v]["W4"] * 255))) fw(uint8(v_dict[v]["B1"])) fw(uint8(v_dict[v]["B2"])) fw(uint8(v_dict[v]["B3"])) fw(uint8(v_dict[v]["B4"])) fw(float8(v_dict[v]["Nx"])) # X fw(float8(v_dict[v]["Ny"])) # Y fw(float8(v_dict[v]["Nz"])) # Z fw(float8(v_dict[v]["Ns"])) # Binormal sign? fw(float8(v_dict[v]["Tx"])) # X fw(float8(v_dict[v]["Ty"])) # Y fw(float8(v_dict[v]["Tz"])) # Z fw(float8(v_dict[v]["Ts"])) # Bitangent sign? fw(float16(v_dict[v]["U"])) # U fw(float16(1 - v_dict[v]["V"])) # V else: fw(bytes(20)) # Just in case someone is dumb! 
subprogress2.step() # 8 # Write the face indices off_faces = f.tell() for face in range(num_f): fw(uint16(f_dict[face][0])) # v1 fw(uint16(f_dict[face][1])) # v2 fw(uint16(f_dict[face][2])) # v3 fw(zero_padding(f.tell())) subprogress2.step() # 9 # Write the bones off_bones = f.tell() if not boneNames: # This is a static model, which only have 1 root/default bone fw(uint32(calc_padding(off_bones + (num_b * 28)))) fw(float32(min_x)) # Min X fw(float32(min_y)) # Min Y fw(float32(min_z)) # Min Z fw(float32(max_x)) # Max X fw(float32(max_y)) # Max Y fw(float32(max_z)) # Max Z else: # This is a dynamic model, which have 1 or more skeleton bones bn = 0 for bo in range(num_b): if bo == 0: fw( uint32( calc_padding(off_bones + (num_b * 28)) + int(len(obnamestring) + 1) + len(''.join(mat_names)) + (1 * num_mats))) else: fw( uint32( calc_padding(off_bones + (num_b * 28)) + int(len(obnamestring) + 1) + len(''.join(mat_names)) + (1 * num_mats) + bn)) bn += int(len(bon_names[bo]) + 1) fw(float32(min(bx(bo, "X")))) # Min X Coord fw(float32(min(bx(bo, "Y")))) # Min Y Coord fw(float32(min(bx(bo, "Z")))) # Min Z Coord fw(float32(max(bx(bo, "X")))) # Max X Coord fw(float32(max(bx(bo, "Y")))) # Max Y Coord fw(float32(max(bx(bo, "Z")))) # Max Z Coord fw(zero_padding(f.tell())) subprogress2.step() # 10 # Write the strings off_mesh_string = f.tell() fw(bytes(obnamestring, 'utf-8')) # Mesh Name fw(uint8(0)) # Terminate with 1 x 00 byte off_mat_strings = f.tell() for m in mat_names: fw(bytes(m, 'utf-8')) # Material name fw(uint8(0)) # Terminate with 1 x 00 byte off_bone_strings = f.tell() if boneNames: for b in bon_names: fw(bytes(bon_names[b], 'utf-8')) # Bone name fw(uint8(0)) # Terminate with 1 x 00 byte fw(zero_padding(f.tell())) subprogress2.step() # 11 # Write the cached offsets off_cache = f.tell() fw(uint32(off_050)) fw(uint32(f.tell() - 4)) fw(uint32(off_054)) fw(uint32(off_070)) fw(uint32(off_058)) fw(uint32(off_mat_names)) fw(uint32(off_070)) fw(uint32(off_mesh_string)) fw(uint32(off_070 + 24)) fw(uint32(off_verts)) fw(uint32(off_070 + 28)) fw(uint32(off_piece)) fw(uint32(off_070 + 32)) fw(uint32(off_faces)) fw(uint32(off_070 + 36)) fw(uint32(off_bones)) m_len = 0 for m in range(num_mats): # First we write the offset address fw(uint32(off_mat_names + (4 * m))) # Second we write the value if m == 0: fw(uint32(off_mat_strings)) else: m_len += (len(mat_names[m - 1]) + 1) fw(uint32(off_mat_strings + m_len)) b_len = 0 for b in range(num_b): # First we write the offset address fw(uint32(off_bones + (28 * b))) # Second we write the value if b == 0: fw(uint32(off_bone_strings)) else: b_len += (len(bon_names[b - 1]) + 1) fw(uint32(off_bone_strings + b_len)) fw(zero_padding(f.tell())) subprogress2.step() # 12 # Write the BNRY/LTLE section as 32 x 00 byte off_BNRY = f.tell() fw(uint32(0) + uint32(0) + uint32(0) + uint32(0)) fw(uint32(0) + uint32(0) + uint32(0) + uint32(0)) subprogress2.step() # 13 # Write the bounding box of each mesh fw(float32(min_x)) # Min X fw(float32(min_y)) # Min Y fw(float32(min_z)) # Min Z fw(float32(max_x)) # Max X fw(float32(max_y)) # Max Y fw(float32(max_z)) # Max Z fw(float32(0)) # 4 x 00 byte for padding subprogress2.step() # 14 fw(b'EGCD') fw(uint32(5)) fw(uint32(off_BNRY)) # offset of the BNRY/LTLE section subprogress2.step() # 15 # Go back and write offsets f.seek(off_0C0) fw(uint32(off_BNRY)) f.seek(off_010) fw(uint32(7 + (1 if num_mats > 0 else 0) + num_mats + num_b)) f.seek(off_050) fw(uint32(off_cache)) f.seek(off_070) fw(uint32(off_mesh_string)) f.seek(off_mat_names) mn = 0 for 
m in range(num_mats): if m == 0: fw(uint32(off_mat_strings)) else: fw(uint32(off_mat_strings + mn)) mn += int(len(mat_names[m]) + 1)
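# The GR2 writer above repeatedly pads sections with zero bytes "until
# pos / 16 = int". Assuming zero_padding() and calc_padding() implement
# 16-byte alignment (their source is not shown here), an equivalent sketch:
def calc_padding_sketch(pos, align=16):
    """Smallest multiple of `align` that is >= pos."""
    return (pos + align - 1) // align * align

def zero_padding_sketch(pos, align=16):
    """Zero bytes needed to advance pos to the next aligned boundary."""
    return b"\x00" * (calc_padding_sketch(pos, align) - pos)

# zero_padding_sketch(0x75) -> b'\x00' * 11, moving the cursor to 0x80.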
def write_loop(context, filepath, ARMATURE, ANIMATION, PHYSICS, MATERIALS, BINARY) -> None:
    """ Func for looping write calls + setting up env for writing """
    with ProgressReport(context.window_manager) as progress:
        base_name, ext = os.path.splitext(filepath)
        # base_name = name_convert(base_name) << this 'ere probably causes problems with windows.
        full_path = [base_name, '', '', ext]  # Base name, scene name, frame number, extension

        bpy_depsgraph = context.depsgraph
        bpy_scene = context.scene

        # Dirty (!) fix for the 'object without mesh' problem
        tmp_bo = list()
        bpy_objects = bpy_scene.objects
        for tmp_obj in bpy_objects:
            print(str(type(tmp_obj.data)))
            if str(type(tmp_obj.data)) == "<class 'bpy_types.Mesh'>":
                tmp_bo.append(tmp_obj)
        tmp_obj = None
        bpy_objects = tmp_bo
        tmp_bo = None

        if not len(bpy_objects):
            raise Exception("There is nothing to export.")

        # Exit edit mode before exporting, so current object states are exported properly.
        if bpy.ops.object.mode_set.poll():
            bpy.ops.object.mode_set(mode='OBJECT')

        # We only want to export one frame currently, so:
        frame = bpy_scene.frame_current
        bpy_scene.frame_set(frame, subframe=0.0)

        #
        # ## pull data from bpy, create file paths, etc. ##
        #
        og_scene = ogre_types.Scene(bpy_scene)
        og_meshes = dict()
        og_materials = dict()
        # og_armatures = dict()
        path_scene = ""
        path_mesh = ""

        bpy_exported_meshes = dict()
        og_exported_meshes = dict()

        # progress: number of objects to export
        progress.enter_substeps(len(bpy_objects))

        # iterate through our objects
        for obj in bpy_objects:
            # !! "dependencies"
            mesh_name = str(obj.data.name)
            material = obj.active_material
            if material is None:
                try:
                    material = obj.material_slots.values()[0].material
                except IndexError:
                    # 'except ReferenceError(...)' cannot catch anything (it is
                    # an instance, not a class); an empty slot list raises
                    # IndexError, so report that case instead.
                    raise Exception("Material not found. Do your objects have materials that use nodes?")
            material_name = material.name
            # skeleton_name =

            # !! calculate paths
            # basically adding the parts of full_path together
            path_scene = full_path[0] + full_path[1] + full_path[2] + full_path[3]
            path_mesh = full_path[0] + full_path[1] + full_path[2] + "_" + mesh_name + ".mesh"
            path_material = full_path[0] + full_path[1] + full_path[2] + ".material"
            # path_armature = full_path[0] + full_path[1] + full_path[2] + "_" + skeleton_name + ".skeleton"

            # !! Mesh
            # MESH EXPORT GUARD
            tmp_mesh = obj.to_mesh(bpy_depsgraph, True)
            cur_mesh = None
            # for each mesh in the exported meshes (and materials)
            for key_path_mesh in bpy_exported_meshes:
                # we get the (already done) mesh data and material name (unique ID)
                cmp_mesh, cmp_mat = bpy_exported_meshes[key_path_mesh]
                # if we find that both match the current object's (dupli obj)
                if (cmp_mesh.unit_test_compare(mesh=tmp_mesh) == "Same") and (cmp_mat == material_name):
                    # then we redirect to said (already exported) mesh data
                    path_mesh = key_path_mesh
                    cur_mesh = og_exported_meshes[key_path_mesh]
                    # modify mesh_name here to avoid problems with nodes in .scene
                    junk, mesh_name = os.path.split(path_mesh)
            # if we DIDN'T find a suitable (duplicate) datablock
            if cur_mesh is None:
                # we put the current contender into the 'exported' container (along with its material)
                bpy_exported_meshes[path_mesh] = tmp_mesh, material_name
                # then get its data. we need data.
                og_exported_meshes[path_mesh] = cur_mesh = ogre_types.Mesh(obj, bpy_depsgraph, ARMATURE, ANIMATION)
                # modify mesh_name here to avoid problems with nodes in .scene
                junk, mesh_name = os.path.split(path_mesh)

            # !! Material
            if MATERIALS:
                # materials only guard against one material twice as they have different logic than meshes.
# which is kinda bs. if material_name not in og_materials.keys(): og_materials[material_name] = ogre_types.Material(material) # !! put collected stuff into lists for writing cur_node = ogre_types.Node(obj, meshfile=mesh_name, PHYSICS=PHYSICS) og_meshes[path_mesh] = cur_mesh og_scene.add_node(cur_node) # progress: step per each object progress.step() # progress: first batch done (collection of data) progress.leave_substeps() # # ## create files, serialize xml, ect ## # # initialize the xml-builder for scene xn_scene = zxml.XMLnode() # and the serializer seri = zxml.XMLserializer() # ## .scene ## xn_scene.append( "scene", { "export_time": og_scene.export_time, "exported_by": og_scene.exported_by, "formatVersion": og_scene.formatVersion, "previous_export_time": og_scene.previous_export_time }) xn_scene.add("nodes", {}) first = True for node in og_scene.nodes: if first: xn_scene.add("node", {"name": node.name}) first = False else: xn_scene.append("node", {"name": node.name}) xn_scene.add("position", node.posd) xn_scene.append("rotation", node.quad) xn_scene.append("scale", node.scaled) xn_scene.append("game", {}) xn_scene.add("sensors", {}) xn_scene.append("actuators", {}) xn_scene.pointer_up() xn_scene.append("entity", node.ent_dict) xn_scene.pointer_up() xn_scene.pointer_up() if MATERIALS: xn_scene.add("externals", {}) xn_scene.add("item", {"type": "material"}) xn_scene.add("file", {"name": path_material}) xn_scene.pointer_up() xn_scene.pointer_up() xn_scene.pointer_up() xn_scene.add("environment", {}) col = og_scene.colourAmbient xn_scene.add("colourAmbient", {"r": col[0], "g": col[1], "b": col[2]}) col = og_scene.colourBackground xn_scene.append("colourBackground", { "r": col[0], "g": col[1], "b": col[2] }) col = og_scene.colourDiffuse xn_scene.append("colourDiffuse", { "r": col[0], "g": col[1], "b": col[2] }) seri.write_file(path_scene, graph=xn_scene.graph) # ## .material ## if MATERIALS: # progress: cycle thru materials progress.enter_substeps(len(og_materials)) mn_mat = zmat.MATnode() seri_mat = zmat.MATserializer() mn_mat.bracket("material", "_missing_material_") mn_mat.entry("receive_shadows", "off") mn_mat.bracket("technique", "") mn_mat.bracket("pass", "") mn_mat.entry("ambient", "0.1 0.1 0.1 1.0") mn_mat.entry("diffuse", "0.8 0.0 0.0 1.0") mn_mat.entry("specular", "0.5 0.5 0.5 1.0 12.5") mn_mat.entry("emissive", "0.3 0.3 0.3 1.0") mn_mat.pointer_reset() for material_name in og_materials: og_material = og_materials[material_name] mn_mat.bracket("material", og_material.name) mn_mat.entry("receive_shadows", og_material.receive_shadows) mn_mat.bracket("technique", "") mn_mat.bracket("pass", og_material.name) mn_mat.entry("ambient", og_material.ambient) mn_mat.entry("diffuse", og_material.diffuse) mn_mat.entry("specular", og_material.specular) mn_mat.entry("emissive", og_material.emissive) # like hell am I gonna write down all that once again for et in og_material.pass_dict: at = og_material.pass_dict[et] mn_mat.entry(et, at) # fk no. 
if og_material.tu_dict["texture"] is not None: path_img = og_material.tu_dict["texture"] tmp = bpy.path.basename(path_img) og_material.tu_dict["texture"] = tmp src = bpy.path.abspath(path_img) dst = os.path.join(os.path.dirname(path_material), tmp) if src != dst: shutil.copyfile(src, dst) mn_mat.bracket("texture_unit", "") for et in og_material.tu_dict: at = og_material.tu_dict[et] mn_mat.entry(et, at) mn_mat.pointer_reset() # progress: step per each mat progress.step() # progress: done with mats progress.leave_substeps() seri_mat.write_file(path_material, mn_mat.graph) # ## .xml.mesh, .mesh ## # progress: go for meshes if BINARY: # binary takes two steps progress.enter_substeps(len(og_meshes) * 2) else: # otherwise we just have one progress.enter_substeps(len(og_meshes)) for path_mesh in og_meshes: og_mesh = og_meshes[path_mesh] # initialize xml-builder xn_mesh = zxml.XMLnode() # Roots xn_mesh.append("mesh", {}) xn_mesh.add("sharedgeometry", {"vertexcount": og_mesh.vertexcount}) xn_mesh.add("vertexbuffer", og_mesh.attr_dict) # Vertices first = True for vert in og_mesh.vertexlist: if first: xn_mesh.add("vertex", {}) first = False else: xn_mesh.append("vertex", {}) xn_mesh.add("position", vert.posd) xn_mesh.append("normal", vert.nord) xn_mesh.append("texcoord", vert.uvd) xn_mesh.append("tangent", vert.tand) xn_mesh.append("binormal", vert.bind) xn_mesh.pointer_up() xn_mesh.pointer_up() xn_mesh.pointer_up() xn_mesh.append("submeshes", {}) xn_mesh.add( "submesh", { "material": og_mesh.submesh_material, "operationtype": "triangle_list", "use32bitindexes": "False", "usesharedvertices": "true" }) xn_mesh.add("faces", {"count": len(og_mesh.submesh_list)}) # Tris first = True for tri in og_mesh.submesh_list: if first: xn_mesh.add("face", { "v1": tri[0], "v2": tri[1], "v3": tri[2] }) first = False else: xn_mesh.append("face", { "v1": tri[0], "v2": tri[1], "v3": tri[2] }) xn_mesh.pointer_up() xn_mesh.pointer_up() xn_mesh.pointer_up() xn_mesh.append("submeshnames", {}) xn_mesh.add("submesh", { "index": 0, "name": og_mesh.submesh_material }) xn_mesh.pointer_up() seri.write_file(path_mesh + ".xml", graph=xn_mesh.graph) # progress: done with a mesh progress.step() # ## .mesh ## # Binary creation: if BINARY: # get current path fn = inspect.getframeinfo(inspect.currentframe()).filename path = os.path.dirname(os.path.abspath(fn)) # construct command oxt_cmd = path + "/ogrexmltools/OgreXMLConverter.exe -d3d -q " + path_mesh + ".xml " + path_mesh # print("XML_Converter cmd: " + oxt_cmd) # execute command oxt_proc = subprocess.Popen(oxt_cmd.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE) # print converter output out, err = oxt_proc.communicate() out = out.decode() err = err.decode() print(out) print(err) # progress: done with a mesh's binary progress.step() # progress: done with meshes progress.leave_substeps()
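# Building the converter command with string concatenation and .split(), as
# above, breaks when any path contains spaces. A sketch that passes an
# argument list instead; the converter path and the -d3d/-q flags simply
# mirror the call above, and run_xml_converter is an illustrative name.
import subprocess

def run_xml_converter(converter, xml_path, mesh_path):
    proc = subprocess.run(
        [converter, "-d3d", "-q", xml_path, mesh_path],
        stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    print(proc.stdout.decode())
    print(proc.stderr.decode())
    return proc.returncode

# Usage (hypothetical):
# run_xml_converter("./ogrexmltools/OgreXMLConverter.exe",
#                   path_mesh + ".xml", path_mesh)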