def write(filename, objects,\ EXPORT_NORMALS_HQ=False,\ EXPORT_MTL=True, EXPORT_COPY_IMAGES=False,\ EXPORT_APPLY_MODIFIERS=True, EXPORT_BLEN_OBS=True,\ EXPORT_GROUP_BY_OB=False): ''' Basic write function. The context and options must be alredy set This can be accessed externaly eg. write( 'c:\\test\\foobar.obj', Blender.Object.GetSelected() ) # Using default options. ''' def veckey3d(v): return round(v.x, 6), round(v.y, 6), round(v.z, 6) def veckey2d(v): return round(v.x, 6), round(v.y, 6) print 'WTF Export path: "%s"' % filename temp_mesh_name = '~tmp-mesh' time1 = sys.time() scn = Scene.GetCurrent() file = open(filename, "w") file.write('<?xml version="1.0"?>\n') file.write('<OPEN_TRACK>\n') # Write Header # file.write('\n<!--\n' # + ' Blender3D v%s WTF File: %s\n' % (Blender.Get('version'), Blender.Get('filename').split('/')[-1].split('\\')[-1] ) # + ' www.blender3d.org\n' # + '-->\n\n') # Get the container mesh. - used for applying modifiers and non mesh objects. containerMesh = meshName = tempMesh = None for meshName in Blender.NMesh.GetNames(): if meshName.startswith(temp_mesh_name): tempMesh = Mesh.Get(meshName) if not tempMesh.users: containerMesh = tempMesh if not containerMesh: containerMesh = Mesh.New(temp_mesh_name) del meshName del tempMesh # Initialize totals, these are updated each object totverts = totuvco = totno = 0 face_vert_index = 0 globalNormals = {} file.write('\n<library_objects>\n') # Get all meshs for ob_main in objects: obnamestring = fixName(ob_main.name) file.write('\t<object id="%s">\n' % obnamestring) # Write Object name for ob, ob_mat in BPyObject.getDerivedObjects(ob_main): # Will work for non meshes now! :) # getMeshFromObject(ob, container_mesh=None, apply_modifiers=True, vgroups=True, scn=None) me = BPyMesh.getMeshFromObject(ob, containerMesh, EXPORT_APPLY_MODIFIERS, False, scn) if not me: file.write('\t\t<loc>%.6f %.6f %.6f</loc>\n' % tuple(ob_main.loc)) # Write Object name file.write('\t\t<rot>%.6f %.6f %.6f</rot>\n' % tuple(ob_main.rot)) # Write Object name continue faceuv = me.faceUV # We have a valid mesh if me.faces: # Add a dummy object to it. has_quads = False for f in me.faces: if len(f) == 4: has_quads = True break if has_quads: oldmode = Mesh.Mode() Mesh.Mode(Mesh.SelectModes['FACE']) me.sel = True tempob = scn.objects.new(me) me.quadToTriangle(0) # more=0 shortest length oldmode = Mesh.Mode(oldmode) scn.objects.unlink(tempob) Mesh.Mode(oldmode) # Make our own list so it can be sorted to reduce context switching faces = [f for f in me.faces] edges = me.edges if not (len(faces) + len(edges) + len(me.verts)): # Make sure there is somthing to write continue # dont bother with this mesh. me.transform(ob_mat) # High Quality Normals if faces: if EXPORT_NORMALS_HQ: BPyMesh.meshCalcNormals(me) else: # transforming normals is incorrect # when the matrix is scaled, # better to recalculate them me.calcNormals() # # Crash Blender #materials = me.getMaterials(1) # 1 == will return None in the list. materials = me.materials materialNames = [] materialItems = materials[:] if materials: for mat in materials: if mat: # !=None materialNames.append(mat.name) else: materialNames.append(None) # Cant use LC because some materials are None. # materialNames = map(lambda mat: mat.name, materials) # Bug Blender, dosent account for null materials, still broken. # Possible there null materials, will mess up indicies # but at least it will export, wait until Blender gets fixed. 
materialNames.extend((16 - len(materialNames)) * [None]) materialItems.extend((16 - len(materialItems)) * [None]) # Sort by Material, then images # so we dont over context switch in the obj file. if faceuv: try: faces.sort(key=lambda a: (a.mat, a.image, a.smooth)) except: faces.sort(lambda a, b: cmp((a.mat, a.image, a.smooth), (b.mat, b.image, b.smooth))) elif len(materials) > 1: try: faces.sort(key=lambda a: (a.mat, a.smooth)) except: faces.sort(lambda a, b: cmp((a.mat, a.smooth), (b.mat, b.smooth))) else: # no materials try: faces.sort(key=lambda a: a.smooth) except: faces.sort(lambda a, b: cmp(a.smooth, b.smooth)) # Set the default mat to no material and no image. contextMat = ( 0, 0 ) # Can never be this, so we will label a new material teh first chance we get. contextSmooth = None # Will either be true or false, set bad to force initialization switch. if len(faces) > 0: file.write('\t\t<mesh>\n') else: file.write('\t\t<curve>\n') vertname = "%s-Vertices" % obnamestring vertarrayname = "%s-Array" % vertname normname = "%s-Normals" % obnamestring normarrayname = "%s-Array" % normname texname = "%s-TexCoord" % obnamestring texarrayname = "%s-Array" % texname # Vert file.write('\t\t\t<float_array count="%d" id="%s">' % (len(me.verts), vertarrayname)) for v in me.verts: file.write(' %.6f %.6f %.6f' % tuple(v.co)) file.write('</float_array>\n') file.write('\t\t\t<vertices id="%s" source="#%s" />\n' % (vertname, vertarrayname)) # UV if faceuv: file.write('\t\t\t<float_array id="%s">' % texarrayname) uv_face_mapping = [[0, 0, 0, 0] for f in faces ] # a bit of a waste for tri's :/ uv_dict = {} # could use a set() here for f_index, f in enumerate(faces): for uv_index, uv in enumerate(f.uv): uvkey = veckey2d(uv) try: uv_face_mapping[f_index][uv_index] = uv_dict[uvkey] except: uv_face_mapping[f_index][uv_index] = uv_dict[ uvkey] = len(uv_dict) file.write(' %.6f %.6f' % tuple(uv)) uv_unique_count = len(uv_dict) del uv, uvkey, uv_dict, f_index, uv_index # Only need uv_unique_count and uv_face_mapping file.write('</float_array>\n') file.write('\t\t\t<texcoords id="%s" source="#%s" />\n' % (texname, texarrayname)) # NORMAL, Smooth/Non smoothed. if len(faces) > 0: file.write('\t\t\t<float_array id="%s">' % normarrayname) for f in faces: if f.smooth: for v in f: noKey = veckey3d(v.no) if not globalNormals.has_key(noKey): globalNormals[noKey] = totno totno += 1 file.write(' %.6f %.6f %.6f' % noKey) else: # Hard, 1 normal from the face. noKey = veckey3d(f.no) if not globalNormals.has_key(noKey): globalNormals[noKey] = totno totno += 1 file.write(' %.6f %.6f %.6f' % noKey) file.write('</float_array>\n') file.write('\t\t\t<normals id="%s" source="#%s" />\n' % (normname, normarrayname)) if not faceuv: f_image = None in_triangles = False for f_index, f in enumerate(faces): f_v = f.v f_smooth = f.smooth f_mat = min(f.mat, len(materialNames) - 1) if faceuv: f_image = f.image f_uv = f.uv # MAKE KEY if faceuv and f_image: # Object is always true. key = materialNames[f_mat], f_image.name else: key = materialNames[ f_mat], None # No image, use None instead. # CHECK FOR CONTEXT SWITCH if key == contextMat: pass # Context alredy switched, dont do anythoing else: if key[0] == None and key[1] == None: # Write a null material, since we know the context has changed. 
if in_triangles: file.write('</p>\n') file.write('\t\t\t</triangles>\n') file.write('\t\t\t<triangles id="%s_%s">\n' % (fixName(ob.name), fixName(ob.getData(1)))) in_triangles = True else: mat_data = MTL_DICT.get(key) if not mat_data: # First add to global dict so we can export to mtl # Then write mtl # Make a new names from the mat and image name, # converting any spaces to underscores with fixName. # If none image dont bother adding it to the name if key[1] == None: mat_data = MTL_DICT[key] = ('%s' % fixName( key[0])), materialItems[f_mat], f_image else: mat_data = MTL_DICT[key] = ( '%s_%s' % (fixName(key[0]), fixName(key[1])) ), materialItems[f_mat], f_image if in_triangles: file.write('</p>\n') file.write('\t\t\t</triangles>\n') file.write( '\t\t\t<triangles id="%s_%s_%s" material="#%s">\n' % (fixName(ob.name), fixName( ob.getData(1)), mat_data[0], mat_data[0])) in_triangles = True file.write( '\t\t\t\t<input offset="0" semantic="VERTEX" source="#%s" />\n' % vertname) file.write( '\t\t\t\t<input offset="1" semantic="NORMAL" source="#%s" />\n' % normname) if faceuv: file.write( '\t\t\t\t<input offset="2" semantic="TEXCOORD" source="#%s" />\n' % texname) file.write('\t\t\t\t<p>') contextMat = key if f_smooth != contextSmooth: if f_smooth: # on now off # file.write('s 1\n') contextSmooth = f_smooth else: # was off now on # file.write('s off\n') contextSmooth = f_smooth if faceuv: if f_smooth: # Smoothed, use vertex normals for vi, v in enumerate(f_v): file.write( ' %d %d %d' % (\ v.index+totverts,\ totuvco + uv_face_mapping[f_index][vi],\ globalNormals[ veckey3d(v.no) ])) # vert, uv, normal else: # No smoothing, face normals no = globalNormals[veckey3d(f.no)] for vi, v in enumerate(f_v): file.write( ' %d %d %d' % (\ v.index+totverts,\ totuvco + uv_face_mapping[f_index][vi],\ no)) # vert, uv, normal face_vert_index += len(f_v) else: # No UV's if f_smooth: # Smoothed, use vertex normals for v in f_v: file.write( ' %d %d' % (\ v.index+totverts,\ globalNormals[ veckey3d(v.no) ])) else: # No smoothing, face normals no = globalNormals[veckey3d(f.no)] for v in f_v: file.write( ' %d %d' % (\ v.index+totverts,\ no)) if in_triangles: file.write('</p>\n') file.write('\t\t\t</triangles>\n') # Write edges. LOOSE = Mesh.EdgeFlags.LOOSE has_edge = False for ed in edges: if ed.flag & LOOSE: has_edge = True if has_edge: file.write('\t\t\t<edges>\n') file.write( '\t\t\t\t<input offset="0" semantic="VERTEX" source="#%s" />\n' % vertname) file.write('\t\t\t\t<p>') for ed in edges: if ed.flag & LOOSE: file.write( ' %d %d' % (ed.v1.index + totverts, ed.v2.index + totverts)) file.write('</p>\n') file.write('\t\t\t</edges>\n') # Make the indicies global rather then per mesh # totverts += len(me.verts) # if faceuv: # totuvco += uv_unique_count me.verts = None if len(faces) > 0: file.write('\t\t</mesh>\n') else: file.write('\t\t</curve>\n') file.write('\t</object>\n') file.write('</library_objects>\n\n') # Now we have all our materials, save them if EXPORT_MTL: write_library_materials(file) # Save the groups write_library_groups(file) file.write('</OPEN_TRACK>\n') file.close() if EXPORT_COPY_IMAGES: dest_dir = filename # Remove chars until we are just the path. while dest_dir and dest_dir[-1] not in '\\/': dest_dir = dest_dir[:-1] if dest_dir: copy_images(dest_dir) else: print '\tError: "%s" could not be used as a base for an image path.' % filename print "WTF Export time: %.2f" % (sys.time() - time1)
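# Example usage (a sketch, not part of the exporter): the docstring above shows the
# expected call pattern.  Assumes a Blender 2.4x session with objects selected; the
# output path and option choices are illustrative only.
#
#   import Blender
#   write('/tmp/track.wtf', Blender.Object.GetSelected(),
#         EXPORT_APPLY_MODIFIERS=True,
#         EXPORT_MTL=True,
#         EXPORT_COPY_IMAGES=False)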
def write(filename, objects,\ EXPORT_TRI=False, EXPORT_EDGES=False, EXPORT_NORMALS=False, EXPORT_NORMALS_HQ=False,\ EXPORT_UV=True, EXPORT_MTL=True, EXPORT_COPY_IMAGES=False,\ EXPORT_APPLY_MODIFIERS=True, EXPORT_ROTX90=True, EXPORT_BLEN_OBS=True,\ EXPORT_GROUP_BY_OB=False, EXPORT_GROUP_BY_MAT=False, EXPORT_KEEP_VERT_ORDER=False,\ EXPORT_POLYGROUPS=False, EXPORT_CURVE_AS_NURBS=True): ''' Basic write function. The context and options must be alredy set This can be accessed externaly eg. write( 'c:\\test\\foobar.obj', Blender.Object.GetSelected() ) # Using default options. ''' def veckey3d(v): return round(v.x, 6), round(v.y, 6), round(v.z, 6) def veckey2d(v): return round(v.x, 6), round(v.y, 6) def findVertexGroupName(face, vWeightMap): """ Searches the vertexDict to see what groups is assigned to a given face. We use a frequency system in order to sort out the name because a given vetex can belong to two or more groups at the same time. To find the right name for the face we list all the possible vertex group names with their frequency and then sort by frequency in descend order. The top element is the one shared by the highest number of vertices is the face's group """ weightDict = {} for vert in face: vWeights = vWeightMap[vert.index] for vGroupName, weight in vWeights: weightDict[vGroupName] = weightDict.get(vGroupName, 0) + weight if weightDict: alist = [(weight,vGroupName) for vGroupName, weight in weightDict.iteritems()] # sort least to greatest amount of weight alist.sort() return(alist[-1][1]) # highest value last else: return '(null)' print 'OBJ Export path: "%s"' % filename temp_mesh_name = '~tmp-mesh' time1 = sys.time() scn = Scene.GetCurrent() file = open(filename, "w") # Write Header file.write('# Blender3D v%s OBJ File: %s\n' % (Blender.Get('version'), Blender.Get('filename').split('/')[-1].split('\\')[-1] )) file.write('# www.blender3d.org\n') # Tell the obj file what material file to use. if EXPORT_MTL: mtlfilename = '%s.mtl' % '.'.join(filename.split('.')[:-1]) file.write('mtllib %s\n' % ( mtlfilename.split('\\')[-1].split('/')[-1] )) # Get the container mesh. - used for applying modifiers and non mesh objects. containerMesh = meshName = tempMesh = None for meshName in Blender.NMesh.GetNames(): if meshName.startswith(temp_mesh_name): tempMesh = Mesh.Get(meshName) if not tempMesh.users: containerMesh = tempMesh if not containerMesh: containerMesh = Mesh.New(temp_mesh_name) if EXPORT_ROTX90: mat_xrot90= Blender.Mathutils.RotationMatrix(-90, 4, 'x') del meshName del tempMesh # Initialize totals, these are updated each object totverts = totuvco = totno = 1 face_vert_index = 1 globalNormals = {} # Get all meshes for ob_main in objects: for ob, ob_mat in BPyObject.getDerivedObjects(ob_main): # Nurbs curve support if EXPORT_CURVE_AS_NURBS and test_nurbs_compat(ob): if EXPORT_ROTX90: ob_mat = ob_mat * mat_xrot90 totverts += write_nurb(file, ob, ob_mat) continue # end nurbs # Will work for non meshes now! :) # getMeshFromObject(ob, container_mesh=None, apply_modifiers=True, vgroups=True, scn=None) me= BPyMesh.getMeshFromObject(ob, containerMesh, EXPORT_APPLY_MODIFIERS, EXPORT_POLYGROUPS, scn) if not me: continue if EXPORT_UV: faceuv= me.faceUV else: faceuv = False # We have a valid mesh if EXPORT_TRI and me.faces: # Add a dummy object to it. 
has_quads = False for f in me.faces: if len(f) == 4: has_quads = True break if has_quads: oldmode = Mesh.Mode() Mesh.Mode(Mesh.SelectModes['FACE']) me.sel = True tempob = scn.objects.new(me) me.quadToTriangle(0) # more=0 shortest length oldmode = Mesh.Mode(oldmode) scn.objects.unlink(tempob) Mesh.Mode(oldmode) # Make our own list so it can be sorted to reduce context switching faces = [ f for f in me.faces ] if EXPORT_EDGES: edges = me.edges else: edges = [] if not (len(faces)+len(edges)+len(me.verts)): # Make sure there is somthing to write continue # dont bother with this mesh. if EXPORT_ROTX90: me.transform(ob_mat*mat_xrot90) else: me.transform(ob_mat) # High Quality Normals if EXPORT_NORMALS and faces: if EXPORT_NORMALS_HQ: BPyMesh.meshCalcNormals(me) else: # transforming normals is incorrect # when the matrix is scaled, # better to recalculate them me.calcNormals() # # Crash Blender #materials = me.getMaterials(1) # 1 == will return None in the list. materials = me.materials materialNames = [] materialItems = materials[:] if materials: for mat in materials: if mat: # !=None materialNames.append(mat.name) else: materialNames.append(None) # Cant use LC because some materials are None. # materialNames = map(lambda mat: mat.name, materials) # Bug Blender, dosent account for null materials, still broken. # Possible there null materials, will mess up indicies # but at least it will export, wait until Blender gets fixed. materialNames.extend((16-len(materialNames)) * [None]) materialItems.extend((16-len(materialItems)) * [None]) # Sort by Material, then images # so we dont over context switch in the obj file. if EXPORT_KEEP_VERT_ORDER: pass elif faceuv: try: faces.sort(key = lambda a: (a.mat, a.image, a.smooth)) except: faces.sort(lambda a,b: cmp((a.mat, a.image, a.smooth), (b.mat, b.image, b.smooth))) elif len(materials) > 1: try: faces.sort(key = lambda a: (a.mat, a.smooth)) except: faces.sort(lambda a,b: cmp((a.mat, a.smooth), (b.mat, b.smooth))) else: # no materials try: faces.sort(key = lambda a: a.smooth) except: faces.sort(lambda a,b: cmp(a.smooth, b.smooth)) # Set the default mat to no material and no image. contextMat = (0, 0) # Can never be this, so we will label a new material teh first chance we get. contextSmooth = None # Will either be true or false, set bad to force initialization switch. if EXPORT_BLEN_OBS or EXPORT_GROUP_BY_OB: name1 = ob.name name2 = ob.getData(1) if name1 == name2: obnamestring = fixName(name1) else: obnamestring = '%s_%s' % (fixName(name1), fixName(name2)) if EXPORT_BLEN_OBS: file.write('o %s\n' % obnamestring) # Write Object name else: # if EXPORT_GROUP_BY_OB: file.write('g %s\n' % obnamestring) # Vert for v in me.verts: file.write('v %.6f %.6f %.6f\n' % tuple(v.co)) # UV if faceuv: uv_face_mapping = [[0,0,0,0] for f in faces] # a bit of a waste for tri's :/ uv_dict = {} # could use a set() here for f_index, f in enumerate(faces): for uv_index, uv in enumerate(f.uv): uvkey = veckey2d(uv) try: uv_face_mapping[f_index][uv_index] = uv_dict[uvkey] except: uv_face_mapping[f_index][uv_index] = uv_dict[uvkey] = len(uv_dict) file.write('vt %.6f %.6f\n' % tuple(uv)) uv_unique_count = len(uv_dict) del uv, uvkey, uv_dict, f_index, uv_index # Only need uv_unique_count and uv_face_mapping # NORMAL, Smooth/Non smoothed. if EXPORT_NORMALS: for f in faces: if f.smooth: for v in f: noKey = veckey3d(v.no) if not globalNormals.has_key( noKey ): globalNormals[noKey] = totno totno +=1 file.write('vn %.6f %.6f %.6f\n' % noKey) else: # Hard, 1 normal from the face. 
noKey = veckey3d(f.no) if not globalNormals.has_key( noKey ): globalNormals[noKey] = totno totno +=1 file.write('vn %.6f %.6f %.6f\n' % noKey) if not faceuv: f_image = None if EXPORT_POLYGROUPS: # Retrieve the list of vertex groups vertGroupNames = me.getVertGroupNames() currentVGroup = '' # Create a dictionary keyed by face id and listing, for each vertex, the vertex groups it belongs to vgroupsMap = [[] for _i in xrange(len(me.verts))] for vertexGroupName in vertGroupNames: for vIdx, vWeight in me.getVertsFromGroup(vertexGroupName, 1): vgroupsMap[vIdx].append((vertexGroupName, vWeight)) for f_index, f in enumerate(faces): f_v= f.v f_smooth= f.smooth f_mat = min(f.mat, len(materialNames)-1) if faceuv: f_image = f.image f_uv= f.uv # MAKE KEY if faceuv and f_image: # Object is always true. key = materialNames[f_mat], f_image.name else: key = materialNames[f_mat], None # No image, use None instead. # Write the vertex group if EXPORT_POLYGROUPS: if vertGroupNames: # find what vertext group the face belongs to theVGroup = findVertexGroupName(f,vgroupsMap) if theVGroup != currentVGroup: currentVGroup = theVGroup file.write('g %s\n' % theVGroup) # CHECK FOR CONTEXT SWITCH if key == contextMat: pass # Context alredy switched, dont do anything else: if key[0] == None and key[1] == None: # Write a null material, since we know the context has changed. if EXPORT_GROUP_BY_MAT: file.write('g %s_%s\n' % (fixName(ob.name), fixName(ob.getData(1))) ) # can be mat_image or (null) file.write('usemtl (null)\n') # mat, image else: mat_data= MTL_DICT.get(key) if not mat_data: # First add to global dict so we can export to mtl # Then write mtl # Make a new names from the mat and image name, # converting any spaces to underscores with fixName. # If none image dont bother adding it to the name if key[1] == None: mat_data = MTL_DICT[key] = ('%s'%fixName(key[0])), materialItems[f_mat], f_image else: mat_data = MTL_DICT[key] = ('%s_%s' % (fixName(key[0]), fixName(key[1]))), materialItems[f_mat], f_image if EXPORT_GROUP_BY_MAT: file.write('g %s_%s_%s\n' % (fixName(ob.name), fixName(ob.getData(1)), mat_data[0]) ) # can be mat_image or (null) file.write('usemtl %s\n' % mat_data[0]) # can be mat_image or (null) contextMat = key if f_smooth != contextSmooth: if f_smooth: # on now off file.write('s 1\n') contextSmooth = f_smooth else: # was off now on file.write('s off\n') contextSmooth = f_smooth file.write('f') if faceuv: if EXPORT_NORMALS: if f_smooth: # Smoothed, use vertex normals for vi, v in enumerate(f_v): file.write( ' %d/%d/%d' % (\ v.index+totverts,\ totuvco + uv_face_mapping[f_index][vi],\ globalNormals[ veckey3d(v.no) ])) # vert, uv, normal else: # No smoothing, face normals no = globalNormals[ veckey3d(f.no) ] for vi, v in enumerate(f_v): file.write( ' %d/%d/%d' % (\ v.index+totverts,\ totuvco + uv_face_mapping[f_index][vi],\ no)) # vert, uv, normal else: # No Normals for vi, v in enumerate(f_v): file.write( ' %d/%d' % (\ v.index+totverts,\ totuvco + uv_face_mapping[f_index][vi])) # vert, uv face_vert_index += len(f_v) else: # No UV's if EXPORT_NORMALS: if f_smooth: # Smoothed, use vertex normals for v in f_v: file.write( ' %d//%d' % (\ v.index+totverts,\ globalNormals[ veckey3d(v.no) ])) else: # No smoothing, face normals no = globalNormals[ veckey3d(f.no) ] for v in f_v: file.write( ' %d//%d' % (\ v.index+totverts,\ no)) else: # No Normals for v in f_v: file.write( ' %d' % (\ v.index+totverts)) file.write('\n') # Write edges. 
            if EXPORT_EDGES:
                LOOSE = Mesh.EdgeFlags.LOOSE
                for ed in edges:
                    if ed.flag & LOOSE:
                        file.write('f %d %d\n' % (ed.v1.index + totverts, ed.v2.index + totverts))

            # Make the indices global rather than per mesh.
            totverts += len(me.verts)
            if faceuv:
                totuvco += uv_unique_count

            me.verts = None

    file.close()

    # Now we have all our materials, save them.
    if EXPORT_MTL:
        write_mtl(mtlfilename)

    if EXPORT_COPY_IMAGES:
        dest_dir = filename

        # Remove chars until we are just the path.
        while dest_dir and dest_dir[-1] not in '\\/':
            dest_dir = dest_dir[:-1]

        if dest_dir:
            copy_images(dest_dir)
        else:
            print '\tError: "%s" could not be used as a base for an image path.' % filename

    print "OBJ Export time: %.2f" % (sys.time() - time1)
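# Example usage (a sketch): a typical call mirroring the docstring, assuming a
# Blender 2.4x session with objects selected.  The path and option values are
# illustrative only.
#
#   import Blender
#   write('/tmp/scene.obj', Blender.Object.GetSelected(),
#         EXPORT_TRI=True,             # convert quads to triangles
#         EXPORT_NORMALS=True,         # write 'vn' lines
#         EXPORT_UV=True,              # write 'vt' lines
#         EXPORT_APPLY_MODIFIERS=True)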
def redux(ob, REDUX=0.5, BOUNDRY_WEIGHT=2.0, REMOVE_DOUBLES=False, FACE_AREA_WEIGHT=1.0, FACE_TRIANGULATE=True, DO_UV=True, DO_VCOL=True, DO_WEIGHTS=True, VGROUP_INF_REDUX=None, VGROUP_INF_WEIGHT=0.5): """ BOUNDRY_WEIGHT - 0 is no boundry weighting. 2.0 will make them twice as unlikely to collapse. FACE_AREA_WEIGHT - 0 is no weight. 1 is normal, 2.0 is higher. """ if REDUX < 0 or REDUX > 1.0: raise 'Error, factor must be between 0 and 1.0' elif not set: raise 'Error, this function requires Python 2.4 or a full install of Python 2.3' BOUNDRY_WEIGHT = 1 + BOUNDRY_WEIGHT """ # DEBUG! if Blender.Get('rt') == 1000: DEBUG=True else: DEBUG= False """ me = ob.getData(mesh=1) me.hide = False # unhide all data,. if len(me.faces) < 5: return if FACE_TRIANGULATE or REMOVE_DOUBLES: me.sel = True if FACE_TRIANGULATE: me.quadToTriangle() if REMOVE_DOUBLES: #me.remDoubles(0.0001) print "ERROR: No RemDoubles in background mode!!" vgroups = me.getVertGroupNames() if not me.getVertGroupNames(): DO_WEIGHTS = False if (VGROUP_INF_REDUX!= None and VGROUP_INF_REDUX not in vgroups) or\ VGROUP_INF_WEIGHT==0.0: VGROUP_INF_REDUX = None try: VGROUP_INF_REDUX_INDEX = vgroups.index(VGROUP_INF_REDUX) except: VGROUP_INF_REDUX_INDEX = -1 # del vgroups len_vgroups = len(vgroups) OLD_MESH_MODE = Blender.Mesh.Mode() Blender.Mesh.Mode(Blender.Mesh.SelectModes.VERTEX) if DO_UV and not me.faceUV: DO_UV = False if DO_VCOL and not me.vertexColors: DO_VCOL = False current_face_count = len(me.faces) target_face_count = int(current_face_count * REDUX) # % of the collapseable faces to collapse per pass. #collapse_per_pass= 0.333 # between 0.1 - lots of small nibbles, slow but high q. and 0.9 - big passes and faster. collapse_per_pass = 0.333 # between 0.1 - lots of small nibbles, slow but high q. and 0.9 - big passes and faster. """# DEBUG! if DEBUG: COUNT= [0] def rd(): if COUNT[0]< 330: COUNT[0]+=1 return me.update() Blender.Window.RedrawAll() print 'Press key for next, count "%s"' % COUNT[0] try: input() except KeyboardInterrupt: raise "Error" except: pass COUNT[0]+=1 """ class collapseEdge(object): __slots__ = 'length', 'key', 'faces', 'collapse_loc', 'v1', 'v2', 'uv1', 'uv2', 'col1', 'col2', 'collapse_weight' def __init__(self, ed): self.init_from_edge( ed) # So we can re-use the classes without using more memory. def init_from_edge(self, ed): self.key = ed.key self.length = ed.length self.faces = [] self.v1 = ed.v1 self.v2 = ed.v2 if DO_UV or DO_VCOL: self.uv1 = [] self.uv2 = [] self.col1 = [] self.col2 = [] # self.collapse_loc= None # new collapse location. # Basic weighting. #self.collapse_weight= self.length * (1+ ((ed.v1.no-ed.v2.no).length**2)) self.collapse_weight = 1.0 def collapse_locations(self, w1, w2): ''' Generate a smart location for this edge to collapse to w1 and w2 are vertex location bias ''' v1co = self.v1.co v2co = self.v2.co v1no = self.v1.no v2no = self.v2.no # Basic operation, works fine but not as good as predicting the best place. #between= ((v1co*w1) + (v2co*w2)) #self.collapse_loc= between # normalize the weights of each vert - se we can use them as scalers. wscale = w1 + w2 if not wscale: # no scale? w1 = w2 = 0.5 else: w1 /= wscale w2 /= wscale length = self.length between = MidpointVecs(v1co, v2co) # Collapse # new_location = between # Replace tricky code below. this code predicts the best collapse location. # Make lines at right angles to the normals- these 2 lines will intersect and be # the point of collapsing. 
# Enlarge so we know they intersect: self.length*2 cv1 = v1no.cross(v1no.cross(v1co - v2co)) cv2 = v2no.cross(v2no.cross(v2co - v1co)) # Scale to be less then the edge lengths. cv2.length = cv1.length = 1 cv1 = cv1 * (length * 0.4) cv2 = cv2 * (length * 0.4) smart_offset_loc = between + (cv1 + cv2) # Now we need to blend between smart_offset_loc and w1/w2 # you see were blending between a vert and the edges midpoint, so we cant use a normal weighted blend. if w1 > 0.5: # between v1 and smart_offset_loc #self.collapse_loc= v1co*(w2+0.5) + smart_offset_loc*(w1-0.5) w2 *= 2 w1 = 1 - w2 new_loc_smart = v1co * w1 + smart_offset_loc * w2 else: # w between v2 and smart_offset_loc w1 *= 2 w2 = 1 - w1 new_loc_smart = v2co * w2 + smart_offset_loc * w1 if new_loc_smart.x != new_loc_smart.x: # NAN LOCATION, revert to between new_loc_smart = None return new_loc_smart, between, v1co * 0.99999 + v2co * 0.00001, v1co * 0.00001 + v2co * 0.99999 class collapseFace(object): __slots__ = 'verts', 'normal', 'area', 'index', 'orig_uv', 'orig_col', 'uv', 'col' # , 'collapse_edge_count' def __init__(self, f): self.init_from_face(f) def init_from_face(self, f): self.verts = f.v self.normal = f.no self.area = f.area self.index = f.index if DO_UV: self.orig_uv = [uv_key(uv) for uv in f.uv] self.uv = f.uv if DO_VCOL: self.orig_col = [col_key(col) for col in f.col] self.col = f.col collapse_edges = collapse_faces = None # So meshCalcNormals can avoid making a new list all the time. reuse_vertNormals = [Vector() for v in xrange(len(me.verts))] while target_face_count <= len(me.faces): BPyMesh.meshCalcNormals(me, reuse_vertNormals) if DO_WEIGHTS: #groupNames, vWeightDict= BPyMesh.meshWeight2Dict(me) groupNames, vWeightList = BPyMesh.meshWeight2List(me) # THIS CRASHES? Not anymore. verts = list(me.verts) edges = list(me.edges) faces = list(me.faces) # THIS WORKS #verts= me.verts #edges= me.edges #faces= me.faces # if DEBUG: DOUBLE_CHECK= [0]*len(verts) me.sel = False if not collapse_faces: # Initialize the list. collapse_faces = [collapseFace(f) for f in faces] collapse_edges = [collapseEdge(ed) for ed in edges] else: for i, ed in enumerate(edges): collapse_edges[i].init_from_edge(ed) # Strip the unneeded end off the list collapse_edges[i + 1:] = [] for i, f in enumerate(faces): collapse_faces[i].init_from_face(f) # Strip the unneeded end off the list collapse_faces[i + 1:] = [] collapse_edges_dict = dict([(ced.key, ced) for ced in collapse_edges]) # Store verts edges. vert_ed_users = [[] for i in xrange(len(verts))] for ced in collapse_edges: vert_ed_users[ced.key[0]].append(ced) vert_ed_users[ced.key[1]].append(ced) # Store face users vert_face_users = [[] for i in xrange(len(verts))] # Have decieded not to use this. area is better. #face_perim= [0.0]* len(me.faces) for ii, cfa in enumerate(collapse_faces): for i, v1 in enumerate(cfa.verts): vert_face_users[v1.index].append((i, cfa)) # add the uv coord to the vert v2 = cfa.verts[i - 1] i1 = v1.index i2 = v2.index if i1 > i2: ced = collapse_edges_dict[i2, i1] else: ced = collapse_edges_dict[i1, i2] ced.faces.append(cfa) if DO_UV or DO_VCOL: # if the edge is flipped from its order in the face then we need to flip the order indicies. if cfa.verts[i] == ced.v1: i1, i2 = i, i - 1 else: i1, i2 = i - 1, i if DO_UV: ced.uv1.append(cfa.orig_uv[i1]) ced.uv2.append(cfa.orig_uv[i2]) if DO_VCOL: ced.col1.append(cfa.orig_col[i1]) ced.col2.append(cfa.orig_col[i2]) # PERIMITER #face_perim[ii]+= ced.length # How weight the verts by the area of their faces * the normal difference. 
# when the edge collapses, to vert weights are taken into account vert_weights = [0.5] * len(verts) for ii, vert_faces in enumerate(vert_face_users): for f in vert_faces: try: no_ang = (Ang(verts[ii].no, f[1].normal) / 180) * f[1].area except: no_ang = 1.0 vert_weights[ii] += no_ang # Use a vertex group as a weighting. if VGROUP_INF_REDUX != None: # Get Weights from a vgroup. """ vert_weights_map= [1.0] * len(verts) for i, wd in enumerate(vWeightDict): try: vert_weights_map[i]= 1+(wd[VGROUP_INF_REDUX] * VGROUP_INF_WEIGHT) except: pass """ vert_weights_map = [ 1 + (wl[VGROUP_INF_REDUX_INDEX] * VGROUP_INF_WEIGHT) for wl in vWeightList ] # BOUNDRY CHECKING AND WEIGHT EDGES. CAN REMOVE # Now we know how many faces link to an edge. lets get all the boundry verts if BOUNDRY_WEIGHT > 0: verts_boundry = [1] * len(verts) #for ed_idxs, faces_and_uvs in edge_faces_and_uvs.iteritems(): for ced in collapse_edges: if len(ced.faces) < 2: for key in ced.key: # only ever 2 key indicies. verts_boundry[key] = 2 for ced in collapse_edges: b1 = verts_boundry[ced.key[0]] b2 = verts_boundry[ced.key[1]] if b1 != b2: # Edge has 1 boundry and 1 non boundry vert. weight higher ced.collapse_weight = BOUNDRY_WEIGHT #elif b1==b2==2: # if both are on a seam then weigh half as bad. # ced.collapse_weight= ((BOUNDRY_WEIGHT-1)/2) +1 # weight the verts by their boundry status del b1 del b2 for ii, boundry in enumerate(verts_boundry): if boundry == 2: vert_weights[ii] *= BOUNDRY_WEIGHT vert_collapsed = verts_boundry del verts_boundry else: vert_collapsed = [1] * len(verts) # Best method, no quick hacks here, Correction. Should be the best but needs tweaks. def ed_set_collapse_error(ced): # Use the vertex weights to bias the new location. new_locs = ced.collapse_locations(vert_weights[ced.key[0]], vert_weights[ced.key[1]]) # Find the connecting faces of the 2 verts. i1, i2 = ced.key test_faces = set() for i in (i1, i2): # faster then LC's for f in vert_face_users[i]: test_faces.add(f[1].index) for f in ced.faces: test_faces.remove(f.index) v1_orig = Vector(ced.v1.co) v2_orig = Vector(ced.v2.co) def test_loc(new_loc): ''' Takes a location and tests the error without changing anything ''' new_weight = ced.collapse_weight ced.v1.co = ced.v2.co = new_loc new_nos = [faces[i].no for i in test_faces] # So we can compare the befire and after normals ced.v1.co = v1_orig ced.v2.co = v2_orig # now see how bad the normals are effected angle_diff = 1.0 for ii, i in enumerate( test_faces): # local face index, global face index cfa = collapse_faces[i] # this collapse face try: # can use perim, but area looks better. if FACE_AREA_WEIGHT: # Psudo code for wrighting # angle_diff= The before and after angle difference between the collapsed and un-collapsed face. # ... devide by 180 so the value will be between 0 and 1.0 # ... add 1 so we can use it as a multiplyer and not make the area have no eefect (below) # area_weight= The faces original area * the area weight # ... add 1.0 so a small area face dosent make the angle_diff have no effect. # # Now multiply - (angle_diff * area_weight) # ... The weight will be a minimum of 1.0 - we need to subtract this so more faces done give the collapse an uneven weighting. 
angle_diff += ( (1 + (Ang(cfa.normal, new_nos[ii]) / 180)) * (1 + (cfa.area * FACE_AREA_WEIGHT)) ) - 1 # 4 is how much to influence area else: angle_diff += (Ang(cfa.normal), new_nos[ii]) / 180 except: pass # This is very arbirary, feel free to modify try: no_ang = (Ang(ced.v1.no, ced.v2.no) / 180) + 1 except: no_ang = 2.0 # do *= because we face the boundry weight to initialize the weight. 1.0 default. new_weight *= ((no_ang * ced.length) * (1 - (1 / angle_diff)) ) # / max(len(test_faces), 1) return new_weight # End testloc # Test the collapse locatons collapse_loc_best = None collapse_weight_best = 1000000000 ii = 0 for collapse_loc in new_locs: if collapse_loc: # will only ever fail if smart loc is NAN test_weight = test_loc(collapse_loc) if test_weight < collapse_weight_best: iii = ii collapse_weight_best = test_weight collapse_loc_best = collapse_loc ii += 1 ced.collapse_loc = collapse_loc_best ced.collapse_weight = collapse_weight_best # are we using a weight map if VGROUP_INF_REDUX: v = vert_weights_map[i1] + vert_weights_map[i2] ced.collapse_weight *= v # End collapse Error # We can calculate the weights on __init__ but this is higher qualuity. for ced in collapse_edges: if ced.faces: # dont collapse faceless edges. ed_set_collapse_error(ced) # Wont use the function again. del ed_set_collapse_error # END BOUNDRY. Can remove # sort by collapse weight try: collapse_edges.sort(key=lambda ced: ced.collapse_weight ) # edges will be used for sorting except: collapse_edges.sort(lambda ced1, ced2: cmp(ced1.collapse_weight, ced2.collapse_weight) ) # edges will be used for sorting vert_collapsed = [0] * len(verts) collapse_edges_to_collapse = [] # Make a list of the first half edges we can collapse, # these will better edges to remove. collapse_count = 0 for ced in collapse_edges: if ced.faces: i1, i2 = ced.key # Use vert selections if vert_collapsed[i1] or vert_collapsed[i2]: pass else: # Now we know the verts havnyt been collapsed. vert_collapsed[i2] = vert_collapsed[ i1] = 1 # Dont collapse again. collapse_count += 1 collapse_edges_to_collapse.append(ced) # Get a subset of the entire list- the first "collapse_per_pass", that are best to collapse. if collapse_count > 4: collapse_count = int(collapse_count * collapse_per_pass) else: collapse_count = len(collapse_edges) # We know edge_container_list_collapse can be removed. for ced in collapse_edges_to_collapse: """# DEBUG! if DEBUG: if DOUBLE_CHECK[ced.v1.index] or\ DOUBLE_CHECK[ced.v2.index]: raise 'Error' else: DOUBLE_CHECK[ced.v1.index]=1 DOUBLE_CHECK[ced.v2.index]=1 tmp= (ced.v1.co+ced.v2.co)*0.5 Blender.Window.SetCursorPos(tmp.x, tmp.y, tmp.z) Blender.Window.RedrawAll() """ # Chech if we have collapsed our quota. collapse_count -= 1 if not collapse_count: break current_face_count -= len(ced.faces) # Find and assign the real weights based on collapse loc. # Find the weights from the collapse error if DO_WEIGHTS or DO_UV or DO_VCOL: i1, i2 = ced.key # Dont use these weights since they may not have been used to make the collapse loc. #w1= vert_weights[i1] #w2= vert_weights[i2] w1 = (ced.v2.co - ced.collapse_loc).length w2 = (ced.v1.co - ced.collapse_loc).length # Normalize weights wscale = w1 + w2 if not wscale: # no scale? w1 = w2 = 0.5 else: w1 /= wscale w2 /= wscale # Interpolate the bone weights. 
if DO_WEIGHTS: # add verts vgroups to eachother wl1 = vWeightList[i1] # v1 weight dict wl2 = vWeightList[i2] # v2 weight dict for group_index in xrange(len_vgroups): wl1[group_index] = wl2[group_index] = ( wl1[group_index] * w1) + (wl2[group_index] * w2) # Done finding weights. if DO_UV or DO_VCOL: # Handel UV's and vert Colors! for v, my_weight, other_weight, edge_my_uvs, edge_other_uvs, edge_my_cols, edge_other_cols in (\ (ced.v1, w1, w2, ced.uv1, ced.uv2, ced.col1, ced.col2),\ (ced.v2, w2, w1, ced.uv2, ced.uv1, ced.col2, ced.col1)\ ): uvs_mixed = [ uv_key_mix(edge_my_uvs[iii], edge_other_uvs[iii], my_weight, other_weight) for iii in xrange(len(edge_my_uvs)) ] cols_mixed = [ col_key_mix(edge_my_cols[iii], edge_other_cols[iii], my_weight, other_weight) for iii in xrange(len(edge_my_cols)) ] for face_vert_index, cfa in vert_face_users[v.index]: if len( cfa.verts ) == 3 and cfa not in ced.faces: # if the face is apart of this edge then dont bother finding the uvs since the face will be removed anyway. if DO_UV: # UV COORDS uvk = cfa.orig_uv[face_vert_index] try: tex_index = edge_my_uvs.index(uvk) except: tex_index = None """ # DEBUG! if DEBUG: print 'not found', uvk, 'in', edge_my_uvs, 'ed index', ii, '\nwhat about', edge_other_uvs """ if tex_index != None: # This face uses a uv in the collapsing face. - do a merge other_uv = edge_other_uvs[tex_index] uv_vec = cfa.uv[face_vert_index] uv_vec.x, uv_vec.y = uvs_mixed[ tex_index] # TEXFACE COLORS if DO_VCOL: colk = cfa.orig_col[face_vert_index] try: tex_index = edge_my_cols.index(colk) except: pass if tex_index != None: other_col = edge_other_cols[tex_index] col_ob = cfa.col[face_vert_index] col_ob.r, col_ob.g, col_ob.b = cols_mixed[ tex_index] # DEBUG! if DEBUG: rd() # Execute the collapse ced.v1.sel = ced.v2.sel = True # Select so remove doubles removed the edges and faces that use it ced.v1.co = ced.v2.co = ced.collapse_loc # DEBUG! if DEBUG: rd() if current_face_count <= target_face_count: break # Copy weights back to the mesh before we remove doubles. if DO_WEIGHTS: #BPyMesh.dict2MeshWeight(me, groupNames, vWeightDict) BPyMesh.list2MeshWeight(me, groupNames, vWeightList) #doubles= me.remDoubles(0.0001) doubles = 0 current_face_count = len(me.faces) if current_face_count <= target_face_count or not doubles: # not doubles shoule never happen. break me.update() Blender.Mesh.Mode(OLD_MESH_MODE)
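# Example usage (a sketch): collapse edges on a selected object's mesh until roughly
# half of the faces remain.  Assumes a Blender 2.4x session with a mesh object
# selected; the settings shown are illustrative defaults.
#
#   import Blender
#   ob = Blender.Object.GetSelected()[0]
#   redux(ob, REDUX=0.5, BOUNDRY_WEIGHT=2.0, FACE_AREA_WEIGHT=1.0,
#         FACE_TRIANGULATE=True, DO_UV=True, DO_VCOL=True, DO_WEIGHTS=True)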
def write(directory, filename, objects): def v_n_uv_key(v, n, uv): return round(v.x, 6), round(v.y, 6), round(v.z, 6), round(n.x, 6), round( n.y, 6), round(n.z, 6), round(uv[0], 6), round(uv[1], 6) def v_n_key(v, n): return round(v.x, 6), round(v.y, 6), round(v.z, 6), round(n.x, 6), round(n.y, 6), round(n.z, 6) def adjust_key(key, obCenter): keyList = list(key) keyList[0] -= obCenter[0] keyList[1] -= obCenter[1] keyList[2] -= obCenter[2] return tuple(keyList) temp_mesh_name = '~tmp-mesh' scn = Scene.GetCurrent() # Get the container mesh. - used for applying modifiers and non mesh objects. containerMesh = meshName = tempMesh = None for meshName in Blender.NMesh.GetNames(): if meshName.startswith(temp_mesh_name): tempMesh = Mesh.Get(meshName) if not tempMesh.users: containerMesh = tempMesh if not containerMesh: containerMesh = Mesh.New(temp_mesh_name) del meshName del tempMesh try: armature = Blender.Object.Get("Armature") write_armature(directory + filename, armature) except: armature = None # Get all meshs for ob_main in objects: for ob, ob_mat in BPyObject.getDerivedObjects(ob_main): me = BPyMesh.getMeshFromObject(ob, containerMesh, True, False, scn) if not me: continue # Initialize globalVertices and globalMaterials dictionaries vertIndex = 0 matIndex = 0 globalVertices = {} globalMaterials = {} # Dictionary of materials: (material.name, image.name):matname_imagename # matname_imagename has fixed names. materialDict = {} # We have a valid mesh if me.faces: # Add a dummy object to it. has_quads = False for f in me.faces: if len(f) == 4: has_quads = True break if has_quads: oldmode = Mesh.Mode() Mesh.Mode(Mesh.SelectModes['FACE']) me.sel = True tempob = scn.objects.new(me) me.quadToTriangle(0) # more=0 shortest length oldmode = Mesh.Mode(oldmode) scn.objects.unlink(tempob) Mesh.Mode(oldmode) else: continue # High Quality Normals BPyMesh.meshCalcNormals(me) # Make our own list so it can be sorted to reduce context switching faces = [f for f in me.faces] faceuv = me.faceUV edges = me.edges materials = me.materials materialNames = [] materialItems = materials[:] if materials: for mat in materials: if mat: materialNames.append(mat.name) else: materialNames.append(None) # Possible there null materials, will mess up indicies # but at least it will export, wait until Blender gets fixed. materialNames.extend((16 - len(materialNames)) * [None]) materialItems.extend((16 - len(materialItems)) * [None]) # Sort by Material, then images # so we dont over context switch in the obj file. if faceuv: try: faces.sort(key=lambda a: (a.mat, a.image, a.smooth)) except: faces.sort(lambda a, b: cmp((a.mat, a.image, a.smooth), (b.mat, b.image, b.smooth))) elif len(materials) > 1: try: faces.sort(key=lambda a: (a.mat, a.smooth)) except: faces.sort(lambda a, b: cmp((a.mat, a.smooth), (b.mat, b.smooth))) else: # no materials try: faces.sort(key=lambda a: a.smooth) except: faces.sort(lambda a, b: cmp(a.smooth, b.smooth)) # Set the default mat to no material and no image. contextMat = ( 0, 0 ) # Can never be this, so we will label a new material the first chance we get. contextSmooth = None # Will either be true or false, set bad to force initialization switch. 
name1 = ob.name name2 = ob.getData(1) obnamestring = fixName(name1) file = open(directory + obnamestring + ".drkMesh", "w") # Fill globalVertices dictionary by creating (vert, normal, uv) tuple for all vertices of all faces vertString = "" obCenter = ob.getLocation() if faceuv: vOutputFormat = 'v %.6f %.6f %.6f %.6f %.6f %.6f %.6f %.6f\n' else: vOutputFormat = 'v %.6f %.6f %.6f %.6f %.6f %.6f\n' f_image = None #Loop through all faces submeshCount = 0 faceCount = 0 faceCounts = [] for face in faces: if faceuv: faceUVs = list(face.uv) faceUVindex = 0 faceIndices = [] for v in face: if face.smooth: vno = v.no else: vno = face.no if faceuv: key = v_n_uv_key(v.co, v.no, faceUVs[faceUVindex]) faceUVindex += 1 else: key = v_n_key(v.co, v.no) if not globalVertices.has_key(key): globalVertices[key] = vertIndex vertString += vOutputFormat % key faceIndices.append(vertIndex) vertIndex += 1 else: faceIndices.append(globalVertices[key]) # Make material,texture key f_mat = min(face.mat, len(materialNames) - 1) if faceuv: f_image = face.image if faceuv and f_image: matKey = materialNames[f_mat], f_image.name else: matKey = materialNames[f_mat], None # Check for context switch if matKey != contextMat: submeshCount += 1 if matKey[0] == None and matKey[1] == None: # Write a null material, since we know the context has changed. faceString += 'use (null)\n' # mat, image else: mat_data = materialDict.get(matKey) if not mat_data: mat_data = materialDict[matKey] = fixName( matKey[0]), materialItems[f_mat], f_image vertString += 'use %d\n' % matIndex globalMaterials[mat_data[0]] = matIndex matIndex += 1 if faceCount != 0: faceCounts.append(faceCount) faceCount = 0 contextMat = matKey vertString += 'face %d %d %d\n' % tuple(faceIndices) faceCount += 1 faceCounts.append(faceCount) file.write('count %d\n' % vertIndex) if faceuv: file.write('uvs\n') file.write('submeshes %d\n' % submeshCount) for faceCount in faceCounts: file.write('faces %d\n' % faceCount) file.write(vertString) me.verts = None write_mtl(file, materialDict, globalMaterials) file.close()
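# The exporter above de-duplicates vertices by keying each (position, normal, uv)
# triple on values rounded to 6 decimal places (v_n_uv_key / v_n_key), so two face
# corners that differ only by floating point noise share one output vertex.  A
# minimal standalone sketch of the same idea; the helper name and data layout are
# illustrative, not part of the exporter:
def _dedup_corners_example(corners):
    # corners: iterable of ((x, y, z), (nx, ny, nz), (u, v)) tuples
    index_of = {}   # rounded key -> output vertex index
    order = []      # unique keys in first-seen order
    for co, no, uv in corners:
        key = tuple(round(c, 6) for c in co + no + uv)
        if key not in index_of:
            index_of[key] = len(order)
            order.append(key)
    return index_of, order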
def export_map(filepath): pup_block = [\ ('Scale:', PREF_SCALE, 1, 1000, 'Scale the blender scene by this value.'),\ ('Face Width:', PREF_FACE_THICK, 0.01, 10, 'Thickness of faces exported as brushes.'),\ ('Grid Snap', PREF_GRID_SNAP, 'snaps floating point values to whole numbers.'),\ 'Null Texture',\ ('', PREF_NULL_TEX, 1, 128, 'Export textureless faces with this texture'),\ 'Unseen Texture',\ ('', PREF_INVIS_TEX, 1, 128, 'Export invisible faces with this texture'),\ ] if not Draw.PupBlock('map export', pup_block): return Window.WaitCursor(1) time = sys.time() print 'Map Exporter 0.0' file = open(filepath, 'w') obs_mesh = [] obs_lamp = [] obs_surf = [] obs_empty = [] SCALE_MAT = Mathutils.Matrix() SCALE_MAT[0][0] = SCALE_MAT[1][1] = SCALE_MAT[2][2] = PREF_SCALE.val dummy_mesh = Mesh.New() TOTBRUSH = TOTLAMP = TOTNODE = 0 for ob in Object.GetSelected(): type = ob.getType() if type == 'Mesh': obs_mesh.append(ob) elif type == 'Surf': obs_surf.append(ob) elif type == 'Lamp': obs_lamp.append(ob) elif type == 'Empty': obs_empty.append(ob) if obs_mesh or obs_surf: # brushes and surf's must be under worldspan file.write('\n// entity 0\n') file.write('{\n') file.write('"classname" "worldspawn"\n') print '\twriting cubes from meshes' for ob in obs_mesh: dummy_mesh.getFromObject(ob.name) #print len(mesh_split2connected(dummy_mesh)) # Is the object 1 cube? - object-is-a-brush dummy_mesh.transform(ob.matrixWorld * SCALE_MAT) # 1 to tx the normals also if PREF_GRID_SNAP.val: for v in dummy_mesh.verts: co = v.co co.x = round(co.x) co.y = round(co.y) co.z = round(co.z) # High quality normals BPyMesh.meshCalcNormals(dummy_mesh) # Split mesh into connected regions for face_group in BPyMesh.mesh2linkedFaces(dummy_mesh): if is_cube_facegroup(face_group): write_cube2brush(file, face_group) TOTBRUSH += 1 elif is_tricyl_facegroup(face_group): write_cube2brush(file, face_group) TOTBRUSH += 1 else: for f in face_group: write_face2brush(file, f) TOTBRUSH += 1 #print 'warning, not exporting "%s" it is not a cube' % ob.name dummy_mesh.verts = None valid_dims = 3, 5, 7, 9, 11, 13, 15 for ob in obs_surf: ''' Surf, patches ''' surf_name = ob.getData(name_only=1) data = Curve.Get(surf_name) mat = ob.matrixWorld * SCALE_MAT # This is what a valid patch looks like """ // brush 0 { patchDef2 { NULL ( 3 3 0 0 0 ) ( ( ( -64 -64 0 0 0 ) ( -64 0 0 0 -2 ) ( -64 64 0 0 -4 ) ) ( ( 0 -64 0 2 0 ) ( 0 0 0 2 -2 ) ( 0 64 0 2 -4 ) ) ( ( 64 -64 0 4 0 ) ( 64 0 0 4 -2 ) ( 80 88 0 4 -4 ) ) ) } } """ for i, nurb in enumerate(data): u = nurb.pointsU v = nurb.pointsV if u in valid_dims and v in valid_dims: file.write('// brush %d surf_name\n' % i) file.write('{\n') file.write('patchDef2\n') file.write('{\n') file.write('NULL\n') file.write('( %d %d 0 0 0 )\n' % (u, v)) file.write('(\n') u_iter = 0 for p in nurb: if u_iter == 0: file.write('(') u_iter += 1 # add nmapping 0 0 ? 
                    if PREF_GRID_SNAP.val:
                        file.write(' ( %d %d %d 0 0 )' % round_vec(Mathutils.Vector(p[0:3]) * mat))
                    else:
                        file.write(' ( %.6f %.6f %.6f 0 0 )' % tuple(Mathutils.Vector(p[0:3]) * mat))

                    # Move to next line
                    if u_iter == u:
                        file.write(' )\n')
                        u_iter = 0

                file.write(')\n')
                file.write('}\n')
                file.write('}\n')

                # Debugging
                # for p in nurb: print 'patch', p
            else:
                print "NOT EXPORTING PATCH", surf_name, u, v, 'Unsupported'

    if obs_mesh or obs_surf:
        file.write('}\n')  # end worldspawn

    print '\twriting lamps'
    for ob in obs_lamp:
        print '\t\t%s' % ob.name
        lamp = ob.data
        file.write('{\n')
        file.write('"classname" "light"\n')
        file.write('"light" "%.6f"\n' % (lamp.dist * PREF_SCALE.val))
        if PREF_GRID_SNAP.val:
            file.write('"origin" "%d %d %d"\n' % tuple([round(axis * PREF_SCALE.val) for axis in ob.getLocation('worldspace')]))
        else:
            file.write('"origin" "%.6f %.6f %.6f"\n' % tuple([axis * PREF_SCALE.val for axis in ob.getLocation('worldspace')]))
        file.write('"_color" "%.6f %.6f %.6f"\n' % tuple(lamp.col))
        file.write('"style" "0"\n')
        file.write('}\n')
        TOTLAMP += 1

    print '\twriting empty objects as nodes'
    for ob in obs_empty:
        if write_node_map(file, ob):
            print '\t\t%s' % ob.name
            TOTNODE += 1
        else:
            print '\t\tignoring %s' % ob.name

    Window.WaitCursor(0)
    print 'Exported Map in %.4fsec' % (sys.time() - time)
    print 'Brushes: %d Nodes: %d Lamps %d\n' % (TOTBRUSH, TOTNODE, TOTLAMP)
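# Example usage (a sketch): export the currently selected objects to a Quake-style
# .map file.  The PupBlock at the top of the function prompts for scale, face
# thickness and the null/invisible texture names before anything is written.
# Assumes a Blender 2.4x session; the output path is illustrative.
#
#   export_map('/tmp/level.map')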
def export_map(filepath): pup_block = [\ ('Scale:', PREF_SCALE, 1, 1000, 'Scale the blender scene by this value.'),\ ('Face Width:', PREF_FACE_THICK, 0.01, 10, 'Thickness of faces exported as brushes.'),\ ('Grid Snap', PREF_GRID_SNAP, 'snaps floating point values to whole numbers.'),\ 'Null Texture',\ ('', PREF_NULL_TEX, 1, 128, 'Export textureless faces with this texture'),\ 'Unseen Texture',\ ('', PREF_INVIS_TEX, 1, 128, 'Export invisible faces with this texture'),\ ] if not Draw.PupBlock('map export', pup_block): return Window.WaitCursor(1) time= sys.time() print 'Map Exporter 0.0' file= open(filepath, 'w') obs_mesh= [] obs_lamp= [] obs_surf= [] obs_empty= [] SCALE_MAT= Mathutils.Matrix() SCALE_MAT[0][0]= SCALE_MAT[1][1]= SCALE_MAT[2][2]= PREF_SCALE.val dummy_mesh= Mesh.New() TOTBRUSH= TOTLAMP= TOTNODE= 0 for ob in Object.GetSelected(): type= ob.type if type == 'Mesh': obs_mesh.append(ob) elif type == 'Surf': obs_surf.append(ob) elif type == 'Lamp': obs_lamp.append(ob) elif type == 'Empty': obs_empty.append(ob) if obs_mesh or obs_surf: # brushes and surf's must be under worldspan file.write('\n// entity 0\n') file.write('{\n') file.write('"classname" "worldspawn"\n') print '\twriting cubes from meshes' for ob in obs_mesh: dummy_mesh.getFromObject(ob.name) #print len(mesh_split2connected(dummy_mesh)) # Is the object 1 cube? - object-is-a-brush dummy_mesh.transform(ob.matrixWorld*SCALE_MAT) # 1 to tx the normals also if PREF_GRID_SNAP.val: for v in dummy_mesh.verts: co= v.co co.x= round(co.x) co.y= round(co.y) co.z= round(co.z) # High quality normals BPyMesh.meshCalcNormals(dummy_mesh) # Split mesh into connected regions for face_group in BPyMesh.mesh2linkedFaces(dummy_mesh): if is_cube_facegroup(face_group): write_cube2brush(file, face_group) TOTBRUSH+=1 elif is_tricyl_facegroup(face_group): write_cube2brush(file, face_group) TOTBRUSH+=1 else: for f in face_group: write_face2brush(file, f) TOTBRUSH+=1 #print 'warning, not exporting "%s" it is not a cube' % ob.name dummy_mesh.verts= None valid_dims= 3,5,7,9,11,13,15 for ob in obs_surf: ''' Surf, patches ''' surf_name= ob.getData(name_only=1) data= Curve.Get(surf_name) mat = ob.matrixWorld*SCALE_MAT # This is what a valid patch looks like """ // brush 0 { patchDef2 { NULL ( 3 3 0 0 0 ) ( ( ( -64 -64 0 0 0 ) ( -64 0 0 0 -2 ) ( -64 64 0 0 -4 ) ) ( ( 0 -64 0 2 0 ) ( 0 0 0 2 -2 ) ( 0 64 0 2 -4 ) ) ( ( 64 -64 0 4 0 ) ( 64 0 0 4 -2 ) ( 80 88 0 4 -4 ) ) ) } } """ for i, nurb in enumerate(data): u= nurb.pointsU v= nurb.pointsV if u in valid_dims and v in valid_dims: file.write('// brush %d surf_name\n' % i) file.write('{\n') file.write('patchDef2\n') file.write('{\n') file.write('NULL\n') file.write('( %d %d 0 0 0 )\n' % (u, v) ) file.write('(\n') u_iter = 0 for p in nurb: if u_iter == 0: file.write('(') u_iter += 1 # add nmapping 0 0 ? 
if PREF_GRID_SNAP.val: file.write(' ( %d %d %d 0 0 )' % round_vec(Mathutils.Vector(p[0:3]) * mat)) else: file.write(' ( %.6f %.6f %.6f 0 0 )' % tuple(Mathutils.Vector(p[0:3]) * mat)) # Move to next line if u_iter == u: file.write(' )\n') u_iter = 0 file.write(')\n') file.write('}\n') file.write('}\n') # Debugging # for p in nurb: print 'patch', p else: print "NOT EXPORTING PATCH", surf_name, u,v, 'Unsupported' if obs_mesh or obs_surf: file.write('}\n') # end worldspan print '\twriting lamps' for ob in obs_lamp: print '\t\t%s' % ob.name lamp= ob.data file.write('{\n') file.write('"classname" "light"\n') file.write('"light" "%.6f"\n' % (lamp.dist* PREF_SCALE.val)) if PREF_GRID_SNAP.val: file.write('"origin" "%d %d %d"\n' % tuple([round(axis*PREF_SCALE.val) for axis in ob.getLocation('worldspace')]) ) else: file.write('"origin" "%.6f %.6f %.6f"\n' % tuple([axis*PREF_SCALE.val for axis in ob.getLocation('worldspace')]) ) file.write('"_color" "%.6f %.6f %.6f"\n' % tuple(lamp.col)) file.write('"style" "0"\n') file.write('}\n') TOTLAMP+=1 print '\twriting empty objects as nodes' for ob in obs_empty: if write_node_map(file, ob): print '\t\t%s' % ob.name TOTNODE+=1 else: print '\t\tignoring %s' % ob.name Window.WaitCursor(0) print 'Exported Map in %.4fsec' % (sys.time()-time) print 'Brushes: %d Nodes: %d Lamps %d\n' % (TOTBRUSH, TOTNODE, TOTLAMP)
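# Both copies of export_map() optionally snap exported coordinates to whole numbers
# when PREF_GRID_SNAP is enabled (vertex coordinates, patch control points and lamp
# origins are rounded component-wise).  A minimal sketch of that rounding, written
# without the Blender API; the helper name is illustrative:
def _snap_to_grid_example(vec, scale=1.0):
    # vec: an (x, y, z) tuple; returns the scaled, component-wise rounded result
    return tuple(round(axis * scale) for axis in vec)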
def write(filename, objects,\ EXPORT_TRI=False, EXPORT_EDGES=False, EXPORT_NORMALS=False, EXPORT_NORMALS_HQ=False,\ EXPORT_UV=True, EXPORT_MTL=True, EXPORT_COPY_IMAGES=False,\ EXPORT_APPLY_MODIFIERS=True, EXPORT_ROTX90=True, EXPORT_BLEN_OBS=True,\ EXPORT_GROUP_BY_OB=False, EXPORT_GROUP_BY_MAT=False, EXPORT_MORPH_TARGET=False, EXPORT_ARMATURE=False): ''' Basic write function. The context and options must be alredy set This can be accessed externaly eg. write( 'c:\\test\\foobar.obj', Blender.Object.GetSelected() ) # Using default options. ''' def veckey3d(v): return round(v.x, 6), round(v.y, 6), round(v.z, 6) def veckey2d(v): return round(v.x, 6), round(v.y, 6) print 'OBJ Export path: "%s"' % filename temp_mesh_name = '~tmp-mesh' time1 = sys.time() scn = Scene.GetCurrent() file = open(filename, "w") # Write Header file.write('# Blender3D v%s OBJ File: %s\n' % (Blender.Get('version'), Blender.Get('filename').split('/')[-1].split('\\')[-1] )) file.write('# www.blender3d.org\n') # Tell the obj file what material file to use. if EXPORT_MTL: mtlfilename = '%s.mtl' % '.'.join(filename.split('.')[:-1]) file.write('mtllib %s\n' % ( mtlfilename.split('\\')[-1].split('/')[-1] )) # Get the container mesh. - used for applying modifiers and non mesh objects. containerMesh = meshName = tempMesh = None for meshName in Blender.NMesh.GetNames(): if meshName.startswith(temp_mesh_name): tempMesh = Mesh.Get(meshName) if not tempMesh.users: containerMesh = tempMesh if not containerMesh: containerMesh = Mesh.New(temp_mesh_name) if EXPORT_ROTX90: mat_xrot90= Blender.Mathutils.RotationMatrix(-90, 4, 'x') del meshName del tempMesh # Initialize totals, these are updated each object totverts = totuvco = totno = 1 face_vert_index = 1 globalNormals = {} # Get all meshs for ob_main in objects: for ob, ob_mat in BPyObject.getDerivedObjects(ob_main): # Will work for non meshes now! :) # getMeshFromObject(ob, container_mesh=None, apply_modifiers=True, vgroups=True, scn=None) if EXPORT_ARMATURE: write_armature(file,ob) write_poses(file,ob) me= BPyMesh.getMeshFromObject(ob, containerMesh, EXPORT_APPLY_MODIFIERS, False, scn) if not me: continue if EXPORT_UV: faceuv= me.faceUV else: faceuv = False # We have a valid mesh if EXPORT_TRI and me.faces: # Add a dummy object to it. has_quads = False for f in me.faces: if len(f) == 4: has_quads = True break if has_quads: oldmode = Mesh.Mode() Mesh.Mode(Mesh.SelectModes['FACE']) me.sel = True tempob = scn.objects.new(me) me.quadToTriangle(0) # more=0 shortest length oldmode = Mesh.Mode(oldmode) scn.objects.unlink(tempob) Mesh.Mode(oldmode) faces = [ f for f in me.faces ] if EXPORT_EDGES: edges = me.edges else: edges = [] if not (len(faces)+len(edges)+len(me.verts)): # Make sure there is somthing to write continue # dont bother with this mesh. if EXPORT_ROTX90: me.transform(ob_mat*mat_xrot90) else: me.transform(ob_mat) # High Quality Normals if EXPORT_NORMALS and faces: if EXPORT_NORMALS_HQ: BPyMesh.meshCalcNormals(me) else: # transforming normals is incorrect # when the matrix is scaled, # better to recalculate them me.calcNormals() # # Crash Blender #materials = me.getMaterials(1) # 1 == will return None in the list. materials = me.materials materialNames = [] materialItems = materials[:] if materials: for mat in materials: if mat: # !=None materialNames.append(mat.name) else: materialNames.append(None) # Cant use LC because some materials are None. # materialNames = map(lambda mat: mat.name, materials) # Bug Blender, dosent account for null materials, still broken. 
# Possible there null materials, will mess up indicies # but at least it will export, wait until Blender gets fixed. materialNames.extend((16-len(materialNames)) * [None]) materialItems.extend((16-len(materialItems)) * [None]) # Sort by Material, then images # so we dont over context switch in the obj file. if EXPORT_MORPH_TARGET: pass elif faceuv: try: faces.sort(key = lambda a: (a.mat, a.image, a.smooth)) except: faces.sort(lambda a,b: cmp((a.mat, a.image, a.smooth), (b.mat, b.image, b.smooth))) elif len(materials) > 1: try: faces.sort(key = lambda a: (a.mat, a.smooth)) except: faces.sort(lambda a,b: cmp((a.mat, a.smooth), (b.mat, b.smooth))) else: # no materials try: faces.sort(key = lambda a: a.smooth) except: faces.sort(lambda a,b: cmp(a.smooth, b.smooth)) # Set the default mat to no material and no image. contextMat = (0, 0) # Can never be this, so we will label a new material teh first chance we get. contextSmooth = None # Will either be true or false, set bad to force initialization switch. if EXPORT_BLEN_OBS or EXPORT_GROUP_BY_OB: name1 = ob.name name2 = ob.getData(1) if name1 == name2: obnamestring = fixName(name1) else: obnamestring = '%s_%s' % (fixName(name1), fixName(name2)) if EXPORT_BLEN_OBS: file.write('o %s\n' % obnamestring) # Write Object name else: # if EXPORT_GROUP_BY_OB: file.write('g %s\n' % obnamestring) # Vert mesh = ob.getData() objmat = ob.getMatrix() for i in objmat: file.write('obm: %.6f %.6f %.6f %.6f\n' % tuple(i)) vgrouplist = mesh.getVertGroupNames() file.write('vgroupcount: %i\n' % len(vgrouplist)) for vgname in vgrouplist: file.write('vgroup: %s\n' % vgname) for v in mesh.verts: file.write('v %.6f %.6f %.6f\n' % tuple(v.co)) influences = mesh.getVertexInfluences(v.index) file.write('influence: %i\n' % len(influences)) for name,weight in influences: file.write('GroupName: %s\n' % name) file.write('Weight: %f\n' % weight) # UV if faceuv: uv_face_mapping = [[0,0,0,0] for f in faces] # a bit of a waste for tri's :/ uv_dict = {} # could use a set() here for f_index, f in enumerate(faces): for uv_index, uv in enumerate(f.uv): uvkey = veckey2d(uv) try: uv_face_mapping[f_index][uv_index] = uv_dict[uvkey] except: uv_face_mapping[f_index][uv_index] = uv_dict[uvkey] = len(uv_dict) file.write('vt %.6f %.6f\n' % tuple(uv)) uv_unique_count = len(uv_dict) del uv, uvkey, uv_dict, f_index, uv_index # Only need uv_unique_count and uv_face_mapping # NORMAL, Smooth/Non smoothed. if EXPORT_NORMALS: for f in faces: if f.smooth: for v in f: noKey = veckey3d(v.no) if not globalNormals.has_key( noKey ): globalNormals[noKey] = totno totno +=1 file.write('vn %.6f %.6f %.6f\n' % noKey) else: # Hard, 1 normal from the face. noKey = veckey3d(f.no) if not globalNormals.has_key( noKey ): globalNormals[noKey] = totno totno +=1 file.write('vn %.6f %.6f %.6f\n' % noKey) if not faceuv: f_image = None for f_index, f in enumerate(faces): f_v= f.v f_smooth= f.smooth f_mat = min(f.mat, len(materialNames)-1) if faceuv: f_image = f.image f_uv= f.uv # MAKE KEY if faceuv and f_image: # Object is always true. key = materialNames[f_mat], f_image.name else: key = materialNames[f_mat], None # No image, use None instead. # CHECK FOR CONTEXT SWITCH if key == contextMat: pass # Context alredy switched, dont do anythoing else: if key[0] == None and key[1] == None: # Write a null material, since we know the context has changed. 
if EXPORT_GROUP_BY_MAT: file.write('g %s_%s\n' % (fixName(ob.name), fixName(ob.getData(1))) ) # can be mat_image or (null) file.write('usemtl (null)\n') # mat, image else: mat_data= MTL_DICT.get(key) if not mat_data: # First add to global dict so we can export to mtl # Then write mtl # Make a new names from the mat and image name, # converting any spaces to underscores with fixName. # If none image dont bother adding it to the name if key[1] == None: mat_data = MTL_DICT[key] = ('%s'%fixName(key[0])), materialItems[f_mat], f_image else: mat_data = MTL_DICT[key] = ('%s_%s' % (fixName(key[0]), fixName(key[1]))), materialItems[f_mat], f_image if EXPORT_GROUP_BY_MAT: file.write('g %s_%s_%s\n' % (fixName(ob.name), fixName(ob.getData(1)), mat_data[0]) ) # can be mat_image or (null) file.write('usemtl %s\n' % mat_data[0]) # can be mat_image or (null) contextMat = key if f_smooth != contextSmooth: if f_smooth: # on now off file.write('s 1\n') contextSmooth = f_smooth else: # was off now on file.write('s off\n') contextSmooth = f_smooth file.write('f') if faceuv: if EXPORT_NORMALS: if f_smooth: # Smoothed, use vertex normals for vi, v in enumerate(f_v): file.write( ' %d/%d/%d' % (\ v.index+totverts,\ totuvco + uv_face_mapping[f_index][vi],\ globalNormals[ veckey3d(v.no) ])) # vert, uv, normal else: # No smoothing, face normals no = globalNormals[ veckey3d(f.no) ] for vi, v in enumerate(f_v): file.write( ' %d/%d/%d' % (\ v.index+totverts,\ totuvco + uv_face_mapping[f_index][vi],\ no)) # vert, uv, normal else: # No Normals for vi, v in enumerate(f_v): file.write( ' %d/%d' % (\ v.index+totverts,\ totuvco + uv_face_mapping[f_index][vi])) # vert, uv face_vert_index += len(f_v) else: # No UV's if EXPORT_NORMALS: if f_smooth: # Smoothed, use vertex normals for v in f_v: file.write( ' %d//%d' % (\ v.index+totverts,\ globalNormals[ veckey3d(v.no) ])) else: # No smoothing, face normals no = globalNormals[ veckey3d(f.no) ] for v in f_v: file.write( ' %d//%d' % (\ v.index+totverts,\ no)) else: # No Normals for v in f_v: file.write( ' %d' % (\ v.index+totverts)) file.write('\n') # Write edges. if EXPORT_EDGES: LOOSE= Mesh.EdgeFlags.LOOSE for ed in edges: if ed.flag & LOOSE: file.write('f %d %d\n' % (ed.v1.index+totverts, ed.v2.index+totverts)) # Make the indicies global rather then per mesh totverts += len(me.verts) if faceuv: totuvco += uv_unique_count me.verts= None file.close() # Now we have all our materials, save them if EXPORT_MTL: write_mtl(mtlfilename) if EXPORT_COPY_IMAGES: dest_dir = filename # Remove chars until we are just the path. while dest_dir and dest_dir[-1] not in '\\/': dest_dir = dest_dir[:-1] if dest_dir: copy_images(dest_dir) else: print '\tError: "%s" could not be used as a base for an image path.' % filename print "OBJ Export time: %.2f" % (sys.time() - time1)
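# --- Usage sketch (added for illustration; not part of the original exporter) ---
# A minimal, hedged example of driving the OBJ write() function above from the
# Blender 2.4x Python API. The output path is a placeholder and only a few of
# the available EXPORT_* keyword arguments are shown; write_mtl() and the
# MTL_DICT global are assumed to be defined elsewhere in this script.
def _example_obj_export():
    import Blender
    sel = Blender.Object.GetSelected()              # export whatever is selected
    if sel:
        write('/tmp/example.obj', sel,
              EXPORT_TRI=True,                      # triangulate quads on export
              EXPORT_NORMALS=True,                  # write 'vn' lines
              EXPORT_UV=True,                       # write 'vt' lines when faceUV is present
              EXPORT_APPLY_MODIFIERS=True)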
def solidify(me, PREF_THICK, PREF_SKIN_SIDES=True, PREF_REM_ORIG=False, PREF_COLLAPSE_SIDES=False): # Main code function me_faces = me.faces faces_sel= [f for f in me_faces if f.sel] BPyMesh.meshCalcNormals(me) normals= [v.no for v in me.verts] vertFaces= [[] for i in xrange(len(me.verts))] for f in me_faces: no=f.no for v in f: vertFaces[v.index].append(no) # Scale the normals by the face angles from the vertex Normals. for i in xrange(len(me.verts)): length=0.0 if vertFaces[i]: for fno in vertFaces[i]: try: a= Ang(fno, normals[i]) except: a= 0 if a>=90: length+=1 elif a < SMALL_NUM: length+= 1 else: length+= angleToLength(a) length= length/len(vertFaces[i]) #print 'LENGTH %.6f' % length # normals[i]= (normals[i] * length) * PREF_THICK normals[i] *= length * PREF_THICK len_verts = len( me.verts ) len_faces = len( me_faces ) vert_mapping= [-1] * len(me.verts) verts= [] for f in faces_sel: for v in f: i= v.index if vert_mapping[i]==-1: vert_mapping[i]= len_verts + len(verts) verts.append(v.co + normals[i]) #verts= [v.co + normals[v.index] for v in me.verts] me.verts.extend( verts ) #faces= [tuple([ me.verts[v.index+len_verts] for v in reversed(f.v)]) for f in me_faces ] faces= [ tuple([vert_mapping[v.index] for v in reversed(f.v)]) for f in faces_sel ] me_faces.extend( faces ) # Old method before multi UVs """ has_uv = me.faceUV has_vcol = me.vertexColors for i, orig_f in enumerate(faces_sel): new_f= me_faces[len_faces + i] new_f.mat = orig_f.mat new_f.smooth = orig_f.smooth orig_f.sel=False new_f.sel= True new_f = me_faces[i+len_faces] if has_uv: new_f.uv = [c for c in reversed(orig_f.uv)] new_f.mode = orig_f.mode new_f.flag = orig_f.flag if orig_f.image: new_f.image = orig_f.image if has_vcol: new_f.col = [c for c in reversed(orig_f.col)] """ copy_facedata_multilayer(me, faces_sel, [me_faces[len_faces + i] for i in xrange(len(faces_sel))]) if PREF_SKIN_SIDES or PREF_COLLAPSE_SIDES: skin_side_faces= [] skin_side_faces_orig= [] # Get edges of faces that only have 1 user - so we can make walls edges = {} # So we can reference indicies that wrap back to the start. ROT_TRI_INDEX = 0,1,2,0 ROT_QUAD_INDEX = 0,1,2,3,0 for f in faces_sel: f_v= f.v for i, edgekey in enumerate(f.edge_keys): if edges.has_key(edgekey): edges[edgekey]= None else: if len(f_v) == 3: edges[edgekey] = f, f_v, i, ROT_TRI_INDEX[i+1] else: edges[edgekey] = f, f_v, i, ROT_QUAD_INDEX[i+1] del ROT_QUAD_INDEX, ROT_TRI_INDEX # So we can remove doubles with edges only. if PREF_COLLAPSE_SIDES: me.sel = False # Edges are done. extrude the single user edges. for edge_face_data in edges.itervalues(): if edge_face_data: # != None f, f_v, i1, i2 = edge_face_data v1i,v2i= f_v[i1].index, f_v[i2].index if PREF_COLLAPSE_SIDES: # Collapse cv1 = me.verts[v1i] cv2 = me.verts[vert_mapping[v1i]] cv3 = me.verts[v2i] cv4 = me.verts[vert_mapping[v2i]] cv1.co = cv2.co = (cv1.co+cv2.co)/2 cv3.co = cv4.co = (cv3.co+cv4.co)/2 cv1.sel=cv2.sel=cv3.sel=cv4.sel=True else: # Now make a new Face # skin_side_faces.append( (v1i, v2i, vert_mapping[v2i], vert_mapping[v1i]) ) skin_side_faces.append( (v2i, v1i, vert_mapping[v1i], vert_mapping[v2i]) ) skin_side_faces_orig.append((f, len(me_faces) + len(skin_side_faces_orig), i1, i2)) if PREF_COLLAPSE_SIDES: me.remDoubles(0.0001) else: me_faces.extend(skin_side_faces) # Now assign properties. 
""" # Before MultiUVs for i, origfData in enumerate(skin_side_faces_orig): orig_f, new_f_idx, i1, i2 = origfData new_f= me_faces[new_f_idx] new_f.mat= orig_f.mat new_f.smooth= orig_f.smooth if has_uv: new_f.mode= orig_f.mode new_f.flag= orig_f.flag if orig_f.image: new_f.image= orig_f.image uv1= orig_f.uv[i1] uv2= orig_f.uv[i2] new_f.uv= (uv1, uv2, uv2, uv1) if has_vcol: col1= orig_f.col[i1] col2= orig_f.col[i2] new_f.col= (col1, col2, col2, col1) """ for i, origfData in enumerate(skin_side_faces_orig): orig_f, new_f_idx, i2, i1 = origfData new_f= me_faces[new_f_idx] new_f.mat= orig_f.mat new_f.smooth= orig_f.smooth for uvlayer in me.getUVLayerNames(): me.activeUVLayer = uvlayer for i, origfData in enumerate(skin_side_faces_orig): orig_f, new_f_idx, i2, i1 = origfData new_f= me_faces[new_f_idx] new_f.mode= orig_f.mode new_f.flag= orig_f.flag new_f.image= orig_f.image uv1= orig_f.uv[i1] uv2= orig_f.uv[i2] new_f.uv= (uv1, uv2, uv2, uv1) for collayer in me.getColorLayerNames(): me.activeColorLayer = collayer for i, origfData in enumerate(skin_side_faces_orig): orig_f, new_f_idx, i2, i1 = origfData new_f= me_faces[new_f_idx] col1= orig_f.col[i1] col2= orig_f.col[i2] new_f.col= (col1, col2, col2, col1) if PREF_REM_ORIG: me_faces.delete(0, faces_sel)
def redux(ob, REDUX=0.5, BOUNDRY_WEIGHT=2.0, REMOVE_DOUBLES=False, FACE_AREA_WEIGHT=1.0, FACE_TRIANGULATE=True, DO_UV=True, DO_VCOL=True, DO_WEIGHTS=True, VGROUP_INF_REDUX= None, VGROUP_INF_WEIGHT=0.5): """ BOUNDRY_WEIGHT - 0 is no boundry weighting. 2.0 will make them twice as unlikely to collapse. FACE_AREA_WEIGHT - 0 is no weight. 1 is normal, 2.0 is higher. """ if REDUX<0 or REDUX>1.0: raise 'Error, factor must be between 0 and 1.0' elif not set: raise 'Error, this function requires Python 2.4 or a full install of Python 2.3' BOUNDRY_WEIGHT= 1+BOUNDRY_WEIGHT """ # DEBUG! if Blender.Get('rt') == 1000: DEBUG=True else: DEBUG= False """ me= ob.getData(mesh=1) me.hide= False # unhide all data,. if len(me.faces)<5: return if FACE_TRIANGULATE or REMOVE_DOUBLES: me.sel= True if FACE_TRIANGULATE: me.quadToTriangle() if REMOVE_DOUBLES: me.remDoubles(0.0001) vgroups= me.getVertGroupNames() if not me.getVertGroupNames(): DO_WEIGHTS= False if (VGROUP_INF_REDUX!= None and VGROUP_INF_REDUX not in vgroups) or\ VGROUP_INF_WEIGHT==0.0: VGROUP_INF_REDUX= None try: VGROUP_INF_REDUX_INDEX= vgroups.index(VGROUP_INF_REDUX) except: VGROUP_INF_REDUX_INDEX= -1 # del vgroups len_vgroups= len(vgroups) OLD_MESH_MODE= Blender.Mesh.Mode() Blender.Mesh.Mode(Blender.Mesh.SelectModes.VERTEX) if DO_UV and not me.faceUV: DO_UV= False if DO_VCOL and not me.vertexColors: DO_VCOL = False current_face_count= len(me.faces) target_face_count= int(current_face_count * REDUX) # % of the collapseable faces to collapse per pass. #collapse_per_pass= 0.333 # between 0.1 - lots of small nibbles, slow but high q. and 0.9 - big passes and faster. collapse_per_pass= 0.333 # between 0.1 - lots of small nibbles, slow but high q. and 0.9 - big passes and faster. """# DEBUG! if DEBUG: COUNT= [0] def rd(): if COUNT[0]< 330: COUNT[0]+=1 return me.update() Blender.Window.RedrawAll() print 'Press key for next, count "%s"' % COUNT[0] try: input() except KeyboardInterrupt: raise "Error" except: pass COUNT[0]+=1 """ class collapseEdge(object): __slots__ = 'length', 'key', 'faces', 'collapse_loc', 'v1', 'v2','uv1', 'uv2', 'col1', 'col2', 'collapse_weight' def __init__(self, ed): self.init_from_edge(ed) # So we can re-use the classes without using more memory. def init_from_edge(self, ed): self.key= ed.key self.length= ed.length self.faces= [] self.v1= ed.v1 self.v2= ed.v2 if DO_UV or DO_VCOL: self.uv1= [] self.uv2= [] self.col1= [] self.col2= [] # self.collapse_loc= None # new collapse location. # Basic weighting. #self.collapse_weight= self.length * (1+ ((ed.v1.no-ed.v2.no).length**2)) self.collapse_weight= 1.0 def collapse_locations(self, w1, w2): ''' Generate a smart location for this edge to collapse to w1 and w2 are vertex location bias ''' v1co= self.v1.co v2co= self.v2.co v1no= self.v1.no v2no= self.v2.no # Basic operation, works fine but not as good as predicting the best place. #between= ((v1co*w1) + (v2co*w2)) #self.collapse_loc= between # normalize the weights of each vert - se we can use them as scalers. wscale= w1+w2 if not wscale: # no scale? w1=w2= 0.5 else: w1/=wscale w2/=wscale length= self.length between= MidpointVecs(v1co, v2co) # Collapse # new_location = between # Replace tricky code below. this code predicts the best collapse location. # Make lines at right angles to the normals- these 2 lines will intersect and be # the point of collapsing. # Enlarge so we know they intersect: self.length*2 cv1= v1no.cross(v1no.cross(v1co-v2co)) cv2= v2no.cross(v2no.cross(v2co-v1co)) # Scale to be less then the edge lengths. 
cv2.length = cv1.length = 1 cv1 = cv1 * (length* 0.4) cv2 = cv2 * (length* 0.4) smart_offset_loc= between + (cv1 + cv2) # Now we need to blend between smart_offset_loc and w1/w2 # you see were blending between a vert and the edges midpoint, so we cant use a normal weighted blend. if w1 > 0.5: # between v1 and smart_offset_loc #self.collapse_loc= v1co*(w2+0.5) + smart_offset_loc*(w1-0.5) w2*=2 w1= 1-w2 new_loc_smart= v1co*w1 + smart_offset_loc*w2 else: # w between v2 and smart_offset_loc w1*=2 w2= 1-w1 new_loc_smart= v2co*w2 + smart_offset_loc*w1 if new_loc_smart.x != new_loc_smart.x: # NAN LOCATION, revert to between new_loc_smart= None return new_loc_smart, between, v1co*0.99999 + v2co*0.00001, v1co*0.00001 + v2co*0.99999 class collapseFace(object): __slots__ = 'verts', 'normal', 'area', 'index', 'orig_uv', 'orig_col', 'uv', 'col' # , 'collapse_edge_count' def __init__(self, f): self.init_from_face(f) def init_from_face(self, f): self.verts= f.v self.normal= f.no self.area= f.area self.index= f.index if DO_UV: self.orig_uv= [uv_key(uv) for uv in f.uv] self.uv= f.uv if DO_VCOL: self.orig_col= [col_key(col) for col in f.col] self.col= f.col collapse_edges= collapse_faces= None # So meshCalcNormals can avoid making a new list all the time. reuse_vertNormals= [ Vector() for v in xrange(len(me.verts)) ] while target_face_count <= len(me.faces): BPyMesh.meshCalcNormals(me, reuse_vertNormals) if DO_WEIGHTS: #groupNames, vWeightDict= BPyMesh.meshWeight2Dict(me) groupNames, vWeightList= BPyMesh.meshWeight2List(me) # THIS CRASHES? Not anymore. verts= list(me.verts) edges= list(me.edges) faces= list(me.faces) # THIS WORKS #verts= me.verts #edges= me.edges #faces= me.faces # if DEBUG: DOUBLE_CHECK= [0]*len(verts) me.sel= False if not collapse_faces: # Initialize the list. collapse_faces= [collapseFace(f) for f in faces] collapse_edges= [collapseEdge(ed) for ed in edges] else: for i, ed in enumerate(edges): collapse_edges[i].init_from_edge(ed) # Strip the unneeded end off the list collapse_edges[i+1:]= [] for i, f in enumerate(faces): collapse_faces[i].init_from_face(f) # Strip the unneeded end off the list collapse_faces[i+1:]= [] collapse_edges_dict= dict( [(ced.key, ced) for ced in collapse_edges] ) # Store verts edges. vert_ed_users= [[] for i in xrange(len(verts))] for ced in collapse_edges: vert_ed_users[ced.key[0]].append(ced) vert_ed_users[ced.key[1]].append(ced) # Store face users vert_face_users= [[] for i in xrange(len(verts))] # Have decieded not to use this. area is better. #face_perim= [0.0]* len(me.faces) for ii, cfa in enumerate(collapse_faces): for i, v1 in enumerate(cfa.verts): vert_face_users[v1.index].append( (i,cfa) ) # add the uv coord to the vert v2 = cfa.verts[i-1] i1= v1.index i2= v2.index if i1>i2: ced= collapse_edges_dict[i2,i1] else: ced= collapse_edges_dict[i1,i2] ced.faces.append(cfa) if DO_UV or DO_VCOL: # if the edge is flipped from its order in the face then we need to flip the order indicies. if cfa.verts[i]==ced.v1: i1,i2 = i, i-1 else: i1,i2 = i-1, i if DO_UV: ced.uv1.append( cfa.orig_uv[i1] ) ced.uv2.append( cfa.orig_uv[i2] ) if DO_VCOL: ced.col1.append( cfa.orig_col[i1] ) ced.col2.append( cfa.orig_col[i2] ) # PERIMITER #face_perim[ii]+= ced.length # How weight the verts by the area of their faces * the normal difference. 
# when the edge collapses, to vert weights are taken into account vert_weights= [0.5] * len(verts) for ii, vert_faces in enumerate(vert_face_users): for f in vert_faces: try: no_ang= (Ang(verts[ii].no, f[1].normal)/180) * f[1].area except: no_ang= 1.0 vert_weights[ii] += no_ang # Use a vertex group as a weighting. if VGROUP_INF_REDUX!=None: # Get Weights from a vgroup. """ vert_weights_map= [1.0] * len(verts) for i, wd in enumerate(vWeightDict): try: vert_weights_map[i]= 1+(wd[VGROUP_INF_REDUX] * VGROUP_INF_WEIGHT) except: pass """ vert_weights_map= [1+(wl[VGROUP_INF_REDUX_INDEX]*VGROUP_INF_WEIGHT) for wl in vWeightList ] # BOUNDRY CHECKING AND WEIGHT EDGES. CAN REMOVE # Now we know how many faces link to an edge. lets get all the boundry verts if BOUNDRY_WEIGHT > 0: verts_boundry= [1] * len(verts) #for ed_idxs, faces_and_uvs in edge_faces_and_uvs.iteritems(): for ced in collapse_edges: if len(ced.faces) < 2: for key in ced.key: # only ever 2 key indicies. verts_boundry[key]= 2 for ced in collapse_edges: b1= verts_boundry[ced.key[0]] b2= verts_boundry[ced.key[1]] if b1 != b2: # Edge has 1 boundry and 1 non boundry vert. weight higher ced.collapse_weight= BOUNDRY_WEIGHT #elif b1==b2==2: # if both are on a seam then weigh half as bad. # ced.collapse_weight= ((BOUNDRY_WEIGHT-1)/2) +1 # weight the verts by their boundry status del b1 del b2 for ii, boundry in enumerate(verts_boundry): if boundry==2: vert_weights[ii] *= BOUNDRY_WEIGHT vert_collapsed= verts_boundry del verts_boundry else: vert_collapsed= [1] * len(verts) # Best method, no quick hacks here, Correction. Should be the best but needs tweaks. def ed_set_collapse_error(ced): # Use the vertex weights to bias the new location. new_locs= ced.collapse_locations(vert_weights[ced.key[0]], vert_weights[ced.key[1]]) # Find the connecting faces of the 2 verts. i1, i2= ced.key test_faces= set() for i in (i1,i2): # faster then LC's for f in vert_face_users[i]: test_faces.add(f[1].index) for f in ced.faces: test_faces.remove(f.index) v1_orig= Vector(ced.v1.co) v2_orig= Vector(ced.v2.co) def test_loc(new_loc): ''' Takes a location and tests the error without changing anything ''' new_weight= ced.collapse_weight ced.v1.co= ced.v2.co= new_loc new_nos= [faces[i].no for i in test_faces] # So we can compare the befire and after normals ced.v1.co= v1_orig ced.v2.co= v2_orig # now see how bad the normals are effected angle_diff= 1.0 for ii, i in enumerate(test_faces): # local face index, global face index cfa= collapse_faces[i] # this collapse face try: # can use perim, but area looks better. if FACE_AREA_WEIGHT: # Psudo code for wrighting # angle_diff= The before and after angle difference between the collapsed and un-collapsed face. # ... devide by 180 so the value will be between 0 and 1.0 # ... add 1 so we can use it as a multiplyer and not make the area have no eefect (below) # area_weight= The faces original area * the area weight # ... add 1.0 so a small area face dosent make the angle_diff have no effect. # # Now multiply - (angle_diff * area_weight) # ... The weight will be a minimum of 1.0 - we need to subtract this so more faces done give the collapse an uneven weighting. angle_diff+= ((1+(Ang(cfa.normal, new_nos[ii])/180)) * (1+(cfa.area * FACE_AREA_WEIGHT))) -1 # 4 is how much to influence area else: angle_diff+= (Ang(cfa.normal), new_nos[ii])/180 except: pass # This is very arbirary, feel free to modify try: no_ang= (Ang(ced.v1.no, ced.v2.no)/180) + 1 except: no_ang= 2.0 # do *= because we face the boundry weight to initialize the weight. 
1.0 default. new_weight *= ((no_ang * ced.length) * (1-(1/angle_diff)))# / max(len(test_faces), 1) return new_weight # End testloc # Test the collapse locatons collapse_loc_best= None collapse_weight_best= 1000000000 ii= 0 for collapse_loc in new_locs: if collapse_loc: # will only ever fail if smart loc is NAN test_weight= test_loc(collapse_loc) if test_weight < collapse_weight_best: iii= ii collapse_weight_best = test_weight collapse_loc_best= collapse_loc ii+=1 ced.collapse_loc= collapse_loc_best ced.collapse_weight= collapse_weight_best # are we using a weight map if VGROUP_INF_REDUX: v= vert_weights_map[i1]+vert_weights_map[i2] ced.collapse_weight*= v # End collapse Error # We can calculate the weights on __init__ but this is higher qualuity. for ced in collapse_edges: if ced.faces: # dont collapse faceless edges. ed_set_collapse_error(ced) # Wont use the function again. del ed_set_collapse_error # END BOUNDRY. Can remove # sort by collapse weight try: collapse_edges.sort(key = lambda ced: ced.collapse_weight) # edges will be used for sorting except: collapse_edges.sort(lambda ced1, ced2: cmp(ced1.collapse_weight, ced2.collapse_weight)) # edges will be used for sorting vert_collapsed= [0]*len(verts) collapse_edges_to_collapse= [] # Make a list of the first half edges we can collapse, # these will better edges to remove. collapse_count=0 for ced in collapse_edges: if ced.faces: i1, i2= ced.key # Use vert selections if vert_collapsed[i1] or vert_collapsed[i2]: pass else: # Now we know the verts havnyt been collapsed. vert_collapsed[i2]= vert_collapsed[i1]= 1 # Dont collapse again. collapse_count+=1 collapse_edges_to_collapse.append(ced) # Get a subset of the entire list- the first "collapse_per_pass", that are best to collapse. if collapse_count > 4: collapse_count = int(collapse_count*collapse_per_pass) else: collapse_count = len(collapse_edges) # We know edge_container_list_collapse can be removed. for ced in collapse_edges_to_collapse: """# DEBUG! if DEBUG: if DOUBLE_CHECK[ced.v1.index] or\ DOUBLE_CHECK[ced.v2.index]: raise 'Error' else: DOUBLE_CHECK[ced.v1.index]=1 DOUBLE_CHECK[ced.v2.index]=1 tmp= (ced.v1.co+ced.v2.co)*0.5 Blender.Window.SetCursorPos(tmp.x, tmp.y, tmp.z) Blender.Window.RedrawAll() """ # Chech if we have collapsed our quota. collapse_count-=1 if not collapse_count: break current_face_count -= len(ced.faces) # Find and assign the real weights based on collapse loc. # Find the weights from the collapse error if DO_WEIGHTS or DO_UV or DO_VCOL: i1, i2= ced.key # Dont use these weights since they may not have been used to make the collapse loc. #w1= vert_weights[i1] #w2= vert_weights[i2] w1= (ced.v2.co-ced.collapse_loc).length w2= (ced.v1.co-ced.collapse_loc).length # Normalize weights wscale= w1+w2 if not wscale: # no scale? w1=w2= 0.5 else: w1/= wscale w2/= wscale # Interpolate the bone weights. if DO_WEIGHTS: # add verts vgroups to eachother wl1= vWeightList[i1] # v1 weight dict wl2= vWeightList[i2] # v2 weight dict for group_index in xrange(len_vgroups): wl1[group_index]= wl2[group_index]= (wl1[group_index]*w1) + (wl2[group_index]*w2) # Done finding weights. if DO_UV or DO_VCOL: # Handel UV's and vert Colors! 
for v, my_weight, other_weight, edge_my_uvs, edge_other_uvs, edge_my_cols, edge_other_cols in (\ (ced.v1, w1, w2, ced.uv1, ced.uv2, ced.col1, ced.col2),\ (ced.v2, w2, w1, ced.uv2, ced.uv1, ced.col2, ced.col1)\ ): uvs_mixed= [ uv_key_mix(edge_my_uvs[iii], edge_other_uvs[iii], my_weight, other_weight) for iii in xrange(len(edge_my_uvs)) ] cols_mixed= [ col_key_mix(edge_my_cols[iii], edge_other_cols[iii], my_weight, other_weight) for iii in xrange(len(edge_my_cols)) ] for face_vert_index, cfa in vert_face_users[v.index]: if len(cfa.verts)==3 and cfa not in ced.faces: # if the face is apart of this edge then dont bother finding the uvs since the face will be removed anyway. if DO_UV: # UV COORDS uvk= cfa.orig_uv[face_vert_index] try: tex_index= edge_my_uvs.index(uvk) except: tex_index= None """ # DEBUG! if DEBUG: print 'not found', uvk, 'in', edge_my_uvs, 'ed index', ii, '\nwhat about', edge_other_uvs """ if tex_index != None: # This face uses a uv in the collapsing face. - do a merge other_uv= edge_other_uvs[tex_index] uv_vec= cfa.uv[face_vert_index] uv_vec.x, uv_vec.y= uvs_mixed[tex_index] # TEXFACE COLORS if DO_VCOL: colk= cfa.orig_col[face_vert_index] try: tex_index= edge_my_cols.index(colk) except: pass if tex_index != None: other_col= edge_other_cols[tex_index] col_ob= cfa.col[face_vert_index] col_ob.r, col_ob.g, col_ob.b= cols_mixed[tex_index] # DEBUG! if DEBUG: rd() # Execute the collapse ced.v1.sel= ced.v2.sel= True # Select so remove doubles removed the edges and faces that use it ced.v1.co= ced.v2.co= ced.collapse_loc # DEBUG! if DEBUG: rd() if current_face_count <= target_face_count: break # Copy weights back to the mesh before we remove doubles. if DO_WEIGHTS: #BPyMesh.dict2MeshWeight(me, groupNames, vWeightDict) BPyMesh.list2MeshWeight(me, groupNames, vWeightList) doubles= me.remDoubles(0.0001) current_face_count= len(me.faces) if current_face_count <= target_face_count or not doubles: # not doubles shoule never happen. break me.update() Blender.Mesh.Mode(OLD_MESH_MODE)
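# --- Usage sketch (added for illustration; not part of the original script) ---
# A hedged example of calling the redux() poly reducer above on the active
# object; REDUX=0.5 asks for roughly half of the current face count. The helper
# names redux() uses internally (Ang, MidpointVecs, uv_key, col_key, uv_key_mix,
# col_key_mix) are assumed to be defined elsewhere in this script.
def _example_redux():
    import Blender
    scn = Blender.Scene.GetCurrent()
    ob = scn.objects.active
    if ob and ob.getType() == 'Mesh':
        redux(ob, REDUX=0.5, BOUNDRY_WEIGHT=2.0, FACE_TRIANGULATE=True)
        Blender.Window.RedrawAll()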
def write(filename, objects,\ EXPORT_NORMALS_HQ=False,\ EXPORT_MTL=True, EXPORT_COPY_IMAGES=False,\ EXPORT_APPLY_MODIFIERS=True, EXPORT_BLEN_OBS=True,\ EXPORT_GROUP_BY_OB=False): ''' Basic write function. The context and options must be alredy set This can be accessed externaly eg. write( 'c:\\test\\foobar.obj', Blender.Object.GetSelected() ) # Using default options. ''' def veckey3d(v): return round(v.x, 6), round(v.y, 6), round(v.z, 6) def veckey2d(v): return round(v.x, 6), round(v.y, 6) print 'WTF Export path: "%s"' % filename temp_mesh_name = '~tmp-mesh' time1 = sys.time() scn = Scene.GetCurrent() file = open(filename, "w") file.write('<?xml version="1.0"?>\n') file.write('<OPEN_TRACK>\n') # Write Header # file.write('\n<!--\n' # + ' Blender3D v%s WTF File: %s\n' % (Blender.Get('version'), Blender.Get('filename').split('/')[-1].split('\\')[-1] ) # + ' www.blender3d.org\n' # + '-->\n\n') # Get the container mesh. - used for applying modifiers and non mesh objects. containerMesh = meshName = tempMesh = None for meshName in Blender.NMesh.GetNames(): if meshName.startswith(temp_mesh_name): tempMesh = Mesh.Get(meshName) if not tempMesh.users: containerMesh = tempMesh if not containerMesh: containerMesh = Mesh.New(temp_mesh_name) del meshName del tempMesh # Initialize totals, these are updated each object totverts = totuvco = totno = 0 face_vert_index = 0 globalNormals = {} file.write('\n<library_objects>\n') # Get all meshs for ob_main in objects: obnamestring = fixName(ob_main.name) file.write('\t<object id="%s">\n' % obnamestring) # Write Object name for ob, ob_mat in BPyObject.getDerivedObjects(ob_main): # Will work for non meshes now! :) # getMeshFromObject(ob, container_mesh=None, apply_modifiers=True, vgroups=True, scn=None) me = BPyMesh.getMeshFromObject(ob, containerMesh, EXPORT_APPLY_MODIFIERS, False, scn) if not me: file.write('\t\t<loc>%.6f %.6f %.6f</loc>\n' % tuple(ob_main.loc)) # Write Object name file.write('\t\t<rot>%.6f %.6f %.6f</rot>\n' % tuple(ob_main.rot)) # Write Object name continue faceuv = me.faceUV # We have a valid mesh if me.faces: # Add a dummy object to it. has_quads = False for f in me.faces: if len(f) == 4: has_quads = True break if has_quads: oldmode = Mesh.Mode() Mesh.Mode(Mesh.SelectModes['FACE']) me.sel = True tempob = scn.objects.new(me) me.quadToTriangle(0) # more=0 shortest length oldmode = Mesh.Mode(oldmode) scn.objects.unlink(tempob) Mesh.Mode(oldmode) # Make our own list so it can be sorted to reduce context switching faces = [ f for f in me.faces ] edges = me.edges if not (len(faces)+len(edges)+len(me.verts)): # Make sure there is somthing to write continue # dont bother with this mesh. me.transform(ob_mat) # High Quality Normals if faces: if EXPORT_NORMALS_HQ: BPyMesh.meshCalcNormals(me) else: # transforming normals is incorrect # when the matrix is scaled, # better to recalculate them me.calcNormals() # # Crash Blender #materials = me.getMaterials(1) # 1 == will return None in the list. materials = me.materials materialNames = [] materialItems = materials[:] if materials: for mat in materials: if mat: # !=None materialNames.append(mat.name) else: materialNames.append(None) # Cant use LC because some materials are None. # materialNames = map(lambda mat: mat.name, materials) # Bug Blender, dosent account for null materials, still broken. # Possible there null materials, will mess up indicies # but at least it will export, wait until Blender gets fixed. 
materialNames.extend((16-len(materialNames)) * [None]) materialItems.extend((16-len(materialItems)) * [None]) # Sort by Material, then images # so we dont over context switch in the obj file. if faceuv: try: faces.sort(key = lambda a: (a.mat, a.image, a.smooth)) except: faces.sort(lambda a,b: cmp((a.mat, a.image, a.smooth), (b.mat, b.image, b.smooth))) elif len(materials) > 1: try: faces.sort(key = lambda a: (a.mat, a.smooth)) except: faces.sort(lambda a,b: cmp((a.mat, a.smooth), (b.mat, b.smooth))) else: # no materials try: faces.sort(key = lambda a: a.smooth) except: faces.sort(lambda a,b: cmp(a.smooth, b.smooth)) # Set the default mat to no material and no image. contextMat = (0, 0) # Can never be this, so we will label a new material teh first chance we get. contextSmooth = None # Will either be true or false, set bad to force initialization switch. if len(faces) > 0: file.write('\t\t<mesh>\n') else: file.write('\t\t<curve>\n') vertname = "%s-Vertices" % obnamestring vertarrayname = "%s-Array" % vertname normname = "%s-Normals" % obnamestring normarrayname = "%s-Array" % normname texname = "%s-TexCoord" % obnamestring texarrayname = "%s-Array" % texname # Vert file.write('\t\t\t<float_array count="%d" id="%s">' % (len(me.verts), vertarrayname)) for v in me.verts: file.write(' %.6f %.6f %.6f' % tuple(v.co)) file.write('</float_array>\n') file.write('\t\t\t<vertices id="%s" source="#%s" />\n' % (vertname, vertarrayname)) # UV if faceuv: file.write('\t\t\t<float_array id="%s">' % texarrayname) uv_face_mapping = [[0,0,0,0] for f in faces] # a bit of a waste for tri's :/ uv_dict = {} # could use a set() here for f_index, f in enumerate(faces): for uv_index, uv in enumerate(f.uv): uvkey = veckey2d(uv) try: uv_face_mapping[f_index][uv_index] = uv_dict[uvkey] except: uv_face_mapping[f_index][uv_index] = uv_dict[uvkey] = len(uv_dict) file.write(' %.6f %.6f' % tuple(uv)) uv_unique_count = len(uv_dict) del uv, uvkey, uv_dict, f_index, uv_index # Only need uv_unique_count and uv_face_mapping file.write('</float_array>\n') file.write('\t\t\t<texcoords id="%s" source="#%s" />\n' % (texname, texarrayname)) # NORMAL, Smooth/Non smoothed. if len(faces) > 0: file.write('\t\t\t<float_array id="%s">' % normarrayname) for f in faces: if f.smooth: for v in f: noKey = veckey3d(v.no) if not globalNormals.has_key( noKey ): globalNormals[noKey] = totno totno +=1 file.write(' %.6f %.6f %.6f' % noKey) else: # Hard, 1 normal from the face. noKey = veckey3d(f.no) if not globalNormals.has_key( noKey ): globalNormals[noKey] = totno totno +=1 file.write(' %.6f %.6f %.6f' % noKey) file.write('</float_array>\n') file.write('\t\t\t<normals id="%s" source="#%s" />\n' % (normname, normarrayname)) if not faceuv: f_image = None in_triangles = False for f_index, f in enumerate(faces): f_v= f.v f_smooth= f.smooth f_mat = min(f.mat, len(materialNames)-1) if faceuv: f_image = f.image f_uv= f.uv # MAKE KEY if faceuv and f_image: # Object is always true. key = materialNames[f_mat], f_image.name else: key = materialNames[f_mat], None # No image, use None instead. # CHECK FOR CONTEXT SWITCH if key == contextMat: pass # Context alredy switched, dont do anythoing else: if key[0] == None and key[1] == None: # Write a null material, since we know the context has changed. 
if in_triangles: file.write('</p>\n') file.write('\t\t\t</triangles>\n') file.write('\t\t\t<triangles id="%s_%s">\n' % (fixName(ob.name), fixName(ob.getData(1)))) in_triangles = True else: mat_data= MTL_DICT.get(key) if not mat_data: # First add to global dict so we can export to mtl # Then write mtl # Make a new names from the mat and image name, # converting any spaces to underscores with fixName. # If none image dont bother adding it to the name if key[1] == None: mat_data = MTL_DICT[key] = ('%s'%fixName(key[0])), materialItems[f_mat], f_image else: mat_data = MTL_DICT[key] = ('%s_%s' % (fixName(key[0]), fixName(key[1]))), materialItems[f_mat], f_image if in_triangles: file.write('</p>\n') file.write('\t\t\t</triangles>\n') file.write('\t\t\t<triangles id="%s_%s_%s" material="#%s">\n' % (fixName(ob.name), fixName(ob.getData(1)), mat_data[0], mat_data[0]) ) in_triangles = True file.write('\t\t\t\t<input offset="0" semantic="VERTEX" source="#%s" />\n' % vertname) file.write('\t\t\t\t<input offset="1" semantic="NORMAL" source="#%s" />\n' % normname) if faceuv: file.write('\t\t\t\t<input offset="2" semantic="TEXCOORD" source="#%s" />\n' % texname) file.write('\t\t\t\t<p>') contextMat = key if f_smooth != contextSmooth: if f_smooth: # on now off # file.write('s 1\n') contextSmooth = f_smooth else: # was off now on # file.write('s off\n') contextSmooth = f_smooth if faceuv: if f_smooth: # Smoothed, use vertex normals for vi, v in enumerate(f_v): file.write( ' %d %d %d' % (\ v.index+totverts,\ totuvco + uv_face_mapping[f_index][vi],\ globalNormals[ veckey3d(v.no) ])) # vert, uv, normal else: # No smoothing, face normals no = globalNormals[ veckey3d(f.no) ] for vi, v in enumerate(f_v): file.write( ' %d %d %d' % (\ v.index+totverts,\ totuvco + uv_face_mapping[f_index][vi],\ no)) # vert, uv, normal face_vert_index += len(f_v) else: # No UV's if f_smooth: # Smoothed, use vertex normals for v in f_v: file.write( ' %d %d' % (\ v.index+totverts,\ globalNormals[ veckey3d(v.no) ])) else: # No smoothing, face normals no = globalNormals[ veckey3d(f.no) ] for v in f_v: file.write( ' %d %d' % (\ v.index+totverts,\ no)) if in_triangles: file.write('</p>\n') file.write('\t\t\t</triangles>\n') # Write edges. LOOSE = Mesh.EdgeFlags.LOOSE has_edge = False for ed in edges: if ed.flag & LOOSE: has_edge = True if has_edge: file.write('\t\t\t<edges>\n') file.write('\t\t\t\t<input offset="0" semantic="VERTEX" source="#%s" />\n' % vertname) file.write('\t\t\t\t<p>') for ed in edges: if ed.flag & LOOSE: file.write(' %d %d' % (ed.v1.index+totverts, ed.v2.index+totverts)) file.write('</p>\n') file.write('\t\t\t</edges>\n') # Make the indicies global rather then per mesh # totverts += len(me.verts) # if faceuv: # totuvco += uv_unique_count me.verts= None if len(faces) > 0: file.write('\t\t</mesh>\n') else: file.write('\t\t</curve>\n') file.write('\t</object>\n') file.write('</library_objects>\n\n') # Now we have all our materials, save them if EXPORT_MTL: write_library_materials(file) # Save the groups write_library_groups(file) file.write('</OPEN_TRACK>\n') file.close() if EXPORT_COPY_IMAGES: dest_dir = filename # Remove chars until we are just the path. while dest_dir and dest_dir[-1] not in '\\/': dest_dir = dest_dir[:-1] if dest_dir: copy_images(dest_dir) else: print '\tError: "%s" could not be used as a base for an image path.' % filename print "WTF Export time: %.2f" % (sys.time() - time1)
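# --- Usage sketch (added for illustration; not part of the original exporter) ---
# A hedged example of invoking the OPEN_TRACK/WTF write() variant above through
# Blender's file selector. write_library_materials(), write_library_groups() and
# copy_images() are assumed to be defined elsewhere in the same script, and the
# suggested '.wtf' extension is only a guess at the intended default.
def _example_wtf_export():
    import Blender
    def _do_export(filename):
        write(filename, Blender.Object.GetSelected(), EXPORT_APPLY_MODIFIERS=True)
    Blender.Window.FileSelector(_do_export, 'Export WTF', Blender.sys.makename(ext='.wtf'))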
def vertexFakeAO(me, PREF_BLUR_ITERATIONS, PREF_BLUR_STRENGTH, PREF_CLAMP_CONCAVE, PREF_CLAMP_CONVEX, PREF_SHADOW_ONLY, PREF_SEL_ONLY):
    Window.WaitCursor(1)
    Ang = Mathutils.AngleBetweenVecs

    BPyMesh.meshCalcNormals(me)

    vert_tone = [0.0] * len(me.verts)
    vert_tone_count = [0] * len(me.verts)

    min_tone = 0
    max_tone = 0

    for i, f in enumerate(me.faces):
        fc = f.cent
        fno = f.no
        for v in f.v:
            vno = v.no  # get a scaled down normal.
            dot = vno.dot(v.co) - vno.dot(fc)
            vert_tone_count[v.index] += 1
            try:
                a = Ang(vno, fno)
            except:
                continue

            # Convex
            if dot > 0:
                a = min(PREF_CLAMP_CONVEX, a)
                if not PREF_SHADOW_ONLY:
                    vert_tone[v.index] += a
            else:
                a = min(PREF_CLAMP_CONCAVE, a)
                vert_tone[v.index] -= a

    # average the accumulated tone by the number of faces that touched each vert
    for i, tones in enumerate(vert_tone):
        if vert_tone_count[i]:
            vert_tone[i] = vert_tone[i] / vert_tone_count[i]

    # Below we use edges to blur along so the edges need counting, not the faces
    vert_tone_count = [0] * len(me.verts)
    for ed in me.edges:
        vert_tone_count[ed.v1.index] += 1
        vert_tone_count[ed.v2.index] += 1

    # Blur tone
    blur = PREF_BLUR_STRENGTH
    blur_inv = 1.0 - PREF_BLUR_STRENGTH

    for i in xrange(PREF_BLUR_ITERATIONS):
        # backup the original tones
        orig_vert_tone = list(vert_tone)

        for ed in me.edges:
            i1 = ed.v1.index
            i2 = ed.v2.index

            val1 = (orig_vert_tone[i2] * blur) + (orig_vert_tone[i1] * blur_inv)
            val2 = (orig_vert_tone[i1] * blur) + (orig_vert_tone[i2] * blur_inv)

            # Apply the tone divided by the number of edges connected
            vert_tone[i1] += val1 / max(vert_tone_count[i1], 1)
            vert_tone[i2] += val2 / max(vert_tone_count[i2], 1)

    min_tone = min(vert_tone)
    max_tone = max(vert_tone)

    #print min_tone, max_tone

    tone_range = max_tone - min_tone
    if max_tone == min_tone:
        return

    for f in me.faces:
        if not PREF_SEL_ONLY or f.sel:
            f_col = f.col
            for i, v in enumerate(f):
                col = f_col[i]
                tone = vert_tone[v.index]
                tone = (tone - min_tone) / tone_range

                col.r = int(tone * col.r)
                col.g = int(tone * col.g)
                col.b = int(tone * col.b)

    Window.WaitCursor(0)
def vertexFakeAO(me, PREF_BLUR_ITERATIONS, PREF_BLUR_RADIUS, PREF_MIN_EDLEN, PREF_CLAMP_CONCAVE, PREF_CLAMP_CONVEX, PREF_SHADOW_ONLY, PREF_SEL_ONLY):
    Window.WaitCursor(1)
    DotVecs = Mathutils.DotVecs
    Ang = Mathutils.AngleBetweenVecs

    BPyMesh.meshCalcNormals(me)

    vert_tone = [0.0] * len(me.verts)
    vert_tone_count = [0] * len(me.verts)

    min_tone = 0
    max_tone = 0

    for i, f in enumerate(me.faces):
        fc = f.cent
        fno = f.no
        for v in f.v:
            vno = v.no  # get a scaled down normal.
            dot = DotVecs(vno, v.co) - DotVecs(vno, fc)
            vert_tone_count[v.index] += 1
            try:
                a = Ang(vno, fno)
            except:
                continue

            # Convex
            if dot > 0:
                a = min(PREF_CLAMP_CONVEX, a)
                if not PREF_SHADOW_ONLY:
                    vert_tone[v.index] += a
            else:
                a = min(PREF_CLAMP_CONCAVE, a)
                vert_tone[v.index] -= a

    # average the accumulated tone by the number of faces that touched each vert
    for i, tones in enumerate(vert_tone):
        if vert_tone_count[i]:
            vert_tone[i] = vert_tone[i] / vert_tone_count[i]

    # BLUR TONE
    edge_lengths = [ed.length for ed in me.edges]

    for i in xrange(PREF_BLUR_ITERATIONS):
        orig_vert_tone = list(vert_tone)
        for ii, ed in enumerate(me.edges):
            i1 = ed.v1.index
            i2 = ed.v2.index

            l = edge_lengths[ii]

            f = 1.0  # falloff factor, scaled by the edge length within the blur radius
            if l > PREF_MIN_EDLEN and l < PREF_BLUR_RADIUS:
                f = l / PREF_BLUR_RADIUS

            len_vert_tone_list_i1 = vert_tone_count[i1]
            len_vert_tone_list_i2 = vert_tone_count[i2]

            if not len_vert_tone_list_i1:
                len_vert_tone_list_i1 = 1
            if not len_vert_tone_list_i2:
                len_vert_tone_list_i2 = 1

            val1 = (orig_vert_tone[i2] / len_vert_tone_list_i1) / f
            val2 = (orig_vert_tone[i1] / len_vert_tone_list_i2) / f

            vert_tone[i1] += val1
            vert_tone[i2] += val2

    min_tone = min(vert_tone)
    max_tone = max(vert_tone)

    #print min_tone, max_tone

    tone_range = max_tone - min_tone
    if max_tone == min_tone:
        return

    for f in me.faces:
        if not PREF_SEL_ONLY or f.sel:
            f_col = f.col
            for i, v in enumerate(f):
                col = f_col[i]
                tone = vert_tone[v.index]
                tone = (tone - min_tone) / tone_range

                col.r = int(tone * col.r)
                col.g = int(tone * col.g)
                col.b = int(tone * col.b)

    Window.WaitCursor(0)
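# --- Usage sketch (added for illustration; not part of the original script) ---
# A hedged example of applying the radius-based vertexFakeAO() variant above to
# the active mesh. Vertex colors are enabled first so f.col can be written; the
# numeric settings are placeholders rather than recommended values.
def _example_fake_ao():
    import Blender
    scn = Blender.Scene.GetCurrent()
    ob = scn.objects.active
    if ob and ob.getType() == 'Mesh':
        me = ob.getData(mesh=1)
        if not me.vertexColors:
            me.vertexColors = True              # add a vertex color layer to write into
        vertexFakeAO(me,
                     PREF_BLUR_ITERATIONS=2,
                     PREF_BLUR_RADIUS=0.2,
                     PREF_MIN_EDLEN=0.01,
                     PREF_CLAMP_CONCAVE=90,
                     PREF_CLAMP_CONVEX=20,
                     PREF_SHADOW_ONLY=False,
                     PREF_SEL_ONLY=False)
        me.update()
        Blender.Window.RedrawAll()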