Example #1
def copy_act_vgroup(me, PREF_NAME, PREF_SEL_ONLY):
    Window.WaitCursor(1)
    groupNames, vWeightDict = BPyMesh.meshWeight2Dict(me)
    act_group = me.activeGroup

    if not PREF_SEL_ONLY:
        for wd in vWeightDict:
            try:
                wd[PREF_NAME] = wd[act_group]
            except:
                pass
    else:
        # Selected faces only
        verts = {}  # should use set
        for f in me.faces:
            if f.sel:
                for v in f:
                    verts[v.index] = None

        for i in verts.iterkeys():
            wd = vWeightDict[i]
            try:
                wd[PREF_NAME] = wd[act_group]
            except:
                pass

    groupNames.append(PREF_NAME)
    # Copy weights back to the mesh.
    BPyMesh.dict2MeshWeight(me, groupNames, vWeightDict)
    Window.WaitCursor(0)
Example #2
def copy_act_vgroup(me, PREF_NAME, PREF_SEL_ONLY):
	Window.WaitCursor(1)
	groupNames, vWeightDict= BPyMesh.meshWeight2Dict(me)
	act_group= me.activeGroup
	
	if not PREF_SEL_ONLY:
		for wd in vWeightDict:
			try:		wd[PREF_NAME] = wd[act_group]
			except:		pass
	else:
		# Selected faces only
		verts = {} # should use set
		for f in me.faces:
			if f.sel:
				for v in f:
					verts[v.index] = None
		
		for i in verts.iterkeys():
			wd = vWeightDict[i]
			try:		wd[PREF_NAME] = wd[act_group]
			except:		pass
		
		
	
	groupNames.append(PREF_NAME)
	# Copy weights back to the mesh.
	BPyMesh.dict2MeshWeight(me, groupNames, vWeightDict)
	Window.WaitCursor(0)
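Example #1 and Example #2 are the same Blender 2.4x helper (one copy has simply been re-indented). For context, a minimal, hedged driver sketch, assuming the function above is pasted into a script with the usual imports; the target group name is purely illustrative and the object/mesh access mirrors the exporter examples further down:

import Blender
import BPyMesh
from Blender import Window

scn = Blender.Scene.GetCurrent()
ob = scn.objects.active                # active object, as in the exporter examples
me = ob.getData(mesh=1)                # wrapped Mesh data, as in the later examples
copy_act_vgroup(me, 'CopiedGroup', 0)  # illustrative name; 0 = copy weights for all verts
me.update()
Window.RedrawAll()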
Example #3
def actWeightNormalize(me, PREF_MODE, PREF_MAX_DIST, PREF_STRENGTH, PREF_ITERATIONS):
	Window.WaitCursor(1)
	groupNames, vWeightDict= BPyMesh.meshWeight2Dict(me)
	act_group= me.activeGroup
	
	# Start with assumed zero weights
	orig_vert_weights= [0.0] * len(vWeightDict) # Will be directly assigned to orig_vert_weights
	
	
	# fill in the zeros with real weights.
	for i, wd in enumerate(vWeightDict):
		try:
			orig_vert_weights[i]= wd[act_group]
		except:
			pass
	
	new_vert_weights= list(orig_vert_weights)
	
	for dummy in xrange(PREF_ITERATIONS):
		# Minimize or maximize the weights. connection based.
		
		if PREF_MODE==0: # Grow
			op= max
		else: # Shrink
			op= min
		
		for ed in me.edges:
			if not PREF_MAX_DIST or ed.length < PREF_MAX_DIST:
			
				i1= ed.v1.index
				i2= ed.v2.index
				new_weight= op(orig_vert_weights[i1], orig_vert_weights[i2])
				
				if PREF_STRENGTH==1.0: # do a full copy
					new_vert_weights[i1]= op(new_weight, new_vert_weights[i1])
					new_vert_weights[i2]= op(new_weight, new_vert_weights[i2])
					
				else: # Do a faded copy
					new_vert_weights[i1]= op(new_weight, new_vert_weights[i1])
					new_vert_weights[i2]= op(new_weight, new_vert_weights[i2])
					
					# Fade the copy with the original (orig is updated per iteration)
					new_vert_weights[i1]= (new_vert_weights[i1]*PREF_STRENGTH) + (orig_vert_weights[i1]*(1-PREF_STRENGTH))
					new_vert_weights[i2]= (new_vert_weights[i2]*PREF_STRENGTH) + (orig_vert_weights[i2]*(1-PREF_STRENGTH))
		
		
		for i, wd in enumerate(vWeightDict):
			new_weight= new_vert_weights[i]
			if new_weight != orig_vert_weights[i]:
				wd[act_group]= new_weight
		
		if dummy+1 != PREF_ITERATIONS: # dont copy the list on the last round.
			orig_vert_weights= list(new_vert_weights)
		
		
	# Copy weights back to the mesh.
	BPyMesh.dict2MeshWeight(me, groupNames, vWeightDict)
	Window.WaitCursor(0)
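The grow/shrink pass in actWeightNormalize is just a per-edge max/min of the two endpoint weights, optionally faded back toward the original weights by PREF_STRENGTH. A standalone toy version of one grow pass on plain lists (no Blender needed) shows how far a weight spreads per iteration:

# Toy grow pass: each edge pushes the larger of its endpoint weights onto both ends.
orig = [0.0, 1.0, 0.0, 0.0]         # per-vertex weights, only vert 1 is weighted
edges = [(0, 1), (1, 2), (2, 3)]    # connected vertex index pairs
new = list(orig)
op = max                            # grow; shrink mode would use min
for i1, i2 in edges:
    w = op(orig[i1], orig[i2])
    new[i1] = op(w, new[i1])
    new[i2] = op(w, new[i2])
assert new == [1.0, 1.0, 1.0, 0.0]  # the weight travels exactly one edge per pass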
Example #5
def load_objs(parent, group):
    group.obj_count = 0
    for o in objs:
        if o.getParent() == parent:
            new = Obj()
            try:
                new.name = o.getName().split('-')[1] + '-' + o.getName().split(
                    '-')[2]
            except:
                Blender.Draw.PupMenu(
                    "ERROR:%t| Object name \"" + o.getName() +
                    "\"not valid.|%l|Name should look like OBJECT_NAME-XX-XX")
                return -1
            me = BPyMesh.getMeshFromObject(o, None, True, False, tp)
            #me = NMesh.GetRaw(o.data.name)
            if len(me.materials) > 0:
                new.tex = add_mat(me.materials[0], o)
            else:
                print "No Material: %s\n" % o.getName()
                new.tex = 'FFFF'
            if load_meshes(me, new, o.matrix) == -1:
                Blender.Draw.PupMenu('ERROR:%t| Unexpected Error. ')
                return -1
            group.obj_list.append(new)
            group.obj_count += 1

    return 1
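load_objs expects object names of the form OBJECT_NAME-XX-XX and keeps only the last two dash-separated fields; a quick standalone illustration of that split (the names are hypothetical):

# The same split('-') logic as above, outside of Blender.
name = 'Crate-01-02'               # hypothetical, well-formed object name
parts = name.split('-')
short = parts[1] + '-' + parts[2]  # what load_objs stores in new.name
assert short == '01-02'

bad = 'Crate'                      # no dashes: indexing parts[1] would raise IndexError,
parts = bad.split('-')             # which the try/except above turns into the PupMenu error
assert len(parts) < 3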
Example #6
def write(filename):
	start = Blender.sys.time()
	if not filename.lower().endswith('.raw'):
		filename += '.raw'
	
	scn= Blender.Scene.GetCurrent()
	ob= scn.objects.active
	if not ob:
		Blender.Draw.PupMenu('Error%t|Select 1 active object')
		return
	
	file = open(filename, 'wb')
	
	mesh = BPyMesh.getMeshFromObject(ob, None, True, False, scn)
	if not mesh:
		Blender.Draw.PupMenu('Error%t|Could not get mesh data from active object')
		return
	
	mesh.transform(ob.matrixWorld)
	
	
	file = open(filename, "wb")
	for f in mesh.faces:
		for v in f:
			file.write('%.6f %.6f %.6f ' % tuple(v.co))
		file.write('\n')
	file.close()
	
	end = Blender.sys.time()
	message = 'Successfully exported "%s" in %.4f seconds' % ( Blender.sys.basename(filename), end-start)
	print message
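Exporters like this write() are normally hooked up to a file selector at the bottom of the script. A hedged sketch of that wiring (Blender.Window.FileSelector and Blender.sys.makename are standard 2.4x calls, but this exact invocation is illustrative rather than taken from the script above):

import Blender

if __name__ == '__main__':
    # Let the user pick the output path, then call write() with it.
    Blender.Window.FileSelector(write, 'Export RAW',
                                Blender.sys.makename(ext='.raw'))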
Example #7
def write(filename):
    start = Blender.sys.time()
    if not filename.lower().endswith('.raw'):
        filename += '.raw'

    scn = Blender.Scene.GetCurrent()
    ob = scn.objects.active
    if not ob:
        Blender.Draw.PupMenu('Error%t|Select 1 active object')
        return

    file = open(filename, 'wb')

    mesh = BPyMesh.getMeshFromObject(ob, None, True, False, scn)
    if not mesh:
        Blender.Draw.PupMenu(
            'Error%t|Could not get mesh data from active object')
        return

    mesh.transform(ob.matrixWorld)

    file = open(filename, "wb")
    for f in mesh.faces:
        for v in f:
            file.write('%.6f %.6f %.6f ' % tuple(v.co))
        file.write('\n')
    file.close()

    end = Blender.sys.time()
    message = 'Successfully exported "%s" in %.4f seconds' % (
        Blender.sys.basename(filename), end - start)
    print message
Example #8
def write(filename):

    Blender.Window.WaitCursor(1)

    if not filename.lower().endswith('.begc'):
        filename += '.begc'
    out = file(filename, "w")
    objects = Blender.Object.GetSelected()

    num_objects = 0
    for object in objects:
        if object.type == 'Mesh':
            num_objects = num_objects + 1

    out.write('%d\n' % num_objects)
    node_offset = 0
    for object in objects:
        if object.type == 'Mesh':
            out.write(object.name)
            out.write('\n')
    for object in objects:
        if object.type == 'Mesh':

            mesh = BPyMesh.getMeshFromObject(object, None, True, False,
                                             bpy.data.scenes.active)
            #mesh  = object.getData(0,1)
            mesh.transform(object.matrixWorld)
            faces = mesh.faces
            nodes = mesh.verts
            out.write('%d' % len(nodes))
            out.write(' %d\n' % len(faces))
            for n in nodes:
                #out.write("%e " % n.co[0])
                #out.write("%e " % n.co[1])
                #out.write("%e\n" % n.co[2])
                out.write("%e " % n.co[0])
                out.write("%e " % n.co[1])
                out.write("%e\n" % n.co[2])
            for f in faces:
                N = len(f.verts)
                if N < 3 or N > 4:
                    Blender.Draw.PupMenu(
                        'Error%t|Only triangles and quads allowed')
                    return
                out.write("%d" % N)
                for v in f.verts:
                    out.write(' %d' % (v.index + node_offset))
                out.write('\n')
            node_offset = node_offset + len(nodes)

    Blender.Window.WaitCursor(0)
Example #9
def weightClean(me, PREF_THRESH, PREF_KEEP_SINGLE, PREF_OTHER_GROUPS):
	
	groupNames, vWeightDict= BPyMesh.meshWeight2Dict(me)
	act_group= me.activeGroup
	
	rem_count = 0
	
	if PREF_OTHER_GROUPS:
		for wd in vWeightDict:
			l = len(wd)
			if not PREF_KEEP_SINGLE or l > 1:
				# cant use iteritems because the dict is having items removed
				for group in wd.keys():
					w= wd[group]
					if w <= PREF_THRESH:
						# small weight, remove.
						del wd[group]
						rem_count +=1
						l-=1
					
					if PREF_KEEP_SINGLE and l == 1:
						break
	
	else:
		for wd in vWeightDict:
			if not PREF_KEEP_SINGLE or len(wd) > 1:
				try:
					w= wd[act_group]
					if w <= PREF_THRESH:
						# small weight, remove.
						del wd[act_group]
						rem_count +=1
				except:
					pass
	
	# Copy weights back to the mesh.
	BPyMesh.dict2MeshWeight(me, groupNames, vWeightDict)
	return rem_count
Example #10
def weightClean(me, PREF_THRESH, PREF_KEEP_SINGLE, PREF_OTHER_GROUPS):

    groupNames, vWeightDict = BPyMesh.meshWeight2Dict(me)
    act_group = me.activeGroup

    rem_count = 0

    if PREF_OTHER_GROUPS:
        for wd in vWeightDict:
            l = len(wd)
            if not PREF_KEEP_SINGLE or l > 1:
                # cant use iteritems because the dict is having items removed
                for group in wd.keys():
                    w = wd[group]
                    if w <= PREF_THRESH:
                        # small weight, remove.
                        del wd[group]
                        rem_count += 1
                        l -= 1

                    if PREF_KEEP_SINGLE and l == 1:
                        break

    else:
        for wd in vWeightDict:
            if not PREF_KEEP_SINGLE or len(wd) > 1:
                try:
                    w = wd[act_group]
                    if w <= PREF_THRESH:
                        # small weight, remove.
                        del wd[act_group]
                        rem_count += 1
                except:
                    pass

    # Copy weights back to the mesh.
    BPyMesh.dict2MeshWeight(me, groupNames, vWeightDict)
    return rem_count
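Both weightClean listings take a wrapped Mesh plus a weight threshold and return how many influences were removed. A minimal hedged driver, assuming the function above sits in a script that imports Blender and BPyMesh (the 0.01 threshold is illustrative):

import Blender
import BPyMesh

ob = Blender.Scene.GetCurrent().objects.active
me = ob.getData(mesh=1)
# Clean every group, but never strip the last weight from a vertex.
removed = weightClean(me, 0.01, True, True)
me.update()
print 'removed %d small weights' % removed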
Example #11
def write(filename):
  
  Blender.Window.WaitCursor(1)
  
  if not filename.lower().endswith('.begc'):
    filename += '.begc'
  out = file(filename, "w")
  objects = Blender.Object.GetSelected()
  
  num_objects = 0
  for object in objects:
    if object.type == 'Mesh':
      num_objects = num_objects + 1
      
  out.write('%d\n' % num_objects)
  node_offset = 0
  for object in objects:
    if object.type == 'Mesh':
      out.write(object.name)
      out.write('\n')
  for object in objects:
    if object.type == 'Mesh':

      mesh = BPyMesh.getMeshFromObject(object, None, True, False, bpy.data.scenes.active)
      #mesh  = object.getData(0,1)
      mesh.transform(object.matrixWorld)
      faces = mesh.faces
      nodes = mesh.verts
      out.write('%d' % len(nodes))
      out.write(' %d\n' % len(faces))
      for n in nodes:
        #out.write("%e " % n.co[0])
        #out.write("%e " % n.co[1])
        #out.write("%e\n" % n.co[2])
        out.write("%e "  % n.co[0])
        out.write("%e "  % n.co[1])
        out.write("%e\n" % n.co[2])
      for f in faces:
        N = len(f.verts)
        if N < 3 or N > 4:
          Blender.Draw.PupMenu('Error%t|Only triangles and quads allowed')
          return
        out.write("%d" % N)
        for v in f.verts:
          out.write(' %d' % (v.index + node_offset))
        out.write('\n')
      node_offset = node_offset + len(nodes)

  Blender.Window.WaitCursor(0)
Example #12
def exportPath(curve, filename):
  print "Exporting curve "+curve.name+" to "+filename
  
  mesh = BPyMesh.getMeshFromObject(curve)
  mesh.transform(curve.matrixWorld)
  numVertices = len(mesh.verts)

  file = open(filename, "w")
  for i in range(0, numVertices):
    vertex = mesh.verts[i]
    file.write("%g %g %g\n" % (vertex.co[0], vertex.co[1], vertex.co[2]))
  if curve.data.isCyclic():
    vertex = mesh.verts[0]
    file.write("%g %g %g\n" % (vertex.co[0], vertex.co[1], vertex.co[2]))    
  file.close()
Example #13
def selSameWeights(me, PREF_TOLERENCE):
	
	# Check for missing data
	if not me.faceUV:	return
	
	act_group= me.activeGroup
	if not act_group:	return
	
	act_face = me.faces[me.activeFace]
	if act_face == None:	return
	
	
	
	groupNames, vWeightDict= BPyMesh.meshWeight2Dict(me)
	
	def get_face_weight(f):
		'''
		Return the face's summed vertex weight and its weight range (max - min).
		'''
		wmin = 1.0
		wmax = 0.0
		w = 0.0
		for v in f:
			try:
				new_weight = vWeightDict[v.index][act_group]
				if wmin > new_weight: wmin = new_weight
				if wmax < new_weight: wmax = new_weight
				w += new_weight
			except:
				pass
		return w, wmax-wmin # weight, range
	
	weight_from, weight_range_from = get_face_weight(act_face)
	for f in me.faces:
		if (not f.sel) and f != act_face:
			weight, weight_range = get_face_weight(f)
			
			# Compare the 2 faces weight difference and difference in their contrast.
			if\
			abs(weight - weight_from) <= PREF_TOLERENCE and\
			abs(weight_range - weight_range_from) <= PREF_TOLERENCE:
				f.sel = True
Example #14
def selSameWeights(me, PREF_TOLERENCE):

    # Check for missing data
    if not me.faceUV: return

    act_group = me.activeGroup
    if not act_group: return

    act_face = me.faces[me.activeFace]
    if act_face == None: return

    groupNames, vWeightDict = BPyMesh.meshWeight2Dict(me)

    def get_face_weight(f):
        '''
        Return the face's summed vertex weight and its weight range (max - min).
        '''
        wmin = 1.0
        wmax = 0.0
        w = 0.0
        for v in f:
            try:
                new_weight = vWeightDict[v.index][act_group]
                if wmin > new_weight: wmin = new_weight
                if wmax < new_weight: wmax = new_weight
                w += new_weight
            except:
                pass
        return w, wmax - wmin  # weight, range

    weight_from, weight_range_from = get_face_weight(act_face)
    for f in me.faces:
        if (not f.sel) and f != act_face:
            weight, weight_range = get_face_weight(f)

            # Compare the 2 faces weight difference and difference in their contrast.
            if\
            abs(weight - weight_from) <= PREF_TOLERENCE and\
            abs(weight_range - weight_range_from) <= PREF_TOLERENCE:
                f.sel = True
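selSameWeights needs face UVs, an active vertex group and an active face, as the early returns show. A hedged driver sketch, assuming the function above lives in a script with the usual Blender/BPyMesh imports (the 0.1 tolerance is illustrative):

import Blender
import BPyMesh
from Blender import Window

ob = Blender.Scene.GetCurrent().objects.active
me = ob.getData(mesh=1)
if me.faceUV and me.activeGroup and me.activeFace != -1:
    selSameWeights(me, 0.1)  # select faces whose weight and contrast match the active face
    me.update()
    Window.RedrawAll()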
Example #15
def write(filename):
	start = Blender.sys.time()
	if not filename.lower().endswith('.ml.txt'):
		filename += '.ml.txt'
	
	scn= Blender.Scene.GetCurrent()
	ob= scn.objects.active
	if not ob:
		Blender.Draw.PupMenu('Error%t|Select 1 active object')
		return
	
	file = open(filename, 'wb')
	
	mesh = BPyMesh.getMeshFromObject(ob, None, True, False, scn)
	if not mesh:
		Blender.Draw.PupMenu('Error%t|Could not get mesh data from active object')
		return
	
	mesh.transform(ob.matrixWorld)
	
	with open(filename, "w") as ml_file:
		ml_file.write('#MiniLight\n')
		ml_file.write('%d\n' % DEFAULT_ITERATIONS)
		ml_file.write('%d %d\n' % DEFAULT_RESOLUTION)
		ml_file.write('(%.2f %.2f %.2f) (%.2f %.2f %.2f) %.2f\n' % DEFAULT_CAMERA)
		ml_file.write('(%.2f %.2f %.2f) (%.2f %.2f %.2f)\n' % DEFAULT_SKY_AND_GROUND)
		
		for f in mesh.faces:
			if len(f) != 3:
				Blender.Draw.PupMenu('Error%t|Non-triangular face found in mesh')
				return
			for v in f:
				ml_file.write('(%.6f %.6f %.6f) ' % tuple(v.co))
			ml_file.write('(%.6f %.6f %.6f) ' % DEFAULT_COLOR)
			ml_file.write('(%.6f %.6f %.6f)\n' % DEFAULT_REFLECTIVITY)
	
	end = Blender.sys.time()
	message = 'Successfully exported "%s" in %.4f seconds' % ( Blender.sys.basename(filename), end-start)
	print message
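This MiniLight exporter references module-level DEFAULT_* constants that the excerpt does not include; their shapes can be read straight off the format strings (an int, a 2-tuple, a 7-tuple, a 6-tuple and two 3-tuples). A hedged sketch with purely illustrative values:

# Illustrative values only -- the real script defines its own defaults.
DEFAULT_ITERATIONS     = 100                                    # '%d'
DEFAULT_RESOLUTION     = (640, 480)                             # '%d %d'
DEFAULT_CAMERA         = (0.0, 0.0, 5.0, 0.0, 0.0, -1.0, 45.0)  # 7 floats: position, direction, view angle (assumed)
DEFAULT_SKY_AND_GROUND = (0.9, 0.9, 1.0, 0.1, 0.1, 0.1)         # two RGB triples
DEFAULT_COLOR          = (0.7, 0.7, 0.7)                        # 3 floats per triangle
DEFAULT_REFLECTIVITY   = (0.0, 0.0, 0.0)                        # 3 floats per triangle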
Example #16
def meshNormalizedWeights(mesh):
  try:
    groupNames, vWeightList = BPyMesh.meshWeight2List(mesh)
  except:
    return [],[]

  if not groupNames:
    return [],[]

  for i, vWeights in enumerate(vWeightList):
    tot = 0.0
    for w in vWeights:
      tot+=w

    #print 'i:%d tot:%f' %  (i, tot)
    if tot:
      for j, w in enumerate(vWeights):
        vWeights[j] = w/tot
        #if w/tot > 0:
          #print 'i:%d j:%d w:%f w/tot:%f' %  (i, j, w, vWeights[j])

  return groupNames, vWeightList
Example #17
def meshNormalizedWeights(mesh):
	try: 
		groupNames, vWeightList = BPyMesh.meshWeight2List(mesh)
	except:
		return [],[]
	
	if not groupNames:
		return [],[]
		
	for i, vWeights in enumerate(vWeightList):
		tot = 0.0
		for w in vWeights:
			tot+=w
			
		#print 'i:%d tot:%f' %  (i, tot)
		if tot:
			for j, w in enumerate(vWeights):
				vWeights[j] = w/tot
				#if w/tot > 0:
					#print 'i:%d j:%d w:%f w/tot:%f' %  (i, j, w, vWeights[j])
	
	return groupNames, vWeightList
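Both meshNormalizedWeights variants scale each vertex's group weights so they sum to 1.0 and leave unweighted verts alone. A toy run on plain lists (no Blender needed) makes the effect easy to check:

# Per-vertex lists of group weights, shaped like BPyMesh.meshWeight2List() output.
vWeightList = [[1.0, 3.0], [0.0, 0.0], [2.0, 6.0]]
for vWeights in vWeightList:
    tot = sum(vWeights)
    if tot:                            # verts with no weights are left untouched
        for j, w in enumerate(vWeights):
            vWeights[j] = w / tot
assert vWeightList[0] == [0.25, 0.75]
assert vWeightList[1] == [0.0, 0.0]
assert vWeightList[2] == [0.25, 0.75]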
Example #18
def split(mesh, splitHeight):
	'''
	Split the mesh into tetra-, pentahedra, one for every face.
	Very close opposite faces (like a concave thin wall) will break visibility info.
	'''
	# we'll return a list of new meshes, together making the original one
	ms = []
	for f in mesh.faces:
		vs = [v.co for v in f.verts]

		# check if the face won't cause trouble
		if len(vs) != 3:
			warning('\t\tSkipping a face not being a triangle, with %d vertices.' % len(vs))
			continue
		if f.area < minFaceArea:
			warning('\t\tSkipping a face with very small area: %.2f.' % f.area)
			continue
		angs = filter(lambda a: a < minFaceAngle, BPyMesh.faceAngles(f)) # *** just till the first is found
		if angs:
			warning('\t\tSkipping a face with a very small angle: %.2f.' % angs[0])
			continue
		es = filter(lambda ei: mesh.edges[ei].length < minEdgeLength, mesh.findEdges(f.edge_keys)) # *** same
		if es:
			warning('\t\tSkipping a face with a very short edge: %.2f.' % mesh.edges[es[0]].length)
			continue

		# make a new tetrahedron, watch vertex order not to flip any normals
		m = Mesh.New()
		m.verts.extend(vs)
		m.verts.extend(f.cent - f.no * splitHeight)
		m.faces.extend(((0, 1, 2), (1, 0, 3), (2, 1, 3), (0, 2, 3)))
		m.faces[0].image = f.image
		m.faces[0].uv = f.uv
		ms.append(m)
	return ms
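split() relies on module-level thresholds (minFaceArea, minFaceAngle, minEdgeLength) and a warning() logger that the excerpt does not show, plus Mesh from the Blender module. A hedged sketch of plausible definitions so the snippet can be read in isolation (the values are illustrative, not the original script's):

# Illustrative module-level settings assumed by split(); tune to taste.
from Blender import Mesh

minFaceArea   = 0.0001   # skip near-degenerate faces (Blender units squared)
minFaceAngle  = 1.0      # skip faces with a corner angle below this (units as returned by BPyMesh.faceAngles)
minEdgeLength = 0.001    # skip faces with an edge shorter than this

def warning(msg):
    # The original script presumably logs these somewhere; printing is enough here.
    print msg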
Example #19
def extend():
	sce = bpy.data.scenes.active
	ob = sce.objects.active
	
	# print ob, ob.type
	if ob == None or ob.type != 'Mesh':
		Draw.PupMenu('ERROR: No mesh object.')
		return
	
	# Toggle Edit mode
	is_editmode = Window.EditMode()
	if is_editmode:
		Window.EditMode(0)
	
	me = ob.getData(mesh=1)
	me_verts = me.verts
	
	# 2: scale the extended UVs by loop-average edge length, 0: no scaling
	EXTEND_MODE = Draw.PupMenu("Use Face Area%t|Loop Average%x2|None%x0")
	if EXTEND_MODE == -1:
		return
	
	Window.WaitCursor(1)
	t = sys.time()
	edge_average_lengths = {}
	
	OTHER_INDEX = 2,3,0,1
	FAST_INDICIES = 0,2,1,3 # order is faster
	def extend_uvs(face_source, face_target, edge_key):
		'''
		Takes 2 faces and
		extends the source face's UV coords onto the face next to it.
		Both faces must share an edge.
		'''
		
		def face_edge_vs(vi):
			# assume a quad
			return [(vi[0], vi[1]), (vi[1], vi[2]), (vi[2], vi[3]), (vi[3], vi[0])]
		
		uvs_source = face_source.uv
		uvs_target = face_target.uv
		
		vidx_source = [v.index for v in face_source] 
		vidx_target = [v.index for v in face_target]
		
		# vertex index is the key, uv is the value
		uvs_vhash_source = dict( [ (vindex, uvs_source[i]) for i, vindex in enumerate(vidx_source)] )
		uvs_vhash_target = dict( [ (vindex, uvs_target[i]) for i, vindex in enumerate(vidx_target)] )
		
		edge_idxs_source = face_edge_vs(vidx_source)
		edge_idxs_target = face_edge_vs(vidx_target)
		
		source_matching_edge = -1
		target_matching_edge = -1
		
		edge_key_swap = edge_key[1], edge_key[0]
		
		try:	source_matching_edge = edge_idxs_source.index(edge_key)
		except:	source_matching_edge = edge_idxs_source.index(edge_key_swap)
		try:	target_matching_edge = edge_idxs_target.index(edge_key)
		except:	target_matching_edge = edge_idxs_target.index(edge_key_swap)
		

		
		edgepair_inner_source = edge_idxs_source[source_matching_edge]
		edgepair_inner_target = edge_idxs_target[target_matching_edge]
		edgepair_outer_source = edge_idxs_source[OTHER_INDEX[source_matching_edge]]
		edgepair_outer_target = edge_idxs_target[OTHER_INDEX[target_matching_edge]]
		
		if edge_idxs_source[source_matching_edge] == edge_idxs_target[target_matching_edge]:
			iA= 0; iB= 1 # Flipped, most common
		else: # The normals of these faces must be different
			iA= 1; iB= 0

		
		# Set the target UV's touching source face, no tricky calc needed,
		uvs_vhash_target[edgepair_inner_target[0]][:] = uvs_vhash_source[edgepair_inner_source[iA]]
		uvs_vhash_target[edgepair_inner_target[1]][:] = uvs_vhash_source[edgepair_inner_source[iB]]


		# Set the 2 UV's on the target face that are not touching
		# for this we need to do basic expanding on the source face's UVs
		if EXTEND_MODE == 2:
			
			try: # divide by zero is possible
				'''
				measure the length of each face from the middle of each edge to the opposite
				along the axis we are copying, use this
				'''
				i1a= edgepair_outer_target[iB]
				i2a= edgepair_inner_target[iA]
				if i1a>i2a: i1a, i2a = i2a, i1a
				
				i1b= edgepair_outer_source[iB]
				i2b= edgepair_inner_source[iA]
				if i1b>i2b: i1b, i2b = i2b, i1b
				# print edge_average_lengths
				factor = edge_average_lengths[i1a, i2a][0] / edge_average_lengths[i1b, i2b][0]
			except:
				# Div By Zero?
				factor = 1.0
			
			uvs_vhash_target[edgepair_outer_target[iB]][:] = uvs_vhash_source[edgepair_inner_source[0]]  +factor * (uvs_vhash_source[edgepair_inner_source[0]] - uvs_vhash_source[edgepair_outer_source[1]])
			uvs_vhash_target[edgepair_outer_target[iA]][:] = uvs_vhash_source[edgepair_inner_source[1]]  +factor * (uvs_vhash_source[edgepair_inner_source[1]] - uvs_vhash_source[edgepair_outer_source[0]])
		
		else:
			# same as above but with no factor
			uvs_vhash_target[edgepair_outer_target[iB]][:] = uvs_vhash_source[edgepair_inner_source[0]] + (uvs_vhash_source[edgepair_inner_source[0]] - uvs_vhash_source[edgepair_outer_source[1]])
			uvs_vhash_target[edgepair_outer_target[iA]][:] = uvs_vhash_source[edgepair_inner_source[1]] + (uvs_vhash_source[edgepair_inner_source[1]] - uvs_vhash_source[edgepair_outer_source[0]])
	
	if not me.faceUV:
		Draw.PupMenu('ERROR: Mesh has no face UV coords.')
		return
	
	face_act = 	me.activeFace
	if face_act == -1:
		Draw.PupMenu('ERROR: No active face')
		return
	
	face_sel= [f for f in me.faces if len(f) == 4 and f.sel]
	
	face_act_local_index = -1
	for i, f in enumerate(face_sel):
		if f.index == face_act:
			face_act_local_index = i
			break
	
	if face_act_local_index == -1:
		Draw.PupMenu('ERROR: Active face not selected')
		return
	
	
	
	# Modes
	# 0 unsearched
	# 1:mapped, use search from this face. - removed!!
	# 2:all siblings have been searched. dont search again.
	face_modes = [0] * len(face_sel)
	face_modes[face_act_local_index] = 1 # extend UV's from this face.
	
	
	# Edge connectivty
	edge_faces = {}
	for i, f in enumerate(face_sel):
		for edkey in f.edge_keys:
			try:	edge_faces[edkey].append(i)
			except:	edge_faces[edkey] = [i]
	
	SEAM = Mesh.EdgeFlags.SEAM
	
	if EXTEND_MODE == 2:
		edge_loops = BPyMesh.getFaceLoopEdges(face_sel, [ed.key for ed in me.edges if ed.flag & SEAM] )
		me_verts = me.verts
		for loop in edge_loops:
			looplen = [0.0]
			for ed in loop:
				edge_average_lengths[ed] = looplen
				looplen[0] += (me_verts[ed[0]].co - me_verts[ed[1]].co).length
			looplen[0] = looplen[0] / len(loop)
		
	
	
	# remove seams, so we don't map across seams.
	for ed in me.edges:
		if ed.flag & SEAM:
			# remove the edge pair if we can
			try:	del edge_faces[ed.key]
			except:	pass
	# Done finding seams
	
	
	# face connectivity - faces around each face
	# only store a list of indicies for each face.
	face_faces = [[] for i in xrange(len(face_sel))]
	
	for edge_key, faces in edge_faces.iteritems():
		if len(faces) == 2: # Only do edges with 2 face users for now
			face_faces[faces[0]].append((faces[1], edge_key))
			face_faces[faces[1]].append((faces[0], edge_key))
	
	
	# Now we know what face is connected to what other face, map them by connectivity
	ok = True
	while ok:
		ok = False
		for i in xrange(len(face_sel)):
			if face_modes[i] == 1: # searchable
				for f_sibling, edge_key in face_faces[i]:
					if face_modes[f_sibling] == 0:
						face_modes[f_sibling] = 1 # mapped and search from.
						extend_uvs(face_sel[i], face_sel[f_sibling], edge_key)
						face_modes[i] = 1 # we can map from this one now.
						ok= True # keep searching
				
				face_modes[i] = 2 # dont search again
	print  sys.time() - t
	
	if is_editmode:
		Window.EditMode(1)
	else:
		me.update()
	
	Window.RedrawAll()
	Window.WaitCursor(0)
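extend_uvs() above matches the two faces through their shared edge; since a face can list the edge's vertices in either order, both edge_key and its swapped form are tried. A toy check of that matching on plain tuples (the indices are hypothetical):

# face_edge_vs() from above, applied to two quads that share the 10-11 edge.
def face_edge_vs(vi):
    return [(vi[0], vi[1]), (vi[1], vi[2]), (vi[2], vi[3]), (vi[3], vi[0])]

source_quad = [10, 11, 12, 13]
target_quad = [11, 10, 20, 21]         # winds the shared edge the other way round
edge_key = (10, 11)                    # Blender edge keys are (low index, high index)

edges_src = face_edge_vs(source_quad)  # contains (10, 11)
edges_tgt = face_edge_vs(target_quad)  # contains (11, 10) -- the swapped form
assert edge_key in edges_src
assert edge_key not in edges_tgt and (edge_key[1], edge_key[0]) in edges_tgt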
Example #20
def redux(ob, REDUX=0.5, BOUNDRY_WEIGHT=2.0, REMOVE_DOUBLES=False, FACE_AREA_WEIGHT=1.0, FACE_TRIANGULATE=True, DO_UV=True, DO_VCOL=True, DO_WEIGHTS=True, VGROUP_INF_REDUX= None, VGROUP_INF_WEIGHT=0.5):
	"""
	BOUNDRY_WEIGHT - 0 is no boundry weighting. 2.0 will make them twice as unlikely to collapse.
	FACE_AREA_WEIGHT - 0 is no weight. 1 is normal, 2.0 is higher.
	"""
	
	if REDUX<0 or REDUX>1.0:
		raise 'Error, factor must be between 0 and 1.0'
	elif not set:
		raise 'Error, this function requires Python 2.4 or a full install of Python 2.3'
	
	BOUNDRY_WEIGHT= 1+BOUNDRY_WEIGHT
	
	""" # DEBUG!
	if Blender.Get('rt') == 1000:
		DEBUG=True
	else:
		DEBUG= False
	"""
	
	me= ob.getData(mesh=1)
	me.hide= False # unhide all data,.
	if len(me.faces)<5:
		return
	
	
	
	if FACE_TRIANGULATE or REMOVE_DOUBLES:
		me.sel= True
	
	if FACE_TRIANGULATE:
		me.quadToTriangle()
	
	if REMOVE_DOUBLES:
		me.remDoubles(0.0001)
	
	vgroups= me.getVertGroupNames()
	
	if not me.getVertGroupNames():
		DO_WEIGHTS= False
	
	if (VGROUP_INF_REDUX!= None and VGROUP_INF_REDUX not in vgroups) or\
	VGROUP_INF_WEIGHT==0.0:
		VGROUP_INF_REDUX= None
	
	try:
		VGROUP_INF_REDUX_INDEX= vgroups.index(VGROUP_INF_REDUX)
	except:
		VGROUP_INF_REDUX_INDEX= -1
	
	# del vgroups
	len_vgroups= len(vgroups)
	
	
	
	OLD_MESH_MODE= Blender.Mesh.Mode()
	Blender.Mesh.Mode(Blender.Mesh.SelectModes.VERTEX)
	
	if DO_UV and not me.faceUV:
		DO_UV= False
	
	if DO_VCOL and not me.vertexColors:
		DO_VCOL = False
	
	current_face_count= len(me.faces)
	target_face_count= int(current_face_count * REDUX)
	# % of the collapseable faces to collapse per pass.
	#collapse_per_pass= 0.333 # between 0.1 - lots of small nibbles, slow but high q. and 0.9 - big passes and faster.
	collapse_per_pass= 0.333 # between 0.1 - lots of small nibbles, slow but high q. and 0.9 - big passes and faster.
	
	"""# DEBUG!
	if DEBUG:
		COUNT= [0]
		def rd():
			if COUNT[0]< 330:
				COUNT[0]+=1
				return
			me.update()
			Blender.Window.RedrawAll()
			print 'Press key for next, count "%s"' % COUNT[0]
			try: input()
			except KeyboardInterrupt:
				raise "Error"
			except:
				pass
				
			COUNT[0]+=1
	"""
	
	class collapseEdge(object):
		__slots__ = 'length', 'key', 'faces', 'collapse_loc', 'v1', 'v2','uv1', 'uv2', 'col1', 'col2', 'collapse_weight'
		def __init__(self, ed):
			self.init_from_edge(ed) # So we can re-use the classes without using more memory.
		
		def init_from_edge(self, ed):
			self.key= ed.key
			self.length= ed.length
			self.faces= []
			self.v1= ed.v1
			self.v2= ed.v2
			if DO_UV or DO_VCOL:
				self.uv1= []
				self.uv2= []
				self.col1= []
				self.col2= []
				
			# self.collapse_loc= None # new collapse location.
			# Basic weighting.
			#self.collapse_weight= self.length *  (1+ ((ed.v1.no-ed.v2.no).length**2))
			self.collapse_weight= 1.0
		
		def collapse_locations(self, w1, w2):
			'''
			Generate a smart location for this edge to collapse to
			w1 and w2 are vertex location bias
			'''
			
			v1co= self.v1.co
			v2co= self.v2.co
			v1no= self.v1.no
			v2no= self.v2.no
			
			# Basic operation, works fine but not as good as predicting the best place.
			#between= ((v1co*w1) + (v2co*w2))
			#self.collapse_loc= between
			
			# normalize the weights of each vert - se we can use them as scalers.
			wscale= w1+w2
			if not wscale: # no scale?
				w1=w2= 0.5
			else:
				w1/=wscale
				w2/=wscale
			
			length= self.length
			between= MidpointVecs(v1co, v2co)
			
			# Collapse
			# new_location = between # Replace tricky code below. this code predicts the best collapse location.
			
			# Make lines at right angles to the normals- these 2 lines will intersect and be
			# the point of collapsing.
			
			# Enlarge so we know they intersect:  self.length*2
			cv1= v1no.cross(v1no.cross(v1co-v2co))
			cv2= v2no.cross(v2no.cross(v2co-v1co))
			
			# Scale to be less then the edge lengths.
			cv2.length = cv1.length = 1
			
			cv1 = cv1 * (length* 0.4)
			cv2 = cv2 * (length* 0.4)
			
			smart_offset_loc= between + (cv1 + cv2)
			
			# Now we need to blend between smart_offset_loc and w1/w2
			# we're blending between a vert and the edge's midpoint, so we can't use a normal weighted blend.
			if w1 > 0.5: # between v1 and smart_offset_loc
				#self.collapse_loc= v1co*(w2+0.5) + smart_offset_loc*(w1-0.5)
				w2*=2
				w1= 1-w2
				new_loc_smart= v1co*w1 + smart_offset_loc*w2
			else: # w between v2 and smart_offset_loc
				w1*=2
				w2= 1-w1
				new_loc_smart= v2co*w2 + smart_offset_loc*w1
				
			if new_loc_smart.x != new_loc_smart.x: # NAN LOCATION, revert to between
				new_loc_smart= None
			
			return new_loc_smart, between, v1co*0.99999 + v2co*0.00001, v1co*0.00001 + v2co*0.99999
		

	class collapseFace(object):
		__slots__ = 'verts', 'normal', 'area', 'index', 'orig_uv', 'orig_col', 'uv', 'col' # , 'collapse_edge_count'
		def __init__(self, f):
			self.init_from_face(f)
		
		def init_from_face(self, f):
			self.verts= f.v
			self.normal= f.no
			self.area= f.area
			self.index= f.index
			if DO_UV:
				self.orig_uv= [uv_key(uv) for uv in f.uv]
				self.uv= f.uv
			if DO_VCOL:
				self.orig_col= [col_key(col) for col in f.col]
				self.col= f.col
	
	collapse_edges= collapse_faces= None
	
	# So meshCalcNormals can avoid making a new list all the time.
	reuse_vertNormals= [ Vector() for v in xrange(len(me.verts)) ]
	
	while target_face_count <= len(me.faces):
		BPyMesh.meshCalcNormals(me, reuse_vertNormals)
		
		if DO_WEIGHTS:
			#groupNames, vWeightDict= BPyMesh.meshWeight2Dict(me)
			groupNames, vWeightList= BPyMesh.meshWeight2List(me)
		
		# THIS CRASHES? Not anymore.
		verts= list(me.verts)
		edges= list(me.edges)
		faces= list(me.faces)
		
		# THIS WORKS
		#verts= me.verts
		#edges= me.edges
		#faces= me.faces
		
		# if DEBUG: DOUBLE_CHECK= [0]*len(verts)
		me.sel= False
		
		if not collapse_faces: # Initialize the list.
			collapse_faces= [collapseFace(f) for f in faces]
			collapse_edges= [collapseEdge(ed) for ed in edges]
		else:
			for i, ed in enumerate(edges):
				collapse_edges[i].init_from_edge(ed)
			
			# Strip the unneeded end off the list
			collapse_edges[i+1:]= []
				
			for i, f in enumerate(faces):
				collapse_faces[i].init_from_face(f)
			
			# Strip the unneeded end off the list
			collapse_faces[i+1:]= []
			
			
		collapse_edges_dict= dict( [(ced.key, ced) for ced in collapse_edges] )
		
		# Store verts edges.
		vert_ed_users= [[] for i in xrange(len(verts))]
		for ced in collapse_edges:
			vert_ed_users[ced.key[0]].append(ced)
			vert_ed_users[ced.key[1]].append(ced)
		
		# Store face users
		vert_face_users= [[] for i in xrange(len(verts))]
		
		# Have decieded not to use this. area is better.
		#face_perim= [0.0]* len(me.faces)
		
		for ii, cfa in enumerate(collapse_faces):
			for i, v1 in enumerate(cfa.verts):
				vert_face_users[v1.index].append( (i,cfa) )
				
				# add the uv coord to the vert
				v2 = cfa.verts[i-1]
				i1= v1.index
				i2= v2.index
				
				if i1>i2: ced= collapse_edges_dict[i2,i1]
				else: ced= collapse_edges_dict[i1,i2]
				
				ced.faces.append(cfa)
				if DO_UV or DO_VCOL:
					# if the edge is flipped from its order in the face then we need to flip the order indicies.
					if cfa.verts[i]==ced.v1:	i1,i2 = i, i-1
					else:						i1,i2 = i-1, i
					
					if DO_UV:
						ced.uv1.append( cfa.orig_uv[i1] )
						ced.uv2.append( cfa.orig_uv[i2] )
					
					if DO_VCOL:
						ced.col1.append( cfa.orig_col[i1] )
						ced.col2.append( cfa.orig_col[i2] )
					
				
				# PERIMITER
				#face_perim[ii]+= ced.length
		
		
		
		# Now weight the verts by the area of their faces * the normal difference.
		# when the edge collapses, the two vert weights are taken into account
		
		vert_weights= [0.5] * len(verts)
		
		for ii, vert_faces in enumerate(vert_face_users):
			for f in vert_faces:
				try:
					no_ang= (Ang(verts[ii].no, f[1].normal)/180) * f[1].area
				except:
					no_ang= 1.0
				
				vert_weights[ii] += no_ang
		
		# Use a vertex group as a weighting.
		if VGROUP_INF_REDUX!=None:
			
			# Get Weights from a vgroup.
			"""
			vert_weights_map= [1.0] * len(verts)
			for i, wd in enumerate(vWeightDict):
				try:	vert_weights_map[i]= 1+(wd[VGROUP_INF_REDUX] * VGROUP_INF_WEIGHT)
				except:	pass
			"""
			vert_weights_map= [1+(wl[VGROUP_INF_REDUX_INDEX]*VGROUP_INF_WEIGHT) for wl in vWeightList ]
			
		
		# BOUNDRY CHECKING AND WEIGHT EDGES. CAN REMOVE
		# Now we know how many faces link to an edge. lets get all the boundry verts
		if BOUNDRY_WEIGHT > 0:
			verts_boundry= [1] * len(verts)
			#for ed_idxs, faces_and_uvs in edge_faces_and_uvs.iteritems():
			for ced in collapse_edges:
				if len(ced.faces) < 2:
					for key in ced.key: # only ever 2 key indicies.
						verts_boundry[key]= 2
			
			for ced in collapse_edges:
				b1= verts_boundry[ced.key[0]]
				b2= verts_boundry[ced.key[1]]
				if b1 != b2:
					# Edge has 1 boundry and 1 non boundry vert. weight higher
					ced.collapse_weight= BOUNDRY_WEIGHT
				#elif b1==b2==2: # if both are on a seam then weigh half as bad.
				#	ced.collapse_weight= ((BOUNDRY_WEIGHT-1)/2) +1
			# weight the verts by their boundry status
			del b1
			del b2
			
			for ii, boundry in enumerate(verts_boundry):
				if boundry==2:
					vert_weights[ii] *= BOUNDRY_WEIGHT
			
			vert_collapsed= verts_boundry
			del verts_boundry
		else:
			vert_collapsed= [1] * len(verts)
		
		
				
		
		# Best method, no quick hacks here, Correction. Should be the best but needs tweaks.
		def ed_set_collapse_error(ced):
			# Use the vertex weights to bias the new location.
			new_locs= ced.collapse_locations(vert_weights[ced.key[0]], vert_weights[ced.key[1]])
			
			
			# Find the connecting faces of the 2 verts.
			i1, i2= ced.key
			test_faces= set()
			for i in (i1,i2): # faster then LC's
				for f in vert_face_users[i]:
					test_faces.add(f[1].index)
			for f in ced.faces:
				test_faces.remove(f.index)
			
			
			v1_orig= Vector(ced.v1.co)
			v2_orig= Vector(ced.v2.co)
			
			def test_loc(new_loc):
				'''
				Takes a location and tests the error without changing anything
				'''
				new_weight= ced.collapse_weight
				ced.v1.co= ced.v2.co= new_loc
				
				new_nos= [faces[i].no for i in test_faces]
				
				# So we can compare the befire and after normals
				ced.v1.co= v1_orig
				ced.v2.co= v2_orig
				
				# now see how bad the normals are effected
				angle_diff= 1.0
				
				for ii, i in enumerate(test_faces): # local face index, global face index
					cfa= collapse_faces[i] # this collapse face
					try:
						# can use perim, but area looks better.
						if FACE_AREA_WEIGHT:
							# Pseudo code for the weighting:
							# angle_diff = the before/after angle difference between the collapsed and un-collapsed face.
							# ... divide by 180 so the value will be between 0 and 1.0
							# ... add 1 so we can use it as a multiplier and not make the area have no effect (below)
							# area_weight = the face's original area * the area weight
							# ... add 1.0 so a small-area face doesn't make the angle_diff have no effect.
							#
							# Now multiply - (angle_diff * area_weight)
							# ... The weight will be a minimum of 1.0 - we subtract this so more faces don't give the collapse an uneven weighting.
							
							angle_diff+= ((1+(Ang(cfa.normal, new_nos[ii])/180)) * (1+(cfa.area * FACE_AREA_WEIGHT))) -1 # 4 is how much to influence area
						else:
							angle_diff+= Ang(cfa.normal, new_nos[ii])/180
							
					except:
						pass
								
				
				# This is very arbirary, feel free to modify
				try:		no_ang= (Ang(ced.v1.no, ced.v2.no)/180) + 1
				except:		no_ang= 2.0
				
				# do *= because the boundary weight was used to initialize the weight. 1.0 default.
				new_weight *=  ((no_ang * ced.length) * (1-(1/angle_diff)))# / max(len(test_faces), 1)
				return new_weight
			# End testloc
			
			
			# Test the collapse locatons
			collapse_loc_best= None
			collapse_weight_best= 1000000000
			ii= 0
			for collapse_loc in new_locs:
				if collapse_loc: # will only ever fail if smart loc is NAN
					test_weight= test_loc(collapse_loc)
					if test_weight < collapse_weight_best:
						iii= ii
						collapse_weight_best = test_weight
						collapse_loc_best= collapse_loc
					ii+=1
			
			ced.collapse_loc= collapse_loc_best
			ced.collapse_weight= collapse_weight_best
			
			
			# are we using a weight map
			if VGROUP_INF_REDUX:
				v= vert_weights_map[i1]+vert_weights_map[i2]
				ced.collapse_weight*= v
		# End collapse Error
		
		# We can calculate the weights on __init__ but this is higher qualuity.
		for ced in collapse_edges:
			if ced.faces: # dont collapse faceless edges.
				ed_set_collapse_error(ced)
		
		# Wont use the function again.
		del ed_set_collapse_error
		# END BOUNDRY. Can remove
		
		# sort by collapse weight
		try:	collapse_edges.sort(key = lambda ced: ced.collapse_weight) # edges will be used for sorting
		except:	collapse_edges.sort(lambda ced1, ced2: cmp(ced1.collapse_weight, ced2.collapse_weight)) # edges will be used for sorting
		
		
		vert_collapsed= [0]*len(verts)
		
		collapse_edges_to_collapse= []
		
		# Make a list of the first half edges we can collapse,
		# these will better edges to remove.
		collapse_count=0
		for ced in collapse_edges:
			if ced.faces:
				i1, i2= ced.key
				# Use vert selections 
				if vert_collapsed[i1] or vert_collapsed[i2]:
					pass
				else:
					# Now we know the verts havnyt been collapsed.
					vert_collapsed[i2]= vert_collapsed[i1]= 1 # Dont collapse again.
					collapse_count+=1
					collapse_edges_to_collapse.append(ced)
		
		# Get a subset of the entire list- the first "collapse_per_pass", that are best to collapse.
		if collapse_count > 4:
			collapse_count = int(collapse_count*collapse_per_pass)
		else:
			collapse_count = len(collapse_edges)
		# We know edge_container_list_collapse can be removed.
		for ced in collapse_edges_to_collapse:
			"""# DEBUG!
			if DEBUG:
				if DOUBLE_CHECK[ced.v1.index] or\
				DOUBLE_CHECK[ced.v2.index]:
					raise 'Error'
				else:
					DOUBLE_CHECK[ced.v1.index]=1
					DOUBLE_CHECK[ced.v2.index]=1
				
				tmp= (ced.v1.co+ced.v2.co)*0.5
				Blender.Window.SetCursorPos(tmp.x, tmp.y, tmp.z)
				Blender.Window.RedrawAll()
			"""
			
			# Chech if we have collapsed our quota.
			collapse_count-=1
			if not collapse_count:
				break
			
			current_face_count -= len(ced.faces)
			
			# Find and assign the real weights based on collapse loc.
			
			# Find the weights from the collapse error
			if DO_WEIGHTS or DO_UV or DO_VCOL:
				i1, i2= ced.key
				# Dont use these weights since they may not have been used to make the collapse loc.
				#w1= vert_weights[i1]
				#w2= vert_weights[i2]
				w1= (ced.v2.co-ced.collapse_loc).length
				w2= (ced.v1.co-ced.collapse_loc).length
				
				# Normalize weights
				wscale= w1+w2
				if not wscale: # no scale?
					w1=w2= 0.5
				else:
					w1/= wscale
					w2/= wscale
				
				
				# Interpolate the bone weights.
				if DO_WEIGHTS:
					
					# add verts vgroups to eachother
					wl1= vWeightList[i1] # v1 weight dict
					wl2= vWeightList[i2] # v2 weight dict
					for group_index in xrange(len_vgroups):
						wl1[group_index]= wl2[group_index]= (wl1[group_index]*w1) + (wl2[group_index]*w2)
				# Done finding weights.
				
				
				
				if DO_UV or DO_VCOL:
					# Handel UV's and vert Colors!
					for v, my_weight, other_weight, edge_my_uvs, edge_other_uvs, edge_my_cols, edge_other_cols in (\
					(ced.v1, w1, w2, ced.uv1, ced.uv2, ced.col1, ced.col2),\
					(ced.v2, w2, w1, ced.uv2, ced.uv1, ced.col2, ced.col1)\
					):
						uvs_mixed=   [ uv_key_mix(edge_my_uvs[iii],   edge_other_uvs[iii],  my_weight, other_weight)  for iii in xrange(len(edge_my_uvs))  ]
						cols_mixed=  [ col_key_mix(edge_my_cols[iii], edge_other_cols[iii], my_weight, other_weight) for iii in xrange(len(edge_my_cols)) ]
						
						for face_vert_index, cfa in vert_face_users[v.index]:
							if len(cfa.verts)==3 and cfa not in ced.faces: # if the face is apart of this edge then dont bother finding the uvs since the face will be removed anyway.
							
								if DO_UV:
									# UV COORDS
									uvk=  cfa.orig_uv[face_vert_index] 
									try:
										tex_index= edge_my_uvs.index(uvk)
									except:
										tex_index= None
										""" # DEBUG!
										if DEBUG:
											print 'not found', uvk, 'in', edge_my_uvs, 'ed index', ii, '\nwhat about', edge_other_uvs
										"""
									if tex_index != None: # This face uses a uv in the collapsing face. - do a merge
										other_uv= edge_other_uvs[tex_index]
										uv_vec= cfa.uv[face_vert_index]
										uv_vec.x, uv_vec.y= uvs_mixed[tex_index]
								
								# TEXFACE COLORS
								if DO_VCOL:
									colk= cfa.orig_col[face_vert_index] 
									try:    tex_index= edge_my_cols.index(colk)
									except: tex_index= None
									if tex_index != None:
										other_col= edge_other_cols[tex_index]
										col_ob= cfa.col[face_vert_index]
										col_ob.r, col_ob.g, col_ob.b= cols_mixed[tex_index]
								
								# DEBUG! if DEBUG: rd()
			
			# Execute the collapse
			ced.v1.sel= ced.v2.sel= True # Select so remove doubles removed the edges and faces that use it
			ced.v1.co= ced.v2.co=  ced.collapse_loc
				
			# DEBUG! if DEBUG: rd()
			if current_face_count <= target_face_count:
				break
		
		# Copy weights back to the mesh before we remove doubles.
		if DO_WEIGHTS:
			#BPyMesh.dict2MeshWeight(me, groupNames, vWeightDict)
			BPyMesh.list2MeshWeight(me, groupNames, vWeightList)
		
		doubles= me.remDoubles(0.0001) 
		current_face_count= len(me.faces)
		
		if current_face_count <= target_face_count or not doubles: # not doubles shoule never happen.
			break
	
	me.update()
	Blender.Mesh.Mode(OLD_MESH_MODE)
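redux() takes the object rather than the mesh (it calls ob.getData itself) and also leans on module-level helpers from the surrounding module (Vector, Ang, MidpointVecs and the uv/col key functions) that this excerpt does not show. With those in place, a minimal hedged driver looks like this; the 0.5 ratio is illustrative:

import Blender
import BPyMesh

scn = Blender.Scene.GetCurrent()
ob = scn.objects.active
if ob and ob.type == 'Mesh':
    Blender.Window.WaitCursor(1)
    redux(ob, REDUX=0.5)   # collapse edges until roughly half the faces remain
    Blender.Window.WaitCursor(0)
    Blender.Window.RedrawAll()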
Example #21
def export_map(filepath):

    pup_block = [\
    ('Scale:', PREF_SCALE, 1, 1000, 'Scale the blender scene by this value.'),\
    ('Face Width:', PREF_FACE_THICK, 0.01, 10, 'Thickness of faces exported as brushes.'),\
    ('Grid Snap', PREF_GRID_SNAP, 'snaps floating point values to whole numbers.'),\
    'Null Texture',\
    ('', PREF_NULL_TEX, 1, 128, 'Export textureless faces with this texture'),\
    'Unseen Texture',\
    ('', PREF_INVIS_TEX, 1, 128, 'Export invisible faces with this texture'),\
    ]

    if not Draw.PupBlock('map export', pup_block):
        return

    Window.WaitCursor(1)
    time = sys.time()
    print 'Map Exporter 0.0'
    file = open(filepath, 'w')

    obs_mesh = []
    obs_lamp = []
    obs_surf = []
    obs_empty = []

    SCALE_MAT = Mathutils.Matrix()
    SCALE_MAT[0][0] = SCALE_MAT[1][1] = SCALE_MAT[2][2] = PREF_SCALE.val

    dummy_mesh = Mesh.New()

    TOTBRUSH = TOTLAMP = TOTNODE = 0

    for ob in Object.GetSelected():
        type = ob.getType()
        if type == 'Mesh': obs_mesh.append(ob)
        elif type == 'Surf': obs_surf.append(ob)
        elif type == 'Lamp': obs_lamp.append(ob)
        elif type == 'Empty': obs_empty.append(ob)

    if obs_mesh or obs_surf:
        # brushes and surf's must be under worldspan
        file.write('\n// entity 0\n')
        file.write('{\n')
        file.write('"classname" "worldspawn"\n')

    print '\twriting cubes from meshes'
    for ob in obs_mesh:
        dummy_mesh.getFromObject(ob.name)

        #print len(mesh_split2connected(dummy_mesh))

        # Is the object 1 cube? - object-is-a-brush
        dummy_mesh.transform(ob.matrixWorld *
                             SCALE_MAT)  # 1 to tx the normals also

        if PREF_GRID_SNAP.val:
            for v in dummy_mesh.verts:
                co = v.co
                co.x = round(co.x)
                co.y = round(co.y)
                co.z = round(co.z)

        # High quality normals
        BPyMesh.meshCalcNormals(dummy_mesh)

        # Split mesh into connected regions
        for face_group in BPyMesh.mesh2linkedFaces(dummy_mesh):
            if is_cube_facegroup(face_group):
                write_cube2brush(file, face_group)
                TOTBRUSH += 1
            elif is_tricyl_facegroup(face_group):
                write_cube2brush(file, face_group)
                TOTBRUSH += 1
            else:
                for f in face_group:
                    write_face2brush(file, f)
                    TOTBRUSH += 1

            #print 'warning, not exporting "%s" it is not a cube' % ob.name

    dummy_mesh.verts = None

    valid_dims = 3, 5, 7, 9, 11, 13, 15
    for ob in obs_surf:
        '''
		Surf, patches
		'''
        surf_name = ob.getData(name_only=1)
        data = Curve.Get(surf_name)
        mat = ob.matrixWorld * SCALE_MAT

        # This is what a valid patch looks like
        """
// brush 0
{
patchDef2
{
NULL
( 3 3 0 0 0 )
(
( ( -64 -64 0 0 0 ) ( -64 0 0 0 -2 ) ( -64 64 0 0 -4 ) )
( ( 0 -64 0 2 0 ) ( 0 0 0 2 -2 ) ( 0 64 0 2 -4 ) )
( ( 64 -64 0 4 0 ) ( 64 0 0 4 -2 ) ( 80 88 0 4 -4 ) )
)
}
}
		"""
        for i, nurb in enumerate(data):
            u = nurb.pointsU
            v = nurb.pointsV
            if u in valid_dims and v in valid_dims:

                file.write('// brush %d surf_name\n' % i)
                file.write('{\n')
                file.write('patchDef2\n')
                file.write('{\n')
                file.write('NULL\n')
                file.write('( %d %d 0 0 0 )\n' % (u, v))
                file.write('(\n')

                u_iter = 0
                for p in nurb:

                    if u_iter == 0:
                        file.write('(')

                    u_iter += 1

                    # add nmapping 0 0 ?
                    if PREF_GRID_SNAP.val:
                        file.write(' ( %d %d %d 0 0 )' %
                                   round_vec(Mathutils.Vector(p[0:3]) * mat))
                    else:
                        file.write(' ( %.6f %.6f %.6f 0 0 )' %
                                   tuple(Mathutils.Vector(p[0:3]) * mat))

                    # Move to next line
                    if u_iter == u:
                        file.write(' )\n')
                        u_iter = 0

                file.write(')\n')
                file.write('}\n')
                file.write('}\n')

                # Debugging
                # for p in nurb: print 'patch', p

            else:
                print "NOT EXPORTING PATCH", surf_name, u, v, 'Unsupported'

    file.write('}\n')  # end worldspan

    print '\twriting lamps'
    for ob in obs_lamp:
        print '\t\t%s' % ob.name
        lamp = ob.data
        file.write('{\n')
        file.write('"classname" "light"\n')
        file.write('"light" "%.6f"\n' % (lamp.dist * PREF_SCALE.val))
        if PREF_GRID_SNAP.val:
            file.write('"origin" "%d %d %d"\n' % tuple([
                round(axis * PREF_SCALE.val)
                for axis in ob.getLocation('worldspace')
            ]))
        else:
            file.write('"origin" "%.6f %.6f %.6f"\n' % tuple([
                axis * PREF_SCALE.val for axis in ob.getLocation('worldspace')
            ]))
        file.write('"_color" "%.6f %.6f %.6f"\n' % tuple(lamp.col))
        file.write('"style" "0"\n')
        file.write('}\n')
        TOTLAMP += 1

    print '\twriting empty objects as nodes'
    for ob in obs_empty:
        if write_node_map(file, ob):
            print '\t\t%s' % ob.name
            TOTNODE += 1
        else:
            print '\t\tignoring %s' % ob.name

    Window.WaitCursor(0)

    print 'Exported Map in %.4fsec' % (sys.time() - time)
    print 'Brushes: %d  Nodes: %d  Lamps %d\n' % (TOTBRUSH, TOTNODE, TOTLAMP)
Example #22
def vertexGradientPick(ob, MODE):
    #MODE 0 == VWEIGHT,  1 == VCOL

    me = ob.getData(mesh=1)
    if not me.faceUV: me.faceUV = True

    Window.DrawProgressBar(0.0, '')

    mousedown_wait()

    if MODE == 0:
        act_group = me.activeGroup
        if act_group == None:
            mousedown_wait()
            Draw.PupMenu('Error, mesh has no active group.')
            return

    # Loop until click
    Window.DrawProgressBar(0.25, 'Click to set gradient start')
    mouseup()

    obmat = ob.matrixWorld
    screen_x, screen_y = Window.GetMouseCoords()
    mouseInView, OriginA, DirectionA = mouseViewRay(screen_x, screen_y, obmat)
    if not mouseInView or not OriginA:
        return

    # get the mouse weight

    if MODE == 0:
        pickValA = BPyMesh.pickMeshGroupWeight(me, act_group, OriginA,
                                               DirectionA)
    if MODE == 1:
        pickValA = BPyMesh.pickMeshGroupVCol(me, OriginA, DirectionA)

    Window.DrawProgressBar(0.75, 'Click to set gradient end')
    mouseup()

    TOALPHA = Window.GetKeyQualifiers() & Window.Qual.SHIFT

    screen_x, screen_y = Window.GetMouseCoords()
    mouseInView, OriginB, DirectionB = mouseViewRay(screen_x, screen_y, obmat)
    if not mouseInView or not OriginB:
        return

    if not TOALPHA:  # Only get a second opaque value if we are not blending to alpha
        if MODE == 0:
            pickValB = BPyMesh.pickMeshGroupWeight(me, act_group, OriginB,
                                                   DirectionB)
        else:
            pickValB = BPyMesh.pickMeshGroupVCol(me, OriginB, DirectionB)
    else:
        if MODE == 0: pickValB = 0.0
        else: pickValB = [0.0, 0.0, 0.0]  # Dummy value

    # Neither points touched a face
    if pickValA == pickValB == None:
        return

    # clicking on 1 non face is fine. just set the weight to 0.0
    if pickValA == None:
        pickValA = 0.0

        # swap A/B
        OriginA, OriginB = OriginB, OriginA
        DirectionA, DirectionB = DirectionB, DirectionA
        pickValA, pickValB = pickValB, pickValA

        TOALPHA = True

    if pickValB == None:
        pickValB = 0.0
        TOALPHA = True

    # set up 2 lines so we can measure their distances and calc the gradient

    # make a line 90d to the grad in screenspace.
    if (OriginA - OriginB
        ).length <= eps:  # Persp view. same origin different direction
        cross_grad = DirectionA.cross(DirectionB)
        ORTHO = False

    else:  # Ortho - Same direction, different origin
        cross_grad = DirectionA.cross(OriginA - OriginB)
        ORTHO = True

    cross_grad.normalize()
    cross_grad = cross_grad * 100

    lineA = (OriginA, OriginA + (DirectionA * 100))
    lineB = (OriginB, OriginB + (DirectionB * 100))

    if not ORTHO:
        line_angle = AngleBetweenVecs(lineA[1], lineB[1]) / 2
        line_mid = (lineA[1] + lineB[1]) * 0.5

    VSEL = [False] * (len(me.verts))

    # Get the selected faces and apply the selection to the verts.
    for f in me.faces:
        if f.sel:
            for v in f.v:
                VSEL[v.index] = True
    groupNames, vWeightDict = BPyMesh.meshWeight2Dict(me)

    def grad_weight_from_co(v):
        '''
        Takes a vert and returns its gradient ratio between A and B
        '''

        if not VSEL[v.index]:  # Not part of a selected face?
            return None, None

        v_co = v.co
        # make a line 90d to the 2 lines the user clicked.
        vert_line = (v_co - cross_grad, v_co + cross_grad)

        xA = LineIntersect(vert_line[0], vert_line[1], lineA[0], lineA[1])
        xB = LineIntersect(vert_line[0], vert_line[1], lineB[0], lineB[1])

        if not xA or not xB:  # Should never happen but support it anyhow
            return None, None

        wA = (xA[0] - xA[1]).length
        wB = (xB[0] - xB[1]).length

        wTot = wA + wB
        if not wTot:  # lines are on the same point.
            return None, None
        '''
        Get the length of the line between both intersections on the
        two view lines.
        If the distance between lineA+vert_line and lineB+vert_line is
        greater than the length between the lineA and lineB intersection points,
        it means the vert is not in between the 2 lines.
        '''
        lineAB_length = (xA[1] - xB[1]).length

        # normalzie
        wA = wA / wTot
        wB = wB / wTot

        if ORTHO:  # Can only use the line-length method with parallel lines
            if wTot > lineAB_length + eps:
                # vert is outside the range on 1 side. see what side of the grad
                if wA > wB: wA, wB = 1.0, 0.0
                else: wA, wB = 0.0, 1.0
        else:
            # PERSP, lineA[0] is the same origin as lineB[0]

            # Either xA[0] or xB[0]  can be used instead of a possible x_mid between the 2
            # as long as the point is inbetween lineA and lineB it dosent matter.
            a = AngleBetweenVecs(lineA[0] - xA[0], line_mid)
            if a > line_angle:
                # vert is outside the range on 1 side. see what side of the grad
                if wA > wB: wA, wB = 1.0, 0.0
                else: wA, wB = 0.0, 1.0

        return wA, wB

    grad_weights = [grad_weight_from_co(v) for v in me.verts]

    if MODE == 0:
        for v in me.verts:
            i = v.index
            if VSEL[i]:
                wA, wB = grad_weights[i]
                if wA != None:  # and wB
                    if TOALPHA:
                        # Do alpha by using the exiting weight for
                        try:
                            pickValB = vWeightDict[i][act_group]
                        except:
                            pickValB = 0.0  # The weights not there? assume zero
                    # Mix2 2 opaque weights
                    vWeightDict[i][act_group] = pickValB * wA + pickValA * wB

    else:  # MODE==1 VCol
        for f in me.faces:
            if f.sel:
                f_v = f.v
                for i in xrange(len(f_v)):
                    v = f_v[i]
                    wA, wB = grad_weights[v.index]

                    c = f.col[i]

                    if TOALPHA:
                        pickValB = c.r, c.g, c.b

                    c.r = int(pickValB[0] * wA + pickValA[0] * wB)
                    c.g = int(pickValB[1] * wA + pickValA[1] * wB)
                    c.b = int(pickValB[2] * wA + pickValA[2] * wB)

    # Copy weights back to the mesh.
    BPyMesh.dict2MeshWeight(me, groupNames, vWeightDict)
    Window.DrawProgressBar(1.0, '')
Example #23
def create_mesh(scn, new_objects, has_ngons, CREATE_FGONS, CREATE_EDGES, verts_loc, verts_tex, faces, unique_materials, unique_material_images, unique_smooth_groups, dataname):
	'''
	Takes all the data gathered and generates a mesh, adding the new object to new_objects.
	Deals with fgons, sharp edges and assigning materials.
	'''
	if not has_ngons:
		CREATE_FGONS= False
	
	if unique_smooth_groups:
		sharp_edges= {}
		smooth_group_users= dict([ (context_smooth_group, {}) for context_smooth_group in unique_smooth_groups.iterkeys() ])
		context_smooth_group_old= -1
	
	# Split fgons into tri's
	fgon_edges= {} # Used for storing fgon keys
	if CREATE_EDGES:
		edges= []
	
	context_object= None
	
	# reverse loop through face indices
	for f_idx in xrange(len(faces)-1, -1, -1):
		
		face_vert_loc_indicies,\
		face_vert_tex_indicies,\
		context_material,\
		context_smooth_group,\
		context_object= faces[f_idx]
		
		len_face_vert_loc_indicies = len(face_vert_loc_indicies)
		
		if len_face_vert_loc_indicies==1:
			faces.pop(f_idx) # can't add single vert faces
		
		elif not face_vert_tex_indicies or len_face_vert_loc_indicies == 2: # faces that have no texture coords are lines
			if CREATE_EDGES:
				# generators are better in python 2.4+ but can't be used in 2.3
				# edges.extend( (face_vert_loc_indicies[i], face_vert_loc_indicies[i+1]) for i in xrange(len_face_vert_loc_indicies-1) )
				edges.extend( [(face_vert_loc_indicies[i], face_vert_loc_indicies[i+1]) for i in xrange(len_face_vert_loc_indicies-1)] )

			faces.pop(f_idx)
		else:
			
			# Smooth Group
			if unique_smooth_groups and context_smooth_group:
				# Is part of a smooth group and is a face
				if context_smooth_group_old is not context_smooth_group:
					edge_dict= smooth_group_users[context_smooth_group]
					context_smooth_group_old= context_smooth_group
				
				for i in xrange(len_face_vert_loc_indicies):
					i1= face_vert_loc_indicies[i]
					i2= face_vert_loc_indicies[i-1]
					if i1>i2: i1,i2= i2,i1
					
					try:
						edge_dict[i1,i2]+= 1
					except KeyError:
						edge_dict[i1,i2]=  1
			
			# FGons into triangles
			if has_ngons and len_face_vert_loc_indicies > 4:
				
				ngon_face_indices= BPyMesh.ngon(verts_loc, face_vert_loc_indicies)
				faces.extend(\
				[(\
				[face_vert_loc_indicies[ngon[0]], face_vert_loc_indicies[ngon[1]], face_vert_loc_indicies[ngon[2]] ],\
				[face_vert_tex_indicies[ngon[0]], face_vert_tex_indicies[ngon[1]], face_vert_tex_indicies[ngon[2]] ],\
				context_material,\
				context_smooth_group,\
				context_object)\
				for ngon in ngon_face_indices]\
				)
				
				# edges to make fgons
				if CREATE_FGONS:
					edge_users= {}
					for ngon in ngon_face_indices:
						for i in (0,1,2):
							i1= face_vert_loc_indicies[ngon[i  ]]
							i2= face_vert_loc_indicies[ngon[i-1]]
							if i1>i2: i1,i2= i2,i1
							
							try:
								edge_users[i1,i2]+=1
							except KeyError:
								edge_users[i1,i2]= 1
					
					for key, users in edge_users.iteritems():
						if users>1:
							fgon_edges[key]= None
				
				# the ngon has been replaced by the triangles above, so remove the original face.
				faces.pop(f_idx)
		
		
	# Build sharp edges
	if unique_smooth_groups:
		for edge_dict in smooth_group_users.itervalues():
			for key, users in edge_dict.iteritems():
				if users==1: # This edge is on the boundary of a group
					sharp_edges[key]= None
	
	
	# map the material names to an index
	material_mapping= dict([(name, i) for i, name in enumerate(unique_materials.keys())])
	
	materials= [None] * len(unique_materials)
	
	for name, index in material_mapping.iteritems():
		materials[index]= unique_materials[name]
	
	me= bpy.data.meshes.new(dataname)
	
	me.materials= materials[0:16] # make sure the list isn't too big.
	#me.verts.extend([(0,0,0)]) # dummy vert
	me.verts.extend(verts_loc)
	
	face_mapping= me.faces.extend([f[0] for f in faces], indexList=True)
	
	if verts_tex and me.faces:
		me.faceUV= 1
		# TEXMODE= Mesh.FaceModes['TEX']
	
	context_material_old= -1 # avoid a dict lookup
	mat= 0 # rare case it may be un-initialized.
	me_faces= me.faces
	ALPHA= Mesh.FaceTranspModes.ALPHA
	
	for i, face in enumerate(faces):
		if len(face[0]) < 2:
			pass #raise "bad face"
		elif len(face[0])==2:
			if CREATE_EDGES:
				edges.append(face[0])
		else:
			face_index_map= face_mapping[i]
			if face_index_map!=None: # None means the face wasn't added
				blender_face= me_faces[face_index_map]
				
				face_vert_loc_indicies,\
				face_vert_tex_indicies,\
				context_material,\
				context_smooth_group,\
				context_object= face
				
				
				
				if context_smooth_group:
					blender_face.smooth= True
				
				if context_material:
					if context_material_old is not context_material:
						mat= material_mapping[context_material]
						if mat>15:
							mat= 15
						context_material_old= context_material
					
					blender_face.mat= mat
				
				
				if verts_tex:	
					if context_material:
						image, has_data= unique_material_images[context_material]
						if image: # Can be None if the material doesn't have an image.
							blender_face.image= image
							if has_data and image.depth == 32:
								blender_face.transp |= ALPHA
					
					# BUG - Evil eekadoodle problem where faces that have vert index 0 location at 3 or 4 are shuffled.
					if len(face_vert_loc_indicies)==4:
						if face_vert_loc_indicies[2]==0 or face_vert_loc_indicies[3]==0:
							face_vert_tex_indicies= face_vert_tex_indicies[2], face_vert_tex_indicies[3], face_vert_tex_indicies[0], face_vert_tex_indicies[1]
					else: # length of 3
						if face_vert_loc_indicies[2]==0:
							face_vert_tex_indicies= face_vert_tex_indicies[1], face_vert_tex_indicies[2], face_vert_tex_indicies[0]
					# END EEEKADOODLE FIX
					
					# assign material, uv's and image
					for ii, uv in enumerate(blender_face.uv):
						uv.x, uv.y=  verts_tex[face_vert_tex_indicies[ii]]
	del me_faces
	del ALPHA
	
	# Add edge faces.
	me_edges= me.edges
	if CREATE_FGONS and fgon_edges:
		FGON= Mesh.EdgeFlags.FGON
		for ed in me.findEdges( fgon_edges.keys() ):
			if ed!=None:
				me_edges[ed].flag |= FGON
		del FGON
	
	if unique_smooth_groups and sharp_edges:
		SHARP= Mesh.EdgeFlags.SHARP
		for ed in me.findEdges( sharp_edges.keys() ):
			if ed!=None:
				me_edges[ed].flag |= SHARP
		del SHARP
	
	if CREATE_EDGES:
		me_edges.extend( edges )
	
	del me_edges
	
	me.calcNormals()
	
	ob= scn.objects.new(me)
	new_objects.append(ob)
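
# Minimal sketch of the edge-counting idiom used above: an edge key is the sorted
# (i1, i2) vertex-index pair, so (3, 7) and (7, 3) count as the same edge, and an
# edge with a single user lies on a boundary. The face data below is made up.
def count_edge_users_sketch(face_index_lists):
    edge_users = {}
    for face in face_index_lists:
        for i in range(len(face)):
            i1, i2 = face[i], face[i - 1]   # wraps to the last index when i == 0
            if i1 > i2:
                i1, i2 = i2, i1             # canonical key, smallest index first
            edge_users[i1, i2] = edge_users.get((i1, i2), 0) + 1
    return edge_users

users = count_edge_users_sketch([(0, 1, 2), (0, 2, 3)])
boundary = [key for key, n in users.items() if n == 1]  # the shared edge (0, 2) is excluded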
Пример #24
0
def file_callback(filename):

    if not filename.lower().endswith('.submesh'):
        filename += '.submesh'

    scn = bpy.data.scenes.active
    ob = scn.objects.active
    if not ob:
        Blender.Draw.PupMenu('Error%t|Select 1 active object')
        return

    file = open(filename, 'wb')

    EXPORT_APPLY_MODIFIERS = Draw.Create(1)
    EXPORT_NORMALS = Draw.Create(1)
    EXPORT_UV = Draw.Create(1)
    EXPORT_COLORS = Draw.Create(1)
    #EXPORT_EDGES = Draw.Create(0)

    pup_block = [\
    ('Apply Modifiers', EXPORT_APPLY_MODIFIERS, 'Use transformed mesh data.'),\
    ('Normals', EXPORT_NORMALS, 'Export vertex normal data.'),\
    ('UVs', EXPORT_UV, 'Export texface UV coords.'),\
    ('Colors', EXPORT_COLORS, 'Export vertex Colors.'),\
	#('Edges', EXPORT_EDGES, 'Edges not connected to faces.'),\
    ]

    if not Draw.PupBlock('Export...', pup_block):
        return

    is_editmode = Blender.Window.EditMode()
    if is_editmode:
        Blender.Window.EditMode(0, '', 0)

    Window.WaitCursor(1)

    EXPORT_APPLY_MODIFIERS = EXPORT_APPLY_MODIFIERS.val
    EXPORT_NORMALS = EXPORT_NORMALS.val
    EXPORT_UV = EXPORT_UV.val
    EXPORT_COLORS = EXPORT_COLORS.val
    #EXPORT_EDGES = EXPORT_EDGES.val

    mesh = BPyMesh.getMeshFromObject(ob, None, EXPORT_APPLY_MODIFIERS, False,
                                     scn)

    if not mesh:
        Blender.Draw.PupMenu(
            'Error%t|Could not get mesh data from active object')
        return

    mesh.transform(ob.matrixWorld)

    faceUV = mesh.faceUV
    vertexUV = mesh.vertexUV
    vertexColors = mesh.vertexColors

    if (not faceUV) and (not vertexUV): EXPORT_UV = False
    if not vertexColors: EXPORT_COLORS = False

    if not EXPORT_UV: faceUV = vertexUV = False
    if not EXPORT_COLORS: vertexColors = False

    # in case
    color = uvcoord = uvcoord_key = normal = normal_key = None

    verts = []  # list of (co, normal, uvcoord, color) tuples
    # vdict = {} # (index, normal, uv) -> new index
    vdict = [{} for i in xrange(len(mesh.verts))]
    vert_count = 0
    for i, f in enumerate(mesh.faces):
        smooth = f.smooth
        if not smooth:
            normal = tuple(f.no)
            normal_key = rvec3d(normal)

        if faceUV: uv = f.uv
        if vertexColors: col = f.col
        for j, v in enumerate(f):
            if smooth:
                normal = tuple(v.no)
                normal_key = rvec3d(normal)

            if faceUV:
                uvcoord = uv[j][0], 1.0 - uv[j][1]
                uvcoord_key = rvec2d(uvcoord)
            elif vertexUV:
                uvcoord = v.uvco[0], 1.0 - v.uvco[1]
                uvcoord_key = rvec2d(uvcoord)

            if vertexColors: color = col[j].r, col[j].g, col[j].b

            key = normal_key, uvcoord_key, color

            vdict_local = vdict[v.index]

            if (not vdict_local) or (not vdict_local.has_key(key)):
                vdict_local[key] = vert_count
                verts.append((tuple(v.co), normal, uvcoord, color))
                vert_count += 1

    file.write('SUBMESHTEXT0001\n')
    file.write(
        '#Created by Blender3D %s - www.blender.org, source file: %s\n' %
        (Blender.Get('version'),
         Blender.Get('filename').split('/')[-1].split('\\')[-1]))

    #file.write('element vertex %d\n' % len(verts))
    file.write('vertex format: position:3,texture0:2,normal:3\n')
    file.write('vertex count: %d\n' % len(verts))

    for i, v in enumerate(verts):
        file.write('[%.6f,%.6f,%.6f]' % v[0])  # co
        #if EXPORT_UV:
        file.write(' [%.6f,%.6f]' % v[2])  # uv
        #if EXPORT_NORMALS:
        file.write(' [%.6f,%.6f,%.6f]' % v[1])  # no

        #if EXPORT_COLORS:
        #	file.write('%u %u %u' % v[3]) # col
        file.write('\n')

    triangles = []
    for (i, f) in enumerate(mesh.faces):
        #file.write('%d ' % len(f))
        smooth = f.smooth
        if not smooth: no = rvec3d(f.no)

        if faceUV: uv = f.uv
        if vertexColors: col = f.col

        if (len(f) == 3):
            triangle = []
            for j, v in enumerate(f):
                if f.smooth: normal = rvec3d(v.no)
                else: normal = no
                if faceUV: uvcoord = rvec2d((uv[j][0], 1.0 - uv[j][1]))
                elif vertexUV: uvcoord = rvec2d((v.uvco[0], 1.0 - v.uvco[1]))
                if vertexColors: color = col[j].r, col[j].g, col[j].b

                triangle += [vdict[v.index][normal, uvcoord, color]]
            triangles += [triangle]
            #file.write('%d ' % vdict[v.index][normal, uvcoord, color])
            #file.write('\n')
        else:
            x = []
            for j, v in enumerate(f):
                if f.smooth: normal = rvec3d(v.no)
                else: normal = no
                if faceUV: uvcoord = rvec2d((uv[j][0], 1.0 - uv[j][1]))
                elif vertexUV: uvcoord = rvec2d((v.uvco[0], 1.0 - v.uvco[1]))
                if vertexColors: color = col[j].r, col[j].g, col[j].b

                #file.write('%d ' % vdict[v.index][normal, uvcoord, color])
                x += [vdict[v.index][normal, uvcoord, color]]
            triangles += [[x[1], x[2], x[0]]]
            triangles += [[x[2], x[3], x[0]]]
            #file.write('[%d,%d,%d]\n'%())
            #file.write('[%d,%d,%d]\n'%(x[1],x[2],x[3]))

    file.write('triangle count: %d\n' % len(triangles))
    for (i, f) in enumerate(triangles):
        file.write('[%d,' % f[0])
        file.write('%d,' % f[1])
        file.write('%d]\n' % f[2])

    file.close()

    if is_editmode:
        Blender.Window.EditMode(1, '', 0)
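
# Minimal sketch of the vertex splitting used above: every unique combination of
# rounded (normal, uv) data gets its own exported vertex. rvec3d/rvec2d are rounding
# helpers defined elsewhere in the original script; the versions below are
# hypothetical stand-ins that round to 6 decimals so nearly equal values share a key.
def rvec3d_sketch(v):
    return round(v[0], 6), round(v[1], 6), round(v[2], 6)

def rvec2d_sketch(v):
    return round(v[0], 6), round(v[1], 6)

vdict_sketch = {}   # (normal_key, uv_key) -> exported vertex index
verts_out = []
for normal, uv in [((0.0, 0.0, 1.0), (0.1, 0.2)),
                   ((0.0, 0.0, 1.0000001), (0.1, 0.2))]:   # rounds to the same key
    key = rvec3d_sketch(normal), rvec2d_sketch(uv)
    if key not in vdict_sketch:
        vdict_sketch[key] = len(verts_out)
        verts_out.append((normal, uv))
# verts_out now holds a single entry: both inputs collapsed onto one exported vertex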
Пример #25
0
def export_map(filepath):
	
	pup_block = [\
	('Scale:', PREF_SCALE, 1, 1000, 'Scale the blender scene by this value.'),\
	('Face Width:', PREF_FACE_THICK, 0.01, 10, 'Thickness of faces exported as brushes.'),\
	('Grid Snap', PREF_GRID_SNAP, 'snaps floating point values to whole numbers.'),\
	'Null Texture',\
	('', PREF_NULL_TEX, 1, 128, 'Export textureless faces with this texture'),\
	'Unseen Texture',\
	('', PREF_INVIS_TEX, 1, 128, 'Export invisible faces with this texture'),\
	]
	
	if not Draw.PupBlock('map export', pup_block):
		return
	
	Window.WaitCursor(1)
	time= sys.time()
	print 'Map Exporter 0.0'
	file= open(filepath, 'w')
	
	
	obs_mesh= []
	obs_lamp= []
	obs_surf= []
	obs_empty= []
	
	SCALE_MAT= Mathutils.Matrix()
	SCALE_MAT[0][0]= SCALE_MAT[1][1]= SCALE_MAT[2][2]= PREF_SCALE.val
	
	dummy_mesh= Mesh.New()
	
	TOTBRUSH= TOTLAMP= TOTNODE= 0
	
	for ob in Object.GetSelected():
		type= ob.type
		if type == 'Mesh':		obs_mesh.append(ob)
		elif type == 'Surf':	obs_surf.append(ob)
		elif type == 'Lamp':	obs_lamp.append(ob)
		elif type == 'Empty':	obs_empty.append(ob)
	
	if obs_mesh or obs_surf:
		# brushes and surfs must be under worldspawn
		file.write('\n// entity 0\n')
		file.write('{\n')
		file.write('"classname" "worldspawn"\n')
	
	
	print '\twriting cubes from meshes'
	for ob in obs_mesh:
		dummy_mesh.getFromObject(ob.name)
		
		#print len(mesh_split2connected(dummy_mesh))
		
		# Is the object 1 cube? - object-is-a-brush
		dummy_mesh.transform(ob.matrixWorld*SCALE_MAT) # 1 to tx the normals also
		
		if PREF_GRID_SNAP.val:
			for v in dummy_mesh.verts:
				co= v.co
				co.x= round(co.x)
				co.y= round(co.y)
				co.z= round(co.z)
		
		# High quality normals
		BPyMesh.meshCalcNormals(dummy_mesh)
		
		# Split mesh into connected regions
		for face_group in BPyMesh.mesh2linkedFaces(dummy_mesh):
			if is_cube_facegroup(face_group):
				write_cube2brush(file, face_group)
				TOTBRUSH+=1
			elif is_tricyl_facegroup(face_group):
				write_cube2brush(file, face_group)
				TOTBRUSH+=1
			else:
				for f in face_group:
					write_face2brush(file, f)
					TOTBRUSH+=1
			
			#print 'warning, not exporting "%s" it is not a cube' % ob.name
			
	
	dummy_mesh.verts= None
	

	valid_dims= 3,5,7,9,11,13,15
	for ob in obs_surf:
		'''
		Surf, patches
		'''
		surf_name= ob.getData(name_only=1)
		data= Curve.Get(surf_name)
		mat = ob.matrixWorld*SCALE_MAT
		
		# This is what a valid patch looks like
		
		"""
// brush 0
{
patchDef2
{
NULL
( 3 3 0 0 0 )
(
( ( -64 -64 0 0 0 ) ( -64 0 0 0 -2 ) ( -64 64 0 0 -4 ) )
( ( 0 -64 0 2 0 ) ( 0 0 0 2 -2 ) ( 0 64 0 2 -4 ) )
( ( 64 -64 0 4 0 ) ( 64 0 0 4 -2 ) ( 80 88 0 4 -4 ) )
)
}
}
		"""
		for i, nurb in enumerate(data):
			u= nurb.pointsU
			v= nurb.pointsV
			if u in valid_dims and v in valid_dims:
				
				file.write('// brush %d %s\n' % (i, surf_name))
				file.write('{\n')
				file.write('patchDef2\n')
				file.write('{\n')
				file.write('NULL\n')
				file.write('( %d %d 0 0 0 )\n' % (u, v) )
				file.write('(\n')
				
				u_iter = 0
				for p in nurb:
					
					if u_iter == 0:
						file.write('(')
					
					u_iter += 1
					
					# add nmapping 0 0 ?
					if PREF_GRID_SNAP.val:
						file.write(' ( %d %d %d 0 0 )' % round_vec(Mathutils.Vector(p[0:3]) * mat))
					else:
						file.write(' ( %.6f %.6f %.6f 0 0 )' % tuple(Mathutils.Vector(p[0:3]) * mat))
					
					# Move to next line
					if u_iter == u:
						file.write(' )\n')
						u_iter = 0
				
				file.write(')\n')
				file.write('}\n')
				file.write('}\n')
				
				
				# Debugging
				# for p in nurb: print 'patch', p
				
			else:
				print "NOT EXPORTING PATCH", surf_name, u,v, 'Unsupported'
	
	
	if obs_mesh or obs_surf:
		file.write('}\n') # end worldspawn
	
	
	print '\twriting lamps'
	for ob in obs_lamp:
		print '\t\t%s' % ob.name
		lamp= ob.data
		file.write('{\n')
		file.write('"classname" "light"\n')
		file.write('"light" "%.6f"\n' % (lamp.dist* PREF_SCALE.val))
		if PREF_GRID_SNAP.val:
			file.write('"origin" "%d %d %d"\n' % tuple([round(axis*PREF_SCALE.val) for axis in ob.getLocation('worldspace')]) )
		else:
			file.write('"origin" "%.6f %.6f %.6f"\n' % tuple([axis*PREF_SCALE.val for axis in ob.getLocation('worldspace')]) )
		file.write('"_color" "%.6f %.6f %.6f"\n' % tuple(lamp.col))
		file.write('"style" "0"\n')
		file.write('}\n')
		TOTLAMP+=1
	
	
	print '\twriting empty objects as nodes'
	for ob in obs_empty:
		if write_node_map(file, ob):
			print '\t\t%s' % ob.name
			TOTNODE+=1
		else:
			print '\t\tignoring %s' % ob.name
	
	Window.WaitCursor(0)
	
	print 'Exported Map in %.4fsec' % (sys.time()-time)
	print 'Brushes: %d  Nodes: %d  Lamps %d\n' % (TOTBRUSH, TOTNODE, TOTLAMP)
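
# round_vec() used for the 'Grid Snap' option above is defined elsewhere in the
# original exporter and is not shown in this excerpt. A plausible minimal stand-in
# (an assumption, not the original code) simply rounds each component to the
# nearest whole map unit so brush and patch points stay on the grid.
def round_vec_sketch(v):
    return round(v[0]), round(v[1]), round(v[2])

print(round_vec_sketch((12.4, -3.6, 0.49)))  # -> (12.0, -4.0, 0.0)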
Пример #26
0
def write(directory, filename, objects):
    def v_n_uv_key(v, n, uv):
        return round(v.x,
                     6), round(v.y, 6), round(v.z, 6), round(n.x, 6), round(
                         n.y, 6), round(n.z, 6), round(uv[0],
                                                       6), round(uv[1], 6)

    def v_n_key(v, n):
        return round(v.x,
                     6), round(v.y,
                               6), round(v.z,
                                         6), round(n.x,
                                                   6), round(n.y,
                                                             6), round(n.z, 6)

    def adjust_key(key, obCenter):
        keyList = list(key)
        keyList[0] -= obCenter[0]
        keyList[1] -= obCenter[1]
        keyList[2] -= obCenter[2]
        return tuple(keyList)

    temp_mesh_name = '~tmp-mesh'

    scn = Scene.GetCurrent()

    # Get the container mesh. - used for applying modifiers and non mesh objects.
    containerMesh = meshName = tempMesh = None
    for meshName in Blender.NMesh.GetNames():
        if meshName.startswith(temp_mesh_name):
            tempMesh = Mesh.Get(meshName)
            if not tempMesh.users:
                containerMesh = tempMesh
    if not containerMesh:
        containerMesh = Mesh.New(temp_mesh_name)

    del meshName
    del tempMesh

    try:
        armature = Blender.Object.Get("Armature")
        write_armature(directory + filename, armature)
    except:
        armature = None

    # Get all meshes
    for ob_main in objects:
        for ob, ob_mat in BPyObject.getDerivedObjects(ob_main):
            me = BPyMesh.getMeshFromObject(ob, containerMesh, True, False, scn)
            if not me:
                continue

            # Initialize globalVertices and globalMaterials dictionaries
            vertIndex = 0
            matIndex = 0
            globalVertices = {}
            globalMaterials = {}
            # Dictionary of materials: (material.name, image.name):matname_imagename
            # matname_imagename has fixed names.
            materialDict = {}

            # We have a valid mesh
            if me.faces:
                # Add a dummy object to it.
                has_quads = False
                for f in me.faces:
                    if len(f) == 4:
                        has_quads = True
                        break

                if has_quads:
                    oldmode = Mesh.Mode()
                    Mesh.Mode(Mesh.SelectModes['FACE'])

                    me.sel = True
                    tempob = scn.objects.new(me)
                    me.quadToTriangle(0)  # more=0 shortest length
                    oldmode = Mesh.Mode(oldmode)
                    scn.objects.unlink(tempob)

                    Mesh.Mode(oldmode)
            else:
                continue

            # High Quality Normals
            BPyMesh.meshCalcNormals(me)

            # Make our own list so it can be sorted to reduce context switching
            faces = [f for f in me.faces]
            faceuv = me.faceUV
            edges = me.edges

            materials = me.materials
            materialNames = []
            materialItems = materials[:]
            if materials:
                for mat in materials:
                    if mat:
                        materialNames.append(mat.name)
                    else:
                        materialNames.append(None)

            # Possibly there are null materials, which will mess up indices,
            # but at least it will export; wait until Blender gets fixed.
            materialNames.extend((16 - len(materialNames)) * [None])
            materialItems.extend((16 - len(materialItems)) * [None])

            # Sort by material, then images,
            # so we don't over-context-switch in the output file.
            if faceuv:
                try:
                    faces.sort(key=lambda a: (a.mat, a.image, a.smooth))
                except:
                    faces.sort(lambda a, b: cmp((a.mat, a.image, a.smooth),
                                                (b.mat, b.image, b.smooth)))
            elif len(materials) > 1:
                try:
                    faces.sort(key=lambda a: (a.mat, a.smooth))
                except:
                    faces.sort(lambda a, b: cmp((a.mat, a.smooth),
                                                (b.mat, b.smooth)))
            else:  # no materials
                try:
                    faces.sort(key=lambda a: a.smooth)
                except:
                    faces.sort(lambda a, b: cmp(a.smooth, b.smooth))

            # Set the default mat to no material and no image.
            contextMat = (
                0, 0
            )  # Can never be this, so we will label a new material the first chance we get.
            contextSmooth = None  # Will either be true or false,  set bad to force initialization switch.

            name1 = ob.name
            name2 = ob.getData(1)
            obnamestring = fixName(name1)
            file = open(directory + obnamestring + ".drkMesh", "w")

            # Fill globalVertices dictionary by creating (vert, normal, uv) tuple for all vertices of all faces
            vertString = ""
            obCenter = ob.getLocation()
            if faceuv:
                vOutputFormat = 'v %.6f %.6f %.6f %.6f %.6f %.6f %.6f %.6f\n'
            else:
                vOutputFormat = 'v %.6f %.6f %.6f %.6f %.6f %.6f\n'
                f_image = None

            #Loop through all faces
            submeshCount = 0
            faceCount = 0
            faceCounts = []
            for face in faces:
                if faceuv:
                    faceUVs = list(face.uv)
                faceUVindex = 0
                faceIndices = []
                for v in face:
                    if face.smooth:
                        vno = v.no
                    else:
                        vno = face.no
                    if faceuv:
                        key = v_n_uv_key(v.co, v.no, faceUVs[faceUVindex])
                        faceUVindex += 1
                    else:
                        key = v_n_key(v.co, v.no)
                    if not globalVertices.has_key(key):
                        globalVertices[key] = vertIndex
                        vertString += vOutputFormat % key
                        faceIndices.append(vertIndex)
                        vertIndex += 1
                    else:
                        faceIndices.append(globalVertices[key])

                # Make material,texture key
                f_mat = min(face.mat, len(materialNames) - 1)
                if faceuv:
                    f_image = face.image
                if faceuv and f_image:
                    matKey = materialNames[f_mat], f_image.name
                else:
                    matKey = materialNames[f_mat], None

                # Check for context switch
                if matKey != contextMat:
                    submeshCount += 1
                    if matKey[0] == None and matKey[1] == None:
                        # Write a null material, since we know the context has changed.
                        vertString += 'use (null)\n'  # mat, image

                    else:
                        mat_data = materialDict.get(matKey)
                        if not mat_data:
                            mat_data = materialDict[matKey] = fixName(
                                matKey[0]), materialItems[f_mat], f_image

                        vertString += 'use %d\n' % matIndex
                        globalMaterials[mat_data[0]] = matIndex
                        matIndex += 1
                    if faceCount != 0:
                        faceCounts.append(faceCount)
                        faceCount = 0

                contextMat = matKey
                vertString += 'face %d %d %d\n' % tuple(faceIndices)
                faceCount += 1
            faceCounts.append(faceCount)
            file.write('count %d\n' % vertIndex)
            if faceuv:
                file.write('uvs\n')
            file.write('submeshes %d\n' % submeshCount)
            for faceCount in faceCounts:
                file.write('faces %d\n' % faceCount)
            file.write(vertString)

            me.verts = None
            write_mtl(file, materialDict, globalMaterials)
            file.close()
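
# Minimal sketch of why the faces are sorted by (mat, image, smooth) above: after
# sorting, equal keys sit next to each other, so a new 'use' line is only emitted
# when the key actually changes. The keys below are made up for illustration.
def count_context_switches_sketch(keys):
    switches, context = 0, object()     # sentinel that never equals a real key
    for key in keys:
        if key != context:
            switches += 1
            context = key
    return switches

face_keys = [(1, 'a', True), (0, 'a', True), (1, 'a', True), (0, 'b', False)]
assert count_context_switches_sketch(face_keys) == 4
assert count_context_switches_sketch(sorted(face_keys)) == 3  # sorting groups the two (1, 'a', True) faces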
Пример #27
0
    def export(self, scene, world, alltextures,\
      EXPORT_APPLY_MODIFIERS = False,\
      EXPORT_TRI=    False,\
     ):

        print "Info: starting X3D export to " + self.filename + "..."
        self.writeHeader()
        # self.writeScript()
        self.writeNavigationInfo(scene)
        self.writeBackground(world, alltextures)
        self.writeFog(world)
        self.proto = 0

        # COPIED FROM OBJ EXPORTER
        if EXPORT_APPLY_MODIFIERS:
            temp_mesh_name = '~tmp-mesh'

            # Get the container mesh. - used for applying modifiers and non mesh objects.
            containerMesh = meshName = tempMesh = None
            for meshName in Blender.NMesh.GetNames():
                if meshName.startswith(temp_mesh_name):
                    tempMesh = Mesh.Get(meshName)
                    if not tempMesh.users:
                        containerMesh = tempMesh
            if not containerMesh:
                containerMesh = Mesh.New(temp_mesh_name)
        # --------------------------

        for ob_main in scene.objects.context:
            for ob, ob_mat in BPyObject.getDerivedObjects(ob_main):
                objType = ob.type
                objName = ob.name
                self.matonly = 0
                if objType == "Camera":
                    self.writeViewpoint(ob, ob_mat, scene)
                elif objType in ("Mesh", "Curve", "Surf", "Text"):
                    if EXPORT_APPLY_MODIFIERS or objType != 'Mesh':
                        me = BPyMesh.getMeshFromObject(ob, containerMesh,
                                                       EXPORT_APPLY_MODIFIERS,
                                                       False, scene)
                    else:
                        me = ob.getData(mesh=1)

                    self.writeIndexedFaceSet(ob,
                                             me,
                                             ob_mat,
                                             world,
                                             EXPORT_TRI=EXPORT_TRI)
                elif objType == "Lamp":
                    data = ob.data
                    datatype = data.type
                    if datatype == Lamp.Types.Lamp:
                        self.writePointLight(ob, ob_mat, data, world)
                    elif datatype == Lamp.Types.Spot:
                        self.writeSpotLight(ob, ob_mat, data, world)
                    elif datatype == Lamp.Types.Sun:
                        self.writeDirectionalLight(ob, ob_mat, data, world)
                    else:
                        self.writeDirectionalLight(ob, ob_mat, data, world)
                # do you think x3d could document what to do with dummy objects?
                #elif objType == "Empty" and objName != "Empty":
                #	self.writeNode(ob, ob_mat)
                else:
                    #print "Info: Ignoring [%s], object type [%s] not handle yet" % (object.name,object.getType)
                    pass

        self.file.write("\n</Scene>\n</X3D>")

        if EXPORT_APPLY_MODIFIERS:
            if containerMesh:
                containerMesh.verts = None

        self.cleanup()
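
# The container-mesh lookup above (the same idiom appears in Пример #26) reads as a
# small helper. This is only a sketch using calls that already appear in the original
# code, and it assumes a Blender 2.4x Python environment where these modules exist.
import Blender
from Blender import Mesh

def get_container_mesh_sketch(temp_mesh_name='~tmp-mesh'):
    # return an unused mesh whose name starts with temp_mesh_name, or a fresh one
    for meshName in Blender.NMesh.GetNames():
        if meshName.startswith(temp_mesh_name):
            tempMesh = Mesh.Get(meshName)
            if not tempMesh.users:          # no users, safe to reuse as scratch space
                return tempMesh
    return Mesh.New(temp_mesh_name)         # nothing free, make a new container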
Пример #28
0
def setSinglePath(filename):
	base = os.path.dirname(filename)
	print('Base path: ' + base)
	print('Relative path: ' + relpath('.', base))
	base = relpath('.', base)

	Blender.Window.EditMode(0, '', 0)

	print('Models count: %d' % len(bpy.data.scenes))
	savescene = bpy.data.scenes.active	# save current active scene
	Window.DrawProgressBar( 0.0, "Start exporting..." )
	progress = 0.0
	addProgress = 1.0 / len(bpy.data.scenes)

	USE_GZIP = True

	for scene in bpy.data.scenes:
		progress += addProgress
		Window.DrawProgressBar( progress, 'Exporting model "' + scene.name + '" %.0f %%...' % ( progress * 100.0 ) )

		print('Model "' + scene.name + '":')
		scene.makeCurrent()

		try:
			import gzip
			file_model = gzip.open(base + "\\" + scene.name +".bdsm2.gz", "wb")
		except:
			print "Failed to compression modules, exporting uncompressed"
			USE_GZIP = False

			file_model = open(base + "\\" + scene.name +".bdsm2", "wb")

		file_info = open(base + "\\" + scene.name +".bdsi2", "w")

		file_model.write(MODEL_HEADER)
		file_info.write('Model name: "' + scene.name + '"\n')

		ANIMATION = False

# Pass 1 - check whether the scene needs animation and collect the meshes

		render = scene.render
		start =	render.sFrame
		end =	render.eFrame
		if end < start: start, end = end, start
		saveframe = Blender.Get('curframe')	# save current active frame
		frame = start
		mesh_list = []
		mesh_by_frame = [[] for i in xrange(end-start+1)]
		omeshes = []
		link_omeshes_mesh = {}
		link_mesh_frame = {}

		while frame <= end:
			Blender.Set('curframe', frame)

			objects = [object for object in scene.objects if (object.type == 'Mesh') and (not object.restrictRender) and (1 in object.layers)]

			for object in objects:
				mesh = BPyMesh.getMeshFromObject(object, None, EXPORT_APPLY_MODIFIERS, False, scene)
				omesh = object.getData(True)
				if (mesh) and (len(mesh.faces)):
					try:
						ipo = object.ipo
					except:
						ipo = None
					if ipo:
						ANIMATION = True

					if omesh in omeshes:
#						file_info.write('+reusing mesh "' + omesh + '" for object "' + object.name + '\n')
						mesh_by_frame[frame-start].append((link_omeshes_mesh[omesh], object))
					else:
#						file_info.write('+mesh "' + omesh + '" for object "' + object.name + '"\n')
						omeshes.append(omesh)
						link_omeshes_mesh[omesh] = mesh
						mesh_list.append(mesh)
						mesh_by_frame[frame-start].append((mesh, object))
						link_mesh_frame[mesh] = frame

					materials = mesh.materials
					if materials:
						for material in materials:
							try:
								ipo = material.ipo
							except:
								ipo = None
							if ipo:
								ANIMATION = True

				#mesh.unlink()

			frame+=1
			
#		file_info.write('Meshes used: %d\n' % len(omeshes))
#		for omesh in omeshes:
#			file_info.write(' mesh ' + omesh + '\n')

		mesh_max = len(mesh_list)

		file_info.write('   meshes: %d\n' % mesh_max )
		file_model.write(struct.pack("<I", mesh_max))

		vert_data = ['' for i in xrange(len(mesh_list))];
		index_data = ['' for i in xrange(len(mesh_list))];

		for mi, mesh in enumerate(mesh_list):
			frame = link_mesh_frame[mesh]
			file_info.write('    [%d] mesh "'%(mi,) + mesh.name + '"  (frame = %d)\n' % frame)
			Blender.Set('curframe', frame)

# OpenGL 3.x  GL_QUADS is deprecated
#			MESH_QUADS = True
			MESH_QUADS = False
# Check whether the mesh consists only of quads
#			for face in mesh.faces:
#				if len(face) == 3:
#					MESH_QUADS = False
#					break

			if not MESH_QUADS:
				file_info.write('    convert to triangles\n')
				tempob = scene.objects.new(mesh)
				oldmode = Mesh.Mode()
				Mesh.Mode(Mesh.SelectModes['FACE'])
				mesh.sel = True
				mesh.quadToTriangle(0)
#				mesh.recalcNormals(0)
				scene.objects.unlink(tempob)
				Mesh.Mode(oldmode)

# TBD!!! Still need to add a check that no mixed faces remain

			EXPORT_UV = True
			EXPORT_COLORS = True

# Generate vertex lists and index lists
			faceUV = mesh.faceUV
			vertexUV = mesh.vertexUV
			vertexColors = mesh.vertexColors
			if (not faceUV) and (not vertexUV):	EXPORT_UV = False
			if not vertexColors:			EXPORT_COLORS = False
			if not EXPORT_UV:			faceUV = vertexUV = False
			if not EXPORT_COLORS:			vertexColors = False

			color = uvcoord = uvcoord_key = normal = normal_key = None
			verts = [] # list of (co, normal, uvcoord, color) tuples
			# vdict = {} # (index, normal, uv) -> new index
			vdict = [{} for i in xrange(len(mesh.verts))]
			vert_count = 0
			for i, f in enumerate(mesh.faces):
				smooth = f.smooth
				if not smooth:
					normal = tuple(f.no)
					normal_key = rvec3d(normal)
				
				if faceUV:		uv = f.uv
				if vertexColors:	col = f.col
				for j, v in enumerate(f):
					if smooth:
						normal = tuple(v.no)
						normal_key = rvec3d(normal)
			
					if faceUV:
						uvcoord = uv[j][0], 1.0-uv[j][1]
						uvcoord_key = rvec2d(uvcoord)
					elif vertexUV:
						uvcoord = v.uvco[0], 1.0-v.uvco[1]
						uvcoord_key = rvec2d(uvcoord)
			
					if vertexColors:	color = col[j].r, col[j].g, col[j].b
			
					key = normal_key, uvcoord_key, color
			
					vdict_local = vdict[v.index]
			
					if (not vdict_local) or (not vdict_local.has_key(key)):
						vdict_local[key] = vert_count;
						verts.append( (tuple(v.co), normal, uvcoord, color) )
						vert_count += 1

			file_info.write('       Mesh attributes: ')

			vl = len(verts)
			if MESH_QUADS:
				il = len(mesh.faces)*4
			else:
				il = len(mesh.faces)*3

			bits = 0
			if MESH_QUADS:
				bits |= 1
				file_info.write(' [quads]')
			else:
				file_info.write(' [triangles]')

			if EXPORT_UV:
				bits |= 2
				file_info.write(' [texture coordinates]')
			if EXPORT_COLORS:
				bits |= 4
				file_info.write(' [vertex color]')
			if vl < 256:
				file_info.write(' [VB]')
			elif vl < 65536:
				bits |= 8
				file_info.write(' [VS]')
			else:
				bits |= 16
				file_info.write(' [VI]')
			if il < 256:
				file_info.write(' [IB]')
			elif il < 65536:
				bits |= 32
				file_info.write(' [IS]')
			else:
				bits |= 64
				file_info.write(' [II]')

			file_info.write('\n')
			file_model.write(struct.pack("<c", chr(bits)))

			file_info.write('       Mesh verts = %d\n' % vl);
			packwrite(file_model, vl);

			file_info.write('       Mesh indexes = %d\n' % il)
			packwrite(file_model, il);
			vind=0
			for v in verts:
#				file_info.write('       [%d]:' % vind)
				vind += 1
#				file_info.write(' v:(%.3f, %.3f, %.3f)' % (v[0][0], v[0][1], v[0][2]))
				vert_data[mi] += struct.pack('<3H', FloatToHalf(v[0][0]), FloatToHalf(v[0][1]), FloatToHalf(v[0][2]))	# vertex
#				file_info.write(' n:(%.3f, %.3f, %.3f)' % (v[1][0], v[1][1], v[1][2]))
				vert_data[mi] += struct.pack('<3H', FloatToHalf(v[1][0]), FloatToHalf(v[1][1]), FloatToHalf(v[1][2]))	# normals
				if EXPORT_UV:
#					file_info.write(' v:(%.3f, %.3f)' % (v[2][0], v[2][1]))
					vert_data[mi] += struct.pack('<2H', FloatToHalf(v[2][0]), FloatToHalf(v[2][1]))
				if EXPORT_COLORS:
#					file_info.write(' c:(%d, %d, %d, %d)' % (v[3][0], v[3][1], v[3][2], 255))
					vert_data[mi] += struct.pack('<4c', chr(int(v[3][0])), chr(int(v[3][1])), chr(int(v[3][2])), chr(255) ) 
#				file_info.write('\n')

			ri = 0
#			file_info.write('    Indexes:\n')
			iind = 0
			for (i, f) in enumerate(mesh.faces):
#				file.write('%d ' % len(f))
#				file.write(struct.pack("<I", len(f)))
				smooth = f.smooth
				if not smooth: no = rvec3d(f.no)
		
				if faceUV:		uv = f.uv
				if vertexColors:	col = f.col
				for j, v in enumerate(f):
					if f.smooth:		normal = rvec3d(v.no)
					else:			normal = no
					if faceUV:		uvcoord = rvec2d((uv[j][0], 1.0-uv[j][1]))
					elif vertexUV:		uvcoord = rvec2d((v.uvco[0], 1.0-v.uvco[1]))
					if vertexColors:	color = col[j].r, col[j].g, col[j].b
			
#					file.write(struct.pack("<I", vdict[v.index][normal, uvcoord, color]))
					ri += 1;
#					index_data[mi] += struct.pack("<I", vdict[v.index][normal, uvcoord, color])
#					file_info.write('       [%d]: %d\n' % (iind, vdict[v.index][normal, uvcoord, color]))
					iind += 1
					if vl < 256:
						index_data[mi] += struct.pack("<c", chr(vdict[v.index][normal, uvcoord, color]))
					elif vl < 65536:
						index_data[mi] += struct.pack("<H", vdict[v.index][normal, uvcoord, color])
					else:
						index_data[mi] += struct.pack("<I", vdict[v.index][normal, uvcoord, color])

#			file_info.write('       Real indexes = %d\n' % ri)

		if ANIMATION:
			file_info.write('  Animation frames: %d\n' %(end-start+1))
		else:
			end = start
			file_info.write('  No animation (frames: 1)\n')

		file_model.write(ANIMATION_HEADER)
		file_model.write(struct.pack("<I", end-start+1))

		frame = start
		while frame <= end:
			Blender.Set('curframe', frame)
			file_info.write('   frame: %d\n' % frame )

			mif = len(mesh_by_frame[frame-start])
			file_info.write('     meshes in frame: %d\n' % mif)
			packmwrite(file_model, mesh_max, mif);

			for mesh,mobject in mesh_by_frame[frame-start]:
				i = mesh_list.index(mesh)
				mcolor = [255, 255, 255, 255]

				materials = mobject.getMaterials()
				if not materials: materials = mesh.materials

				if materials:
					if materials[0]:
						mcolor = [int(materials[0].R*255), int(materials[0].G*255), int(materials[0].B*255), int(255) ]

				for x in xrange(4):
					if mcolor[x]<0: mcolor[x] = 0
					if mcolor[x]>255: mcolor[x] = 255

				file_info.write('     mesh "' + mesh.name + '"[%d] for object "'%(i,) + mobject.name + '"' )
				packmwrite(file_model, mesh_max, i)
				file_info.write('  color: [%d,%d,%d,%d]\n' % tuple(mcolor) )
				file_model.write(struct.pack("<4c", chr(mcolor[0]),chr(mcolor[1]),chr(mcolor[2]),chr(mcolor[3])))

				file_info.write('       matrix:');
				file_info.write(' [[%.2f,%.2f,%.2f,%.2f]' % tuple(mobject.matrixWorld[0]))
				file_info.write(' [%.2f,%.2f,%.2f,%.2f]' % tuple(mobject.matrixWorld[1]))
				file_info.write(' [%.2f,%.2f,%.2f,%.2f]' % tuple(mobject.matrixWorld[2]))
				file_info.write(' [%.2f,%.2f,%.2f,%.2f]]\n' % tuple(mobject.matrixWorld[3]))
				file_model.write(struct.pack("<4f", mobject.matrixWorld[0][0], mobject.matrixWorld[0][1], mobject.matrixWorld[0][2], mobject.matrixWorld[0][3]))
				file_model.write(struct.pack("<4f", mobject.matrixWorld[1][0], mobject.matrixWorld[1][1], mobject.matrixWorld[1][2], mobject.matrixWorld[1][3]))
				file_model.write(struct.pack("<4f", mobject.matrixWorld[2][0], mobject.matrixWorld[2][1], mobject.matrixWorld[2][2], mobject.matrixWorld[2][3]))
				file_model.write(struct.pack("<4f", mobject.matrixWorld[3][0], mobject.matrixWorld[3][1], mobject.matrixWorld[3][2], mobject.matrixWorld[3][3]))

			frame+=1

# Save vbo
		file_model.write(VBO_HEADER)
		for i in xrange(len(mesh_list)):
			file_info.write('   Mesh %d:\n' % i)
			file_model.write(vert_data[i])
			file_model.write(index_data[i])
			file_info.write('     Vertex data size = %d bytes\n' % len(vert_data[i]))
			file_info.write('     Index data size = %d bytes\n' % len(index_data[i]))


		Blender.Set('curframe', saveframe)	# restore current active frame

		file_info.close();
		file_model.close();

	savescene.makeCurrent()			# restore active scene
	Window.DrawProgressBar( 1.0, "Finished!" )

	return
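
# Sketch of how the single attribute byte written by the exporter above could be
# decoded again. The bit meanings are taken directly from the writer: 1 = quads,
# 2 = texture coordinates, 4 = vertex colors, while 8/16 correspond to the [VS]/[VI]
# labels and 32/64 to the [IS]/[II] labels written to the .bdsi2 info file
# (no bit set means the byte-sized [VB]/[IB] case). The decoder itself is
# illustrative and not part of the original file format code.
def decode_mesh_bits_sketch(bits):
    vert_fmt = 'VI' if bits & 16 else ('VS' if bits & 8 else 'VB')
    index_fmt = 'II' if bits & 64 else ('IS' if bits & 32 else 'IB')
    return bool(bits & 1), bool(bits & 2), bool(bits & 4), vert_fmt, index_fmt

assert decode_mesh_bits_sketch(2 | 8 | 32) == (False, True, False, 'VS', 'IS')
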
def env_from_group(ob_act, grp, PREF_UPDATE_ACT=True):
	
	me = ob_act.getData(mesh=1)
	
	if PREF_UPDATE_ACT:
		act_group = me.activeGroup
		if act_group == None:
			Draw.PupMenu('Error%t|No active vertex group.')
			return
		
		try:
			ob = Object.Get(act_group)
		except:
			Draw.PupMenu('Error%t|No object named "'+ act_group +'".')
			return
		
		group_isect = intersection_data(ob)
		
	else:
		
		# get intersection data
		# group_isect_data = [intersection_data(ob) for ob in group.objects]
		group_isect_data = []
		for ob in grp.objects:
			if ob != ob_act: # in case we're in the group.
				gid = intersection_data(ob)
				if gid[1]: # has some triangles?
					group_isect_data.append( gid )
					
					# we only need 1 for the active group
					if PREF_UPDATE_ACT:
						break
	
		# sort by name
		group_isect_data.sort()
	
	if PREF_UPDATE_ACT:
		group_names, vweight_list = BPyMesh.meshWeight2List(me)
		group_index = group_names.index(act_group)
	else:
		group_names = [gid[0] for gid in group_isect_data]
		vweight_list= [[0.0]* len(group_names) for i in xrange(len(me.verts))]
	
	
	
	ob_act_mat = ob_act.matrixWorld
	for vi, v in enumerate(me.verts):
		# Get all the groups for this vert
		co = v.co * ob_act_mat
		
		if PREF_UPDATE_ACT:
			# only update existing
			if point_in_data(co, group_isect):	w = 1.0
			else:								w = 0.0
			vweight_list[vi][group_index] = w
			
		else:
			# generate new vgroup weights.
			for group_index, group_isect in enumerate(group_isect_data):
				if point_in_data(co, group_isect):
					vweight_list[vi][group_index] = 1.0
	
	BPyMesh.list2MeshWeight(me, group_names, vweight_list)
Пример #30
0
def write(filename):
    start = Blender.sys.time()
    file = open(filename, "wb")

    scn = Blender.Scene.GetCurrent()
    objects = list(scn.objects.context)

    if not objects:
        Blender.Draw.PupMenu('Error%t|No Objects selected')
        return

    try:
        objects.sort(key=lambda a: a.name)
    except:
        objects.sort(lambda a, b: cmp(a.name, b.name))

    text = generate_text()
    desc = generate_desc()
    icon = ""  #generate_icon()

    meshes = []
    for obj in objects:
        mesh = BPyMesh.getMeshFromObject(obj, None, True, False, scn)
        if mesh:
            mesh.transform(obj.matrixWorld)
            meshes.append((obj.name, mesh))  # keep the owning object's name with its mesh

    material_names = get_used_material_names([mesh for name, mesh in meshes])
    tags = generate_tags(material_names)
    surfs = generate_surfs(material_names)
    chunks = [text, desc, icon, tags]

    meshdata = cStringIO.StringIO()

    layer_index = 0

    for obj_name, mesh in meshes:
        layr = generate_layr(obj_name, layer_index)
        pnts = generate_pnts(mesh)
        bbox = generate_bbox(mesh)
        pols = generate_pols(mesh)
        ptag = generate_ptag(mesh, material_names)
        clip = generate_clip(mesh, material_names)

        if mesh.faceUV:
            vmad_uv = generate_vmad_uv(mesh)  # per face

        if mesh.vertexColors:
            #if meshtools.average_vcols:
            #	vmap_vc = generate_vmap_vc(mesh)  # per vert
            #else:
            vmad_vc = generate_vmad_vc(mesh)  # per face

        write_chunk(meshdata, "LAYR", layr)
        chunks.append(layr)
        write_chunk(meshdata, "PNTS", pnts)
        chunks.append(pnts)
        write_chunk(meshdata, "BBOX", bbox)
        chunks.append(bbox)
        write_chunk(meshdata, "POLS", pols)
        chunks.append(pols)
        write_chunk(meshdata, "PTAG", ptag)
        chunks.append(ptag)

        if mesh.vertexColors:
            #if meshtools.average_vcols:
            #	write_chunk(meshdata, "VMAP", vmap_vc)
            #	chunks.append(vmap_vc)
            #else:
            write_chunk(meshdata, "VMAD", vmad_vc)
            chunks.append(vmad_vc)

        if mesh.faceUV:
            write_chunk(meshdata, "VMAD", vmad_uv)
            chunks.append(vmad_uv)
            write_chunk(meshdata, "CLIP", clip)
            chunks.append(clip)

        layer_index += 1
        mesh.verts = None  # save some ram

    for surf in surfs:
        chunks.append(surf)

    write_header(file, chunks)
    write_chunk(file, "ICON", icon)
    write_chunk(file, "TEXT", text)
    write_chunk(file, "DESC", desc)
    write_chunk(file, "TAGS", tags)
    file.write(meshdata.getvalue())
    meshdata.close()
    for surf in surfs:
        write_chunk(file, "SURF", surf)
    write_chunk(file, "DATE", "August 19, 2005")

    Blender.Window.DrawProgressBar(1.0, "")  # clear progressbar
    file.close()
    print '\a\r',
    print "Successfully exported %s in %.3f seconds" % (
        filename.split('\\')[-1].split('/')[-1], Blender.sys.time() - start)
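
import struct

# write_chunk() and write_header() are defined elsewhere in the original exporter and
# are not shown in this excerpt. A plausible stand-in for write_chunk(), assuming the
# standard IFF/LWO2 chunk layout (4-byte tag, big-endian 32-bit length, payload padded
# to an even size), would look like this:
def write_chunk_sketch(f, tag, data):
    f.write(tag)                            # 4-character chunk ID, e.g. "PNTS"
    f.write(struct.pack(">I", len(data)))   # big-endian payload length
    f.write(data)
    if len(data) % 2:                       # IFF chunks are padded to an even length
        f.write("\0")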
Пример #31
0
def mesh_mirror(me, PREF_MIRROR_LOCATION, PREF_XMID_SNAP, PREF_MAX_DIST,
                PREF_XZERO_THRESH, PREF_MODE, PREF_SEL_ONLY, PREF_EDGE_USERS,
                PREF_MIRROR_WEIGHTS, PREF_FLIP_NAMES, PREF_CREATE_FLIP_NAMES):
    '''
    PREF_MIRROR_LOCATION, Will we mirror locations?
    PREF_XMID_SNAP, Should we snap verts to X-0?
    PREF_MAX_DIST, Maximum distance to test snapping verts.
    PREF_XZERO_THRESH, How close verts must be to the middle before they are considered X-Zero verts.
    PREF_MODE, 0: middle, 1: left, 2: right.
    PREF_SEL_ONLY, only snap the selection.
    PREF_EDGE_USERS, match only verts with the same number of edge users.
    PREF_MIRROR_WEIGHTS, mirror vertex group weights as well.
    PREF_FLIP_NAMES, use flipped (.L/.R style) group names when mirroring weights.
    PREF_CREATE_FLIP_NAMES, create the flipped group name if it does not exist yet.
    '''

    # Operate on all verts
    if not PREF_SEL_ONLY:
        for v in me.verts:
            v.sel = 1

    if PREF_EDGE_USERS:
        edge_users = [0] * len(me.verts)
        for ed in me.edges:
            edge_users[ed.v1.index] += 1
            edge_users[ed.v2.index] += 1

    if PREF_XMID_SNAP:  # Do we snap locations at all?
        for v in me.verts:
            if v.sel:
                if abs(v.co.x) <= PREF_XZERO_THRESH:
                    v.co.x = 0
                    v.sel = 0

        # already de-selected verts
        neg_vts = [v for v in me.verts if v.sel and v.co.x < 0]
        pos_vts = [v for v in me.verts if v.sel and v.co.x > 0]

    else:
        # Use a small margin; verts must be outside it before we mirror them.
        neg_vts = [v for v in me.verts if v.sel if v.co.x < -PREF_XZERO_THRESH]
        pos_vts = [v for v in me.verts if v.sel if v.co.x > PREF_XZERO_THRESH]

    #*Mirror Location*********************************************************#
    if PREF_MIRROR_LOCATION:
        mirror_pairs = []
        # align the negative with the positive.
        flipvec = Mathutils.Vector()
        len_neg_vts = float(len(neg_vts))
        for i1, nv in enumerate(neg_vts):
            if nv.sel:  # we may already be mirrored, if so we'll be deselected
                nv_co = nv.co
                for i2, pv in enumerate(pos_vts):
                    if pv.sel:
                        # Enforce edge users.
                        if not PREF_EDGE_USERS or edge_users[i1] == edge_users[
                                i2]:
                            flipvec[:] = pv.co
                            flipvec.x = -flipvec.x
                            l = (nv_co - flipvec).length

                            if l == 0.0:  # Both are already mirrored so we don't need to think about them.
                                # De-select so we don't use them again.
                                pv.sel = nv.sel = 0

                            # Record a match.
                            elif l <= PREF_MAX_DIST:

                                # We can adjust the length by the normal, now we know the length is under the limit.
                                # DISABLED, WASN'T VERY USEFUL
                                '''
								if PREF_NOR_WEIGHT>0:
									# Get the normal and flipm reuse flipvec
									flipvec[:]= pv.no
									flipvec.x= -flipvec.x
									try:
										ang= Mathutils.AngleBetweenVecs(nv.no, flipvec)/180.0
									except: # on rare occasions angle between vecs will fail.- zero length vec.
										ang= 0
									
									l=l*(1+(ang*PREF_NOR_WEIGHT))
								'''
                                # Record the pairs for sorting to see who will get joined
                                mirror_pairs.append((l, nv, pv))

                # Update the progress bar every 10 loops
                if i1 % 10 == 0:
                    Window.DrawProgressBar(
                        0.8 * (i1 / len_neg_vts),
                        'Mirror verts %i of %i' % (i1, len_neg_vts))

        Window.DrawProgressBar(0.9, 'Mirror verts: Updating locations')

        # Now we have a list of the pairs we might use, let's find the best and do them first,
        # de-selecting as we go so we can make sure not to mess it up.
        try:
            mirror_pairs.sort(key=lambda a: a[0])
        except:
            mirror_pairs.sort(lambda a, b: cmp(a[0], b[0]))

        for dist, v1, v2 in mirror_pairs:  # dist, neg, pos
            if v1.sel and v2.sel:
                if PREF_MODE == 0:  # Middle
                    flipvec[:] = v2.co  # positive
                    flipvec.x = -flipvec.x  # negative
                    v2.co = v1.co = (flipvec + v1.co) * 0.5  # midway
                    v2.co.x = -v2.co.x
                elif PREF_MODE == 2:  # Left
                    v2.co = v1.co
                    v2.co.x = -v2.co.x
                elif PREF_MODE == 1:  # Right
                    v1.co = v2.co
                    v1.co.x = -v1.co.x
                v1.sel = v2.sel = 0

    #*Mirror Weights**********************************************************#
    if PREF_MIRROR_WEIGHTS:

        groupNames, vWeightDict = BPyMesh.meshWeight2Dict(me)
        mirror_pairs_l2r = []  # Store a list of matches for these verts.
        mirror_pairs_r2l = []  # Store a list of matches for these verts.

        # align the negative with the positive.
        flipvec = Mathutils.Vector()
        len_neg_vts = float(len(neg_vts))

        # Here we make a tuple to look through; if we're in middle mode we'll need to look through both.
        if PREF_MODE == 0:  # Middle
            find_set = ((neg_vts, pos_vts, mirror_pairs_l2r),
                        (pos_vts, neg_vts, mirror_pairs_r2l))
        elif PREF_MODE == 1:  # Left
            find_set = ((neg_vts, pos_vts, mirror_pairs_l2r), )
        elif PREF_MODE == 2:  # Right
            find_set = ((pos_vts, neg_vts, mirror_pairs_r2l), )

        # Do a locational lookup again :/ - This isn't that good form but if we haven't mirrored weights we'll need to do it anyway.
        # The difference with this is that we don't need a 1:1 match for each vert - just get each vert to find another mirrored vert
        # and use its weight.
        # Use "find_set" so we can do a flipped search L>R and R>L without duplicate code.
        for vtls_A, vtls_B, pair_ls in find_set:
            for i1, vA in enumerate(vtls_A):
                best_len = 1 << 30  # BIGNUM
                best_idx = -1

                # Find the BEST match
                vA_co = vA.co
                for i2, vB in enumerate(vtls_B):
                    # Enforce edge users.
                    if not PREF_EDGE_USERS or edge_users[i1] == edge_users[i2]:
                        flipvec[:] = vB.co
                        flipvec.x = -flipvec.x
                        l = (vA_co - flipvec).length

                        if l < best_len:
                            best_len = l
                            best_idx = i2

                if best_idx != -1:
                    pair_ls.append((vtls_A[i1].index,
                                    vtls_B[best_idx].index))  # neg, pos.

        # Now we can merge the weights
        if PREF_MODE == 0:  # Middle
            newVWeightDict = [vWeightDict[i] for i in xrange(len(me.verts))
                              ]  # Have empty dicts just in case
            for pair_ls in (mirror_pairs_l2r, mirror_pairs_r2l):
                if PREF_FLIP_NAMES:
                    for i1, i2 in pair_ls:
                        flipWeight, groupNames = BPyMesh.dictWeightFlipGroups(
                            vWeightDict[i2], groupNames,
                            PREF_CREATE_FLIP_NAMES)
                        newVWeightDict[i1] = BPyMesh.dictWeightMerge(
                            [vWeightDict[i1], flipWeight])
                else:
                    for i1, i2 in pair_ls:
                        newVWeightDict[i1] = BPyMesh.dictWeightMerge(
                            [vWeightDict[i1], vWeightDict[i2]])

            vWeightDict = newVWeightDict

        elif PREF_MODE == 1:  # Left
            if PREF_FLIP_NAMES:
                for i1, i2 in mirror_pairs_l2r:
                    vWeightDict[i2], groupNames = BPyMesh.dictWeightFlipGroups(
                        vWeightDict[i1], groupNames, PREF_CREATE_FLIP_NAMES)
            else:
                for i1, i2 in mirror_pairs_l2r:
                    vWeightDict[i2] = vWeightDict[
                        i1]  # Warning Multiple instances of the same data, its ok in this case but dont modify later.

        elif PREF_MODE == 2:  # Right
            if PREF_FLIP_NAMES:
                for i1, i2 in mirror_pairs_r2l:
                    vWeightDict[i2], groupNames = BPyMesh.dictWeightFlipGroups(
                        vWeightDict[i1], groupNames, PREF_CREATE_FLIP_NAMES)
            else:
                for i1, i2 in mirror_pairs_r2l:
                    vWeightDict[i2] = vWeightDict[i1]  # Warning, ditto above

        BPyMesh.dict2MeshWeight(me, groupNames, vWeightDict)

    me.update()
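
# Illustrative call of mesh_mirror() (not part of the original script), assuming it
# runs inside Blender 2.4x with a mesh object active; the preference values below are
# arbitrary examples that just follow the meanings listed in the docstring above.
import bpy

def mirror_active_object_sketch():
    scn = bpy.data.scenes.active
    ob = scn.objects.active
    me = ob.getData(mesh=1)
    mesh_mirror(me,
                PREF_MIRROR_LOCATION=True,   # mirror vert locations
                PREF_XMID_SNAP=True,         # snap near-middle verts to X=0
                PREF_MAX_DIST=0.02,          # search distance for a mirror partner
                PREF_XZERO_THRESH=0.002,     # how close counts as "on the middle"
                PREF_MODE=0,                 # 0: middle, 1: left, 2: right
                PREF_SEL_ONLY=True,
                PREF_EDGE_USERS=False,
                PREF_MIRROR_WEIGHTS=False,
                PREF_FLIP_NAMES=False,
                PREF_CREATE_FLIP_NAMES=False)
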
def vertexFakeAO(me, PREF_BLUR_ITERATIONS, PREF_BLUR_STRENGTH,
                 PREF_CLAMP_CONCAVE, PREF_CLAMP_CONVEX, PREF_SHADOW_ONLY,
                 PREF_SEL_ONLY):
    Window.WaitCursor(1)
    Ang = Mathutils.AngleBetweenVecs

    BPyMesh.meshCalcNormals(me)

    vert_tone = [0.0] * len(me.verts)
    vert_tone_count = [0] * len(me.verts)

    min_tone = 0
    max_tone = 0

    for i, f in enumerate(me.faces):
        fc = f.cent
        fno = f.no

        for v in f.v:
            vno = v.no  # vertex normal

            dot = vno.dot(v.co) - vno.dot(fc)
            vert_tone_count[v.index] += 1
            try:
                a = Ang(vno, fno)
            except:
                continue

            # Convex
            if dot > 0:
                a = min(PREF_CLAMP_CONVEX, a)
                if not PREF_SHADOW_ONLY:
                    vert_tone[v.index] += a
            else:
                a = min(PREF_CLAMP_CONCAVE, a)
                vert_tone[v.index] -= a

    # average the accumulated tone for each vertex
    for i, tones in enumerate(vert_tone):
        if vert_tone_count[i]:
            vert_tone[i] = vert_tone[i] / vert_tone_count[i]

    # Below we use edges to blur along so the edges need counting, not the faces
    vert_tone_count = [0] * len(me.verts)
    for ed in me.edges:
        vert_tone_count[ed.v1.index] += 1
        vert_tone_count[ed.v2.index] += 1

    # Blur tone
    blur = PREF_BLUR_STRENGTH
    blur_inv = 1.0 - PREF_BLUR_STRENGTH

    for i in xrange(PREF_BLUR_ITERATIONS):

        # backup the original tones
        orig_vert_tone = list(vert_tone)

        for ed in me.edges:

            i1 = ed.v1.index
            i2 = ed.v2.index

            val1 = (orig_vert_tone[i2] * blur) + (orig_vert_tone[i1] *
                                                  blur_inv)
            val2 = (orig_vert_tone[i1] * blur) + (orig_vert_tone[i2] *
                                                  blur_inv)

            # Apply the tone divided by the number of connected edges
            vert_tone[i1] += val1 / max(vert_tone_count[i1], 1)
            vert_tone[i2] += val2 / max(vert_tone_count[i2], 1)

    min_tone = min(vert_tone)
    max_tone = max(vert_tone)

    #print min_tone, max_tone

    tone_range = max_tone - min_tone
    if max_tone == min_tone:
        return

    for f in me.faces:
        if not PREF_SEL_ONLY or f.sel:
            f_col = f.col
            for i, v in enumerate(f):
                col = f_col[i]
                tone = vert_tone[v.index]
                tone = (tone - min_tone) / tone_range

                col.r = int(tone * col.r)
                col.g = int(tone * col.g)
                col.b = int(tone * col.b)

    Window.WaitCursor(0)
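
# Minimal sketch of the final tone mapping above: every vertex tone is rescaled into
# the 0..1 range across the whole mesh and then multiplied into the existing vertex
# color channels. The numbers below are made up for illustration.
def shade_channel_sketch(channel, tone, min_tone, max_tone):
    t = (tone - min_tone) / (max_tone - min_tone)
    return int(t * channel)

assert shade_channel_sketch(255, 0.0, -1.0, 1.0) == 127  # a mid tone roughly halves the channel
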
def env_from_group(ob_act, grp, PREF_UPDATE_ACT=True):

    me = ob_act.getData(mesh=1)

    if PREF_UPDATE_ACT:
        act_group = me.activeGroup
        if act_group == None:
            Draw.PupMenu('Error%t|No active vertex group.')
            return

        try:
            ob = Object.Get(act_group)
        except:
            Draw.PupMenu('Error%t|No object named "' + act_group + '".')
            return

        group_isect = intersection_data(ob)

    else:

        # get intersection data
        # group_isect_data = [intersection_data(ob) for ob in group.objects]
        group_isect_data = []
        for ob in grp.objects:
            if ob != ob_act:  # in case we're in the group.
                gid = intersection_data(ob)
                if gid[1]:  # has some triangles?
                    group_isect_data.append(gid)

                    # we only need 1 for the active group
                    if PREF_UPDATE_ACT:
                        break

        # sort by name
        group_isect_data.sort()

    if PREF_UPDATE_ACT:
        group_names, vweight_list = BPyMesh.meshWeight2List(me)
        group_index = group_names.index(act_group)
    else:
        group_names = [gid[0] for gid in group_isect_data]
        vweight_list = [[0.0] * len(group_names)
                        for i in xrange(len(me.verts))]

    ob_act_mat = ob_act.matrixWorld
    for vi, v in enumerate(me.verts):
        # Get all the groups for this vert
        co = v.co * ob_act_mat

        if PREF_UPDATE_ACT:
            # only update existing
            if point_in_data(co, group_isect): w = 1.0
            else: w = 0.0
            vweight_list[vi][group_index] = w

        else:
            # generate new vgroup weights.
            for group_index, group_isect in enumerate(group_isect_data):
                if point_in_data(co, group_isect):
                    vweight_list[vi][group_index] = 1.0

    BPyMesh.list2MeshWeight(me, group_names, vweight_list)
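
# Hedged usage sketch, not part of the original example: one way
# env_from_group() might be driven from a Blender 2.4x script. The group
# name 'isect_group' is a hypothetical assumption, and Scene/Group are
# assumed to be the usual Blender module imports.
def _env_from_group_demo():
    scn = Scene.GetCurrent()
    ob_act = scn.objects.active
    grp = Group.Get('isect_group')  # hypothetical group of cutter objects
    env_from_group(ob_act, grp, PREF_UPDATE_ACT=False)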
Example #34
0
def solidify(me, PREF_THICK, PREF_SKIN_SIDES=True, PREF_REM_ORIG=False, PREF_COLLAPSE_SIDES=False):
	
	# Main code function
	me_faces = me.faces
	faces_sel= [f for f in me_faces if f.sel]
	
	BPyMesh.meshCalcNormals(me)
	normals= [v.no for v in me.verts]
	vertFaces= [[] for i in xrange(len(me.verts))]
	for f in me_faces:
		no=f.no
		for v in f:
			vertFaces[v.index].append(no)
	
	# Scale the normals by the face angles from the vertex Normals.
	for i in xrange(len(me.verts)):
		length=0.0
		if vertFaces[i]:
			for fno in vertFaces[i]:
				try:
					a= Ang(fno, normals[i])
				except:
					a= 0	
				if a>=90:
					length+=1
				elif a < SMALL_NUM:
					length+= 1
				else:
					length+= angleToLength(a)
			
			length= length/len(vertFaces[i])
			#print 'LENGTH %.6f' % length
			# normals[i]= (normals[i] * length) * PREF_THICK
			normals[i] *= length * PREF_THICK
			
			
	
	len_verts = len( me.verts )
	len_faces = len( me_faces )
	
	vert_mapping= [-1] * len(me.verts)
	verts= []
	for f in faces_sel:
		for v in f:
			i= v.index
			if vert_mapping[i]==-1:
				vert_mapping[i]= len_verts + len(verts)
				verts.append(v.co + normals[i])
	
	#verts= [v.co + normals[v.index] for v in me.verts]
	
	me.verts.extend( verts )
	#faces= [tuple([ me.verts[v.index+len_verts] for v in reversed(f.v)]) for f in me_faces ]
	faces= [ tuple([vert_mapping[v.index] for v in reversed(f.v)]) for f in faces_sel ]
	me_faces.extend( faces )
	

	
	
	# Old method before multi UVs
	"""
	has_uv = me.faceUV
	has_vcol = me.vertexColors
	for i, orig_f in enumerate(faces_sel):
		new_f= me_faces[len_faces + i]
		new_f.mat = orig_f.mat
		new_f.smooth = orig_f.smooth
		orig_f.sel=False
		new_f.sel= True
		new_f = me_faces[i+len_faces]
		if has_uv:
			new_f.uv = [c for c in reversed(orig_f.uv)]
			new_f.mode = orig_f.mode
			new_f.flag = orig_f.flag
			if orig_f.image:
				new_f.image = orig_f.image
		if has_vcol:
			new_f.col = [c for c in reversed(orig_f.col)]
	"""
	copy_facedata_multilayer(me, faces_sel, [me_faces[len_faces + i] for i in xrange(len(faces_sel))])
	
	if PREF_SKIN_SIDES or PREF_COLLAPSE_SIDES:
		skin_side_faces= []
		skin_side_faces_orig= []
		# Get edges of faces that only have 1 user - so we can make walls
		edges = {}
		
		# So we can reference indices that wrap back to the start.
		ROT_TRI_INDEX = 0,1,2,0
		ROT_QUAD_INDEX = 0,1,2,3,0
		
		for f in faces_sel:
			f_v= f.v
			for i, edgekey in enumerate(f.edge_keys):
				if edges.has_key(edgekey):
					edges[edgekey]= None
				else:
					if len(f_v) == 3:
						edges[edgekey] = f, f_v, i, ROT_TRI_INDEX[i+1]
					else:
						edges[edgekey] = f, f_v, i, ROT_QUAD_INDEX[i+1]
		del ROT_QUAD_INDEX, ROT_TRI_INDEX
		
		# So we can remove doubles with edges only.
		if PREF_COLLAPSE_SIDES:
			me.sel = False
		
		# Edges are done. extrude the single user edges.
		for edge_face_data in edges.itervalues():
			if edge_face_data: # != None
				f, f_v, i1, i2 = edge_face_data
				v1i,v2i= f_v[i1].index, f_v[i2].index
				
				if PREF_COLLAPSE_SIDES:
					# Collapse
					cv1 = me.verts[v1i]
					cv2 = me.verts[vert_mapping[v1i]]
					
					cv3 = me.verts[v2i]
					cv4 = me.verts[vert_mapping[v2i]]
					
					cv1.co = cv2.co = (cv1.co+cv2.co)/2
					cv3.co = cv4.co = (cv3.co+cv4.co)/2
					
					cv1.sel=cv2.sel=cv3.sel=cv4.sel=True
					
					
					
				else:
					# Now make a new Face
					# skin_side_faces.append( (v1i, v2i, vert_mapping[v2i], vert_mapping[v1i]) )
					skin_side_faces.append( (v2i, v1i, vert_mapping[v1i], vert_mapping[v2i]) )
					skin_side_faces_orig.append((f, len(me_faces) + len(skin_side_faces_orig), i1, i2))
		
		if PREF_COLLAPSE_SIDES:
			me.remDoubles(0.0001)
		else:
			me_faces.extend(skin_side_faces)
			# Now assign properties.
			"""
			# Before MultiUVs
			for i, origfData in enumerate(skin_side_faces_orig):
				orig_f, new_f_idx, i1, i2 = origfData
				new_f= me_faces[new_f_idx]
				
				new_f.mat= orig_f.mat
				new_f.smooth= orig_f.smooth
				if has_uv:
					new_f.mode= orig_f.mode
					new_f.flag= orig_f.flag
					if orig_f.image:
						new_f.image= orig_f.image
					
					uv1= orig_f.uv[i1]
					uv2= orig_f.uv[i2]
					new_f.uv= (uv1, uv2, uv2, uv1)
				
				if has_vcol:
					col1= orig_f.col[i1]
					col2= orig_f.col[i2]
					new_f.col= (col1, col2, col2, col1)
			"""
			
			for i, origfData in enumerate(skin_side_faces_orig):
				orig_f, new_f_idx, i2, i1 = origfData
				new_f= me_faces[new_f_idx]
				
				new_f.mat= orig_f.mat
				new_f.smooth= orig_f.smooth
			
			for uvlayer in me.getUVLayerNames():
				me.activeUVLayer = uvlayer
				for i, origfData in enumerate(skin_side_faces_orig):
					orig_f, new_f_idx, i2, i1 = origfData
					new_f= me_faces[new_f_idx]
					
					new_f.mode= orig_f.mode
					new_f.flag= orig_f.flag
					new_f.image= orig_f.image
					
					uv1= orig_f.uv[i1]
					uv2= orig_f.uv[i2]
					new_f.uv= (uv1, uv2, uv2, uv1)
			
			for collayer in me.getColorLayerNames():
				me.activeColorLayer = collayer
				for i, origfData in enumerate(skin_side_faces_orig):
					orig_f, new_f_idx, i2, i1 = origfData
					new_f= me_faces[new_f_idx]
					
					col1= orig_f.col[i1]
					col2= orig_f.col[i2]
					new_f.col= (col1, col2, col2, col1)
		
	
	if PREF_REM_ORIG:
		me_faces.delete(0, faces_sel)
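
# Hedged usage sketch, not part of the original example: a minimal way to
# call solidify() on the active object's mesh. The thickness of 0.1 is a
# hypothetical value, and Scene is assumed to be Blender.Scene.
def _solidify_demo():
	scn = Scene.GetCurrent()
	ob = scn.objects.active
	me = ob.getData(mesh=1)
	# solidify() operates on the currently selected faces of the mesh.
	solidify(me, 0.1, PREF_SKIN_SIDES=True, PREF_REM_ORIG=False)
	me.update()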
Example #35
0
def my_mesh_util(me):
    # Examples
    S = Mesh.EdgeFlags.SEAM
    edges_seam = set([ed.key for ed in me.edges if ed.flag & S])

    # Count how many visible faces use each edge
    ed_face_count = {}
    for f in me.faces:
        if not f.hide:  # ignore seams next to hidden faces
            for edkey in f.edge_keys:
                try:
                    ed_face_count[edkey] += 1
                except:
                    ed_face_count[edkey] = 1

    # Remove edges that have only a single face user (or that border a hidden
    # face) from edges_seam.
    edges_seam = edges_seam.difference(set([edkey for edkey, eduser in ed_face_count.iteritems() if eduser == 1]))

    uv_key_vec_dict = {}
    uv_connect_dict = {}
    uv_seamconnect_dict = {}

    face_uv_islands = BPyMesh.facesUvIslands(me)
    print len(face_uv_islands), "islands"

    for face_uv_island in face_uv_islands:
        face_uv_island[:] = [f for f in face_uv_island if f.sel]

    face_uv_islands = [face_uv_island for face_uv_island in face_uv_islands if face_uv_island]

    print "totislands =-= ", len(face_uv_islands)

    for face_uv_island in face_uv_islands:

        # For every island!
        for f in face_uv_island:

            vs = f.v
            uv = f.uv
            uv_keys = [tuple(v) for v in uv]
            fkeys = f.edge_keys

            if len(vs) == 3:
                f.uvSel = (0, 0, 0)
                pairs = (0, 1), (1, 2), (2, 0)
            else:
                f.uvSel = (0, 0, 0, 0)
                pairs = (0, 1), (1, 2), (2, 3), (3, 0)

            for i, i_other in pairs:

                # first add to global uvconnect dict.
                # Add all other UVs - for connectivity
                uvk1 = uv_keys[i]
                st = uv_connect_dict.setdefault(uvk1, set())
                st.update([uv_keys[j] for j in xrange(len(f)) if j != i])

                # This dict maps uv keys to a list of face uvs
                ls = uv_key_vec_dict.setdefault(uvk1, [])
                ls.append(uv[i])

                if fkeys[i] in edges_seam:  # This is a seam
                    uvk2 = uv_keys[i_other]

                    ls = uv_seamconnect_dict.setdefault(uvk1, [])
                    if uvk2 not in ls:
                        ls.append(uvk2)

                    ls = uv_seamconnect_dict.setdefault(uvk2, [])
                    if uvk1 not in ls:
                        ls.append(uvk1)

        # Find the UV strips for this island.

        def next_uv(uv_ls, uv_prev):
            if len(uv_ls) == 2:
                if uv_ls[0] == uv_prev:
                    return uv_ls[1]
                elif uv_ls[1] == uv_prev:
                    return uv_ls[0]

        uv_strips = []

        for uv_key, uv_brothers in uv_seamconnect_dict.iteritems():
            # print len(uv_brothers), uv_brothers
            if len(uv_brothers) == 1:
                # print "ONE BRO"
                uvlist = [uv_key, uv_brothers[0]]
                uv_brothers[:] = []  # INVALIDATE
                uv_strips.append(uvlist)
                while True:
                    uv_next_bro = uv_seamconnect_dict[uvlist[-1]]
                    if len(uv_next_bro) != 2:
                        uv_next_bro[:] = []  # INVALIDATE
                        break

                    uv_key = next_uv(uv_next_bro, uvlist[-2])

                    # if uv_key==None:
                    # 	return

                    uv_next_bro[:] = []

                    uvlist.append(uv_key)
                print len(uvlist)

        # Build (uv, key) pairs for every face UV in this island so the keys
        # can be mapped back to the actual UV vectors.

        face_uvs = [(uv, tuple(uv)) for f in face_uv_island for uv in f.uv]
        # face_uvs = [uv for f in face_uv_island for uv in f.uv]

        print len(uv_strips), "strips for this island"

        vec1 = Vector(0, 0)
        vec2 = Vector(0, 0)
        """
		vecs = []
		for uvlist in uv_strips:
			vec_average = Vector( 0,0 )
			for i in xrange(1,len(uvlist)):
				vec1[:] = uvlist[i-1]
				vec2[:] = uvlist[i]
				
				vec_average += vec1-vec2
			vecs.append( vec_average.normalize() )
		
		# add all the lines into an average for this island
		print len(uv_strips), 'uv_strips'
		
		if vecs:
			vec_average_island = vecs[0]
			if len(vecs)!=1:
				# Add the others 
				for  i in xrange(1, len(vecs)):
					vec_average = vecs[i]
					
					# Get a direction for this island
					if AngleBetweenVecs(vec_average_island, vec_average) > 180:
						vec_average = -vec_average
						vec_average_island += vec_average
			
			ang = AngleBetweenVecs(vec_average_island, Vector(0.0, 1.0))
			
			if Mathutils.CrossVecs(vec_average_island.copy().resize3D(), Vector(0.0, 1.0, 0)).z > 0:
				ang = -ang
			rotmat = RotationMatrix(ang, 2)
			
			
			for f in face_uv_island:
				for uv in f.uv:
					uv[:] = uv*rotmat
		"""
        if 1:

            # When there are more than 2 strips, use only the leftmost and rightmost.
            if len(uv_strips) > 2:
                striplocs = []
                for i, uvlist in enumerate(uv_strips):
                    average_x = 0.0
                    average_y = 0.0

                    for uv_key in uvlist:
                        average_x += uv_key[0]
                        average_y += uv_key[1]

                    striplocs.append((average_x / len(uvlist), average_y / len(uvlist), i))

                # We only really care about the x value; since it's the first tuple element, min()/max() work fine.
                uv_strips[:] = [uv_strips[min(striplocs)[2]], uv_strips[max(striplocs)[2]]]

            for uvlist in uv_strips:
                # Rotate Strips

                uvlist_uvvecs = []  # face

                # Collect uvs for this strip.
                for uv_key in uvlist:
                    uvlist_uvvecs.append(uv_key_vec_dict[uv_key])

                x = 0.0
                for uvls in uvlist_uvvecs:
                    x += uvls[0].x  # all uvs in this uvls are the same, dont apply them twice when averaging

                x = x / len(uvlist_uvvecs)
                for uvls in uvlist_uvvecs:
                    for uv in uvls:
                        uv.x = x

                        # Set the pinning
                for f in face_uv_island:
                    for i, uv in enumerate(f.uv):

                        # BAD!!!
                        for uvls in uvlist_uvvecs:
                            if uv in uvls:
                                # PIN? UVSel for now, this sucks too, just select them
                                uvsel = list(f.uvSel)
                                uvsel[i] = 1
                                f.uvSel = tuple(uvsel)
                                break
Example #36
0
def write(filename, objects,\
EXPORT_TRI=False,  EXPORT_EDGES=False,  EXPORT_NORMALS=False,  EXPORT_NORMALS_HQ=False,\
EXPORT_UV=True,  EXPORT_MTL=True,  EXPORT_COPY_IMAGES=False,\
EXPORT_APPLY_MODIFIERS=True, EXPORT_ROTX90=True, EXPORT_BLEN_OBS=True,\
EXPORT_GROUP_BY_OB=False,  EXPORT_GROUP_BY_MAT=False, EXPORT_MORPH_TARGET=False, EXPORT_ARMATURE=False):
	'''
	Basic write function. The context and options must already be set.
	This can be accessed externally, e.g.
	write( 'c:\\test\\foobar.obj', Blender.Object.GetSelected() ) # Using default options.
	'''
	
	def veckey3d(v):
		return round(v.x, 6), round(v.y, 6), round(v.z, 6)
		
	def veckey2d(v):
		return round(v.x, 6), round(v.y, 6)
	
	print 'OBJ Export path: "%s"' % filename
	temp_mesh_name = '~tmp-mesh'

	time1 = sys.time()
	scn = Scene.GetCurrent()

	file = open(filename, "w")
	
	# Write Header
	file.write('# Blender3D v%s OBJ File: %s\n' % (Blender.Get('version'), Blender.Get('filename').split('/')[-1].split('\\')[-1] ))
	file.write('# www.blender3d.org\n')

	# Tell the obj file what material file to use.
	if EXPORT_MTL:
		mtlfilename = '%s.mtl' % '.'.join(filename.split('.')[:-1])
		file.write('mtllib %s\n' % ( mtlfilename.split('\\')[-1].split('/')[-1] ))
	
	# Get the container mesh. - used for applying modifiers and non mesh objects.
	containerMesh = meshName = tempMesh = None
	for meshName in Blender.NMesh.GetNames():
		if meshName.startswith(temp_mesh_name):
			tempMesh = Mesh.Get(meshName)
			if not tempMesh.users:
				containerMesh = tempMesh
	if not containerMesh:
		containerMesh = Mesh.New(temp_mesh_name)
	
	if EXPORT_ROTX90:
		mat_xrot90= Blender.Mathutils.RotationMatrix(-90, 4, 'x')
		
	del meshName
	del tempMesh
	
	# Initialize totals, these are updated each object
	totverts = totuvco = totno = 1
	
	face_vert_index = 1
	
	globalNormals = {}
	
	# Get all meshs
	for ob_main in objects:
		for ob, ob_mat in BPyObject.getDerivedObjects(ob_main):
			# Will work for non meshes now! :)
			# getMeshFromObject(ob, container_mesh=None, apply_modifiers=True, vgroups=True, scn=None)
			if EXPORT_ARMATURE:
				write_armature(file,ob)
				write_poses(file,ob)
			me= BPyMesh.getMeshFromObject(ob, containerMesh, EXPORT_APPLY_MODIFIERS, False, scn)
			if not me:
				continue
			
			if EXPORT_UV:
				faceuv= me.faceUV
			else:
				faceuv = False
			
			# We have a valid mesh
			if EXPORT_TRI and me.faces:
				# Add a dummy object to it.
				has_quads = False
				for f in me.faces:
					if len(f) == 4:
						has_quads = True
						break
				
				if has_quads:
					oldmode = Mesh.Mode()
					Mesh.Mode(Mesh.SelectModes['FACE'])
					
					me.sel = True
					tempob = scn.objects.new(me)
					me.quadToTriangle(0) # more=0 shortest length
					oldmode = Mesh.Mode(oldmode)
					scn.objects.unlink(tempob)
					
					Mesh.Mode(oldmode)
			

			faces = [ f for f in me.faces ]
			
			if EXPORT_EDGES:
				edges = me.edges
			else:
				edges = []
			
			if not (len(faces)+len(edges)+len(me.verts)): # Make sure there is something to write
				continue # don't bother with this mesh.
			
			if EXPORT_ROTX90:
				me.transform(ob_mat*mat_xrot90)
			else:
				me.transform(ob_mat)
			
			# High Quality Normals
			if EXPORT_NORMALS and faces:
				if EXPORT_NORMALS_HQ:
					BPyMesh.meshCalcNormals(me)
				else:
					# transforming normals is incorrect
					# when the matrix is scaled,
					# better to recalculate them
					me.calcNormals()
			
			# # Crash Blender
			#materials = me.getMaterials(1) # 1 == will return None in the list.
			materials = me.materials
			
			materialNames = []
			materialItems = materials[:]
			if materials:
				for mat in materials:
					if mat: # !=None
						materialNames.append(mat.name)
					else:
						materialNames.append(None)
				# Can't use a list comprehension because some materials are None.
				# materialNames = map(lambda mat: mat.name, materials) # Bug Blender, dosent account for null materials, still broken.  
			
			# Possibly there are null materials; they will mess up the indices,
			# but at least it will export. Wait until Blender gets fixed.
			materialNames.extend((16-len(materialNames)) * [None])
			materialItems.extend((16-len(materialItems)) * [None])
			
			# Sort by Material, then images
			# so we don't over-context-switch in the obj file.
			if EXPORT_MORPH_TARGET:
				pass
			elif faceuv:
				try:	faces.sort(key = lambda a: (a.mat, a.image, a.smooth))
				except: faces.sort(lambda a,b: cmp((a.mat, a.image, a.smooth), (b.mat, b.image, b.smooth)))
			elif len(materials) > 1:
				try:	faces.sort(key = lambda a: (a.mat, a.smooth))
				except: faces.sort(lambda a,b: cmp((a.mat, a.smooth), (b.mat, b.smooth)))
			else:
				# no materials
				try:	faces.sort(key = lambda a: a.smooth)
				except: faces.sort(lambda a,b: cmp(a.smooth, b.smooth))
			
			# Set the default mat to no material and no image.
			contextMat = (0, 0) # Can never be this, so we will label a new material the first chance we get.
			contextSmooth = None # Will either be true or false,  set bad to force initialization switch.
			
			if EXPORT_BLEN_OBS or EXPORT_GROUP_BY_OB:
				name1 = ob.name
				name2 = ob.getData(1)
				if name1 == name2:
					obnamestring = fixName(name1)
				else:
					obnamestring = '%s_%s' % (fixName(name1), fixName(name2))
				
				if EXPORT_BLEN_OBS:
					file.write('o %s\n' % obnamestring) # Write Object name
				else: # if EXPORT_GROUP_BY_OB:
					file.write('g %s\n' % obnamestring)
			
			
			
			# Vert
			mesh = ob.getData()
			objmat = ob.getMatrix()
			for i in objmat:
				file.write('obm: %.6f %.6f %.6f %.6f\n' % tuple(i))
			vgrouplist = mesh.getVertGroupNames()
			file.write('vgroupcount: %i\n' % len(vgrouplist))
			for vgname in vgrouplist:
				file.write('vgroup: %s\n' % vgname)
			for v in mesh.verts:
				file.write('v %.6f %.6f %.6f\n' % tuple(v.co))
				influences = mesh.getVertexInfluences(v.index)
				file.write('influence: %i\n' % len(influences))
				for name,weight in influences:
					file.write('GroupName: %s\n' % name)
					file.write('Weight: %f\n' % weight)
			
			# UV
			if faceuv:
				uv_face_mapping = [[0,0,0,0] for f in faces] # a bit of a waste for tri's :/
				
				uv_dict = {} # could use a set() here
				for f_index, f in enumerate(faces):
					
					for uv_index, uv in enumerate(f.uv):
						uvkey = veckey2d(uv)
						try:
							uv_face_mapping[f_index][uv_index] = uv_dict[uvkey]
						except:
							uv_face_mapping[f_index][uv_index] = uv_dict[uvkey] = len(uv_dict)
							file.write('vt %.6f %.6f\n' % tuple(uv))
				
				uv_unique_count = len(uv_dict)
				del uv, uvkey, uv_dict, f_index, uv_index
				# Only need uv_unique_count and uv_face_mapping
			
			# NORMAL, Smooth/Non smoothed.
			if EXPORT_NORMALS:
				for f in faces:
					if f.smooth:
						for v in f:
							noKey = veckey3d(v.no)
							if not globalNormals.has_key( noKey ):
								globalNormals[noKey] = totno
								totno +=1
								file.write('vn %.6f %.6f %.6f\n' % noKey)
					else:
						# Hard, 1 normal from the face.
						noKey = veckey3d(f.no)
						if not globalNormals.has_key( noKey ):
							globalNormals[noKey] = totno
							totno +=1
							file.write('vn %.6f %.6f %.6f\n' % noKey)
			
			if not faceuv:
				f_image = None
			
			for f_index, f in enumerate(faces):
				f_v= f.v
				f_smooth= f.smooth
				f_mat = min(f.mat, len(materialNames)-1)
				if faceuv:
					f_image = f.image
					f_uv= f.uv
				
				# MAKE KEY
				if faceuv and f_image: # Object is always true.
					key = materialNames[f_mat],  f_image.name
				else:
					key = materialNames[f_mat],  None # No image, use None instead.
				
				# CHECK FOR CONTEXT SWITCH
				if key == contextMat:
					pass # Context already switched, don't do anything
				else:
					if key[0] == None and key[1] == None:
						# Write a null material, since we know the context has changed.
						if EXPORT_GROUP_BY_MAT:
							file.write('g %s_%s\n' % (fixName(ob.name), fixName(ob.getData(1))) ) # can be mat_image or (null)
						file.write('usemtl (null)\n') # mat, image
						
					else:
						mat_data= MTL_DICT.get(key)
						if not mat_data:
							# First add to global dict so we can export to mtl
							# Then write mtl
							
							# Make a new names from the mat and image name,
							# converting any spaces to underscores with fixName.
							
							# If none image dont bother adding it to the name
							if key[1] == None:
								mat_data = MTL_DICT[key] = ('%s'%fixName(key[0])), materialItems[f_mat], f_image
							else:
								mat_data = MTL_DICT[key] = ('%s_%s' % (fixName(key[0]), fixName(key[1]))), materialItems[f_mat], f_image
						
						if EXPORT_GROUP_BY_MAT:
							file.write('g %s_%s_%s\n' % (fixName(ob.name), fixName(ob.getData(1)), mat_data[0]) ) # can be mat_image or (null)
						file.write('usemtl %s\n' % mat_data[0]) # can be mat_image or (null)
					
				contextMat = key
				if f_smooth != contextSmooth:
					if f_smooth: # on now off
						file.write('s 1\n')
						contextSmooth = f_smooth
					else: # was off now on
						file.write('s off\n')
						contextSmooth = f_smooth
				
				file.write('f')
				if faceuv:
					if EXPORT_NORMALS:
						if f_smooth: # Smoothed, use vertex normals
							for vi, v in enumerate(f_v):
								file.write( ' %d/%d/%d' % (\
								  v.index+totverts,\
								  totuvco + uv_face_mapping[f_index][vi],\
								  globalNormals[ veckey3d(v.no) ])) # vert, uv, normal
							
						else: # No smoothing, face normals
							no = globalNormals[ veckey3d(f.no) ]
							for vi, v in enumerate(f_v):
								file.write( ' %d/%d/%d' % (\
								  v.index+totverts,\
								  totuvco + uv_face_mapping[f_index][vi],\
								  no)) # vert, uv, normal
					
					else: # No Normals
						for vi, v in enumerate(f_v):
							file.write( ' %d/%d' % (\
							  v.index+totverts,\
							  totuvco + uv_face_mapping[f_index][vi])) # vert, uv
					
					face_vert_index += len(f_v)
				
				else: # No UV's
					if EXPORT_NORMALS:
						if f_smooth: # Smoothed, use vertex normals
							for v in f_v:
								file.write( ' %d//%d' % (\
								  v.index+totverts,\
								  globalNormals[ veckey3d(v.no) ]))
						else: # No smoothing, face normals
							no = globalNormals[ veckey3d(f.no) ]
							for v in f_v:
								file.write( ' %d//%d' % (\
								  v.index+totverts,\
								  no))
					else: # No Normals
						for v in f_v:
							file.write( ' %d' % (\
							  v.index+totverts))
						
				file.write('\n')
			
			# Write edges.
			if EXPORT_EDGES:
				LOOSE= Mesh.EdgeFlags.LOOSE
				for ed in edges:
					if ed.flag & LOOSE:
						file.write('f %d %d\n' % (ed.v1.index+totverts, ed.v2.index+totverts))
				
			# Make the indices global rather than per mesh
			totverts += len(me.verts)
			if faceuv:
				totuvco += uv_unique_count
			me.verts= None
	file.close()
	
	
	# Now we have all our materials, save them
	if EXPORT_MTL:
		write_mtl(mtlfilename)
	if EXPORT_COPY_IMAGES:
		dest_dir = filename
		# Remove chars until we are just the path.
		while dest_dir and dest_dir[-1] not in '\\/':
			dest_dir = dest_dir[:-1]
		if dest_dir:
			copy_images(dest_dir)
		else:
			print '\tError: "%s" could not be used as a base for an image path.' % filename
	
	print "OBJ Export time: %.2f" % (sys.time() - time1)
Example #37
0
    def writeMeshData(self, parent, mesh, obj):
        
        aMesh = BPyMesh.getMeshFromObject(obj, self.getContainerMesh(), True, scn=self.scene)
        
        if len(aMesh.faces) == 0:
            return
        
        print("Writing mesh %s" % mesh.name)
        
        materials = aMesh.materials
        
        has_quads = False
        for f in aMesh.faces:
            if len(f) == 4:
                has_quads = True
                break
        
        if has_quads:
            oldmode = Mesh.Mode()
            Mesh.Mode(Mesh.SelectModes['FACE'])
            
            aMesh.sel = True
            tempob = self.scene.objects.new(aMesh)
            aMesh.quadToTriangle(0) # more=0 shortest length
            oldmode = Mesh.Mode(oldmode)
            self.scene.objects.unlink(tempob)
            
            Mesh.Mode(oldmode)
        
        data = self.doc.createDataElement(mesh.name+"_data", None, None, None, None)    
        parent.appendChild(data)
        
        # Mesh indices
        matCount = len(materials)
        if matCount == 0:
            matCount = 1
        indices = [[] for m in range(matCount)] #@UnusedVariable
        vertices = []
        vertex_dict = {}
       
        print("Faces: %i" % len(aMesh.faces))
        
        i = 0
        for face in aMesh.faces:
            mv = None
            for i, v in enumerate(face):
                if face.smooth:
                    if aMesh.faceUV:
                        mv = vertex(v.index, None, face.uv[i])
                    else:
                        mv = vertex(v.index, None, None)
                else:
                    if aMesh.faceUV:
                        mv = vertex(v.index, face.no, face.uv[i])
                    else:
                        mv = vertex(v.index, face.no)
                index, added = appendUnique(vertex_dict, mv)
                indices[face.mat].append(index)
                if added:
                    vertices.append(mv)


        # Single or no material: write all in one data block
        if not matCount > 1:
            valueElement = self.doc.createIntElement(None, "index")
            valueElement.setValue(' '.join(map(str, indices[0])))
            data.appendChild(valueElement)
       
        print("Vertices: %i" % len(vertex_dict))
        
        # Vertex positions
        value_list = []
        for v in vertices:
            value_list.append("%.6f %.6f %.6f" % tuple(aMesh.verts[v.index].co))
                
        valueElement = self.doc.createFloat3Element(None, "position")
        valueElement.setValue(' '.join(value_list))
        data.appendChild(valueElement)
        
        # Vertex normals
        value_list = []
        for v in vertices:
            if v.normal == None:
                value_list.append("%.6f %.6f %.6f" % tuple(aMesh.verts[v.index].no))
            else:
                value_list.append("%.6f %.6f %.6f" % tuple(v.normal))
     
        valueElement = self.doc.createFloat3Element(None, "normal")
        valueElement.setValue(' '.join(value_list))
        data.appendChild(valueElement)

        # Vertex texCoord
        if aMesh.faceUV:
            value_list = []
            for v in vertices:
                value_list.append("%.6f %.6f" % tuple(v.texcoord))
    
            valueElement = self.doc.createFloat2Element(None, "texcoord")
            valueElement.setValue(' '.join(value_list))
            data.appendChild(valueElement);
            
        if len(materials) > 1:
            for i, material in enumerate(materials):
                if len(indices[i]) == 0:
                    continue
                
                data = self.doc.createDataElement(mesh.name+"_data_" + material.name, None, None, None, None)    
                parent.appendChild(data)

                refdata = self.doc.createDataElement(src_="#"+mesh.name+"_data")
                data.appendChild(refdata)

                valueElement = self.doc.createIntElement(None, "index")
                valueElement.setValue(' '.join(map(str, indices[i])))
                data.appendChild(valueElement)
       
        aMesh.verts = None
Example #38
0
def file_callback(filename):

    if not filename.lower().endswith(".ctm"):
        filename += ".ctm"

        # Get object mesh from the selected object
    scn = bpy.data.scenes.active
    ob = scn.objects.active
    if not ob:
        Blender.Draw.PupMenu("Error%t|Select 1 active object")
        return
    mesh = BPyMesh.getMeshFromObject(ob, None, False, False, scn)
    if not mesh:
        Blender.Draw.PupMenu("Error%t|Could not get mesh data from active object")
        return

        # Check which mesh properties are present...
    hasVertexUV = mesh.vertexUV or mesh.faceUV
    hasVertexColors = mesh.vertexColors

    # Show a GUI for the export settings
    pupBlock = []
    EXPORT_APPLY_MODIFIERS = Draw.Create(1)
    pupBlock.append(("Apply Modifiers", EXPORT_APPLY_MODIFIERS, "Use transformed mesh data."))
    EXPORT_NORMALS = Draw.Create(1)
    pupBlock.append(("Normals", EXPORT_NORMALS, "Export vertex normal data."))
    if hasVertexUV:
        EXPORT_UV = Draw.Create(1)
        pupBlock.append(("UVs", EXPORT_UV, "Export texface UV coords."))
    if hasVertexColors:
        EXPORT_COLORS = Draw.Create(1)
        pupBlock.append(("Colors", EXPORT_COLORS, "Export vertex Colors."))
    EXPORT_MG2 = Draw.Create(0)
    pupBlock.append(("Fixed Point", EXPORT_MG2, "Use limited precision algorithm (MG2 method = better compression)."))
    if not Draw.PupBlock("Export...", pupBlock):
        return

        # Adjust export settings according to GUI selections
    EXPORT_APPLY_MODIFIERS = EXPORT_APPLY_MODIFIERS.val
    EXPORT_NORMALS = EXPORT_NORMALS.val
    if hasVertexUV:
        EXPORT_UV = EXPORT_UV.val
    else:
        EXPORT_UV = False
    if hasVertexColors:
        EXPORT_COLORS = EXPORT_COLORS.val
    else:
        EXPORT_COLORS = False
    EXPORT_MG2 = EXPORT_MG2.val

    # If the user wants to export MG2, then show another GUI...
    if EXPORT_MG2:
        pupBlock = []
        EXPORT_VPREC = Draw.Create(0.01)
        pupBlock.append(("Vertex", EXPORT_VPREC, 0.0001, 1.0, "Relative vertex precision (fixed point)."))
        if EXPORT_NORMALS:
            EXPORT_NPREC = Draw.Create(1.0 / 256.0)
            pupBlock.append(("Normal", EXPORT_NPREC, 0.0001, 1.0, "Normal precision (fixed point)."))
        if EXPORT_UV:
            EXPORT_UVPREC = Draw.Create(1.0 / 1024.0)
            pupBlock.append(("UV", EXPORT_UVPREC, 0.0001, 1.0, "UV precision (fixed point)."))
        if EXPORT_COLORS:
            EXPORT_CPREC = Draw.Create(1.0 / 256.0)
            pupBlock.append(("Color", EXPORT_CPREC, 0.0001, 1.0, "Color precision (fixed point)."))
        if not Draw.PupBlock("Fixed point precision...", pupBlock):
            return

            # Adjust export settings according to GUI selections
    if EXPORT_MG2:
        EXPORT_VPREC = EXPORT_VPREC.val
    else:
        EXPORT_VPREC = 0.1
    if EXPORT_MG2 and EXPORT_NORMALS:
        EXPORT_NPREC = EXPORT_NPREC.val
    else:
        EXPORT_NPREC = 0.1
    if EXPORT_MG2 and EXPORT_UV:
        EXPORT_UVPREC = EXPORT_UVPREC.val
    else:
        EXPORT_UVPREC = 0.1
    if EXPORT_MG2 and EXPORT_COLORS:
        EXPORT_CPREC = EXPORT_CPREC.val
    else:
        EXPORT_CPREC = 0.1

    is_editmode = Blender.Window.EditMode()
    if is_editmode:
        Blender.Window.EditMode(0, "", 0)
    Window.WaitCursor(1)
    try:
        # Get the mesh, again, if we wanted modifiers (from GUI selection)
        if EXPORT_APPLY_MODIFIERS:
            mesh = BPyMesh.getMeshFromObject(ob, None, EXPORT_APPLY_MODIFIERS, False, scn)
            if not mesh:
                Blender.Draw.PupMenu("Error%t|Could not get mesh data from active object")
                return
            mesh.transform(ob.matrixWorld, True)

            # Count triangles (quads count as two triangles)
        triangleCount = 0
        for f in mesh.faces:
            if len(f.v) == 4:
                triangleCount += 2
            else:
                triangleCount += 1

                # Extract indices from the Blender mesh (quads are split into two triangles)
        pindices = cast((c_int * 3 * triangleCount)(), POINTER(c_int))
        i = 0
        for f in mesh.faces:
            pindices[i] = c_int(f.v[0].index)
            pindices[i + 1] = c_int(f.v[1].index)
            pindices[i + 2] = c_int(f.v[2].index)
            i += 3
            if len(f.v) == 4:
                pindices[i] = c_int(f.v[0].index)
                pindices[i + 1] = c_int(f.v[2].index)
                pindices[i + 2] = c_int(f.v[3].index)
                i += 3

                # Extract vertex array from the Blender mesh
        vertexCount = len(mesh.verts)
        pvertices = cast((c_float * 3 * vertexCount)(), POINTER(c_float))
        i = 0
        for v in mesh.verts:
            pvertices[i] = c_float(v.co.x)
            pvertices[i + 1] = c_float(v.co.y)
            pvertices[i + 2] = c_float(v.co.z)
            i += 3

            # Extract normals
        if EXPORT_NORMALS:
            pnormals = cast((c_float * 3 * vertexCount)(), POINTER(c_float))
            i = 0
            for v in mesh.verts:
                pnormals[i] = c_float(v.no.x)
                pnormals[i + 1] = c_float(v.no.y)
                pnormals[i + 2] = c_float(v.no.z)
                i += 3
        else:
            pnormals = POINTER(c_float)()

            # Extract UVs
        if EXPORT_UV:
            ptexCoords = cast((c_float * 2 * vertexCount)(), POINTER(c_float))
            if mesh.faceUV:
                for f in mesh.faces:
                    for j, v in enumerate(f.v):
                        k = v.index
                        if k < vertexCount:
                            uv = f.uv[j]
                            ptexCoords[k * 2] = uv[0]
                            ptexCoords[k * 2 + 1] = uv[1]
            else:
                i = 0
                for v in mesh.verts:
                    ptexCoords[i] = c_float(v.uvco[0])
                    ptexCoords[i + 1] = c_float(v.uvco[1])
                    i += 2
        else:
            ptexCoords = POINTER(c_float)()

            # Extract colors
        if EXPORT_COLORS:
            pcolors = cast((c_float * 4 * vertexCount)(), POINTER(c_float))
            for f in mesh.faces:
                for j, v in enumerate(f.v):
                    k = v.index
                    if k < vertexCount:
                        col = f.col[j]
                        pcolors[k * 4] = col.r / 255.0
                        pcolors[k * 4 + 1] = col.g / 255.0
                        pcolors[k * 4 + 2] = col.b / 255.0
                        pcolors[k * 4 + 3] = 1.0
        else:
            pcolors = POINTER(c_float)()

            # Load the OpenCTM shared library
        if os.name == "nt":
            libHDL = WinDLL("openctm.dll")
        else:
            libName = find_library("openctm")
            if not libName:
                Blender.Draw.PupMenu("Could not find the OpenCTM shared library")
                return
            libHDL = CDLL(libName)
        if not libHDL:
            Blender.Draw.PupMenu("Could not open the OpenCTM shared library")
            return

            # Get all the functions from the shared library that we need
        ctmNewContext = libHDL.ctmNewContext
        ctmNewContext.argtypes = [c_int]
        ctmNewContext.restype = c_void_p
        ctmFreeContext = libHDL.ctmFreeContext
        ctmFreeContext.argtypes = [c_void_p]
        ctmGetError = libHDL.ctmGetError
        ctmGetError.argtypes = [c_void_p]
        ctmGetError.restype = c_int
        ctmErrorString = libHDL.ctmErrorString
        ctmErrorString.argtypes = [c_int]
        ctmErrorString.restype = c_char_p
        ctmFileComment = libHDL.ctmFileComment
        ctmFileComment.argtypes = [c_void_p, c_char_p]
        ctmDefineMesh = libHDL.ctmDefineMesh
        ctmDefineMesh.argtypes = [c_void_p, POINTER(c_float), c_int, POINTER(c_int), c_int, POINTER(c_float)]
        ctmSave = libHDL.ctmSave
        ctmSave.argtypes = [c_void_p, c_char_p]
        ctmAddUVMap = libHDL.ctmAddUVMap
        ctmAddUVMap.argtypes = [c_void_p, POINTER(c_float), c_char_p, c_char_p]
        ctmAddUVMap.restype = c_int
        ctmAddAttribMap = libHDL.ctmAddAttribMap
        ctmAddAttribMap.argtypes = [c_void_p, POINTER(c_float), c_char_p]
        ctmAddAttribMap.restype = c_int
        ctmCompressionMethod = libHDL.ctmCompressionMethod
        ctmCompressionMethod.argtypes = [c_void_p, c_int]
        ctmVertexPrecisionRel = libHDL.ctmVertexPrecisionRel
        ctmVertexPrecisionRel.argtypes = [c_void_p, c_float]
        ctmNormalPrecision = libHDL.ctmNormalPrecision
        ctmNormalPrecision.argtypes = [c_void_p, c_float]
        ctmUVCoordPrecision = libHDL.ctmUVCoordPrecision
        ctmUVCoordPrecision.argtypes = [c_void_p, c_int, c_float]
        ctmAttribPrecision = libHDL.ctmAttribPrecision
        ctmAttribPrecision.argtypes = [c_void_p, c_int, c_float]

        # Create an OpenCTM context
        ctm = ctmNewContext(0x0102)  # CTM_EXPORT
        try:
            # Set the file comment
            ctmFileComment(
                ctm, c_char_p("%s - created by Blender %s (www.blender.org)" % (ob.getName(), Blender.Get("version")))
            )

            # Define the mesh
            ctmDefineMesh(ctm, pvertices, c_int(vertexCount), pindices, c_int(triangleCount), pnormals)

            # Add UV coordinates?
            if EXPORT_UV:
                tm = ctmAddUVMap(ctm, ptexCoords, c_char_p(), c_char_p())
                if EXPORT_MG2:
                    ctmUVCoordPrecision(ctm, tm, EXPORT_UVPREC)

                    # Add colors?
            if EXPORT_COLORS:
                cm = ctmAddAttribMap(ctm, pcolors, c_char_p("Color"))
                if EXPORT_MG2:
                    ctmAttribPrecision(ctm, cm, EXPORT_CPREC)

                    # Set compression method
            if EXPORT_MG2:
                ctmCompressionMethod(ctm, 0x0203)  # CTM_METHOD_MG2
                ctmVertexPrecisionRel(ctm, EXPORT_VPREC)
                if EXPORT_NORMALS:
                    ctmNormalPrecision(ctm, EXPORT_NPREC)

            else:
                ctmCompressionMethod(ctm, 0x0202)  # CTM_METHOD_MG1

                # Save the file
            ctmSave(ctm, c_char_p(filename))

            # Check for errors
            e = ctmGetError(ctm)
            if e != 0:
                s = ctmErrorString(e)
                Blender.Draw.PupMenu("Error%t|Could not save the file: " + s)

        finally:
            # Free the OpenCTM context
            ctmFreeContext(ctm)

    finally:
        Window.WaitCursor(0)
        if is_editmode:
            Blender.Window.EditMode(1, "", 0)
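
# Hedged sketch, not part of the original example: in a typical Blender 2.4x
# export script this callback is hooked up to the file selector roughly like
# below. The default file name built with Blender.sys.makename() is an
# assumption, not taken from the original.
def _register_ctm_exporter():
    Blender.Window.FileSelector(file_callback, "Export OpenCTM",
                                Blender.sys.makename(ext='.ctm'))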
Example #39
0
def processBuilding(building, groups, ids,path):
	#
	global zzMesh
	if zzMesh==None:
		zzMesh=bpy.data.meshes.new('zz')
	# 
	o=open(os.sep.join([path,'building_%i.bld'%building]),'w')
	# Compute building position!
	avgX=0.0
	avgY=0.0
	minX=9999.0
	maxX=-9999.0
	minY=9999.0
	maxY=-9999.0
	count=0
	for id in groups[building]:
		for (x,y,z) in ids[id][Attributes.GEOM]:
			avgX+=x
			avgY+=y
			count+=1
			if x>maxX: maxX=x
			if x<minX: minX=x
			if y>maxY: maxY=y
			if y<minY: minY=y
	icountf=1.0/float(count)
	avgX*=icountf
	avgY*=icountf
	print >>o, '{'
	print >>o, '	"origin":(%f,%f),'%(avgX,avgY)
	print >>o, '	"bounds":((%f,%f),(%f,%f)),'%(minX,minY,maxX,maxY)
	print >>o, '	"parts":{'
	# Convert geometry to meters
	angleToDistance=111226.3
	yFactor=math.cos(avgY*0.017453292519943295769236907684886)
	baseindex=0
	for id in groups[building]:
		print >>o, '		%i:{'%id
		# First, base geometry
		l=len(ids[id][Attributes.GEOM])
		for i in xrange(0,l):
			(x,y,z)=ids[id][Attributes.GEOM][i]
			ids[id][Attributes.GEOM][i]=[(x-avgX)*angleToDistance*yFactor, (y-avgY)*angleToDistance, z]
		baseindex+=1
		basegeom=ids[id][Attributes.GEOM]
		faces=BPyMesh.ngon(basegeom[::-1], range(0,len(basegeom)))
		zzMesh.verts.delete(zzMesh.verts)
		zzMesh.verts.extend(basegeom[::-1])
		zzMesh.faces.extend(faces)
		zzMesh.calcNormals()
		print >>o, '			"base":{'
		outputMesh(o,zzMesh,4)
		print >>o, '			},'
		# Then, roofs
		topgeom=[]
		if ids[id].has_key(Attributes.ROOF):
			print >>o, '			"roofs":['
			l=len(ids[id][Attributes.ROOF])
			for i in xrange(0,l):
				ll=len(ids[id][Attributes.ROOF][i])
				for j in xrange(0,ll):
					(x,y,z)=ids[id][Attributes.ROOF][i][j]
					ids[id][Attributes.ROOF][i][j]=[(x-avgX)*angleToDistance*yFactor, (y-avgY)*angleToDistance, z]
				rgeom=ids[id][Attributes.ROOF][i]
				topgeom.extend(rgeom)
				faces=BPyMesh.ngon(rgeom, range(0,len(rgeom)))
				zzMesh.verts.delete(zzMesh.verts)
				zzMesh.verts.extend(rgeom)
				zzMesh.faces.extend(faces)
				zzMesh.calcNormals()
				#
				print >>o, '				{'
				outputMesh(o,zzMesh,5)
				print >>o, '				},'
			print >>o, '			],'
		# Last, pediments
		if ids[id].has_key(Attributes.PEDIMENT):
			print >>o, '			"pediments":['
			l=len(ids[id][Attributes.PEDIMENT])
			for i in xrange(0,l):
				ll=len(ids[id][Attributes.PEDIMENT][i])
				for j in xrange(0,ll):
					(x,y,z)=ids[id][Attributes.PEDIMENT][i][j]
					ids[id][Attributes.PEDIMENT][i][j]=[(x-avgX)*angleToDistance*yFactor, (y-avgY)*angleToDistance, z]
				pgeom=ids[id][Attributes.PEDIMENT][i]
				topgeom.extend(pgeom)
				faces=BPyMesh.ngon(pgeom, range(0,len(pgeom)))
				zzMesh.verts.delete(zzMesh.verts)
				zzMesh.verts.extend(pgeom)
				zzMesh.faces.extend(faces)
				zzMesh.calcNormals()
				#
				print >>o, '				{'
				outputMesh(o,zzMesh,5)
				print >>o, '				},'
			print >>o, '			],'
		# And while we're here, construct some walls...
		upper=[]
		for (x,y,z) in basegeom:
			minz=99999.0
			for (rx,ry,rz) in topgeom:
				(dx,dy)=(x-rx,y-ry)
				if dx<0: dx=-dx
				if dy<0: dy=-dy
				if dx<.001 and dy<.001:
					if rz<minz: minz=rz
			if minz<99999.0:
				upper.append([x,y,minz])
		L=len(upper)
		if L==len(basegeom):
			zzMesh.verts.delete(zzMesh.verts)
			wallgeom=upper+basegeom
			zzMesh.verts.extend(wallgeom)
			for f in xrange(0,L-1):
				zzMesh.faces.extend([[f,f+L,f+L+1], [f,f+L+1,f+1]])
			zzMesh.calcNormals()
			print >>o, '			"wall":{'
			outputMesh(o,zzMesh,4)
			print >>o, '			}'
		else:
			print "Cannot generate wall for id=%i (building %i) %i!=%i"%(id,building,L,len(basegeom))
			pass
		#
		print >>o, '		},'
		del ids[id]
	print >>o, '	},'
	print >>o, '}'
	o.close()
Example #40
0
def main():
	scn = Scene.GetCurrent()
	act_ob= scn.objects.active
	if not act_ob or act_ob.type != 'Mesh':
		BPyMessages.Error_NoMeshActive()
		return
	
	act_me= act_ob.getData(mesh=1)
	
	if act_me.multires:
		BPyMessages.Error_NoMeshMultiresEdit()
		return
	
	act_group= act_me.activeGroup
	if not act_group: act_group= ''
	
	
	# Defaults
	PREF_REDUX= Draw.Create(0.5)
	PREF_BOUNDRY_WEIGHT= Draw.Create(5.0)
	PREF_REM_DOUBLES= Draw.Create(1)
	PREF_FACE_AREA_WEIGHT= Draw.Create(1.0)
	PREF_FACE_TRIANGULATE= Draw.Create(1)
	
	VGROUP_INF_ENABLE= Draw.Create(0)
	VGROUP_INF_REDUX= Draw.Create(act_group)
	VGROUP_INF_WEIGHT= Draw.Create(10.0)
	
	PREF_DO_UV= Draw.Create(1)
	PREF_DO_VCOL= Draw.Create(1)
	PREF_DO_WEIGHTS= Draw.Create(1)
	PREF_OTHER_SEL_OBS= Draw.Create(0)
	
	pup_block = [\
	('Poly Reduce:', PREF_REDUX, 0.05, 0.95, 'Scale the meshes poly count by this value.'),\
	('Boundry Weight:', PREF_BOUNDRY_WEIGHT, 0.0, 20.0, 'Weight boundry verts by this scale, 0.0 for no boundry weighting.'),\
	('Area Weight:', PREF_FACE_AREA_WEIGHT, 0.0, 20.0, 'Collapse edges effecting lower area faces first.'),\
	('Triangulate', PREF_FACE_TRIANGULATE, 'Convert quads to tris before reduction, for more choices of edges to collapse.'),\
	'',\
	('VGroup Weighting', VGROUP_INF_ENABLE, 'Use a vertex group to influence the reduction, higher weights for higher quality '),\
	('vgroup name: ', VGROUP_INF_REDUX, 0, 32, 'The name of the vertex group to use for the weight map'),\
	('vgroup mult: ', VGROUP_INF_WEIGHT, 0.0, 100.0, 'How much to make the weight effect the reduction'),\
	('Other Selected Obs', PREF_OTHER_SEL_OBS, 'reduce other selected objects.'),\
	'',\
	'',\
	'',\
	('UV Coords', PREF_DO_UV, 'Interpolate UV Coords.'),\
	('Vert Colors', PREF_DO_VCOL, 'Interpolate Vertex Colors'),\
	('Vert Weights', PREF_DO_WEIGHTS, 'Interpolate Vertex Weights'),\
	('Remove Doubles', PREF_REM_DOUBLES, 'Remove doubles before reducing to avoid boundry tearing.'),\
	]
	
	if not Draw.PupBlock("Poly Reducer", pup_block):
		return
	
	PREF_REDUX= PREF_REDUX.val
	PREF_BOUNDRY_WEIGHT= PREF_BOUNDRY_WEIGHT.val
	PREF_REM_DOUBLES= PREF_REM_DOUBLES.val
	PREF_FACE_AREA_WEIGHT= PREF_FACE_AREA_WEIGHT.val
	PREF_FACE_TRIANGULATE= PREF_FACE_TRIANGULATE.val
	
	VGROUP_INF_ENABLE= VGROUP_INF_ENABLE.val
	VGROUP_INF_WEIGHT= VGROUP_INF_WEIGHT.val
	
	if VGROUP_INF_ENABLE and VGROUP_INF_WEIGHT:
		VGROUP_INF_REDUX= VGROUP_INF_REDUX.val
	else:
		VGROUP_INF_WEIGHT= 0.0
		VGROUP_INF_REDUX= None
		
		
	PREF_DO_UV= PREF_DO_UV.val
	PREF_DO_VCOL= PREF_DO_VCOL.val
	PREF_DO_WEIGHTS= PREF_DO_WEIGHTS.val
	PREF_OTHER_SEL_OBS= PREF_OTHER_SEL_OBS.val
	
	
	t= sys.time()
	
	is_editmode = Window.EditMode() # Exit Editmode.
	if is_editmode: Window.EditMode(0)
	Window.WaitCursor(1)	
	print 'reducing:', act_ob.name, act_ob.getData(1)
	BPyMesh.redux(act_ob, PREF_REDUX, PREF_BOUNDRY_WEIGHT, PREF_REM_DOUBLES, PREF_FACE_AREA_WEIGHT, PREF_FACE_TRIANGULATE, PREF_DO_UV, PREF_DO_VCOL, PREF_DO_WEIGHTS, VGROUP_INF_REDUX, VGROUP_INF_WEIGHT)
	
	if PREF_OTHER_SEL_OBS:
		for ob in scn.objects.context:
			if ob.type == 'Mesh' and ob != act_ob:
				print 'reducing:', ob.name, ob.getData(1)
				BPyMesh.redux(ob, PREF_REDUX, PREF_BOUNDRY_WEIGHT, PREF_REM_DOUBLES, PREF_FACE_AREA_WEIGHT, PREF_FACE_TRIANGULATE, PREF_DO_UV, PREF_DO_VCOL, PREF_DO_WEIGHTS, VGROUP_INF_REDUX, VGROUP_INF_WEIGHT)
				Window.RedrawAll()
	
	if is_editmode: Window.EditMode(1)
	Window.WaitCursor(0)
	Window.RedrawAll()
	
	print 'Reduction done in %.6f sec.' % (sys.time()-t)
Example #41
0
def write(filename):
    start = Blender.sys.time()
    if not filename.lower().endswith('.js'):
        filename += '.js'

    scn = Blender.Scene.GetCurrent()
    ob = scn.objects.active
    if not ob:
        Blender.Draw.PupMenu('Error%t|Select 1 active object')
        return

    file = open(filename, 'wb')

    mesh = BPyMesh.getMeshFromObject(ob, None, True, False, scn)
    if not mesh:
        Blender.Draw.PupMenu(
            'Error%t|Could not get mesh data from active object')
        return

    mesh.transform(ob.matrixWorld)

    #classname = clean(ob.name)
    classname = filename.split('/')[-1].replace('.js', '')

    file = open(filename, "wb")

    file.write('var %s = function () {\n\n' % classname)
    file.write('\tvar scope = this;\n\n')
    file.write('\tTHREE.Geometry.call(this);\n\n')

    for v in mesh.verts:
        file.write('\tv( %.6f, %.6f, %.6f );\n' %
                   (v.co.x, v.co.z, -v.co.y))  # co

    file.write('\n')

    for f in mesh.faces:
        if len(f.verts) == 3:
            file.write('\tf3( %d, %d, %d, %.6f, %.6f, %.6f );\n' %
                       (f.verts[0].index, f.verts[1].index, f.verts[2].index,
                        f.verts[0].no.x, f.verts[0].no.z, -f.verts[0].no.y))
        else:
            file.write('\tf4( %d, %d, %d, %d, %.6f, %.6f, %.6f );\n' %
                       (f.verts[0].index, f.verts[1].index, f.verts[2].index,
                        f.verts[3].index, f.verts[0].no.x, f.verts[0].no.z,
                        -f.verts[0].no.y))
    face_index_pairs = [(face, index) for index, face in enumerate(mesh.faces)]

    file.write('\n')
    '''
    for f in me.faces:
		if me.faceUV:
		    if len(f.verts) == 3:
		        file.write('\tuv( %.6f, %.6f, %.6f, %.6f, %.6f, %.6f );\n' % (f.uv[0][0], 1.0-f.uv[0][1], f.uv[1][0], 1.0-f.uv[1][1], f.uv[2][0], 1.0-f.uv[2][1])
	'''
    for f in mesh.faces:
        if mesh.faceUV:
            if len(f.verts) == 3:
                file.write('\tuv( %.6f, %.6f, %.6f, %.6f, %.6f, %.6f );\n' %
                           (f.uv[0].x, 1.0 - f.uv[0].y, f.uv[1].x,
                            1.0 - f.uv[1].y, f.uv[2].x, 1.0 - f.uv[2].y))
            else:
                file.write(
                    '\tuv( %.6f, %.6f, %.6f, %.6f, %.6f, %.6f, %.6f, %.6f);\n'
                    % (f.uv[0].x, 1.0 - f.uv[0].y, f.uv[1].x, 1.0 - f.uv[1].y,
                       f.uv[2].x, 1.0 - f.uv[2].y, f.uv[3].x, 1.0 - f.uv[3].y))

    file.write('\n')
    file.write('\tfunction v( x, y, z ) {\n\n')
    file.write(
        '\t\tscope.vertices.push( new THREE.Vertex( new THREE.Vector3( x, y, z ) ) );\n\n'
    )
    file.write('\t}\n\n')
    file.write('\tfunction f3( a, b, c, nx, ny, nz ) {\n\n')
    file.write(
        '\t\tscope.faces.push( new THREE.Face3( a, b, c, nx && ny && nz ? new THREE.Vector3( nx, ny, nz ) : null ) );\n\n'
    )
    file.write('\t}\n\n')
    file.write('\tfunction f4( a, b, c, d, nx, ny, nz ) {\n\n')
    file.write(
        '\t\tscope.faces.push( new THREE.Face4( a, b, c, d, nx && ny && nz ? new THREE.Vector3( nx, ny, nz ) : null ) );\n\n'
    )
    file.write('\t}\n\n')
    file.write('\tfunction uv( u1, v1, u2, v2, u3, v3, u4, v4 ) {\n\n')
    file.write('\t\tvar uv = [];\n')
    file.write('\t\tuv.push( new THREE.UV( u1, v1 ) );\n')
    file.write('\t\tuv.push( new THREE.UV( u2, v2 ) );\n')
    file.write('\t\tuv.push( new THREE.UV( u3, v3 ) );\n')
    file.write('\t\tif ( u4 && v4 ) uv.push( new THREE.UV( u4, v4 ) );\n')
    file.write('\t\tscope.uvs.push( uv );\n')
    file.write('\t}\n\n')
    file.write('}\n\n')
    file.write('%s.prototype = new THREE.Geometry();\n' % classname)
    file.write('%s.prototype.constructor = %s;' % (classname, classname))
    file.close()
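
# Hedged usage sketch, not part of the original example: write() above
# exports the active object; a direct call with a purely hypothetical output
# path (the '.js' extension is appended if missing).
def _export_threejs_demo():
    write('/tmp/MyModel')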
def redux(ob,
          REDUX=0.5,
          BOUNDRY_WEIGHT=2.0,
          REMOVE_DOUBLES=False,
          FACE_AREA_WEIGHT=1.0,
          FACE_TRIANGULATE=True,
          DO_UV=True,
          DO_VCOL=True,
          DO_WEIGHTS=True,
          VGROUP_INF_REDUX=None,
          VGROUP_INF_WEIGHT=0.5):
    """
	BOUNDRY_WEIGHT - 0 is no boundry weighting. 2.0 will make them twice as unlikely to collapse.
	FACE_AREA_WEIGHT - 0 is no weight. 1 is normal, 2.0 is higher.
	"""

    if REDUX < 0 or REDUX > 1.0:
        raise 'Error, factor must be between 0 and 1.0'
    elif not set:
        raise 'Error, this function requires Python 2.4 or a full install of Python 2.3'

    BOUNDRY_WEIGHT = 1 + BOUNDRY_WEIGHT
    """ # DEBUG!
	if Blender.Get('rt') == 1000:
		DEBUG=True
	else:
		DEBUG= False
	"""

    me = ob.getData(mesh=1)
    me.hide = False  # unhide all data,.
    if len(me.faces) < 5:
        return

    if FACE_TRIANGULATE or REMOVE_DOUBLES:
        me.sel = True

    if FACE_TRIANGULATE:
        me.quadToTriangle()

    if REMOVE_DOUBLES:
        #me.remDoubles(0.0001)
        print "ERROR: No RemDoubles in background mode!!"

    vgroups = me.getVertGroupNames()

    if not me.getVertGroupNames():
        DO_WEIGHTS = False

    if (VGROUP_INF_REDUX!= None and VGROUP_INF_REDUX not in vgroups) or\
    VGROUP_INF_WEIGHT==0.0:
        VGROUP_INF_REDUX = None

    try:
        VGROUP_INF_REDUX_INDEX = vgroups.index(VGROUP_INF_REDUX)
    except:
        VGROUP_INF_REDUX_INDEX = -1

    # del vgroups
    len_vgroups = len(vgroups)

    OLD_MESH_MODE = Blender.Mesh.Mode()
    Blender.Mesh.Mode(Blender.Mesh.SelectModes.VERTEX)

    if DO_UV and not me.faceUV:
        DO_UV = False

    if DO_VCOL and not me.vertexColors:
        DO_VCOL = False

    current_face_count = len(me.faces)
    target_face_count = int(current_face_count * REDUX)
    # % of the collapseable faces to collapse per pass.
    #collapse_per_pass= 0.333 # between 0.1 - lots of small nibbles, slow but high q. and 0.9 - big passes and faster.
    collapse_per_pass = 0.333  # between 0.1 - lots of small nibbles, slow but high q. and 0.9 - big passes and faster.
    """# DEBUG!
	if DEBUG:
		COUNT= [0]
		def rd():
			if COUNT[0]< 330:
				COUNT[0]+=1
				return
			me.update()
			Blender.Window.RedrawAll()
			print 'Press key for next, count "%s"' % COUNT[0]
			try: input()
			except KeyboardInterrupt:
				raise "Error"
			except:
				pass
				
			COUNT[0]+=1
	"""
    class collapseEdge(object):
        __slots__ = 'length', 'key', 'faces', 'collapse_loc', 'v1', 'v2', 'uv1', 'uv2', 'col1', 'col2', 'collapse_weight'

        def __init__(self, ed):
            self.init_from_edge(
                ed)  # So we can re-use the classes without using more memory.

        def init_from_edge(self, ed):
            self.key = ed.key
            self.length = ed.length
            self.faces = []
            self.v1 = ed.v1
            self.v2 = ed.v2
            if DO_UV or DO_VCOL:
                self.uv1 = []
                self.uv2 = []
                self.col1 = []
                self.col2 = []

            # self.collapse_loc= None # new collapse location.
            # Basic weighting.
            #self.collapse_weight= self.length *  (1+ ((ed.v1.no-ed.v2.no).length**2))
            self.collapse_weight = 1.0

        def collapse_locations(self, w1, w2):
            '''
			Generate a smart location for this edge to collapse to
			w1 and w2 are vertex location bias
			'''

            v1co = self.v1.co
            v2co = self.v2.co
            v1no = self.v1.no
            v2no = self.v2.no

            # Basic operation, works fine but not as good as predicting the best place.
            #between= ((v1co*w1) + (v2co*w2))
            #self.collapse_loc= between

            # normalize the weights of each vert - so we can use them as scalars.
            wscale = w1 + w2
            if not wscale:  # no scale?
                w1 = w2 = 0.5
            else:
                w1 /= wscale
                w2 /= wscale

            length = self.length
            between = MidpointVecs(v1co, v2co)

            # Collapse
            # new_location = between # Replace tricky code below. this code predicts the best collapse location.

            # Make lines at right angles to the normals- these 2 lines will intersect and be
            # the point of collapsing.

            # Enlarge so we know they intersect:  self.length*2
            cv1 = v1no.cross(v1no.cross(v1co - v2co))
            cv2 = v2no.cross(v2no.cross(v2co - v1co))

            # Scale to be less than the edge lengths.
            cv2.length = cv1.length = 1

            cv1 = cv1 * (length * 0.4)
            cv2 = cv2 * (length * 0.4)

            smart_offset_loc = between + (cv1 + cv2)

            # Now we need to blend between smart_offset_loc and w1/w2
            # note we're blending between a vert and the edge's midpoint, so we can't use a normal weighted blend.
            if w1 > 0.5:  # between v1 and smart_offset_loc
                #self.collapse_loc= v1co*(w2+0.5) + smart_offset_loc*(w1-0.5)
                w2 *= 2
                w1 = 1 - w2
                new_loc_smart = v1co * w1 + smart_offset_loc * w2
            else:  # w between v2 and smart_offset_loc
                w1 *= 2
                w2 = 1 - w1
                new_loc_smart = v2co * w2 + smart_offset_loc * w1

            if new_loc_smart.x != new_loc_smart.x:  # NAN LOCATION, revert to between
                new_loc_smart = None

            return new_loc_smart, between, v1co * 0.99999 + v2co * 0.00001, v1co * 0.00001 + v2co * 0.99999

    class collapseFace(object):
        __slots__ = 'verts', 'normal', 'area', 'index', 'orig_uv', 'orig_col', 'uv', 'col'  # , 'collapse_edge_count'

        def __init__(self, f):
            self.init_from_face(f)

        def init_from_face(self, f):
            self.verts = f.v
            self.normal = f.no
            self.area = f.area
            self.index = f.index
            if DO_UV:
                self.orig_uv = [uv_key(uv) for uv in f.uv]
                self.uv = f.uv
            if DO_VCOL:
                self.orig_col = [col_key(col) for col in f.col]
                self.col = f.col

    collapse_edges = collapse_faces = None

    # So meshCalcNormals can avoid making a new list all the time.
    reuse_vertNormals = [Vector() for v in xrange(len(me.verts))]

    while target_face_count <= len(me.faces):
        BPyMesh.meshCalcNormals(me, reuse_vertNormals)

        if DO_WEIGHTS:
            #groupNames, vWeightDict= BPyMesh.meshWeight2Dict(me)
            groupNames, vWeightList = BPyMesh.meshWeight2List(me)

        # THIS CRASHES? Not anymore.
        verts = list(me.verts)
        edges = list(me.edges)
        faces = list(me.faces)

        # THIS WORKS
        #verts= me.verts
        #edges= me.edges
        #faces= me.faces

        # if DEBUG: DOUBLE_CHECK= [0]*len(verts)
        me.sel = False

        if not collapse_faces:  # Initialize the list.
            collapse_faces = [collapseFace(f) for f in faces]
            collapse_edges = [collapseEdge(ed) for ed in edges]
        else:
            for i, ed in enumerate(edges):
                collapse_edges[i].init_from_edge(ed)

            # Strip the unneeded end off the list
            collapse_edges[i + 1:] = []

            for i, f in enumerate(faces):
                collapse_faces[i].init_from_face(f)

            # Strip the unneeded end off the list
            collapse_faces[i + 1:] = []

        collapse_edges_dict = dict([(ced.key, ced) for ced in collapse_edges])

        # Store verts edges.
        vert_ed_users = [[] for i in xrange(len(verts))]
        for ced in collapse_edges:
            vert_ed_users[ced.key[0]].append(ced)
            vert_ed_users[ced.key[1]].append(ced)

        # Store face users
        vert_face_users = [[] for i in xrange(len(verts))]

        # Have decided not to use this; area is better.
        #face_perim= [0.0]* len(me.faces)

        for ii, cfa in enumerate(collapse_faces):
            for i, v1 in enumerate(cfa.verts):
                vert_face_users[v1.index].append((i, cfa))

                # add the uv coord to the vert
                v2 = cfa.verts[i - 1]
                i1 = v1.index
                i2 = v2.index

                if i1 > i2: ced = collapse_edges_dict[i2, i1]
                else: ced = collapse_edges_dict[i1, i2]

                ced.faces.append(cfa)
                if DO_UV or DO_VCOL:
                    # if the edge is flipped from its order in the face then we need to flip the order indices.
                    if cfa.verts[i] == ced.v1: i1, i2 = i, i - 1
                    else: i1, i2 = i - 1, i

                    if DO_UV:
                        ced.uv1.append(cfa.orig_uv[i1])
                        ced.uv2.append(cfa.orig_uv[i2])

                    if DO_VCOL:
                        ced.col1.append(cfa.orig_col[i1])
                        ced.col2.append(cfa.orig_col[i2])

                # PERIMETER
                #face_perim[ii]+= ced.length

        # Weight the verts by the area of their faces * the normal difference.
        # When the edge collapses, the two vert weights are taken into account.

        vert_weights = [0.5] * len(verts)

        for ii, vert_faces in enumerate(vert_face_users):
            for f in vert_faces:
                try:
                    no_ang = (Ang(verts[ii].no, f[1].normal) / 180) * f[1].area
                except:
                    no_ang = 1.0

                vert_weights[ii] += no_ang

        # Use a vertex group as a weighting.
        if VGROUP_INF_REDUX != None:

            # Get Weights from a vgroup.
            """
			vert_weights_map= [1.0] * len(verts)
			for i, wd in enumerate(vWeightDict):
				try:	vert_weights_map[i]= 1+(wd[VGROUP_INF_REDUX] * VGROUP_INF_WEIGHT)
				except:	pass
			"""
            vert_weights_map = [
                1 + (wl[VGROUP_INF_REDUX_INDEX] * VGROUP_INF_WEIGHT)
                for wl in vWeightList
            ]

        # BOUNDARY CHECKING AND WEIGHT EDGES. CAN REMOVE
        # Now we know how many faces link to an edge. Let's get all the boundary verts.
        if BOUNDRY_WEIGHT > 0:
            verts_boundry = [1] * len(verts)
            #for ed_idxs, faces_and_uvs in edge_faces_and_uvs.iteritems():
            for ced in collapse_edges:
                if len(ced.faces) < 2:
                    for key in ced.key:  # only ever 2 key indices.
                        verts_boundry[key] = 2

            for ced in collapse_edges:
                b1 = verts_boundry[ced.key[0]]
                b2 = verts_boundry[ced.key[1]]
                if b1 != b2:
                    # Edge has 1 boundary and 1 non-boundary vert; weight it higher.
                    ced.collapse_weight = BOUNDRY_WEIGHT
                #elif b1==b2==2: # if both are on a seam then weigh half as bad.
                #	ced.collapse_weight= ((BOUNDRY_WEIGHT-1)/2) +1
            # weight the verts by their boundary status
            del b1
            del b2

            for ii, boundry in enumerate(verts_boundry):
                if boundry == 2:
                    vert_weights[ii] *= BOUNDRY_WEIGHT

            vert_collapsed = verts_boundry
            del verts_boundry
        else:
            vert_collapsed = [1] * len(verts)

        # Best method, no quick hacks here. Correction: should be the best but still needs tweaks.
        def ed_set_collapse_error(ced):
            # Use the vertex weights to bias the new location.
            new_locs = ced.collapse_locations(vert_weights[ced.key[0]],
                                              vert_weights[ced.key[1]])

            # Find the connecting faces of the 2 verts.
            i1, i2 = ced.key
            test_faces = set()
            for i in (i1, i2):  # faster than list comprehensions
                for f in vert_face_users[i]:
                    test_faces.add(f[1].index)
            for f in ced.faces:
                test_faces.remove(f.index)

            v1_orig = Vector(ced.v1.co)
            v2_orig = Vector(ced.v2.co)

            def test_loc(new_loc):
                '''
				Takes a location and tests the error without changing anything
				'''
                new_weight = ced.collapse_weight
                ced.v1.co = ced.v2.co = new_loc

                new_nos = [faces[i].no for i in test_faces]

                # So we can compare the before and after normals
                ced.v1.co = v1_orig
                ced.v2.co = v2_orig

                # now see how badly the normals are affected
                angle_diff = 1.0

                for ii, i in enumerate(test_faces):  # local face index, global face index
                    cfa = collapse_faces[i]  # this collapse face
                    try:
                        # can use perim, but area looks better.
                        if FACE_AREA_WEIGHT:
                            # Pseudo code for the weighting:
                            # angle_diff = the before/after angle difference between the collapsed and un-collapsed face.
                            # ... divide by 180 so the value will be between 0.0 and 1.0
                            # ... add 1 so we can use it as a multiplier and not make the area have no effect (below)
                            # area_weight = the face's original area * the area weight
                            # ... add 1.0 so a small-area face doesn't make the angle_diff have no effect.
                            #
                            # Now multiply - (angle_diff * area_weight)
                            # ... The weight will be a minimum of 1.0 - we need to subtract this so more faces don't give the collapse an uneven weighting.

                            angle_diff += (
                                (1 + (Ang(cfa.normal, new_nos[ii]) / 180)) *
                                (1 + (cfa.area * FACE_AREA_WEIGHT))
                            ) - 1  # 4 is how much to influence area
                        else:
                            angle_diff += Ang(cfa.normal, new_nos[ii]) / 180

                    except:
                        pass

                # This is very arbitrary, feel free to modify
                try:
                    no_ang = (Ang(ced.v1.no, ced.v2.no) / 180) + 1
                except:
                    no_ang = 2.0

                # do *= because the weight was initialized from the boundary weight (1.0 by default).
                new_weight *= (no_ang * ced.length) * (1 - (1 / angle_diff))  # / max(len(test_faces), 1)
                return new_weight

            # End test_loc

            # Test the collapse locations
            collapse_loc_best = None
            collapse_weight_best = 1000000000
            ii = 0
            for collapse_loc in new_locs:
                if collapse_loc:  # will only ever fail if smart loc is NAN
                    test_weight = test_loc(collapse_loc)
                    if test_weight < collapse_weight_best:
                        iii = ii
                        collapse_weight_best = test_weight
                        collapse_loc_best = collapse_loc
                    ii += 1

            ced.collapse_loc = collapse_loc_best
            ced.collapse_weight = collapse_weight_best

            # are we using a weight map
            if VGROUP_INF_REDUX:
                v = vert_weights_map[i1] + vert_weights_map[i2]
                ced.collapse_weight *= v

        # End collapse Error

        # We could calculate the weights in __init__ but this is higher quality.
        for ced in collapse_edges:
            if ced.faces:  # don't collapse faceless edges.
                ed_set_collapse_error(ced)

        # Won't use the function again.
        del ed_set_collapse_error
        # END BOUNDARY. Can remove

        # sort by collapse weight
        try:
            collapse_edges.sort(key=lambda ced: ced.collapse_weight)  # edges will be used for sorting
        except:
            collapse_edges.sort(lambda ced1, ced2: cmp(ced1.collapse_weight, ced2.collapse_weight))  # edges will be used for sorting

        vert_collapsed = [0] * len(verts)

        collapse_edges_to_collapse = []

        # Make a list of the first half of the edges we can collapse;
        # these will be better edges to remove.
        collapse_count = 0
        for ced in collapse_edges:
            if ced.faces:
                i1, i2 = ced.key
                # Use vert selections
                if vert_collapsed[i1] or vert_collapsed[i2]:
                    pass
                else:
                    # Now we know the verts haven't been collapsed.
                    vert_collapsed[i2] = vert_collapsed[i1] = 1  # Don't collapse again.
                    collapse_count += 1
                    collapse_edges_to_collapse.append(ced)

        # Get a subset of the entire list - the first "collapse_per_pass" that are best to collapse.
        if collapse_count > 4:
            collapse_count = int(collapse_count * collapse_per_pass)
        else:
            collapse_count = len(collapse_edges)
        # We know edge_container_list_collapse can be removed.
        for ced in collapse_edges_to_collapse:
            """# DEBUG!
			if DEBUG:
				if DOUBLE_CHECK[ced.v1.index] or\
				DOUBLE_CHECK[ced.v2.index]:
					raise 'Error'
				else:
					DOUBLE_CHECK[ced.v1.index]=1
					DOUBLE_CHECK[ced.v2.index]=1
				
				tmp= (ced.v1.co+ced.v2.co)*0.5
				Blender.Window.SetCursorPos(tmp.x, tmp.y, tmp.z)
				Blender.Window.RedrawAll()
			"""

            # Check whether we have collapsed our quota.
            collapse_count -= 1
            if not collapse_count:
                break

            current_face_count -= len(ced.faces)

            # Find and assign the real weights based on collapse loc.

            # Find the weights from the collapse error
            if DO_WEIGHTS or DO_UV or DO_VCOL:
                i1, i2 = ced.key
                # Don't use these weights since they may not have been used to make the collapse loc.
                #w1= vert_weights[i1]
                #w2= vert_weights[i2]
                w1 = (ced.v2.co - ced.collapse_loc).length
                w2 = (ced.v1.co - ced.collapse_loc).length

                # Normalize weights
                wscale = w1 + w2
                if not wscale:  # no scale?
                    w1 = w2 = 0.5
                else:
                    w1 /= wscale
                    w2 /= wscale

                # Interpolate the bone weights.
                if DO_WEIGHTS:

                    # add the verts' vgroups to each other
                    wl1 = vWeightList[i1]  # v1 weight dict
                    wl2 = vWeightList[i2]  # v2 weight dict
                    for group_index in xrange(len_vgroups):
                        wl1[group_index] = wl2[group_index] = (
                            wl1[group_index] * w1) + (wl2[group_index] * w2)
                # Done finding weights.

                if DO_UV or DO_VCOL:
                    # Handle UVs and vert colors!
                    for v, my_weight, other_weight, edge_my_uvs, edge_other_uvs, edge_my_cols, edge_other_cols in (\
                    (ced.v1, w1, w2, ced.uv1, ced.uv2, ced.col1, ced.col2),\
                    (ced.v2, w2, w1, ced.uv2, ced.uv1, ced.col2, ced.col1)\
                    ):
                        uvs_mixed = [
                            uv_key_mix(edge_my_uvs[iii], edge_other_uvs[iii],
                                       my_weight, other_weight)
                            for iii in xrange(len(edge_my_uvs))
                        ]
                        cols_mixed = [
                            col_key_mix(edge_my_cols[iii],
                                        edge_other_cols[iii], my_weight,
                                        other_weight)
                            for iii in xrange(len(edge_my_cols))
                        ]

                        for face_vert_index, cfa in vert_face_users[v.index]:
                            # If the face is a part of this edge then don't bother finding the uvs,
                            # since the face will be removed anyway.
                            if len(cfa.verts) == 3 and cfa not in ced.faces:

                                if DO_UV:
                                    # UV COORDS
                                    uvk = cfa.orig_uv[face_vert_index]
                                    try:
                                        tex_index = edge_my_uvs.index(uvk)
                                    except:
                                        tex_index = None
                                        """ # DEBUG!
										if DEBUG:
											print 'not found', uvk, 'in', edge_my_uvs, 'ed index', ii, '\nwhat about', edge_other_uvs
										"""
                                    if tex_index != None:  # This face uses a uv in the collapsing face - do a merge.
                                        other_uv = edge_other_uvs[tex_index]
                                        uv_vec = cfa.uv[face_vert_index]
                                        uv_vec.x, uv_vec.y = uvs_mixed[tex_index]

                                # TEXFACE COLORS
                                if DO_VCOL:
                                    colk = cfa.orig_col[face_vert_index]
                                    try:
                                        tex_index = edge_my_cols.index(colk)
                                    except:
                                        tex_index = None  # not found; skip the merge below
                                    if tex_index != None:
                                        other_col = edge_other_cols[tex_index]
                                        col_ob = cfa.col[face_vert_index]
                                        col_ob.r, col_ob.g, col_ob.b = cols_mixed[tex_index]

                                # DEBUG! if DEBUG: rd()

            # Execute the collapse
            ced.v1.sel = ced.v2.sel = True  # Select so remove-doubles removes the edges and faces that use it
            ced.v1.co = ced.v2.co = ced.collapse_loc

            # DEBUG! if DEBUG: rd()
            if current_face_count <= target_face_count:
                break

        # Copy weights back to the mesh before we remove doubles.
        if DO_WEIGHTS:
            #BPyMesh.dict2MeshWeight(me, groupNames, vWeightDict)
            BPyMesh.list2MeshWeight(me, groupNames, vWeightList)

        #doubles= me.remDoubles(0.0001)
        doubles = 0
        current_face_count = len(me.faces)

        if current_face_count <= target_face_count or not doubles:  # "not doubles" should never happen.
            break

    me.update()
    Blender.Mesh.Mode(OLD_MESH_MODE)
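
A minimal, Blender-free sketch of the collapse-error idea used in the example above: an edge is weighted by how much collapsing it would rotate the normals of the surrounding faces, scaled by face area and edge length. The names here (collapse_error, face_area_weight) are illustrative only and are not part of BPyMesh; normals are assumed to be unit-length tuples.

import math

def _normal_angle(n1, n2):
    # Angle in degrees between two unit normals, clamped to avoid math.acos domain errors.
    d = n1[0] * n2[0] + n1[1] * n2[1] + n1[2] * n2[2]
    return math.degrees(math.acos(max(-1.0, min(1.0, d))))

def collapse_error(edge_length, old_normals, new_normals, areas, face_area_weight=1.0):
    # Accumulate ((1 + angle / 180) * (1 + area * face_area_weight)) - 1 per surrounding
    # face, mirroring the FACE_AREA_WEIGHT branch above, then scale by edge length.
    err = 1.0
    for n_old, n_new, area in zip(old_normals, new_normals, areas):
        err += ((1 + _normal_angle(n_old, n_new) / 180.0) *
                (1 + area * face_area_weight)) - 1
    return edge_length * (1 - (1 / err))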
Example #43
0
def file_callback(filename):

    if not filename.lower().endswith('.ply'):
        filename += '.ply'

    scn = bpy.data.scenes.active
    ob = scn.objects.active
    if not ob:
        Blender.Draw.PupMenu('Error%t|Select 1 active object')
        return

    file = open(filename, 'wb')

    EXPORT_APPLY_MODIFIERS = Draw.Create(1)
    EXPORT_NORMALS = Draw.Create(1)
    EXPORT_UV = Draw.Create(1)
    EXPORT_COLORS = Draw.Create(1)
    #EXPORT_EDGES = Draw.Create(0)

    pup_block = [\
    ('Apply Modifiers', EXPORT_APPLY_MODIFIERS, 'Use transformed mesh data.'),\
    ('Normals', EXPORT_NORMALS, 'Export vertex normal data.'),\
    ('UVs', EXPORT_UV, 'Export texface UV coords.'),\
    ('Colors', EXPORT_COLORS, 'Export vertex Colors.'),\
	#('Edges', EXPORT_EDGES, 'Edges not connected to faces.'),\
    ]

    if not Draw.PupBlock('Export...', pup_block):
        return

    is_editmode = Blender.Window.EditMode()
    if is_editmode:
        Blender.Window.EditMode(0, '', 0)

    Window.WaitCursor(1)

    EXPORT_APPLY_MODIFIERS = EXPORT_APPLY_MODIFIERS.val
    EXPORT_NORMALS = EXPORT_NORMALS.val
    EXPORT_UV = EXPORT_UV.val
    EXPORT_COLORS = EXPORT_COLORS.val
    #EXPORT_EDGES = EXPORT_EDGES.val

    mesh = BPyMesh.getMeshFromObject(ob, None, EXPORT_APPLY_MODIFIERS, False,
                                     scn)

    if not mesh:
        Blender.Draw.PupMenu(
            'Error%t|Could not get mesh data from active object')
        return

    mesh.transform(ob.matrixWorld)

    faceUV = mesh.faceUV
    vertexUV = mesh.vertexUV
    vertexColors = mesh.vertexColors

    if (not faceUV) and (not vertexUV): EXPORT_UV = False
    if not vertexColors: EXPORT_COLORS = False

    if not EXPORT_UV: faceUV = vertexUV = False
    if not EXPORT_COLORS: vertexColors = False

    # in case
    color = uvcoord = uvcoord_key = normal = normal_key = None

    verts = []  # list of dictionaries
    # vdict = {} # (index, normal, uv) -> new index
    vdict = [{} for i in xrange(len(mesh.verts))]
    vert_count = 0
    for i, f in enumerate(mesh.faces):
        smooth = f.smooth
        if not smooth:
            normal = tuple(f.no)
            normal_key = rvec3d(normal)

        if faceUV: uv = f.uv
        if vertexColors: col = f.col
        for j, v in enumerate(f):
            if smooth:
                normal = tuple(v.no)
                normal_key = rvec3d(normal)

            if faceUV:
                uvcoord = uv[j][0], 1.0 - uv[j][1]
                uvcoord_key = rvec2d(uvcoord)
            elif vertexUV:
                uvcoord = v.uvco[0], 1.0 - v.uvco[1]
                uvcoord_key = rvec2d(uvcoord)

            if vertexColors: color = col[j].r, col[j].g, col[j].b

            key = normal_key, uvcoord_key, color

            vdict_local = vdict[v.index]

            if (not vdict_local) or (not vdict_local.has_key(key)):
                vdict_local[key] = vert_count
                verts.append((tuple(v.co), normal, uvcoord, color))
                vert_count += 1

    file.write('ply\n')
    file.write('format ascii 1.0\n')
    file.write(
        'comment Created by Blender3D %s - www.blender.org, source file: %s\n'
        % (Blender.Get('version'),
           Blender.Get('filename').split('/')[-1].split('\\')[-1]))

    file.write('element vertex %d\n' % len(verts))

    file.write('property float x\n')
    file.write('property float y\n')
    file.write('property float z\n')
    if EXPORT_NORMALS:
        file.write('property float nx\n')
        file.write('property float ny\n')
        file.write('property float nz\n')

    if EXPORT_UV:
        file.write('property float s\n')
        file.write('property float t\n')
    if EXPORT_COLORS:
        file.write('property uchar red\n')
        file.write('property uchar green\n')
        file.write('property uchar blue\n')

    file.write('element face %d\n' % len(mesh.faces))
    file.write('property list uchar uint vertex_indices\n')
    file.write('end_header\n')

    for i, v in enumerate(verts):
        file.write('%.6f %.6f %.6f ' % v[0])  # co
        if EXPORT_NORMALS:
            file.write('%.6f %.6f %.6f ' % v[1])  # no

        if EXPORT_UV:
            file.write('%.6f %.6f ' % v[2])  # uv
        if EXPORT_COLORS:
            file.write('%u %u %u' % v[3])  # col
        file.write('\n')

    for (i, f) in enumerate(mesh.faces):
        file.write('%d ' % len(f))
        smooth = f.smooth
        if not smooth: no = rvec3d(f.no)

        if faceUV: uv = f.uv
        if vertexColors: col = f.col
        for j, v in enumerate(f):
            if f.smooth: normal = rvec3d(v.no)
            else: normal = no
            if faceUV: uvcoord = rvec2d((uv[j][0], 1.0 - uv[j][1]))
            elif vertexUV: uvcoord = rvec2d((v.uvco[0], 1.0 - v.uvco[1]))
            if vertexColors: color = col[j].r, col[j].g, col[j].b

            file.write('%d ' % vdict[v.index][normal, uvcoord, color])

        file.write('\n')
    file.close()

    if is_editmode:
        Blender.Window.EditMode(1, '', 0)
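
The rvec3d and rvec2d helpers used above are defined earlier in the exporter; the usual implementation simply rounds the components so that near-identical values can serve as dictionary keys when de-duplicating vertices. A minimal sketch, assuming that behaviour:

def rvec3d(v):
    # Round a 3D vector so it can be used as a stable dict key.
    return round(v[0], 6), round(v[1], 6), round(v[2], 6)

def rvec2d(v):
    # Same idea for 2D UV coordinates.
    return round(v[0], 6), round(v[1], 6)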
Example #44
0
def main():
    scn = Scene.GetCurrent()
    act_ob = scn.objects.active
    if not act_ob or act_ob.type != 'Mesh':
        BPyMessages.Error_NoMeshActive()
        return

    act_me = act_ob.getData(mesh=1)

    if act_me.multires:
        BPyMessages.Error_NoMeshMultiresEdit()
        return

    act_group = act_me.activeGroup
    if not act_group: act_group = ''

    # Defaults
    PREF_REDUX = Draw.Create(0.5)
    PREF_BOUNDRY_WEIGHT = Draw.Create(5.0)
    PREF_REM_DOUBLES = Draw.Create(1)
    PREF_FACE_AREA_WEIGHT = Draw.Create(1.0)
    PREF_FACE_TRIANGULATE = Draw.Create(1)

    VGROUP_INF_ENABLE = Draw.Create(0)
    VGROUP_INF_REDUX = Draw.Create(act_group)
    VGROUP_INF_WEIGHT = Draw.Create(10.0)

    PREF_DO_UV = Draw.Create(1)
    PREF_DO_VCOL = Draw.Create(1)
    PREF_DO_WEIGHTS = Draw.Create(1)
    PREF_OTHER_SEL_OBS = Draw.Create(0)

    pup_block = [\
    ('Poly Reduce:', PREF_REDUX, 0.05, 0.95, 'Scale the mesh poly count by this value.'),\
    ('Boundary Weight:', PREF_BOUNDRY_WEIGHT, 0.0, 20.0, 'Weight boundary verts by this scale, 0.0 for no boundary weighting.'),\
    ('Area Weight:', PREF_FACE_AREA_WEIGHT, 0.0, 20.0, 'Collapse edges affecting lower-area faces first.'),\
    ('Triangulate', PREF_FACE_TRIANGULATE, 'Convert quads to tris before reduction, for more choices of edges to collapse.'),\
    '',\
    ('VGroup Weighting', VGROUP_INF_ENABLE, 'Use a vertex group to influence the reduction, higher weights for higher quality'),\
    ('vgroup name: ', VGROUP_INF_REDUX, 0, 32, 'The name of the vertex group to use for the weight map'),\
    ('vgroup mult: ', VGROUP_INF_WEIGHT, 0.0, 100.0, 'How much the weight affects the reduction'),\
    ('Other Selected Obs', PREF_OTHER_SEL_OBS, 'Reduce other selected objects.'),\
    '',\
    '',\
    '',\
    ('UV Coords', PREF_DO_UV, 'Interpolate UV Coords.'),\
    ('Vert Colors', PREF_DO_VCOL, 'Interpolate Vertex Colors'),\
    ('Vert Weights', PREF_DO_WEIGHTS, 'Interpolate Vertex Weights'),\
    ('Remove Doubles', PREF_REM_DOUBLES, 'Remove doubles before reducing to avoid boundary tearing.'),\
    ]

    if not Draw.PupBlock("Poly Reducer", pup_block):
        return

    PREF_REDUX = PREF_REDUX.val
    PREF_BOUNDRY_WEIGHT = PREF_BOUNDRY_WEIGHT.val
    PREF_REM_DOUBLES = PREF_REM_DOUBLES.val
    PREF_FACE_AREA_WEIGHT = PREF_FACE_AREA_WEIGHT.val
    PREF_FACE_TRIANGULATE = PREF_FACE_TRIANGULATE.val

    VGROUP_INF_ENABLE = VGROUP_INF_ENABLE.val
    VGROUP_INF_WEIGHT = VGROUP_INF_WEIGHT.val

    if VGROUP_INF_ENABLE and VGROUP_INF_WEIGHT:
        VGROUP_INF_REDUX = VGROUP_INF_REDUX.val
    else:
        VGROUP_INF_WEIGHT = 0.0
        VGROUP_INF_REDUX = None

    PREF_DO_UV = PREF_DO_UV.val
    PREF_DO_VCOL = PREF_DO_VCOL.val
    PREF_DO_WEIGHTS = PREF_DO_WEIGHTS.val
    PREF_OTHER_SEL_OBS = PREF_OTHER_SEL_OBS.val

    t = sys.time()

    is_editmode = Window.EditMode()  # Exit Editmode.
    if is_editmode: Window.EditMode(0)
    Window.WaitCursor(1)
    print 'reducing:', act_ob.name, act_ob.getData(1)
    BPyMesh.redux(act_ob, PREF_REDUX, PREF_BOUNDRY_WEIGHT, PREF_REM_DOUBLES,
                  PREF_FACE_AREA_WEIGHT, PREF_FACE_TRIANGULATE, PREF_DO_UV,
                  PREF_DO_VCOL, PREF_DO_WEIGHTS, VGROUP_INF_REDUX,
                  VGROUP_INF_WEIGHT)

    if PREF_OTHER_SEL_OBS:
        for ob in scn.objects.context:
            if ob.type == 'Mesh' and ob != act_ob:
                print 'reducing:', ob.name, ob.getData(1)
                BPyMesh.redux(ob, PREF_REDUX, PREF_BOUNDRY_WEIGHT,
                              PREF_REM_DOUBLES, PREF_FACE_AREA_WEIGHT,
                              PREF_FACE_TRIANGULATE, PREF_DO_UV, PREF_DO_VCOL,
                              PREF_DO_WEIGHTS, VGROUP_INF_REDUX,
                              VGROUP_INF_WEIGHT)
                Window.RedrawAll()

    if is_editmode: Window.EditMode(1)
    Window.WaitCursor(0)
    Window.RedrawAll()

    print 'Reduction done in %.6f sec.' % (sys.time() - t)
Example #45
0
def vertexFakeAO(me, PREF_BLUR_ITERATIONS, PREF_BLUR_STRENGTH, PREF_CLAMP_CONCAVE, PREF_CLAMP_CONVEX, PREF_SHADOW_ONLY, PREF_SEL_ONLY):
	Window.WaitCursor(1)
	Ang= Mathutils.AngleBetweenVecs
	
	BPyMesh.meshCalcNormals(me)

	vert_tone= [0.0] * len(me.verts)
	vert_tone_count= [0] * len(me.verts)

	min_tone=0
	max_tone=0

	for i, f in enumerate(me.faces):
		fc= f.cent
		fno = f.no
		
		for v in f.v:
			vno=v.no # get a scaled down normal.
			
			dot= vno.dot(v.co) - vno.dot(fc)
			vert_tone_count[v.index]+=1
			try:
				a= Ang(vno, fno)
			except:
				continue
			
			# Convex
			if dot>0:
				a= min(PREF_CLAMP_CONVEX, a)
				if not PREF_SHADOW_ONLY:
					vert_tone[v.index] += a
			else:
				a= min(PREF_CLAMP_CONCAVE, a)
				vert_tone[v.index] -= a
	
	# average the vert tones by the number of contributing faces
	for i, tones in enumerate(vert_tone):
		if vert_tone_count[i]:
			vert_tone[i] = vert_tone[i] / vert_tone_count[i]

	
	# Below we use edges to blur along so the edges need counting, not the faces
	vert_tone_count=	[0] *	len(me.verts)
	for ed in me.edges:
		vert_tone_count[ed.v1.index] += 1
		vert_tone_count[ed.v2.index] += 1


	# Blur tone
	blur		= PREF_BLUR_STRENGTH
	blur_inv	= 1.0 - PREF_BLUR_STRENGTH
	
	for i in xrange(PREF_BLUR_ITERATIONS):
		
		# backup the original tones
		orig_vert_tone= list(vert_tone)
		
		for ed in me.edges:
			
			i1= ed.v1.index
			i2= ed.v2.index
		
			val1= (orig_vert_tone[i2]*blur) +  (orig_vert_tone[i1]*blur_inv)
			val2= (orig_vert_tone[i1]*blur) +  (orig_vert_tone[i2]*blur_inv)
			
			# Apply the tone divided by the number of edges connected
			vert_tone[i1]+= val1 / max(vert_tone_count[i1], 1)
			vert_tone[i2]+= val2 / max(vert_tone_count[i2], 1)
	

	min_tone= min(vert_tone)
	max_tone= max(vert_tone)
	
	#print min_tone, max_tone
	
	tone_range= max_tone-min_tone
	if max_tone==min_tone:
		return
	
	for f in me.faces:
		if not PREF_SEL_ONLY or f.sel:
			f_col= f.col
			for i, v in enumerate(f):
				col= f_col[i]
				tone= vert_tone[v.index]
				tone= (tone-min_tone)/tone_range
				
				col.r= int(tone*col.r)
				col.g= int(tone*col.g)
				col.b= int(tone*col.b)
	
	Window.WaitCursor(0)
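
A minimal, Blender-free sketch of the convex/concave test used above: the sign of the dot product between the vertex normal and the offset from the face centre decides whether the vertex is lightened or darkened. Plain tuples and an illustrative name, not part of the script's API:

def is_convex(vert_co, vert_no, face_center):
    # dot(vert_no, vert_co - face_center): positive means the vertex bulges out of the
    # face plane (convex, lightened unless shadow-only), negative means it dips in
    # (concave, darkened).
    d = sum((vert_co[i] - face_center[i]) * vert_no[i] for i in range(3))
    return d > 0.0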
Example #46
0
def main():

    scn = bpy.data.scenes.active
    ob = scn.objects.active
    if not ob or ob.type != 'Mesh':
        return

    is_editmode = Window.EditMode()
    if is_editmode:
        Window.EditMode(0)

    mousedown_wait()  # so clicking the menu item doesn't trigger the mouse click

    Window.DrawProgressBar(0.0, '')
    Window.DrawProgressBar(0.1, '(1 of 3) Click on a face corner')

    # wait for a click
    mouse_buttons = Window.GetMouseButtons()
    while not mouse_buttons & LMB:
        sys.sleep(10)
        mouse_buttons = Window.GetMouseButtons()

        # Allow for RMB cancel
        if mouse_buttons & RMB:
            return

    while mouse_buttons & LMB:
        sys.sleep(10)
        mouse_buttons = Window.GetMouseButtons()

    Window.DrawProgressBar(0.2, '(2 of 3) Click confirms the U coords')

    mousedown_wait()

    obmat = ob.matrixWorld
    screen_x, screen_y = Window.GetMouseCoords()
    mouseInView, OriginA, DirectionA = mouseViewRay(screen_x, screen_y, obmat)

    if not mouseInView or not OriginA:
        return

    me = ob.getData(mesh=1)

    # Get the face under the mouse
    face_click, isect, side = BPyMesh.pickMeshRayFace(me, OriginA, DirectionA)
    if not face_click:
        return

    proj_z_component = face_click.no
    if not face_click:
        return

    # Find the face vertex that's closest to the mouse,
    # this vert will be used as the corner to map from.
    best_v = None
    best_length = 10000000
    vi1 = None
    for i, v in enumerate(face_click.v):
        l = (v.co - isect).length
        if l < best_length:
            best_v = v
            best_length = l
            vi1 = i

    # now get the 2 edges in the face that connect to v
    # we can work it out fairly easily
    if len(face_click) == 4:
        if vi1 == 0: vi2, vi3 = 3, 1
        elif vi1 == 1: vi2, vi3 = 0, 2
        elif vi1 == 2: vi2, vi3 = 1, 3
        elif vi1 == 3: vi2, vi3 = 2, 0
    else:
        if vi1 == 0: vi2, vi3 = 2, 1
        elif vi1 == 1: vi2, vi3 = 0, 2
        elif vi1 == 2: vi2, vi3 = 1, 0

    face_corner_main = face_click.v[vi1].co
    face_corner_a = face_click.v[vi2].co
    face_corner_b = face_click.v[vi3].co

    line_a_len = (face_corner_a - face_corner_main).length
    line_b_len = (face_corner_b - face_corner_main).length

    orig_cursor = Window.GetCursorPos()
    Window.SetCursorPos(face_corner_main.x, face_corner_main.y,
                        face_corner_main.z)

    SHIFT = Window.Qual.SHIFT
    MODE = 0  # firstclick, 1, secondclick
    mouse_buttons = Window.GetMouseButtons()

    project_mat = Matrix([0, 0, 0], [0, 0, 0], [0, 0, 0])

    def get_face_coords(f):
        f_uv = f.uv
        return [(v.co - face_corner_main, f_uv[i]) for i, v in enumerate(f.v)]

    if me.faceUV == False:
        me.faceUV = True

    coords = [(co, uv) for f in me.faces if f.sel
              for co, uv in get_face_coords(f)]

    coords_orig = [uv.copy() for co, uv in coords]
    USE_MODIFIER = using_modifier(ob)

    while 1:
        if mouse_buttons & LMB:
            if MODE == 0:
                mousedown_wait()
                Window.DrawProgressBar(
                    0.8, '(3 of 3) Click confirms the V coords')
                MODE = 1  # second click

                # So we don't continually set the length and accumulate float error
                proj_y_component_orig = proj_y_component.copy()
            else:
                break

        elif mouse_buttons & RMB:
            # Restore old uvs
            for i, uv_orig in enumerate(coords_orig):
                coords[i][1][:] = uv_orig
            break

        mouse_buttons = Window.GetMouseButtons()
        screen_x, screen_y = Window.GetMouseCoords()
        mouseInView, OriginA, DirectionA = mouseViewRay(
            screen_x, screen_y, obmat)

        if not mouseInView:
            continue

        # Do a ray tri intersection, not clipped by the tri
        new_isect = Intersect(face_corner_main, face_corner_a, face_corner_b,
                              DirectionA, OriginA, False)
        new_isect_alt = new_isect + DirectionA * 0.0001

        # The distance from the mouse cursor ray vector to the edge
        line_isect_a_pair = LineIntersect(new_isect, new_isect_alt,
                                          face_corner_main, face_corner_a)
        line_isect_b_pair = LineIntersect(new_isect, new_isect_alt,
                                          face_corner_main, face_corner_b)

        # SHIFT to flip the axis.
        is_shift = Window.GetKeyQualifiers() & SHIFT

        if MODE == 0:
            line_dist_a = (line_isect_a_pair[0] - line_isect_a_pair[1]).length
            line_dist_b = (line_isect_b_pair[0] - line_isect_b_pair[1]).length

            if line_dist_a < line_dist_b:
                proj_x_component = face_corner_a - face_corner_main
                y_axis_length = line_b_len
                x_axis_length = (line_isect_a_pair[1] -
                                 face_corner_main).length
            else:
                proj_x_component = face_corner_b - face_corner_main
                y_axis_length = line_a_len
                x_axis_length = (line_isect_b_pair[1] -
                                 face_corner_main).length

            proj_y_component = proj_x_component.cross(proj_z_component)

            proj_y_component.length = 1 / y_axis_length
            proj_x_component.length = 1 / x_axis_length

            if is_shift: proj_x_component.negate()

        else:
            proj_y_component[:] = proj_y_component_orig
            if line_dist_a < line_dist_b:
                proj_y_component.length = 1 / (line_isect_a_pair[1] -
                                               new_isect).length
            else:
                proj_y_component.length = 1 / (line_isect_b_pair[1] -
                                               new_isect).length

            if is_shift: proj_y_component.negate()

        # Use the existing matrix to make a new 3x3 projection matrix
        project_mat[0][:] = -proj_y_component
        project_mat[1][:] = -proj_x_component
        project_mat[2][:] = proj_z_component

        # Apply the projection matrix
        for proj_co, uv in coords:
            uv[:] = (project_mat * proj_co)[0:2]

        if USE_MODIFIER:
            me.update()

        Window.Redraw(Window.Types.VIEW3D)

    Window.SetCursorPos(*orig_cursor)
    if is_editmode:
        Window.EditMode(1)

    Window.RedrawAll()
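
As a plain-Python illustration of the projection applied above: the two scaled in-plane axes act as rows of the matrix, so a vertex offset from the clicked corner maps straight to U and V. Names are illustrative, not the script's API, and sign flips (the negated rows) are ignored here:

def project_uv(offset, u_axis, v_axis):
    # offset: vertex position minus the clicked face corner.
    # u_axis / v_axis: the two in-plane axes, already scaled by 1 / axis_length.
    u = sum(offset[i] * u_axis[i] for i in range(3))
    v = sum(offset[i] * v_axis[i] for i in range(3))
    return u, v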
Example #47
0
def create_mesh(scn, new_objects, has_ngons, CREATE_FGONS, CREATE_EDGES,
                verts_loc, verts_tex, faces, unique_materials,
                unique_material_images, unique_smooth_groups, vertex_groups,
                dataname):
    '''
	Takes all the data gathered and generates a mesh, adding the new object to new_objects
	deals with fgons, sharp edges and assigning materials
	'''
    if not has_ngons:
        CREATE_FGONS = False

    if unique_smooth_groups:
        sharp_edges = {}
        smooth_group_users = dict([
            (context_smooth_group, {})
            for context_smooth_group in unique_smooth_groups.iterkeys()
        ])
        context_smooth_group_old = -1

    # Split fgons into tri's
    fgon_edges = {}  # Used for storing fgon keys
    if CREATE_EDGES:
        edges = []

    context_object = None

    # reverse loop through face indices
    for f_idx in xrange(len(faces) - 1, -1, -1):

        face_vert_loc_indicies,\
        face_vert_tex_indicies,\
        context_material,\
        context_smooth_group,\
        context_object= faces[f_idx]

        len_face_vert_loc_indicies = len(face_vert_loc_indicies)

        if len_face_vert_loc_indicies == 1:
            faces.pop(f_idx)  # can't add single-vert faces

        elif not face_vert_tex_indicies or len_face_vert_loc_indicies == 2:  # faces that have no texture coords are lines
            if CREATE_EDGES:
                # generators are better in python 2.4+ but can't be used in 2.3
                # edges.extend( (face_vert_loc_indicies[i], face_vert_loc_indicies[i+1]) for i in xrange(len_face_vert_loc_indicies-1) )
                edges.extend([(face_vert_loc_indicies[i],
                               face_vert_loc_indicies[i + 1])
                              for i in xrange(len_face_vert_loc_indicies - 1)])

            faces.pop(f_idx)
        else:

            # Smooth Group
            if unique_smooth_groups and context_smooth_group:
                # Is a part of a smooth group and is a face
                if context_smooth_group_old is not context_smooth_group:
                    edge_dict = smooth_group_users[context_smooth_group]
                    context_smooth_group_old = context_smooth_group

                for i in xrange(len_face_vert_loc_indicies):
                    i1 = face_vert_loc_indicies[i]
                    i2 = face_vert_loc_indicies[i - 1]
                    if i1 > i2: i1, i2 = i2, i1

                    try:
                        edge_dict[i1, i2] += 1
                    except KeyError:
                        edge_dict[i1, i2] = 1

            # FGons into triangles
            if has_ngons and len_face_vert_loc_indicies > 4:

                ngon_face_indices = BPyMesh.ngon(verts_loc,
                                                 face_vert_loc_indicies)
                faces.extend(\
                [(\
                [face_vert_loc_indicies[ngon[0]], face_vert_loc_indicies[ngon[1]], face_vert_loc_indicies[ngon[2]] ],\
                [face_vert_tex_indicies[ngon[0]], face_vert_tex_indicies[ngon[1]], face_vert_tex_indicies[ngon[2]] ],\
                context_material,\
                context_smooth_group,\
                context_object)\
                for ngon in ngon_face_indices]\
                )

                # edges to make fgons
                if CREATE_FGONS:
                    edge_users = {}
                    for ngon in ngon_face_indices:
                        for i in (0, 1, 2):
                            i1 = face_vert_loc_indicies[ngon[i]]
                            i2 = face_vert_loc_indicies[ngon[i - 1]]
                            if i1 > i2: i1, i2 = i2, i1

                            try:
                                edge_users[i1, i2] += 1
                            except KeyError:
                                edge_users[i1, i2] = 1

                    for key, users in edge_users.iteritems():
                        if users > 1:
                            fgon_edges[key] = None

                # remove all after 3; means we don't have to pop this one.
                faces.pop(f_idx)

    # Build sharp edges
    if unique_smooth_groups:
        for edge_dict in smooth_group_users.itervalues():
            for key, users in edge_dict.iteritems():
                if users == 1:  # This edge is on the boundary of a group
                    sharp_edges[key] = None

    # map the material names to an index
    material_mapping = dict([
        (name, i) for i, name in enumerate(unique_materials)
    ])  # enumerate over unique_materials keys()

    materials = [None] * len(unique_materials)

    for name, index in material_mapping.iteritems():
        materials[index] = unique_materials[name]

    me = bpy.data.meshes.new(dataname)

    me.materials = materials[0:16]  # make sure the list isn't too big.
    #me.verts.extend([(0,0,0)]) # dummy vert
    me.verts.extend(verts_loc)

    face_mapping = me.faces.extend([f[0] for f in faces], indexList=True)

    if verts_tex and me.faces:
        me.faceUV = 1
        # TEXMODE= Mesh.FaceModes['TEX']

    context_material_old = -1  # avoid a dict lookup
    mat = 0  # rare case it may be un-initialized.
    me_faces = me.faces
    ALPHA = Mesh.FaceTranspModes.ALPHA

    for i, face in enumerate(faces):
        if len(face[0]) < 2:
            pass  #raise "bad face"
        elif len(face[0]) == 2:
            if CREATE_EDGES:
                edges.append(face[0])
        else:
            face_index_map = face_mapping[i]
            if face_index_map != None:  # None means the face wasn't added
                blender_face = me_faces[face_index_map]

                face_vert_loc_indicies,\
                face_vert_tex_indicies,\
                context_material,\
                context_smooth_group,\
                context_object= face

                if context_smooth_group:
                    blender_face.smooth = True

                if context_material:
                    if context_material_old is not context_material:
                        mat = material_mapping[context_material]
                        if mat > 15:
                            mat = 15
                        context_material_old = context_material

                    blender_face.mat = mat

                if verts_tex:
                    if context_material:
                        image, has_data = unique_material_images[
                            context_material]
                        if image:  # Can be None if the material doesn't have an image.
                            blender_face.image = image
                            if has_data and image.depth == 32:
                                blender_face.transp |= ALPHA

                    # BUG - Evil eekadoodle problem where faces that have vert index 0 location at 3 or 4 are shuffled.
                    if len(face_vert_loc_indicies) == 4:
                        if face_vert_loc_indicies[
                                2] == 0 or face_vert_loc_indicies[3] == 0:
                            face_vert_tex_indicies = face_vert_tex_indicies[
                                2], face_vert_tex_indicies[
                                    3], face_vert_tex_indicies[
                                        0], face_vert_tex_indicies[1]
                    else:  # length of 3
                        if face_vert_loc_indicies[2] == 0:
                            face_vert_tex_indicies = face_vert_tex_indicies[
                                1], face_vert_tex_indicies[
                                    2], face_vert_tex_indicies[0]
                    # END EEEKADOODLE FIX

                    # assign material, uv's and image
                    for ii, uv in enumerate(blender_face.uv):
                        uv.x, uv.y = verts_tex[face_vert_tex_indicies[ii]]
    del me_faces
    del ALPHA

    # Add edge faces.
    me_edges = me.edges
    if CREATE_FGONS and fgon_edges:
        FGON = Mesh.EdgeFlags.FGON
        for ed in me.findEdges(fgon_edges.keys()):
            if ed != None:
                me_edges[ed].flag |= FGON
        del FGON

    if unique_smooth_groups and sharp_edges:
        SHARP = Mesh.EdgeFlags.SHARP
        for ed in me.findEdges(sharp_edges.keys()):
            if ed != None:
                me_edges[ed].flag |= SHARP
        del SHARP

    if CREATE_EDGES:
        me_edges.extend(edges)

    del me_edges

    me.calcNormals()

    ob = scn.objects.new(me)
    new_objects.append(ob)

    # Create the vertex groups. No need to have the flag passed here since we test for the
    # content of the vertex_groups. If the user selects to NOT have vertex groups saved then
    # the following test will never run
    for group_name, group_indicies in vertex_groups.iteritems():
        me.addVertGroup(group_name)
        me.assignVertsToGroup(group_name, group_indicies, 1.00,
                              Mesh.AssignModes.REPLACE)
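
The smooth-group and fgon bookkeeping above relies on storing each edge with its smaller vertex index first, so that (a, b) and (b, a) collapse to one key. A small standalone sketch of that pattern (illustrative names, plain Python):

def edge_key(i1, i2):
    # Order-independent edge key.
    return (i2, i1) if i1 > i2 else (i1, i2)

def count_edge_users(faces):
    # faces: sequences of vertex indices; edges used by exactly one face lie on
    # a smoothing-group boundary (marked SHARP above) or an fgon outline.
    users = {}
    for face in faces:
        for i in range(len(face)):
            key = edge_key(face[i], face[i - 1])
            users[key] = users.get(key, 0) + 1
    return users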
Example #48
0
def write(filename):
    start = Blender.sys.time()
    if not filename.lower().endswith(".tmf"):
        filename += ".tmf"

    scn = Blender.Scene.GetCurrent()
    ob = scn.objects.active
    if not ob:
        Blender.Draw.PupMenu("Error%t|Select 1 active object")
        return

    file = open(filename, "wb")

    mesh = BPyMesh.getMeshFromObject(ob, None, True, False, scn)
    if not mesh:
        Blender.Draw.PupMenu("Error%t|Could not get mesh data from active object")
        return

    mesh.transform(ob.matrixWorld)

    file = open(filename, "wb")

    # Write vertex coords and normals
    file.write("C " + ` len(mesh.verts) ` + "\n")
    for v in mesh.verts:
        file.write("%.6f %.6f %.6f " % tuple(v.co))
        file.write("\n")

    file.write("N " + ` len(mesh.verts) ` + "\n")
    for v in mesh.verts:
        file.write("%.6f %.6f %.6f " % tuple(v.no))
        file.write("\n")

    # Process faces
    faces = len(mesh.faces)
    data = ""
    uvdata = ""
    for face in mesh.faces:
        if len(face.v) < 3:
            # discard faces with fewer than 3 verts
            faces = faces - 1
        elif len(face.v) == 3:
            # Already a triangle, add it to the data, do not change the count
            data = data + ` face.v[0].index ` + " " + ` face.v[1].index ` + " " + ` face.v[2].index ` + "\n"
            for v in face.uv:
                uvdata = add_uvdata(uvdata, v)
        else:
            # this one is a quad
            # Break it up into two triangles
            # Hence one additional face
            faces = faces + 1
            data = data + ` face.v[0].index ` + " " + ` face.v[1].index ` + " " + ` face.v[3].index ` + "\n"
            data = data + ` face.v[1].index ` + " " + ` face.v[2].index ` + " " + ` face.v[3].index ` + "\n"
            uvdata = add_uvdata(uvdata, face.uv[0])
            uvdata = add_uvdata(uvdata, face.uv[1])
            uvdata = add_uvdata(uvdata, face.uv[3])
            uvdata = uvdata + "\n"
            uvdata = add_uvdata(uvdata, face.uv[1])
            uvdata = add_uvdata(uvdata, face.uv[2])
            uvdata = add_uvdata(uvdata, face.uv[3])
            uvdata = uvdata + "\n"
    # Now I can write the header with the correct face count, and then the data
    file.write("F " + ` faces ` + "\n")
    file.write(data)
    uvs = faces * 3
    file.write("T " + ` uvs ` + "\n")
    file.write(uvdata)

    file.close()

    end = Blender.sys.time()
    message = 'Successfully exported "%s" in %.4f seconds' % (Blender.sys.basename(filename), end - start)
    print message
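
The quad handling above always splits on the 0-1-3 / 1-2-3 diagonal; the same split, isolated as a tiny helper (illustrative only, not part of the exporter):

def quad_to_tris(quad):
    # Split a quad (a, b, c, d) into the two triangles the exporter writes.
    a, b, c, d = quad
    return (a, b, d), (b, c, d)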
Example #49
0
def vertexGradientPick(ob, MODE):
	#MODE 0 == VWEIGHT,  1 == VCOL 
	
	me= ob.getData(mesh=1)
	if not me.faceUV:	me.faceUV= True
	
	Window.DrawProgressBar (0.0, '')
	
	mousedown_wait()
	
	if MODE==0:
		act_group= me.activeGroup
		if act_group == None:
			mousedown_wait()
			Draw.PupMenu('Error, mesh has no active group.')
			return
	
	# Loop until click
	Window.DrawProgressBar (0.25, 'Click to set gradient start')
	mouseup()
	
	obmat= ob.matrixWorld
	screen_x, screen_y = Window.GetMouseCoords()
	mouseInView, OriginA, DirectionA = mouseViewRay(screen_x, screen_y, obmat)
	if not mouseInView or not OriginA:
		return
	
	# get the mouse weight
	
	if MODE==0:
		pickValA= BPyMesh.pickMeshGroupWeight(me, act_group, OriginA, DirectionA)
	if MODE==1:
		pickValA= BPyMesh.pickMeshGroupVCol(me, OriginA, DirectionA)
	
	Window.DrawProgressBar (0.75, 'Click to set gradient end')
	mouseup()
	
	TOALPHA= Window.GetKeyQualifiers() & Window.Qual.SHIFT
	
	screen_x, screen_y = Window.GetMouseCoords()
	mouseInView, OriginB, DirectionB = mouseViewRay(screen_x, screen_y, obmat)
	if not mouseInView or not OriginB:
		return
	
	if not TOALPHA: # Only get a second opaque value if we are not blending to alpha
		if MODE==0:	pickValB= BPyMesh.pickMeshGroupWeight(me, act_group, OriginB, DirectionB)
		else:
			pickValB= BPyMesh.pickMeshGroupVCol(me, OriginB, DirectionB)
	else:
		if MODE==0: pickValB= 0.0
		else: pickValB= [0.0, 0.0, 0.0] # Dummy value
	
	# Neither points touched a face
	if pickValA == pickValB == None:
		return
	
	# clicking on 1 non-face is fine; just set the weight to 0.0
	if pickValA==None:
		pickValA= 0.0
		
		# swap A/B
		OriginA, OriginB= OriginB, OriginA
		DirectionA, DirectionB= DirectionB, DirectionA
		pickValA, pickValB= pickValB, pickValA
		
		TOALPHA= True
		
	if pickValB==None:
		pickValB= 0.0
		TOALPHA= True
	
	# set up 2 lines so we can measure their distances and calc the gradient
	
	# make a line 90d to the grad in screenspace.
	if (OriginA-OriginB).length <= eps: # Persp view. same origin different direction
		cross_grad= DirectionA.cross(DirectionB)
		ORTHO= False
		
	else: # Ortho - Same direction, different origin
		cross_grad= DirectionA.cross(OriginA-OriginB)
		ORTHO= True
	
	cross_grad.normalize()
	cross_grad= cross_grad * 100
	
	lineA= (OriginA, OriginA+(DirectionA*100))
	lineB= (OriginB, OriginB+(DirectionB*100))
	
	if not ORTHO:
		line_angle= AngleBetweenVecs(lineA[1], lineB[1])/2
		line_mid= (lineA[1]+lineB[1])*0.5

	VSEL= [False] * (len(me.verts))
	
	# Get the selected faces and apply the selection to the verts.
	for f in me.faces:
		if f.sel:
			for v in f.v:
				VSEL[v.index]= True
	groupNames, vWeightDict= BPyMesh.meshWeight2Dict(me)
	
	
	
	def grad_weight_from_co(v):
		'''
		Takes a vert and returns its gradient ratio between A and B
		'''
		
		if not VSEL[v.index]: # Not part of a selected face?
			return None, None
		
		v_co= v.co
		# make a line 90d to the 2 lines the user clicked.
		vert_line= (v_co - cross_grad, v_co + cross_grad)
		
		xA= LineIntersect(vert_line[0], vert_line[1], lineA[0], lineA[1])
		xB= LineIntersect(vert_line[0], vert_line[1], lineB[0], lineB[1])
		
		if not xA or not xB: # Should never happen but support it anyhow
			return None, None
		
		wA= (xA[0]-xA[1]).length
		wB= (xB[0]-xB[1]).length
		
		wTot= wA+wB
		if not wTot: # lines are on the same point.
			return None, None
		
		'''
		Get the length of the line between both intersections on the
		2 view lines.
		If the dist between lineA+VertLine and lineB+VertLine is
		greater than the length between the lineA and lineB intersection points, it means
		that the verts are not in between the 2 lines.
		'''
		lineAB_length= (xA[1]-xB[1]).length
		
		# normalize
		wA= wA/wTot
		wB= wB/wTot
		
		if ORTHO: # Can only use the line-length method with parallel lines
			if wTot > lineAB_length+eps:
				# vert is outside the range on 1 side. see what side of the grad
				if wA>wB:		wA, wB= 1.0, 0.0
				else:			wA, wB= 0.0, 1.0
		else:
			# PERSP, lineA[0] is the same origin as lineB[0]
			
			# Either xA[0] or xB[0] can be used instead of a possible x_mid between the 2;
			# as long as the point is in between lineA and lineB it doesn't matter.
			a= AngleBetweenVecs(lineA[0]-xA[0], line_mid)
			if a>line_angle:
				# vert is outside the range on 1 side. see what side of the grad
				if wA>wB:		wA, wB= 1.0, 0.0
				else:			wA, wB= 0.0, 1.0
		
		return wA, wB
		
	
	grad_weights= [grad_weight_from_co(v) for v in me.verts]
	
	
	if MODE==0:
		for v in me.verts:
			i= v.index
			if VSEL[i]:
				wA, wB = grad_weights[i]
				if wA != None: # and wB 
					if TOALPHA:
						# Do alpha by using the existing weight
						try:		pickValB= vWeightDict[i][act_group]
						except:	pickValB= 0.0 # The weights not there? assume zero
					# Mix the 2 opaque weights
					vWeightDict[i][act_group]= pickValB*wA + pickValA*wB
	
	else: # MODE==1 VCol
		for f in me.faces:
			if f.sel:
				f_v= f.v
				for i in xrange(len(f_v)):
					v= f_v[i]
					wA, wB = grad_weights[v.index]
					
					c= f.col[i]
					
					if TOALPHA:
						pickValB= c.r, c.g, c.b
					
					c.r = int(pickValB[0]*wA + pickValA[0]*wB)
					c.g = int(pickValB[1]*wA + pickValA[1]*wB)
					c.b = int(pickValB[2]*wA + pickValA[2]*wB)
					
	
	
	
	# Copy weights back to the mesh.
	BPyMesh.dict2MeshWeight(me, groupNames, vWeightDict)
	Window.DrawProgressBar (1.0, '')
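
The per-vertex mix at the end is a straight linear blend driven by the two normalized distances; a one-line sketch with illustrative names:

def blend_gradient(pick_val_a, pick_val_b, w_a, w_b):
    # w_a + w_b == 1.0; the result fades from pick_val_a (where w_b == 1)
    # to pick_val_b (where w_a == 1), matching the weight/colour loops above.
    return pick_val_b * w_a + pick_val_a * w_b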
Example #50
0
def write(filename):
    start = Blender.sys.time()
    file = open(filename, "wb")

    scn = Blender.Scene.GetCurrent()
    objects = list(scn.objects.context)

    if not objects:
        Blender.Draw.PupMenu("Error%t|No Objects selected")
        return

    try:
        objects.sort(key=lambda a: a.name)
    except:
        objects.sort(lambda a, b: cmp(a.name, b.name))

    text = generate_text()
    desc = generate_desc()
    icon = ""  # generate_icon()

    meshes = []
    mesh_object_name_lookup = {}  # for name lookups only

    for obj in objects:
        mesh = BPyMesh.getMeshFromObject(obj, None, True, False, scn)
        if mesh:
            mesh.transform(obj.matrixWorld)
            meshes.append(mesh)
            mesh_object_name_lookup[mesh] = obj.name
    del obj

    material_names = get_used_material_names(meshes)
    tags = generate_tags(material_names)
    surfs = generate_surfs(material_names)
    chunks = [text, desc, icon, tags]

    meshdata = cStringIO.StringIO()

    layer_index = 0

    for mesh in meshes:
        layr = generate_layr(mesh_object_name_lookup[mesh], layer_index)
        pnts = generate_pnts(mesh)
        bbox = generate_bbox(mesh)
        pols = generate_pols(mesh)
        ptag = generate_ptag(mesh, material_names)
        clip = generate_clip(mesh, material_names)

        if mesh.faceUV:
            vmad_uv = generate_vmad_uv(mesh)  # per face

        if mesh.vertexColors:
            # if meshtools.average_vcols:
            # 	vmap_vc = generate_vmap_vc(mesh)  # per vert
            # else:
            vmad_vc = generate_vmad_vc(mesh)  # per face

        write_chunk(meshdata, "LAYR", layr)
        chunks.append(layr)
        write_chunk(meshdata, "PNTS", pnts)
        chunks.append(pnts)
        write_chunk(meshdata, "BBOX", bbox)
        chunks.append(bbox)
        write_chunk(meshdata, "POLS", pols)
        chunks.append(pols)
        write_chunk(meshdata, "PTAG", ptag)
        chunks.append(ptag)

        if mesh.vertexColors:
            # if meshtools.average_vcols:
            # 	write_chunk(meshdata, "VMAP", vmap_vc)
            # 	chunks.append(vmap_vc)
            # else:
            write_chunk(meshdata, "VMAD", vmad_vc)
            chunks.append(vmad_vc)

        if mesh.faceUV:
            write_chunk(meshdata, "VMAD", vmad_uv)
            chunks.append(vmad_uv)
            write_chunk(meshdata, "CLIP", clip)
            chunks.append(clip)

        layer_index += 1
        mesh.verts = None  # save some ram

    del mesh_object_name_lookup

    for surf in surfs:
        chunks.append(surf)

    write_header(file, chunks)
    write_chunk(file, "ICON", icon)
    write_chunk(file, "TEXT", text)
    write_chunk(file, "DESC", desc)
    write_chunk(file, "TAGS", tags)
    file.write(meshdata.getvalue())
    meshdata.close()
    for surf in surfs:
        write_chunk(file, "SURF", surf)
    write_chunk(file, "DATE", "August 19, 2005")

    Blender.Window.DrawProgressBar(1.0, "")  # clear progressbar
    file.close()
    print "\a\r",
    print "Successfully exported %s in %.3f seconds" % (
        filename.split("\\")[-1].split("/")[-1],
        Blender.sys.time() - start,
    )
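
write_chunk is defined elsewhere in this exporter; LWO2 is an IFF-style container, so the usual shape is a 4-byte tag, a big-endian 32-bit length, and the payload (word-aligned). A sketch under that assumption only, using Python 2 byte strings to match the script:

import struct

def write_chunk(stream, tag, data):
    # Assumed behaviour: pad odd-length payloads so chunks stay word-aligned,
    # then write tag, big-endian length, and the data itself.
    if len(data) % 2:
        data += "\0"
    stream.write(tag)
    stream.write(struct.pack(">I", len(data)))
    stream.write(data)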
Example #51
0
def actWeightNormalize(me, ob, PREF_PEAKWEIGHT, PREF_ACTIVE_ONLY,
                       PREF_ARMATURE_ONLY, PREF_KEEP_PROPORTION):

    groupNames, vWeightDict = BPyMesh.meshWeight2Dict(me)
    new_weight = max_weight = -1.0
    act_group = me.activeGroup

    if PREF_ACTIVE_ONLY:
        normalizeGroups = [act_group]
    else:
        normalizeGroups = groupNames[:]

    if PREF_ARMATURE_ONLY:

        armature_groups = getArmatureGroups(ob, me)

        i = len(normalizeGroups)
        while i:
            i -= 1
            if not normalizeGroups[i] in armature_groups:
                del normalizeGroups[i]

    for act_group in normalizeGroups:
        vWeightDictUsed = [False] * len(vWeightDict)

        for i, wd in enumerate(vWeightDict):
            try:
                new_weight = wd[act_group]
                if new_weight > max_weight:
                    max_weight = new_weight
                vWeightDictUsed[i] = wd
            except:
                pass

        # These can be skipped for now, they complicate things when using multiple vgroups,
        '''
		if max_weight < SMALL_NUM or new_weight == -1:
			Draw.PupMenu('No verts to normalize. exiting.')
			#return
		
		if abs(max_weight-PREF_PEAKWEIGHT) < SMALL_NUM:
			Draw.PupMenu('Vert Weights are already normalized.')
			#return
		'''
        max_weight = max_weight / PREF_PEAKWEIGHT

        if PREF_KEEP_PROPORTION:
            # TODO, PROPORTIONAL WEIGHT SCALING.
            for wd in vWeightDictUsed:
                if wd:  # not false.
                    if len(wd) == 1:
                        # Only 1 group for this vert. Simple.
                        wd[act_group] /= max_weight
                    else:
                        # More than 1 group; will need to scale all users evenly.
                        if PREF_ARMATURE_ONLY:
                            local_maxweight = max([
                                v for k, v in wd.iteritems()
                                if k in armature_groups
                            ]) / PREF_PEAKWEIGHT
                            if local_maxweight > 0.0:
                                # So groups that are not used in any bones are ignored.
                                for weight in wd.iterkeys():
                                    if weight in armature_groups:
                                        wd[weight] /= local_maxweight
                        else:
                            local_maxweight = max(
                                wd.itervalues()) / PREF_PEAKWEIGHT
                            for weight in wd.iterkeys():
                                wd[weight] /= local_maxweight

        else:  # Simple, just scale the weights up. We already know this is in an armature group (if needed)
            for wd in vWeightDictUsed:
                if wd:  # not false.
                    wd[act_group] /= max_weight

    # Copy weights back to the mesh.
    BPyMesh.dict2MeshWeight(me, groupNames, vWeightDict)
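
A Blender-free sketch of the proportional case above: every group weight on a vertex is scaled by one factor so the strongest weight reaches the requested peak. Plain dict, illustrative names, not part of the script:

def normalize_vert_weights(weights, peak=1.0):
    # weights: {group_name: weight} for a single vertex.
    strongest = max(weights.values()) if weights else 0.0
    if strongest <= 0.0:
        return dict(weights)  # nothing to scale
    scale = peak / strongest
    return dict((name, w * scale) for name, w in weights.items())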
Example #52
0
def write(filename, objects,\
EXPORT_NORMALS_HQ=False,\
EXPORT_MTL=True,  EXPORT_COPY_IMAGES=False,\
EXPORT_APPLY_MODIFIERS=True, EXPORT_BLEN_OBS=True,\
EXPORT_GROUP_BY_OB=False):
    '''
    Basic write function. The context and options must already be set.
    This can be accessed externally,
    e.g.
    write( 'c:\\test\\foobar.obj', Blender.Object.GetSelected() ) # Using default options.
    '''
    def veckey3d(v):
        return round(v.x, 6), round(v.y, 6), round(v.z, 6)

    def veckey2d(v):
        return round(v.x, 6), round(v.y, 6)

    print 'WTF Export path: "%s"' % filename
    temp_mesh_name = '~tmp-mesh'

    time1 = sys.time()
    scn = Scene.GetCurrent()

    file = open(filename, "w")
    file.write('<?xml version="1.0"?>\n')
    file.write('<OPEN_TRACK>\n')

    # Write Header
    # file.write('\n<!--\n'
    #            + '  Blender3D v%s WTF File: %s\n' % (Blender.Get('version'), Blender.Get('filename').split('/')[-1].split('\\')[-1] )
    #            + '  www.blender3d.org\n'
    #            + '-->\n\n')

    # Get the container mesh. - used for applying modifiers and non mesh objects.
    containerMesh = meshName = tempMesh = None
    for meshName in Blender.NMesh.GetNames():
        if meshName.startswith(temp_mesh_name):
            tempMesh = Mesh.Get(meshName)
            if not tempMesh.users:
                containerMesh = tempMesh
    if not containerMesh:
        containerMesh = Mesh.New(temp_mesh_name)

    del meshName
    del tempMesh

    # Initialize totals, these are updated each object
    totverts = totuvco = totno = 0

    face_vert_index = 0

    globalNormals = {}

    file.write('\n<library_objects>\n')
    # Get all meshes
    for ob_main in objects:
        obnamestring = fixName(ob_main.name)
        file.write('\t<object id="%s">\n' % obnamestring)  # Write Object name

        for ob, ob_mat in BPyObject.getDerivedObjects(ob_main):
            # Will work for non meshes now! :)
            # getMeshFromObject(ob, container_mesh=None, apply_modifiers=True, vgroups=True, scn=None)
            me = BPyMesh.getMeshFromObject(ob, containerMesh,
                                           EXPORT_APPLY_MODIFIERS, False, scn)
            if not me:
                file.write('\t\t<loc>%.6f %.6f %.6f</loc>\n' %
                           tuple(ob_main.loc))  # Write object location
                file.write('\t\t<rot>%.6f %.6f %.6f</rot>\n' %
                           tuple(ob_main.rot))  # Write object rotation
                continue

            faceuv = me.faceUV

            # We have a valid mesh
            if me.faces:
                # Add a dummy object to it.
                has_quads = False
                for f in me.faces:
                    if len(f) == 4:
                        has_quads = True
                        break

                if has_quads:
                    oldmode = Mesh.Mode()
                    Mesh.Mode(Mesh.SelectModes['FACE'])

                    me.sel = True
                    tempob = scn.objects.new(me)
                    me.quadToTriangle(0)  # more=0 shortest length
                    oldmode = Mesh.Mode(oldmode)
                    scn.objects.unlink(tempob)

                    Mesh.Mode(oldmode)

            # Make our own list so it can be sorted to reduce context switching
            faces = [f for f in me.faces]
            edges = me.edges

            if not (len(faces) + len(edges) +
                    len(me.verts)):  # Make sure there is something to write
                continue  # don't bother with this mesh.

            me.transform(ob_mat)

            # High Quality Normals
            if faces:
                if EXPORT_NORMALS_HQ:
                    BPyMesh.meshCalcNormals(me)
                else:
                    # transforming normals is incorrect
                    # when the matrix is scaled,
                    # better to recalculate them
                    me.calcNormals()

            # # Crash Blender
            #materials = me.getMaterials(1) # 1 == will return None in the list.
            materials = me.materials

            materialNames = []
            materialItems = materials[:]
            if materials:
                for mat in materials:
                    if mat:  # !=None
                        materialNames.append(mat.name)
                    else:
                        materialNames.append(None)
                # Can't use a list comprehension because some materials are None.
                # materialNames = map(lambda mat: mat.name, materials) # Blender bug, doesn't account for null materials, still broken.

            # Possibly there are null materials; this will mess up indices,
            # but at least it will export. Wait until Blender gets fixed.
            materialNames.extend((16 - len(materialNames)) * [None])
            materialItems.extend((16 - len(materialItems)) * [None])

            # Sort by material, then images,
            # so we don't over-context-switch in the output file.
            if faceuv:
                try:
                    faces.sort(key=lambda a: (a.mat, a.image, a.smooth))
                except:
                    faces.sort(lambda a, b: cmp((a.mat, a.image, a.smooth),
                                                (b.mat, b.image, b.smooth)))
            elif len(materials) > 1:
                try:
                    faces.sort(key=lambda a: (a.mat, a.smooth))
                except:
                    faces.sort(lambda a, b: cmp((a.mat, a.smooth),
                                                (b.mat, b.smooth)))
            else:
                # no materials
                try:
                    faces.sort(key=lambda a: a.smooth)
                except:
                    faces.sort(lambda a, b: cmp(a.smooth, b.smooth))

            # Set the default mat to no material and no image.
            # Can never be this, so we will label a new material the first chance we get.
            contextMat = (0, 0)
            # Will either be true or false; set to an invalid value to force an initialization switch.
            contextSmooth = None

            if len(faces) > 0:
                file.write('\t\t<mesh>\n')
            else:
                file.write('\t\t<curve>\n')

            vertname = "%s-Vertices" % obnamestring
            vertarrayname = "%s-Array" % vertname
            normname = "%s-Normals" % obnamestring
            normarrayname = "%s-Array" % normname
            texname = "%s-TexCoord" % obnamestring
            texarrayname = "%s-Array" % texname

            # Vert
            file.write('\t\t\t<float_array count="%d" id="%s">' %
                       (len(me.verts), vertarrayname))
            for v in me.verts:
                file.write(' %.6f %.6f %.6f' % tuple(v.co))
            file.write('</float_array>\n')
            file.write('\t\t\t<vertices id="%s" source="#%s" />\n' %
                       (vertname, vertarrayname))

            # UV
            if faceuv:
                file.write('\t\t\t<float_array id="%s">' % texarrayname)
                uv_face_mapping = [[0, 0, 0, 0] for f in faces
                                   ]  # a bit of a waste for tri's :/

                uv_dict = {}  # could use a set() here
                for f_index, f in enumerate(faces):

                    for uv_index, uv in enumerate(f.uv):
                        uvkey = veckey2d(uv)
                        try:
                            uv_face_mapping[f_index][uv_index] = uv_dict[uvkey]
                        except:
                            uv_face_mapping[f_index][uv_index] = uv_dict[
                                uvkey] = len(uv_dict)
                            file.write(' %.6f %.6f' % tuple(uv))

                uv_unique_count = len(uv_dict)
                del uv, uvkey, uv_dict, f_index, uv_index
                # Only need uv_unique_count and uv_face_mapping
                file.write('</float_array>\n')
                file.write('\t\t\t<texcoords id="%s" source="#%s" />\n' %
                           (texname, texarrayname))

            # NORMAL, Smooth/Non smoothed.
            if len(faces) > 0:
                file.write('\t\t\t<float_array id="%s">' % normarrayname)
                for f in faces:
                    if f.smooth:
                        for v in f:
                            noKey = veckey3d(v.no)
                            if not globalNormals.has_key(noKey):
                                globalNormals[noKey] = totno
                                totno += 1
                                file.write(' %.6f %.6f %.6f' % noKey)
                    else:
                        # Hard, 1 normal from the face.
                        noKey = veckey3d(f.no)
                        if not globalNormals.has_key(noKey):
                            globalNormals[noKey] = totno
                            totno += 1
                            file.write(' %.6f %.6f %.6f' % noKey)
                file.write('</float_array>\n')
                file.write('\t\t\t<normals id="%s" source="#%s" />\n' %
                           (normname, normarrayname))

            if not faceuv:
                f_image = None
            in_triangles = False

            for f_index, f in enumerate(faces):
                f_v = f.v
                f_smooth = f.smooth
                f_mat = min(f.mat, len(materialNames) - 1)
                if faceuv:
                    f_image = f.image
                    f_uv = f.uv

                # MAKE KEY
                if faceuv and f_image:  # Object is always true.
                    key = materialNames[f_mat], f_image.name
                else:
                    key = materialNames[
                        f_mat], None  # No image, use None instead.

                # CHECK FOR CONTEXT SWITCH
                if key == contextMat:
                    pass  # Context already switched, don't do anything
                else:
                    if key[0] == None and key[1] == None:
                        # Write a null material, since we know the context has changed.
                        if in_triangles:
                            file.write('</p>\n')
                            file.write('\t\t\t</triangles>\n')
                        file.write('\t\t\t<triangles id="%s_%s">\n' %
                                   (fixName(ob.name), fixName(ob.getData(1))))
                        in_triangles = True
                    else:
                        mat_data = MTL_DICT.get(key)
                        if not mat_data:
                            # First add to global dict so we can export to mtl
                            # Then write mtl

                            # Make a new name from the mat and image name,
                            # converting any spaces to underscores with fixName.

                            # If there is no image, don't bother adding it to the name
                            if key[1] == None:
                                mat_data = MTL_DICT[key] = ('%s' % fixName(
                                    key[0])), materialItems[f_mat], f_image
                            else:
                                mat_data = MTL_DICT[key] = (
                                    '%s_%s' %
                                    (fixName(key[0]), fixName(key[1]))
                                ), materialItems[f_mat], f_image
                        if in_triangles:
                            file.write('</p>\n')
                            file.write('\t\t\t</triangles>\n')
                        file.write(
                            '\t\t\t<triangles id="%s_%s_%s" material="#%s">\n'
                            % (fixName(ob.name), fixName(
                                ob.getData(1)), mat_data[0], mat_data[0]))
                        in_triangles = True

                    file.write(
                        '\t\t\t\t<input offset="0" semantic="VERTEX" source="#%s" />\n'
                        % vertname)
                    file.write(
                        '\t\t\t\t<input offset="1" semantic="NORMAL" source="#%s" />\n'
                        % normname)
                    if faceuv:
                        file.write(
                            '\t\t\t\t<input offset="2" semantic="TEXCOORD" source="#%s" />\n'
                            % texname)
                    file.write('\t\t\t\t<p>')

                contextMat = key
                if f_smooth != contextSmooth:
                    if f_smooth:  # was off, now on
                        # file.write('s 1\n')
                        contextSmooth = f_smooth
                    else:  # was on, now off
                        # file.write('s off\n')
                        contextSmooth = f_smooth

                if faceuv:
                    if f_smooth:  # Smoothed, use vertex normals
                        for vi, v in enumerate(f_v):
                            file.write( ' %d %d %d' % (\
                                v.index+totverts,\
                                totuvco + uv_face_mapping[f_index][vi],\
                                globalNormals[ veckey3d(v.no) ])) # vert, uv, normal

                    else:  # No smoothing, face normals
                        no = globalNormals[veckey3d(f.no)]
                        for vi, v in enumerate(f_v):
                            file.write( ' %d %d %d' % (\
                                v.index+totverts,\
                                totuvco + uv_face_mapping[f_index][vi],\
                                no)) # vert, uv, normal

                    face_vert_index += len(f_v)

                else:  # No UV's
                    if f_smooth:  # Smoothed, use vertex normals
                        for v in f_v:
                            file.write( ' %d %d' % (\
                                v.index+totverts,\
                                globalNormals[ veckey3d(v.no) ]))
                    else:  # No smoothing, face normals
                        no = globalNormals[veckey3d(f.no)]
                        for v in f_v:
                            file.write( ' %d %d' % (\
                                v.index+totverts,\
                                no))
            if in_triangles:
                file.write('</p>\n')
                file.write('\t\t\t</triangles>\n')

            # Write edges.
            LOOSE = Mesh.EdgeFlags.LOOSE
            has_edge = False
            for ed in edges:
                if ed.flag & LOOSE:
                    has_edge = True
            if has_edge:
                file.write('\t\t\t<edges>\n')
                file.write(
                    '\t\t\t\t<input offset="0" semantic="VERTEX" source="#%s" />\n'
                    % vertname)
                file.write('\t\t\t\t<p>')
                for ed in edges:
                    if ed.flag & LOOSE:
                        file.write(
                            ' %d %d' %
                            (ed.v1.index + totverts, ed.v2.index + totverts))
                file.write('</p>\n')
                file.write('\t\t\t</edges>\n')

            # Make the indices global rather than per mesh
            # totverts += len(me.verts)
            # if faceuv:
            #     totuvco += uv_unique_count
            me.verts = None
            if len(faces) > 0:
                file.write('\t\t</mesh>\n')
            else:
                file.write('\t\t</curve>\n')
        file.write('\t</object>\n')
    file.write('</library_objects>\n\n')

    # Now we have all our materials, save them
    if EXPORT_MTL:
        write_library_materials(file)

    # Save the groups
    write_library_groups(file)

    file.write('</OPEN_TRACK>\n')
    file.close()

    if EXPORT_COPY_IMAGES:
        dest_dir = filename
        # Remove chars until we are just the path.
        while dest_dir and dest_dir[-1] not in '\\/':
            dest_dir = dest_dir[:-1]
        if dest_dir:
            copy_images(dest_dir)
        else:
            print '\tError: "%s" could not be used as a base for an image path.' % filename

    print "WTF Export time: %.2f" % (sys.time() - time1)
Example #53
0
def write(filename, objects,\
EXPORT_NORMALS_HQ=False,\
EXPORT_MTL=True,  EXPORT_COPY_IMAGES=False,\
EXPORT_APPLY_MODIFIERS=True, EXPORT_BLEN_OBS=True,\
EXPORT_GROUP_BY_OB=False):
    '''
    Basic write function. The context and options must be already set.
    This can be accessed externally,
    e.g.
    write( 'c:\\test\\foobar.obj', Blender.Object.GetSelected() ) # Using default options.
    '''
    
    def veckey3d(v):
        return round(v.x, 6), round(v.y, 6), round(v.z, 6)
        
    def veckey2d(v):
        return round(v.x, 6), round(v.y, 6)
    
    print 'WTF Export path: "%s"' % filename
    temp_mesh_name = '~tmp-mesh'

    time1 = sys.time()
    scn = Scene.GetCurrent()

    file = open(filename, "w")
    file.write('<?xml version="1.0"?>\n')
    file.write('<OPEN_TRACK>\n')

    # Write Header
    # file.write('\n<!--\n'
    #            + '  Blender3D v%s WTF File: %s\n' % (Blender.Get('version'), Blender.Get('filename').split('/')[-1].split('\\')[-1] )
    #            + '  www.blender3d.org\n'
    #            + '-->\n\n')

    # Get the container mesh. - used for applying modifiers and non mesh objects.
    containerMesh = meshName = tempMesh = None
    for meshName in Blender.NMesh.GetNames():
        if meshName.startswith(temp_mesh_name):
            tempMesh = Mesh.Get(meshName)
            if not tempMesh.users:
                containerMesh = tempMesh
    if not containerMesh:
        containerMesh = Mesh.New(temp_mesh_name)
    
    del meshName
    del tempMesh
    
    # Initialize totals, these are updated each object
    totverts = totuvco = totno = 0
    
    face_vert_index = 0
    
    globalNormals = {}
    
    file.write('\n<library_objects>\n')
    # Get all meshes
    for ob_main in objects:
        obnamestring = fixName(ob_main.name)
        file.write('\t<object id="%s">\n' % obnamestring) # Write Object name

        for ob, ob_mat in BPyObject.getDerivedObjects(ob_main):
            # Will work for non meshes now! :)
            # getMeshFromObject(ob, container_mesh=None, apply_modifiers=True, vgroups=True, scn=None)
            me = BPyMesh.getMeshFromObject(ob, containerMesh, EXPORT_APPLY_MODIFIERS, False, scn)
            if not me:
                file.write('\t\t<loc>%.6f %.6f %.6f</loc>\n' % tuple(ob_main.loc)) # Write object location
                file.write('\t\t<rot>%.6f %.6f %.6f</rot>\n' % tuple(ob_main.rot)) # Write object rotation
                continue
            
            faceuv = me.faceUV
            
            # We have a valid mesh
            if me.faces:
                # Add a dummy object to it.
                has_quads = False
                for f in me.faces:
                    if len(f) == 4:
                        has_quads = True
                        break
                
                if has_quads:
                    oldmode = Mesh.Mode()
                    Mesh.Mode(Mesh.SelectModes['FACE'])
                    
                    me.sel = True
                    tempob = scn.objects.new(me)
                    me.quadToTriangle(0) # more=0 shortest length
                    oldmode = Mesh.Mode(oldmode)
                    scn.objects.unlink(tempob)
                    
                    Mesh.Mode(oldmode)
            
            # Make our own list so it can be sorted to reduce context switching
            faces = [ f for f in me.faces ]
            edges = me.edges
            
            if not (len(faces)+len(edges)+len(me.verts)): # Make sure there is something to write
                continue # don't bother with this mesh.
            
            me.transform(ob_mat)
            
            # High Quality Normals
            if faces:
                if EXPORT_NORMALS_HQ:
                    BPyMesh.meshCalcNormals(me)
                else:
                    # transforming normals is incorrect
                    # when the matrix is scaled,
                    # better to recalculate them
                    me.calcNormals()
            
            # # Crash Blender
            #materials = me.getMaterials(1) # 1 == will return None in the list.
            materials = me.materials
            
            materialNames = []
            materialItems = materials[:]
            if materials:
                for mat in materials:
                    if mat: # !=None
                        materialNames.append(mat.name)
                    else:
                        materialNames.append(None)
                # Can't use a list comprehension because some materials are None.
                # materialNames = map(lambda mat: mat.name, materials) # Blender bug, doesn't account for null materials, still broken.
            
            # Possibly there are null materials; this will mess up indices,
            # but at least it will export. Wait until Blender gets fixed.
            materialNames.extend((16-len(materialNames)) * [None])
            materialItems.extend((16-len(materialItems)) * [None])
            
            # Sort by material, then images,
            # so we don't over-context-switch in the output file.
            if faceuv:
                try:    faces.sort(key = lambda a: (a.mat, a.image, a.smooth))
                except: faces.sort(lambda a,b: cmp((a.mat, a.image, a.smooth), (b.mat, b.image, b.smooth)))
            elif len(materials) > 1:
                try:    faces.sort(key = lambda a: (a.mat, a.smooth))
                except: faces.sort(lambda a,b: cmp((a.mat, a.smooth), (b.mat, b.smooth)))
            else:
                # no materials
                try:    faces.sort(key = lambda a: a.smooth)
                except: faces.sort(lambda a,b: cmp(a.smooth, b.smooth))
            
            # Set the default mat to no material and no image.
            contextMat = (0, 0) # Can never be this, so we will label a new material the first chance we get.
            contextSmooth = None # Will either be true or false; set to an invalid value to force an initialization switch.
            
            if len(faces) > 0:
                file.write('\t\t<mesh>\n')
            else:
                file.write('\t\t<curve>\n')

            vertname = "%s-Vertices" % obnamestring
            vertarrayname = "%s-Array" % vertname
            normname = "%s-Normals" % obnamestring
            normarrayname = "%s-Array" % normname
            texname = "%s-TexCoord" % obnamestring
            texarrayname = "%s-Array" % texname
            
            # Vert
            file.write('\t\t\t<float_array count="%d" id="%s">' % (len(me.verts), vertarrayname))
            for v in me.verts:
                file.write(' %.6f %.6f %.6f' % tuple(v.co))
            file.write('</float_array>\n')
            file.write('\t\t\t<vertices id="%s" source="#%s" />\n' % (vertname, vertarrayname))
            
            # UV
            if faceuv:
                file.write('\t\t\t<float_array id="%s">' % texarrayname)
                uv_face_mapping = [[0,0,0,0] for f in faces] # a bit of a waste for tri's :/
                
                uv_dict = {} # could use a set() here
                for f_index, f in enumerate(faces):
                    
                    for uv_index, uv in enumerate(f.uv):
                        uvkey = veckey2d(uv)
                        try:
                            uv_face_mapping[f_index][uv_index] = uv_dict[uvkey]
                        except:
                            uv_face_mapping[f_index][uv_index] = uv_dict[uvkey] = len(uv_dict)
                            file.write(' %.6f %.6f' % tuple(uv))
                
                uv_unique_count = len(uv_dict)
                del uv, uvkey, uv_dict, f_index, uv_index
                # Only need uv_unique_count and uv_face_mapping
                file.write('</float_array>\n')
                file.write('\t\t\t<texcoords id="%s" source="#%s" />\n' % (texname, texarrayname))
            
            # NORMAL, Smooth/Non smoothed.
            if len(faces) > 0:
                file.write('\t\t\t<float_array id="%s">' % normarrayname)
                for f in faces:
                    if f.smooth:
                        for v in f:
                            noKey = veckey3d(v.no)
                            if not globalNormals.has_key( noKey ):
                                globalNormals[noKey] = totno
                                totno +=1
                                file.write(' %.6f %.6f %.6f' % noKey)
                    else:
                        # Hard, 1 normal from the face.
                        noKey = veckey3d(f.no)
                        if not globalNormals.has_key( noKey ):
                            globalNormals[noKey] = totno
                            totno +=1
                            file.write(' %.6f %.6f %.6f' % noKey)
                file.write('</float_array>\n')
                file.write('\t\t\t<normals id="%s" source="#%s" />\n' % (normname, normarrayname))
            
            if not faceuv:
                f_image = None
            in_triangles = False
            
            for f_index, f in enumerate(faces):
                f_v= f.v
                f_smooth= f.smooth
                f_mat = min(f.mat, len(materialNames)-1)
                if faceuv:
                    f_image = f.image
                    f_uv= f.uv
                
                # MAKE KEY
                if faceuv and f_image: # Object is always true.
                    key = materialNames[f_mat],  f_image.name
                else:
                    key = materialNames[f_mat],  None # No image, use None instead.
                
                # CHECK FOR CONTEXT SWITCH
                if key == contextMat:
                    pass # Context already switched, don't do anything
                else:
                    if key[0] == None and key[1] == None:
                        # Write a null material, since we know the context has changed.
                        if in_triangles:
                            file.write('</p>\n')
                            file.write('\t\t\t</triangles>\n')
                        file.write('\t\t\t<triangles id="%s_%s">\n' % (fixName(ob.name), fixName(ob.getData(1))))
                        in_triangles = True
                    else:
                        mat_data= MTL_DICT.get(key)
                        if not mat_data:
                            # First add to global dict so we can export to mtl
                            # Then write mtl
                            
                            # Make a new name from the mat and image name,
                            # converting any spaces to underscores with fixName.

                            # If there is no image, don't bother adding it to the name
                            if key[1] == None:
                                mat_data = MTL_DICT[key] = ('%s'%fixName(key[0])), materialItems[f_mat], f_image
                            else:
                                mat_data = MTL_DICT[key] = ('%s_%s' % (fixName(key[0]), fixName(key[1]))), materialItems[f_mat], f_image
                        if in_triangles:
                            file.write('</p>\n')
                            file.write('\t\t\t</triangles>\n')
                        file.write('\t\t\t<triangles id="%s_%s_%s" material="#%s">\n' %
                                   (fixName(ob.name), fixName(ob.getData(1)), mat_data[0], mat_data[0]) )
                        in_triangles = True

                    file.write('\t\t\t\t<input offset="0" semantic="VERTEX" source="#%s" />\n' % vertname)
                    file.write('\t\t\t\t<input offset="1" semantic="NORMAL" source="#%s" />\n' % normname)
                    if faceuv:
                        file.write('\t\t\t\t<input offset="2" semantic="TEXCOORD" source="#%s" />\n' % texname)
                    file.write('\t\t\t\t<p>')
                    
                contextMat = key
                if f_smooth != contextSmooth:
                    if f_smooth: # was off, now on
                        # file.write('s 1\n')
                        contextSmooth = f_smooth
                    else: # was on, now off
                        # file.write('s off\n')
                        contextSmooth = f_smooth
                
                if faceuv:
                    if f_smooth: # Smoothed, use vertex normals
                        for vi, v in enumerate(f_v):
                            file.write( ' %d %d %d' % (\
                                v.index+totverts,\
                                totuvco + uv_face_mapping[f_index][vi],\
                                globalNormals[ veckey3d(v.no) ])) # vert, uv, normal
                        
                    else: # No smoothing, face normals
                        no = globalNormals[ veckey3d(f.no) ]
                        for vi, v in enumerate(f_v):
                            file.write( ' %d %d %d' % (\
                                v.index+totverts,\
                                totuvco + uv_face_mapping[f_index][vi],\
                                no)) # vert, uv, normal
                    
                    face_vert_index += len(f_v)
                
                else: # No UV's
                    if f_smooth: # Smoothed, use vertex normals
                        for v in f_v:
                            file.write( ' %d %d' % (\
                                v.index+totverts,\
                                globalNormals[ veckey3d(v.no) ]))
                    else: # No smoothing, face normals
                        no = globalNormals[ veckey3d(f.no) ]
                        for v in f_v:
                            file.write( ' %d %d' % (\
                                v.index+totverts,\
                                no))
            if in_triangles:
                file.write('</p>\n')
                file.write('\t\t\t</triangles>\n')
            
            # Write edges.
            LOOSE = Mesh.EdgeFlags.LOOSE
            has_edge = False
            for ed in edges:
                if ed.flag & LOOSE:
                    has_edge = True
            if has_edge:
                file.write('\t\t\t<edges>\n')
                file.write('\t\t\t\t<input offset="0" semantic="VERTEX" source="#%s" />\n' % vertname)
                file.write('\t\t\t\t<p>')
                for ed in edges:
                    if ed.flag & LOOSE:
                        file.write(' %d %d' % (ed.v1.index+totverts, ed.v2.index+totverts))
                file.write('</p>\n')
                file.write('\t\t\t</edges>\n')
                
            # Make the indices global rather than per mesh
            # totverts += len(me.verts)
            # if faceuv:
            #     totuvco += uv_unique_count
            me.verts= None
            if len(faces) > 0:
                file.write('\t\t</mesh>\n')
            else:
                file.write('\t\t</curve>\n')
        file.write('\t</object>\n')
    file.write('</library_objects>\n\n')
    
    # Now we have all our materials, save them
    if EXPORT_MTL:
        write_library_materials(file)

    # Save the groups
    write_library_groups(file)

    file.write('</OPEN_TRACK>\n')
    file.close()

    if EXPORT_COPY_IMAGES:
        dest_dir = filename
        # Remove chars until we are just the path.
        while dest_dir and dest_dir[-1] not in '\\/':
            dest_dir = dest_dir[:-1]
        if dest_dir:
            copy_images(dest_dir)
        else:
            print '\tError: "%s" could not be used as a base for an image path.' % filename
    
    print "WTF Export time: %.2f" % (sys.time() - time1)
Example #54
0
def write(filename, objects,\
EXPORT_TRI=False,  EXPORT_EDGES=False,  EXPORT_NORMALS=False,  EXPORT_NORMALS_HQ=False,\
EXPORT_UV=True,  EXPORT_MTL=True,  EXPORT_COPY_IMAGES=False,\
EXPORT_APPLY_MODIFIERS=True, EXPORT_ROTX90=True, EXPORT_BLEN_OBS=True,\
EXPORT_GROUP_BY_OB=False,  EXPORT_GROUP_BY_MAT=False, EXPORT_KEEP_VERT_ORDER=False,\
EXPORT_POLYGROUPS=False, EXPORT_CURVE_AS_NURBS=True):
	'''
	Basic write function. The context and options must be already set.
	This can be accessed externally,
	e.g.
	write( 'c:\\test\\foobar.obj', Blender.Object.GetSelected() ) # Using default options.
	'''
	
	def veckey3d(v):
		return round(v.x, 6), round(v.y, 6), round(v.z, 6)
		
	def veckey2d(v):
		return round(v.x, 6), round(v.y, 6)
	
	def findVertexGroupName(face, vWeightMap):
		"""
		Searches the vertexDict to see which groups are assigned to a given face.
		We use a frequency system in order to sort out the name, because a given vertex can
		belong to two or more groups at the same time. To find the right name for the face
		we list all the possible vertex group names with their frequency and then sort by
		frequency in descending order. The top element, the one shared by the highest number
		of vertices, is the face's group.
		"""
		weightDict = {}
		for vert in face:
			vWeights = vWeightMap[vert.index]
			for vGroupName, weight in vWeights:
				weightDict[vGroupName] = weightDict.get(vGroupName, 0) + weight
		
		if weightDict:
			alist = [(weight,vGroupName) for vGroupName, weight in weightDict.iteritems()] # sort least to greatest amount of weight
			alist.sort()
			return(alist[-1][1]) # highest value last
		else:
			return '(null)'


	print 'OBJ Export path: "%s"' % filename
	temp_mesh_name = '~tmp-mesh'

	time1 = sys.time()
	scn = Scene.GetCurrent()

	file = open(filename, "w")
	
	# Write Header
	file.write('# Blender3D v%s OBJ File: %s\n' % (Blender.Get('version'), Blender.Get('filename').split('/')[-1].split('\\')[-1] ))
	file.write('# www.blender3d.org\n')

	# Tell the obj file what material file to use.
	if EXPORT_MTL:
		mtlfilename = '%s.mtl' % '.'.join(filename.split('.')[:-1])
		file.write('mtllib %s\n' % ( mtlfilename.split('\\')[-1].split('/')[-1] ))
	
	# Get the container mesh. - used for applying modifiers and non mesh objects.
	containerMesh = meshName = tempMesh = None
	for meshName in Blender.NMesh.GetNames():
		if meshName.startswith(temp_mesh_name):
			tempMesh = Mesh.Get(meshName)
			if not tempMesh.users:
				containerMesh = tempMesh
	if not containerMesh:
		containerMesh = Mesh.New(temp_mesh_name)
	
	if EXPORT_ROTX90:
		mat_xrot90= Blender.Mathutils.RotationMatrix(-90, 4, 'x')
		
	del meshName
	del tempMesh
	
	# Initialize totals, these are updated each object
	totverts = totuvco = totno = 1
	
	face_vert_index = 1
	
	globalNormals = {}
	
	# Get all meshes
	for ob_main in objects:
		for ob, ob_mat in BPyObject.getDerivedObjects(ob_main):
			
			# Nurbs curve support
			if EXPORT_CURVE_AS_NURBS and test_nurbs_compat(ob):
				if EXPORT_ROTX90:
					ob_mat = ob_mat * mat_xrot90
				
				totverts += write_nurb(file, ob, ob_mat)
				
				continue
			# end nurbs
			
			# Will work for non meshes now! :)
			# getMeshFromObject(ob, container_mesh=None, apply_modifiers=True, vgroups=True, scn=None)
			me= BPyMesh.getMeshFromObject(ob, containerMesh, EXPORT_APPLY_MODIFIERS, EXPORT_POLYGROUPS, scn)
			if not me:
				continue
			
			if EXPORT_UV:
				faceuv= me.faceUV
			else:
				faceuv = False
			
			# We have a valid mesh
			if EXPORT_TRI and me.faces:
				# Add a dummy object to it.
				has_quads = False
				for f in me.faces:
					if len(f) == 4:
						has_quads = True
						break
				
				if has_quads:
					oldmode = Mesh.Mode()
					Mesh.Mode(Mesh.SelectModes['FACE'])
					
					me.sel = True
					tempob = scn.objects.new(me)
					me.quadToTriangle(0) # more=0 shortest length
					oldmode = Mesh.Mode(oldmode)
					scn.objects.unlink(tempob)
					
					Mesh.Mode(oldmode)
			
			# Make our own list so it can be sorted to reduce context switching
			faces = [ f for f in me.faces ]
			
			if EXPORT_EDGES:
				edges = me.edges
			else:
				edges = []
			
			if not (len(faces)+len(edges)+len(me.verts)): # Make sure there is something to write
				continue # don't bother with this mesh.
			
			if EXPORT_ROTX90:
				me.transform(ob_mat*mat_xrot90)
			else:
				me.transform(ob_mat)
			
			# High Quality Normals
			if EXPORT_NORMALS and faces:
				if EXPORT_NORMALS_HQ:
					BPyMesh.meshCalcNormals(me)
				else:
					# transforming normals is incorrect
					# when the matrix is scaled,
					# better to recalculate them
					me.calcNormals()
			
			# # Crash Blender
			#materials = me.getMaterials(1) # 1 == will return None in the list.
			materials = me.materials
			
			materialNames = []
			materialItems = materials[:]
			if materials:
				for mat in materials:
					if mat: # !=None
						materialNames.append(mat.name)
					else:
						materialNames.append(None)
				# Can't use a list comprehension because some materials are None.
				# materialNames = map(lambda mat: mat.name, materials) # Blender bug, doesn't account for null materials, still broken.
			
			# Possibly there are null materials; this will mess up indices,
			# but at least it will export. Wait until Blender gets fixed.
			materialNames.extend((16-len(materialNames)) * [None])
			materialItems.extend((16-len(materialItems)) * [None])
			
			# Sort by material, then images,
			# so we don't over-context-switch in the obj file.
			if EXPORT_KEEP_VERT_ORDER:
				pass
			elif faceuv:
				try:	faces.sort(key = lambda a: (a.mat, a.image, a.smooth))
				except:	faces.sort(lambda a,b: cmp((a.mat, a.image, a.smooth), (b.mat, b.image, b.smooth)))
			elif len(materials) > 1:
				try:	faces.sort(key = lambda a: (a.mat, a.smooth))
				except:	faces.sort(lambda a,b: cmp((a.mat, a.smooth), (b.mat, b.smooth)))
			else:
				# no materials
				try:	faces.sort(key = lambda a: a.smooth)
				except:	faces.sort(lambda a,b: cmp(a.smooth, b.smooth))
			
			# Set the default mat to no material and no image.
			contextMat = (0, 0) # Can never be this, so we will label a new material the first chance we get.
			contextSmooth = None # Will either be true or false; set to an invalid value to force an initialization switch.
			
			if EXPORT_BLEN_OBS or EXPORT_GROUP_BY_OB:
				name1 = ob.name
				name2 = ob.getData(1)
				if name1 == name2:
					obnamestring = fixName(name1)
				else:
					obnamestring = '%s_%s' % (fixName(name1), fixName(name2))
				
				if EXPORT_BLEN_OBS:
					file.write('o %s\n' % obnamestring) # Write Object name
				else: # if EXPORT_GROUP_BY_OB:
					file.write('g %s\n' % obnamestring)
			
			
			# Vert
			for v in me.verts:
				file.write('v %.6f %.6f %.6f\n' % tuple(v.co))
			
			# UV
			if faceuv:
				uv_face_mapping = [[0,0,0,0] for f in faces] # a bit of a waste for tri's :/
				
				uv_dict = {} # could use a set() here
				for f_index, f in enumerate(faces):
					
					for uv_index, uv in enumerate(f.uv):
						uvkey = veckey2d(uv)
						try:
							uv_face_mapping[f_index][uv_index] = uv_dict[uvkey]
						except:
							uv_face_mapping[f_index][uv_index] = uv_dict[uvkey] = len(uv_dict)
							file.write('vt %.6f %.6f\n' % tuple(uv))
				
				uv_unique_count = len(uv_dict)
				del uv, uvkey, uv_dict, f_index, uv_index
				# Only need uv_unique_count and uv_face_mapping
			
			# NORMAL, Smooth/Non smoothed.
			if EXPORT_NORMALS:
				for f in faces:
					if f.smooth:
						for v in f:
							noKey = veckey3d(v.no)
							if not globalNormals.has_key( noKey ):
								globalNormals[noKey] = totno
								totno +=1
								file.write('vn %.6f %.6f %.6f\n' % noKey)
					else:
						# Hard, 1 normal from the face.
						noKey = veckey3d(f.no)
						if not globalNormals.has_key( noKey ):
							globalNormals[noKey] = totno
							totno +=1
							file.write('vn %.6f %.6f %.6f\n' % noKey)
			
			if not faceuv:
				f_image = None
			
			if EXPORT_POLYGROUPS:
				# Retrieve the list of vertex groups
				vertGroupNames = me.getVertGroupNames()

				currentVGroup = ''
				# Create a dictionary keyed by face id and listing, for each vertex, the vertex groups it belongs to
				vgroupsMap = [[] for _i in xrange(len(me.verts))]
				for vertexGroupName in vertGroupNames:
					for vIdx, vWeight in me.getVertsFromGroup(vertexGroupName, 1):
						vgroupsMap[vIdx].append((vertexGroupName, vWeight))

			for f_index, f in enumerate(faces):
				f_v= f.v
				f_smooth= f.smooth
				f_mat = min(f.mat, len(materialNames)-1)
				if faceuv:
					f_image = f.image
					f_uv= f.uv
				
				# MAKE KEY
				if faceuv and f_image: # Object is always true.
					key = materialNames[f_mat],  f_image.name
				else:
					key = materialNames[f_mat],  None # No image, use None instead.
				
				# Write the vertex group
				if EXPORT_POLYGROUPS:
					if vertGroupNames:
						# find what vertex group the face belongs to
						theVGroup = findVertexGroupName(f,vgroupsMap)
						if	theVGroup != currentVGroup:
							currentVGroup = theVGroup
							file.write('g %s\n' % theVGroup)

				# CHECK FOR CONTEXT SWITCH
				if key == contextMat:
					pass # Context already switched, don't do anything
				else:
					if key[0] == None and key[1] == None:
						# Write a null material, since we know the context has changed.
						if EXPORT_GROUP_BY_MAT:
							file.write('g %s_%s\n' % (fixName(ob.name), fixName(ob.getData(1))) ) # can be mat_image or (null)
						file.write('usemtl (null)\n') # mat, image
						
					else:
						mat_data= MTL_DICT.get(key)
						if not mat_data:
							# First add to global dict so we can export to mtl
							# Then write mtl
							
							# Make a new name from the mat and image name,
							# converting any spaces to underscores with fixName.

							# If there is no image, don't bother adding it to the name
							if key[1] == None:
								mat_data = MTL_DICT[key] = ('%s'%fixName(key[0])), materialItems[f_mat], f_image
							else:
								mat_data = MTL_DICT[key] = ('%s_%s' % (fixName(key[0]), fixName(key[1]))), materialItems[f_mat], f_image
						
						if EXPORT_GROUP_BY_MAT:
							file.write('g %s_%s_%s\n' % (fixName(ob.name), fixName(ob.getData(1)), mat_data[0]) ) # can be mat_image or (null)

						file.write('usemtl %s\n' % mat_data[0]) # can be mat_image or (null)
					
				contextMat = key
				if f_smooth != contextSmooth:
					if f_smooth: # was off, now on
						file.write('s 1\n')
						contextSmooth = f_smooth
					else: # was on, now off
						file.write('s off\n')
						contextSmooth = f_smooth
				
				file.write('f')
				if faceuv:
					if EXPORT_NORMALS:
						if f_smooth: # Smoothed, use vertex normals
							for vi, v in enumerate(f_v):
								file.write( ' %d/%d/%d' % (\
								  v.index+totverts,\
								  totuvco + uv_face_mapping[f_index][vi],\
								  globalNormals[ veckey3d(v.no) ])) # vert, uv, normal
							
						else: # No smoothing, face normals
							no = globalNormals[ veckey3d(f.no) ]
							for vi, v in enumerate(f_v):
								file.write( ' %d/%d/%d' % (\
								  v.index+totverts,\
								  totuvco + uv_face_mapping[f_index][vi],\
								  no)) # vert, uv, normal
					
					else: # No Normals
						for vi, v in enumerate(f_v):
							file.write( ' %d/%d' % (\
							  v.index+totverts,\
							  totuvco + uv_face_mapping[f_index][vi])) # vert, uv
					
					face_vert_index += len(f_v)
				
				else: # No UV's
					if EXPORT_NORMALS:
						if f_smooth: # Smoothed, use vertex normals
							for v in f_v:
								file.write( ' %d//%d' % (\
								  v.index+totverts,\
								  globalNormals[ veckey3d(v.no) ]))
						else: # No smoothing, face normals
							no = globalNormals[ veckey3d(f.no) ]
							for v in f_v:
								file.write( ' %d//%d' % (\
								  v.index+totverts,\
								  no))
					else: # No Normals
						for v in f_v:
							file.write( ' %d' % (\
							  v.index+totverts))
						
				file.write('\n')
			
			# Write edges.
			if EXPORT_EDGES:
				LOOSE= Mesh.EdgeFlags.LOOSE
				for ed in edges:
					if ed.flag & LOOSE:
						file.write('f %d %d\n' % (ed.v1.index+totverts, ed.v2.index+totverts))
				
			# Make the indices global rather than per mesh
			totverts += len(me.verts)
			if faceuv:
				totuvco += uv_unique_count
			me.verts= None
	file.close()
	
	
	# Now we have all our materials, save them
	if EXPORT_MTL:
		write_mtl(mtlfilename)
	if EXPORT_COPY_IMAGES:
		dest_dir = filename
		# Remove chars until we are just the path.
		while dest_dir and dest_dir[-1] not in '\\/':
			dest_dir = dest_dir[:-1]
		if dest_dir:
			copy_images(dest_dir)
		else:
			print '\tError: "%s" could not be used as a base for an image path.' % filename
	
	print "OBJ Export time: %.2f" % (sys.time() - time1)
Example #55
0
def export(self, scene, world, alltextures,\
        EXPORT_APPLY_MODIFIERS = False,\
        EXPORT_TRI=             False,\
    ):

    print "Info: starting X3D export to " + self.filename + "..."
    self.writeHeader()
    # self.writeScript()
    self.writeNavigationInfo(scene)
    self.writeBackground(world, alltextures)
    self.writeFog(world)
    self.proto = 0


    # COPIED FROM OBJ EXPORTER
    if EXPORT_APPLY_MODIFIERS:
        temp_mesh_name = '~tmp-mesh'

        # Get the container mesh. - used for applying modifiers and non mesh objects.
        containerMesh = meshName = tempMesh = None
        for meshName in Blender.NMesh.GetNames():
            if meshName.startswith(temp_mesh_name):
                tempMesh = Mesh.Get(meshName)
                if not tempMesh.users:
                    containerMesh = tempMesh
        if not containerMesh:
            containerMesh = Mesh.New(temp_mesh_name)
    # --------------------------


    for ob_main in scene.objects.context:
        for ob, ob_mat in BPyObject.getDerivedObjects(ob_main):
            objType = ob.type
            objName = ob.name
            self.matonly = 0
            if objType == "Camera":
                self.writeViewpoint(ob, ob_mat, scene)
            elif objType in ("Mesh", "Curve", "Surf", "Text"):
                if EXPORT_APPLY_MODIFIERS or objType != 'Mesh':
                    me = BPyMesh.getMeshFromObject(ob, containerMesh, EXPORT_APPLY_MODIFIERS, False, scene)
                else:
                    me = ob.getData(mesh=1)

                self.writeIndexedFaceSet(ob, me, ob_mat, world, EXPORT_TRI = EXPORT_TRI)
            elif objType == "Lamp":
                data = ob.data
                datatype = data.type
                if datatype == Lamp.Types.Lamp:
                    self.writePointLight(ob, ob_mat, data, world)
                elif datatype == Lamp.Types.Spot:
                    self.writeSpotLight(ob, ob_mat, data, world)
                elif datatype == Lamp.Types.Sun:
                    self.writeDirectionalLight(ob, ob_mat, data, world)
                else:
                    self.writeDirectionalLight(ob, ob_mat, data, world)
            # do you think x3d could document what to do with dummy objects?
            #elif objType == "Empty" and objName != "Empty":
            #    self.writeNode(ob, ob_mat)
            else:
                #print "Info: Ignoring [%s], object type [%s] not handled yet" % (object.name, object.getType)
                pass

    self.file.write("\n</Scene>\n</X3D>")

    if EXPORT_APPLY_MODIFIERS:
        if containerMesh:
            containerMesh.verts = None

    self.cleanup()
def file_callback(filename):

    if not filename.lower().endswith('.ctm'):
        filename += '.ctm'

    # Get object mesh from the selected object
    scn = bpy.data.scenes.active
    ob = scn.objects.active
    if not ob:
        Blender.Draw.PupMenu('Error%t|Select 1 active object')
        return
    mesh = BPyMesh.getMeshFromObject(ob, None, False, False, scn)
    if not mesh:
        Blender.Draw.PupMenu(
            'Error%t|Could not get mesh data from active object')
        return

    # Check which mesh properties are present...
    hasVertexUV = mesh.vertexUV or mesh.faceUV
    hasVertexColors = mesh.vertexColors

    # Show a GUI for the export settings
    pupBlock = []
    EXPORT_APPLY_MODIFIERS = Draw.Create(1)
    pupBlock.append(('Apply Modifiers', EXPORT_APPLY_MODIFIERS,
                     'Use transformed mesh data.'))
    EXPORT_NORMALS = Draw.Create(1)
    pupBlock.append(('Normals', EXPORT_NORMALS, 'Export vertex normal data.'))
    if hasVertexUV:
        EXPORT_UV = Draw.Create(1)
        pupBlock.append(('UVs', EXPORT_UV, 'Export texface UV coords.'))
    if hasVertexColors:
        EXPORT_COLORS = Draw.Create(1)
        pupBlock.append(('Colors', EXPORT_COLORS, 'Export vertex Colors.'))
    EXPORT_MG2 = Draw.Create(0)
    pupBlock.append(
        ('Fixed Point', EXPORT_MG2,
         'Use limited precision algorithm (MG2 method = better compression).'))
    if not Draw.PupBlock('Export...', pupBlock):
        return

    # Adjust export settings according to GUI selections
    EXPORT_APPLY_MODIFIERS = EXPORT_APPLY_MODIFIERS.val
    EXPORT_NORMALS = EXPORT_NORMALS.val
    if hasVertexUV:
        EXPORT_UV = EXPORT_UV.val
    else:
        EXPORT_UV = False
    if hasVertexColors:
        EXPORT_COLORS = EXPORT_COLORS.val
    else:
        EXPORT_COLORS = False
    EXPORT_MG2 = EXPORT_MG2.val

    # If the user wants to export MG2, then show another GUI...
    if EXPORT_MG2:
        pupBlock = []
        EXPORT_VPREC = Draw.Create(0.01)
        pupBlock.append(('Vertex', EXPORT_VPREC, 0.0001, 1.0,
                         'Relative vertex precision (fixed point).'))
        if EXPORT_NORMALS:
            EXPORT_NPREC = Draw.Create(1.0 / 256.0)
            pupBlock.append(('Normal', EXPORT_NPREC, 0.0001, 1.0,
                             'Normal precision (fixed point).'))
        if EXPORT_UV:
            EXPORT_UVPREC = Draw.Create(1.0 / 1024.0)
            pupBlock.append(('UV', EXPORT_UVPREC, 0.0001, 1.0,
                             'UV precision (fixed point).'))
        if EXPORT_COLORS:
            EXPORT_CPREC = Draw.Create(1.0 / 256.0)
            pupBlock.append(('Color', EXPORT_CPREC, 0.0001, 1.0,
                             'Color precision (fixed point).'))
        if not Draw.PupBlock('Fixed point precision...', pupBlock):
            return

    # Adjust export settings according to GUI selections
    if EXPORT_MG2:
        EXPORT_VPREC = EXPORT_VPREC.val
    else:
        EXPORT_VPREC = 0.1
    if EXPORT_MG2 and EXPORT_NORMALS:
        EXPORT_NPREC = EXPORT_NPREC.val
    else:
        EXPORT_NPREC = 0.1
    if EXPORT_MG2 and EXPORT_UV:
        EXPORT_UVPREC = EXPORT_UVPREC.val
    else:
        EXPORT_UVPREC = 0.1
    if EXPORT_MG2 and EXPORT_COLORS:
        EXPORT_CPREC = EXPORT_CPREC.val
    else:
        EXPORT_CPREC = 0.1

    is_editmode = Blender.Window.EditMode()
    if is_editmode:
        Blender.Window.EditMode(0, '', 0)
    Window.WaitCursor(1)
    try:
        # Get the mesh, again, if we wanted modifiers (from GUI selection)
        if EXPORT_APPLY_MODIFIERS:
            mesh = BPyMesh.getMeshFromObject(ob, None, EXPORT_APPLY_MODIFIERS,
                                             False, scn)
            if not mesh:
                Blender.Draw.PupMenu(
                    'Error%t|Could not get mesh data from active object')
                return
            mesh.transform(ob.matrixWorld, True)

        # Count triangles (quads count as two triangles)
        triangleCount = 0
        for f in mesh.faces:
            if len(f.v) == 4:
                triangleCount += 2
            else:
                triangleCount += 1

        # Extract indices from the Blender mesh (quads are split into two triangles)
        pindices = cast((c_int * 3 * triangleCount)(), POINTER(c_int))
        i = 0
        for f in mesh.faces:
            pindices[i] = c_int(f.v[0].index)
            pindices[i + 1] = c_int(f.v[1].index)
            pindices[i + 2] = c_int(f.v[2].index)
            i += 3
            if len(f.v) == 4:
                pindices[i] = c_int(f.v[0].index)
                pindices[i + 1] = c_int(f.v[2].index)
                pindices[i + 2] = c_int(f.v[3].index)
                i += 3

        # Extract vertex array from the Blender mesh
        vertexCount = len(mesh.verts)
        pvertices = cast((c_float * 3 * vertexCount)(), POINTER(c_float))
        i = 0
        for v in mesh.verts:
            pvertices[i] = c_float(v.co.x)
            pvertices[i + 1] = c_float(v.co.y)
            pvertices[i + 2] = c_float(v.co.z)
            i += 3

        # Extract normals
        if EXPORT_NORMALS:
            pnormals = cast((c_float * 3 * vertexCount)(), POINTER(c_float))
            i = 0
            for v in mesh.verts:
                pnormals[i] = c_float(v.no.x)
                pnormals[i + 1] = c_float(v.no.y)
                pnormals[i + 2] = c_float(v.no.z)
                i += 3
        else:
            pnormals = POINTER(c_float)()

        # Extract UVs
        if EXPORT_UV:
            ptexCoords = cast((c_float * 2 * vertexCount)(), POINTER(c_float))
            if mesh.faceUV:
                for f in mesh.faces:
                    for j, v in enumerate(f.v):
                        k = v.index
                        if k < vertexCount:
                            uv = f.uv[j]
                            ptexCoords[k * 2] = uv[0]
                            ptexCoords[k * 2 + 1] = uv[1]
            else:
                i = 0
                for v in mesh.verts:
                    ptexCoords[i] = c_float(v.uvco[0])
                    ptexCoords[i + 1] = c_float(v.uvco[1])
                    i += 2
        else:
            ptexCoords = POINTER(c_float)()

        # Extract colors
        if EXPORT_COLORS:
            pcolors = cast((c_float * 4 * vertexCount)(), POINTER(c_float))
            for f in mesh.faces:
                for j, v in enumerate(f.v):
                    k = v.index
                    if k < vertexCount:
                        col = f.col[j]
                        pcolors[k * 4] = col.r / 255.0
                        pcolors[k * 4 + 1] = col.g / 255.0
                        pcolors[k * 4 + 2] = col.b / 255.0
                        pcolors[k * 4 + 3] = 1.0
        else:
            pcolors = POINTER(c_float)()

        # Load the OpenCTM shared library
        if os.name == 'nt':
            libHDL = WinDLL('openctm.dll')
        else:
            libName = find_library('openctm')
            if not libName:
                Blender.Draw.PupMenu(
                    'Could not find the OpenCTM shared library')
                return
            libHDL = CDLL(libName)
        if not libHDL:
            Blender.Draw.PupMenu('Could not open the OpenCTM shared library')
            return

        # Get all the functions from the shared library that we need
        ctmNewContext = libHDL.ctmNewContext
        ctmNewContext.argtypes = [c_int]
        ctmNewContext.restype = c_void_p
        ctmFreeContext = libHDL.ctmFreeContext
        ctmFreeContext.argtypes = [c_void_p]
        ctmGetError = libHDL.ctmGetError
        ctmGetError.argtypes = [c_void_p]
        ctmGetError.restype = c_int
        ctmErrorString = libHDL.ctmErrorString
        ctmErrorString.argtypes = [c_int]
        ctmErrorString.restype = c_char_p
        ctmFileComment = libHDL.ctmFileComment
        ctmFileComment.argtypes = [c_void_p, c_char_p]
        ctmDefineMesh = libHDL.ctmDefineMesh
        ctmDefineMesh.argtypes = [
            c_void_p,
            POINTER(c_float), c_int,
            POINTER(c_int), c_int,
            POINTER(c_float)
        ]
        ctmSave = libHDL.ctmSave
        ctmSave.argtypes = [c_void_p, c_char_p]
        ctmAddUVMap = libHDL.ctmAddUVMap
        ctmAddUVMap.argtypes = [c_void_p, POINTER(c_float), c_char_p, c_char_p]
        ctmAddUVMap.restype = c_int
        ctmAddAttribMap = libHDL.ctmAddAttribMap
        ctmAddAttribMap.argtypes = [c_void_p, POINTER(c_float), c_char_p]
        ctmAddAttribMap.restype = c_int
        ctmCompressionMethod = libHDL.ctmCompressionMethod
        ctmCompressionMethod.argtypes = [c_void_p, c_int]
        ctmVertexPrecisionRel = libHDL.ctmVertexPrecisionRel
        ctmVertexPrecisionRel.argtypes = [c_void_p, c_float]
        ctmNormalPrecision = libHDL.ctmNormalPrecision
        ctmNormalPrecision.argtypes = [c_void_p, c_float]
        ctmUVCoordPrecision = libHDL.ctmUVCoordPrecision
        ctmUVCoordPrecision.argtypes = [c_void_p, c_int, c_float]
        ctmAttribPrecision = libHDL.ctmAttribPrecision
        ctmAttribPrecision.argtypes = [c_void_p, c_int, c_float]

        # Create an OpenCTM context
        ctm = ctmNewContext(0x0102)  # CTM_EXPORT
        try:
            # Set the file comment
            ctmFileComment(
                ctm,
                c_char_p('%s - created by Blender %s (www.blender.org)' %
                         (ob.getName(), Blender.Get('version'))))

            # Define the mesh
            ctmDefineMesh(ctm, pvertices, c_int(vertexCount), pindices,
                          c_int(triangleCount), pnormals)

            # Add UV coordinates?
            if EXPORT_UV:
                tm = ctmAddUVMap(ctm, ptexCoords, c_char_p(), c_char_p())
                if EXPORT_MG2:
                    ctmUVCoordPrecision(ctm, tm, EXPORT_UVPREC)

            # Add colors?
            if EXPORT_COLORS:
                cm = ctmAddAttribMap(ctm, pcolors, c_char_p('Color'))
                if EXPORT_MG2:
                    ctmAttribPrecision(ctm, cm, EXPORT_CPREC)

            # Set compression method
            if EXPORT_MG2:
                ctmCompressionMethod(ctm, 0x0203)  # CTM_METHOD_MG2
                ctmVertexPrecisionRel(ctm, EXPORT_VPREC)
                if EXPORT_NORMALS:
                    ctmNormalPrecision(ctm, EXPORT_NPREC)

            else:
                ctmCompressionMethod(ctm, 0x0202)  # CTM_METHOD_MG1

            # Save the file
            ctmSave(ctm, c_char_p(filename))

            # Check for errors
            e = ctmGetError(ctm)
            if e != 0:
                s = ctmErrorString(e)
                Blender.Draw.PupMenu('Error%t|Could not save the file: ' + s)

        finally:
            # Free the OpenCTM context
            ctmFreeContext(ctm)

    finally:
        Window.WaitCursor(0)
        if is_editmode:
            Blender.Window.EditMode(1, '', 0)
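
The exporter above leans on a single ctypes pattern: load the shared library, declare argtypes/restype for every function you intend to call, then wrap the native calls in try/finally so the context is always freed. A minimal, self-contained sketch of the prototype-declaration step, exercised against the C math library rather than OpenCTM (libm and cos are stand-ins, chosen only because they are commonly available):

# Minimal sketch of the ctypes binding pattern used by the exporter above.
from ctypes import CDLL, c_double
from ctypes.util import find_library

lib_name = find_library('m')  # may be None (e.g. on Windows) - handle that case
if lib_name:
    libm = CDLL(lib_name)

    # Declare the prototype, exactly as done above for the CTM functions.
    libm.cos.argtypes = [c_double]
    libm.cos.restype = c_double

    print libm.cos(0.0)  # -> 1.0

Without these declarations ctypes would default the return type to int and would not know how to convert a Python float argument, which is why the exporter declares every CTM prototype explicitly.
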
def extend(EXTEND_MODE, ob):
	if EXTEND_MODE == -1:
		return
	me = ob.getData(mesh=1)
	me_verts = me.verts
	# Toggle Edit mode
	is_editmode = Window.EditMode()
	if is_editmode:
		Window.EditMode(0)
	Window.WaitCursor(1)
	t = sys.time()
	edge_average_lengths = {}
	
	OTHER_INDEX = 2,3,0,1
	FAST_INDICIES = 0,2,1,3 # order is faster
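	# OTHER_INDEX[i] is the quad edge opposite edge i: with edges ordered
	# (v0,v1), (v1,v2), (v2,v3), (v3,v0), the opposite of edge i is (i+2)%4.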
	def extend_uvs(face_source, face_target, edge_key):
		'''
		Takes 2 faces,
		Projects the UV coords of the source face onto the target face next to it.
		Both faces must share an edge.
		'''
		
		def face_edge_vs(vi):
			# assume a quad
			return [(vi[0], vi[1]), (vi[1], vi[2]), (vi[2], vi[3]), (vi[3], vi[0])]
		
		uvs_source = face_source.uv
		uvs_target = face_target.uv
		
		vidx_source = [v.index for v in face_source] 
		vidx_target = [v.index for v in face_target]
		
		# vertex index is the key, uv is the value
		uvs_vhash_source = dict( [ (vindex, uvs_source[i]) for i, vindex in enumerate(vidx_source)] )
		uvs_vhash_target = dict( [ (vindex, uvs_target[i]) for i, vindex in enumerate(vidx_target)] )
		
		edge_idxs_source = face_edge_vs(vidx_source)
		edge_idxs_target = face_edge_vs(vidx_target)
		
		source_matching_edge = -1
		target_matching_edge = -1
		
		edge_key_swap = edge_key[1], edge_key[0]
		
		try:	source_matching_edge = edge_idxs_source.index(edge_key)
		except:	source_matching_edge = edge_idxs_source.index(edge_key_swap)
		try:	target_matching_edge = edge_idxs_target.index(edge_key)
		except:	target_matching_edge = edge_idxs_target.index(edge_key_swap)
		

		
		edgepair_inner_source = edge_idxs_source[source_matching_edge]
		edgepair_inner_target = edge_idxs_target[target_matching_edge]
		edgepair_outer_source = edge_idxs_source[OTHER_INDEX[source_matching_edge]]
		edgepair_outer_target = edge_idxs_target[OTHER_INDEX[target_matching_edge]]
		
		if edge_idxs_source[source_matching_edge] == edge_idxs_target[target_matching_edge]:
			iA= 0; iB= 1 # Flipped, most common
		else: # The normals of these faces must be different
			iA= 1; iB= 0

		
		# Set the target UV's touching source face, no tricky calc needed,
		uvs_vhash_target[edgepair_inner_target[0]][:] = uvs_vhash_source[edgepair_inner_source[iA]]
		uvs_vhash_target[edgepair_inner_target[1]][:] = uvs_vhash_source[edgepair_inner_source[iB]]


		# Set the 2 UV's on the target face that are not touching
		# for this we need to do basic expanding of the source face's UVs
		if EXTEND_MODE == 2:
			
			try: # divide by zero is possible
				'''
				measure the length of each face from the middle of each edge to the opposite,
				along the axis we are copying, and use this
				'''
				i1a= edgepair_outer_target[iB]
				i2a= edgepair_inner_target[iA]
				if i1a>i2a: i1a, i2a = i2a, i1a
				
				i1b= edgepair_outer_source[iB]
				i2b= edgepair_inner_source[iA]
				if i1b>i2b: i1b, i2b = i2b, i1b
				# print edge_average_lengths
				factor = edge_average_lengths[i1a, i2a][0] / edge_average_lengths[i1b, i2b][0]
			except:
				# Div By Zero?
				factor = 1.0
			
			uvs_vhash_target[edgepair_outer_target[iB]][:] = uvs_vhash_source[edgepair_inner_source[0]]  +factor * (uvs_vhash_source[edgepair_inner_source[0]] - uvs_vhash_source[edgepair_outer_source[1]])
			uvs_vhash_target[edgepair_outer_target[iA]][:] = uvs_vhash_source[edgepair_inner_source[1]]  +factor * (uvs_vhash_source[edgepair_inner_source[1]] - uvs_vhash_source[edgepair_outer_source[0]])
		
		else:
			# same as above but with no factor
			uvs_vhash_target[edgepair_outer_target[iB]][:] = uvs_vhash_source[edgepair_inner_source[0]] + (uvs_vhash_source[edgepair_inner_source[0]] - uvs_vhash_source[edgepair_outer_source[1]])
			uvs_vhash_target[edgepair_outer_target[iA]][:] = uvs_vhash_source[edgepair_inner_source[1]] + (uvs_vhash_source[edgepair_inner_source[1]] - uvs_vhash_source[edgepair_outer_source[0]])
	
	if not me.faceUV:
		me.faceUV= True
	
	face_act = me.activeFace
	if face_act == -1:
		Draw.PupMenu('ERROR: No active face')
		return
	
	face_sel= [f for f in me.faces if len(f) == 4 and f.sel]
	
	face_act_local_index = -1
	for i, f in enumerate(face_sel):
		if f.index == face_act:
			face_act_local_index = i
			break
	
	if face_act_local_index == -1:
		Draw.PupMenu('ERROR: Active face not selected')
		return
	
	
	
	# Modes
	# 0: unsearched
	# 1: mapped, search from this face.
	# 2: all siblings have been searched, don't search again.
	face_modes = [0] * len(face_sel)
	face_modes[face_act_local_index] = 1 # extend UV's from this face.
	
	
	# Edge connectivity
	edge_faces = {}
	for i, f in enumerate(face_sel):
		for edkey in f.edge_keys:
			try:	edge_faces[edkey].append(i)
			except:	edge_faces[edkey] = [i]
	
	SEAM = Mesh.EdgeFlags.SEAM
	
	if EXTEND_MODE == 2:
		edge_loops = BPyMesh.getFaceLoopEdges(face_sel, [ed.key for ed in me.edges if ed.flag & SEAM] )
		me_verts = me.verts
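		# Every edge key in a loop is mapped to the same one-element list, so
		# once the loop has been summed and divided below, each edge sees the
		# loop's average edge length via edge_average_lengths[key][0].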
		for loop in edge_loops:
			looplen = [0.0]
			for ed in loop:
				edge_average_lengths[ed] = looplen
				looplen[0] += (me_verts[ed[0]].co - me_verts[ed[1]].co).length
			looplen[0] = looplen[0] / len(loop)
		
	
	
	# remove seams, so we don't map across seams.
	for ed in me.edges:
		if ed.flag & SEAM:
			# remove the edge pair if we can
			try:	del edge_faces[ed.key]
			except:	pass
	# Done finding seams
	
	
	# face connectivity - faces around each face
	# only store a list of indices for each face.
	face_faces = [[] for i in xrange(len(face_sel))]
	
	for edge_key, faces in edge_faces.iteritems():
		if len(faces) == 2: # Only do edges with 2 face users for now
			face_faces[faces[0]].append((faces[1], edge_key))
			face_faces[faces[1]].append((faces[0], edge_key))
	
	
	# Now we know what face is connected to what other face, map them by connectivity
	ok = True
	while ok:
		ok = False
		for i in xrange(len(face_sel)):
			if face_modes[i] == 1: # searchable
				for f_sibling, edge_key in face_faces[i]:
					if face_modes[f_sibling] == 0:
						face_modes[f_sibling] = 1 # mapped and search from.
						extend_uvs(face_sel[i], face_sel[f_sibling], edge_key)
						face_modes[i] = 1 # we can map from this one now.
						ok= True # keep searching
				
				face_modes[i] = 2 # don't search again
	print sys.time() - t
	
	if is_editmode:
		Window.EditMode(1)
	else:
		me.update()
	
	Window.RedrawAll()
	Window.WaitCursor(0)
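
The connectivity walk above - build an edge-key -> face-indices table, then repeatedly map any unmapped quad that shares an edge with an already-mapped one - is easier to see in isolation. A minimal sketch with plain vertex-index tuples standing in for Blender faces (the names faces, seed and walk_from are purely illustrative):

def edge_key(a, b):
	# Blender-style edge key: an order-independent pair of vertex indices.
	return (a, b) if a < b else (b, a)

def walk_from(seed, faces):
	'''Return the indices of all faces reachable from seed across shared edges.'''
	# edge key -> list of indices of the faces using that edge
	edge_faces = {}
	for fi, f in enumerate(faces):
		for i in xrange(len(f)):
			edge_faces.setdefault(edge_key(f[i], f[(i + 1) % len(f)]), []).append(fi)

	mapped = set([seed])
	stack = [seed]
	while stack:
		fi = stack.pop()
		f = faces[fi]
		for i in xrange(len(f)):
			for fj in edge_faces[edge_key(f[i], f[(i + 1) % len(f)])]:
				if fj not in mapped:
					mapped.add(fj)
					stack.append(fj)
	return mapped

# Two quads sharing edge (1, 2), plus an isolated quad.
print walk_from(0, [(0, 1, 2, 3), (1, 4, 5, 2), (6, 7, 8, 9)])  # -> set([0, 1])

extend() performs the same walk, except that when it reaches a new face it also calls extend_uvs() to copy and extrapolate the UVs across the shared edge, and it drops seam edges from the table first so the walk never crosses a seam.
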
Example #58
0
def main():
    PREF_Z_LOC = Blender.Draw.PupMenu(
        'Cut Z Location%t|Original Faces|Cutting Polyline')

    if PREF_Z_LOC == -1:
        return
    PREF_Z_LOC -= 1

    Blender.Window.WaitCursor(1)

    print '\nRunning Cookie Cutter'
    time = Blender.sys.time()
    scn = Blender.Scene.GetCurrent()
    obs = [ob for ob in scn.objects.context if ob.type in ('Mesh', 'Curve')]
    MULTIRES_ERROR = False

    # Divide into 2 lists- 1 with faces, one with only edges
    terrains = []  #[me for me in mes if me.faces]
    cutters = []  #[me for me in mes if not me.faces]

    terrain_type = auto_class([
        'mesh', 'bounds', 'face_bounds', 'edge_bounds', 'edge_dict', 'cutters',
        'matrix'
    ])

    for ob in obs:
        if ob.type == 'Mesh':
            me = ob.getData(mesh=1)
        elif ob.data.flag & 1:  # Is the curve 3D? Otherwise don't use it.
            me = BPyMesh.getMeshFromObject(ob)  # get the curve
        else:
            continue

        # a new terrain instance
        if me.multires:
            MULTIRES_ERROR = True
        else:
            t = terrain_type()

            t.matrix = ob.matrixWorld * Blender.Window.GetViewMatrix()

            # Transform the object by its matrix
            me.transform(t.matrix)

            # Set the terrain bounds
            t.bounds = bounds_xy(me.verts)
            t.edge_bounds = [bounds_xy(ed) for ed in me.edges]
            t.mesh = me

            if me.faces:  # Terrain.
                t.edge_dict = mesh_edge_dict(me)
                t.face_bounds = [bounds_xy(f) for f in me.faces]
                t.cutters = []  # Store cutting objects that cut us here
                terrains.append(t)
            elif len(me.edges) > 2:  # Cutter
                cutters.append(t)

    totcuts = len(terrains) * len(cutters)
    if not totcuts:
        Blender.Window.WaitCursor(0)
        Blender.Draw.PupMenu(
            'ERROR%t|Select at least 1 closed loop mesh (edges only)|as the cutter...|and 1 or more meshes to cut into'
        )

    crazy_point = Vector(100000, 100000)
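    # crazy_point is the far end of the segment used in the even-odd test
    # below: a face centre lies inside a cutter when the segment from the
    # centre to this far-away point crosses the cutter's edges an odd number
    # of times.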

    for t in terrains:
        for c in cutters:
            # Main cutting function
            terrain_cut_2d(t, c, PREF_Z_LOC)

            # Was the terrain touched?
            if len(t.face_bounds) != len(t.mesh.faces):
                t.edge_dict = mesh_edge_dict(t.mesh)
                # remake the bounds
                t.edge_bounds = [bounds_xy(ed) for ed in t.mesh.edges]
                t.face_bounds = [bounds_xy(f) for f in t.mesh.faces]
                t.cutters.append(c)

            print '\t%i remaining' % totcuts
            totcuts -= 1

        # SELECT INTERNAL FACES ONCE THIS TERRAIN IS CUT
        Blender.Mesh.Mode(Blender.Mesh.SelectModes['FACE'])
        t.mesh.sel = 0
        for c in t.cutters:
            edge_verts_c = [(ed_c.v1.co, ed_c.v2.co) for ed_c in c.mesh.edges]
            for f in t.mesh.faces:
                # How many cutter edges do we cross on the way from the
                # face's center to crazy_point?
                if not f.hide and not f.sel:  # Not already selected
                    f_cent = f.cent  # avoid shadowing the cutter loop variable 'c'
                    if point_in_bounds(f_cent, t.bounds):
                        isect_count = 0
                        for edv1, edv2 in edge_verts_c:
                            isect_count += (LineIntersect2D(
                                f_cent, crazy_point, edv1, edv2) is not None)

                        if isect_count % 2:
                            f.sel = 1
    Blender.Mesh.Mode(Blender.Mesh.SelectModes['FACE'])

    # Restore the transformation
    for data in (terrains, cutters):
        for t in data:
            if t.mesh.users:  # it may have been a temp mesh from a curve.
                t.mesh.transform(t.matrix.copy().invert())

    Blender.Window.WaitCursor(0)

    if MULTIRES_ERROR:
        Blender.Draw.PupMenu(
            'Error%t|One or more meshes were not cut because they are multires.'
        )

    print 'terrains:%i cutters %i  %.2f secs taken' % (
        len(terrains), len(cutters), Blender.sys.time() - time)
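
The face-selection step above is an even-odd (crossing-count) inside test: cast a segment from the face centre to a distant point and count how many cutter edges it crosses; an odd count means the centre is inside the cutter. The same rule is usually written directly against the polygon's own edges. A small generic sketch of that form (this is not the script's LineIntersect2D helper, just the textbook even-odd test):

def point_in_poly_2d(x, y, poly):
    '''Even-odd rule: True if (x, y) lies inside the 2D polygon poly,
    given as a list of (x, y) vertex pairs.'''
    inside = False
    j = len(poly) - 1
    for i in xrange(len(poly)):
        xi, yi = poly[i]
        xj, yj = poly[j]
        # Does edge (j, i) straddle the horizontal through y, and does it
        # cross that horizontal to the right of x? If so, flip the parity.
        if (yi > y) != (yj > y) and x < (xj - xi) * (y - yi) / (yj - yi) + xi:
            inside = not inside
        j = i
    return inside

square = [(0.0, 0.0), (2.0, 0.0), (2.0, 2.0), (0.0, 2.0)]
print point_in_poly_2d(1.0, 1.0, square)  # True
print point_in_poly_2d(3.0, 1.0, square)  # False
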
Example #59
0
def mesh_mirror(me, PREF_MIRROR_LOCATION, PREF_XMID_SNAP, PREF_MAX_DIST, PREF_XZERO_THRESH, PREF_MODE, PREF_SEL_ONLY, PREF_EDGE_USERS, PREF_MIRROR_WEIGHTS, PREF_FLIP_NAMES, PREF_CREATE_FLIP_NAMES):
	'''
	PREF_MIRROR_LOCATION, Will we mirror locations?
	PREF_XMID_SNAP, Should we snap verts to X=0?
	PREF_MAX_DIST, Maximum distance between mirrored verts for them to be paired.
	PREF_XZERO_THRESH, How close verts must be to the middle before they are considered X-Zero verts.
	PREF_MODE, 0:middle, 1:left, 2:right.
	PREF_SEL_ONLY, only operate on selected verts.
	PREF_EDGE_USERS, match only verts with the same number of edge users.
	PREF_MIRROR_WEIGHTS, Will we mirror vertex group weights?
	PREF_FLIP_NAMES, flip left/right group names when mirroring weights.
	PREF_CREATE_FLIP_NAMES, create the flipped group name if it does not exist.
	'''
	
	
	# Operate on all verts
	if not PREF_SEL_ONLY:
		for v in me.verts:
			v.sel=1
	
	
	if PREF_EDGE_USERS:
		edge_users= [0]*len(me.verts)
		for ed in me.edges:
			edge_users[ed.v1.index]+=1
			edge_users[ed.v2.index]+=1
	
	
	
	if PREF_XMID_SNAP: # Do we snap locations at all?
		for v in me.verts:
			if v.sel:
				if abs(v.co.x) <= PREF_XZERO_THRESH:
					v.co.x= 0
					v.sel= 0
		
		# X-zero verts were already de-selected above, so they are excluded here
		neg_vts = [v for v in me.verts if v.sel and v.co.x < 0]
		pos_vts = [v for v in me.verts if v.sel and v.co.x > 0]
		
	else:
		# Use a small margin; verts must be outside it before we mirror them.
		neg_vts = [v for v in me.verts if v.sel if v.co.x <  -PREF_XZERO_THRESH]
		pos_vts = [v for v in me.verts if v.sel if v.co.x >   PREF_XZERO_THRESH]
	
	
	
	#*Mirror Location*********************************************************#
	if PREF_MIRROR_LOCATION:
		mirror_pairs= []
		# align the negative side with the positive side.
		flipvec= Mathutils.Vector()
		len_neg_vts= float(len(neg_vts))
		for i1, nv in enumerate(neg_vts):
			if nv.sel: # we may already be mirrored; if so we'll have been deselected
				nv_co= nv.co
				for i2, pv in enumerate(pos_vts):
					if pv.sel:
						# Enforce edge users (indexed by vertex index).
						if not PREF_EDGE_USERS or edge_users[nv.index]==edge_users[pv.index]:
							flipvec[:]= pv.co
							flipvec.x= -flipvec.x
							l= (nv_co-flipvec).length
							
							if l==0.0: # Both are already mirrored so we don't need to think about them.
								# De-select so we don't use them again.
								pv.sel= nv.sel= 0
							
							# Record a match.
							elif l<=PREF_MAX_DIST:
								
								# We can adjust the length by the normal, now we know the length is under the limit.
								# DISABLED, WASN'T VERY USEFUL
								'''
								if PREF_NOR_WEIGHT>0:
									# Get the normal and flip it, reusing flipvec
									flipvec[:]= pv.no
									flipvec.x= -flipvec.x
									try:
										ang= Mathutils.AngleBetweenVecs(nv.no, flipvec)/180.0
									except: # on rare occasions AngleBetweenVecs will fail - zero-length vec.
										ang= 0
									
									l=l*(1+(ang*PREF_NOR_WEIGHT))
								'''
								# Record the pairs for sorting to see who will get joined
								mirror_pairs.append((l, nv, pv))
				
				# Update the progress bar every 10 loops
				if i1 % 10 == 0:
					Window.DrawProgressBar(0.8 * (i1/len_neg_vts), 'Mirror verts %i of %i' % (i1, len_neg_vts))
		
		Window.DrawProgressBar(0.9, 'Mirror verts: Updating locations')
		
		# Now we have a list of the pairs we might use; let's find the best and do them first,
		# de-selecting as we go so we can make sure not to mess it up.
		try:	mirror_pairs.sort(key = lambda a: a[0])
		except:	mirror_pairs.sort(lambda a,b: cmp(a[0], b[0]))
		
		for dist, v1,v2 in mirror_pairs: # dist, neg, pos
			if v1.sel and v2.sel:
				if PREF_MODE==0: # Middle
					flipvec[:]= v2.co # positive
					flipvec.x= -flipvec.x # negative
					v2.co= v1.co= (flipvec+v1.co)*0.5 # midway
					v2.co.x= -v2.co.x
				elif PREF_MODE==2: # Left
					v2.co= v1.co
					v2.co.x= -v2.co.x
				elif PREF_MODE==1: # Right
					v1.co= v2.co
					v1.co.x= -v1.co.x
				v1.sel= v2.sel= 0
	
	
	#*Mirror Weights**********************************************************#
	if PREF_MIRROR_WEIGHTS:
		
		groupNames, vWeightDict= BPyMesh.meshWeight2Dict(me)
		mirror_pairs_l2r= [] # Store a list of matches for these verts.
		mirror_pairs_r2l= [] # Store a list of matches for these verts.
		
		# align the negative side with the positive side.
		flipvec= Mathutils.Vector()
		len_neg_vts= float(len(neg_vts))
		
		# Here we make a tuple to look through; in middle mode we'll need to look through both sides.
		if PREF_MODE==0: # Middle
			find_set= ((neg_vts, pos_vts, mirror_pairs_l2r), (pos_vts, neg_vts, mirror_pairs_r2l))
		elif PREF_MODE==1: # Left
			find_set= ((neg_vts, pos_vts, mirror_pairs_l2r), )
		elif PREF_MODE==2: # Right
			find_set= ((pos_vts, neg_vts, mirror_pairs_r2l), )
		
		
		# Do a locational lookup again :/ - this isn't that good form, but if we're mirroring weights
		# we'll need to do it anyway.
		# The difference here is that we don't need a 1:1 match for each vert - just get each vert to
		# find another mirrored vert and use its weight.
		# Use "find_set" so we can do a flipped search L>R and R>L without duplicate code.
		for vtls_A, vtls_B, pair_ls  in  find_set:
			for i1, vA in enumerate(vtls_A):
				best_len=1<<30 # BIGNUM
				best_idx=-1
				
				# Find the BEST match
				vA_co= vA.co
				for i2, vB in enumerate(vtls_B):
					# Enforce edge users (indexed by vertex index).
					if not PREF_EDGE_USERS or edge_users[vA.index]==edge_users[vB.index]:
						flipvec[:]= vB.co
						flipvec.x= -flipvec.x
						l= (vA_co-flipvec).length
						
						if l<best_len:
							best_len=l
							best_idx=i2
				
				if best_idx != -1:
					pair_ls.append((vtls_A[i1].index, vtls_B[best_idx].index)) # (A-side, B-side) vert indices: neg,pos for l2r and pos,neg for r2l.
		
		# Now we can merge the weights
		if PREF_MODE==0: # Middle
			newVWeightDict= [vWeightDict[i] for i in xrange(len(me.verts))] # Have empty dicts just in case
			for pair_ls in (mirror_pairs_l2r, mirror_pairs_r2l):
				if PREF_FLIP_NAMES:
					for i1, i2 in pair_ls:
						flipWeight, groupNames= BPyMesh.dictWeightFlipGroups( vWeightDict[i2], groupNames, PREF_CREATE_FLIP_NAMES )
						newVWeightDict[i1]= BPyMesh.dictWeightMerge([vWeightDict[i1], flipWeight] )
				else:
					for i1, i2 in pair_ls:
						newVWeightDict[i1]= BPyMesh.dictWeightMerge([vWeightDict[i1], vWeightDict[i2]])
			
			vWeightDict= newVWeightDict
		
		elif PREF_MODE==1: # Left
			if PREF_FLIP_NAMES:
				for i1, i2 in mirror_pairs_l2r:
					vWeightDict[i2], groupNames= BPyMesh.dictWeightFlipGroups(vWeightDict[i1], groupNames, PREF_CREATE_FLIP_NAMES)
			else:
				for i1, i2 in mirror_pairs_l2r:
					vWeightDict[i2]= vWeightDict[i1] # Warning: multiple references to the same dict; it's OK in this case, but don't modify it later.
			
		elif PREF_MODE==2: # Right
			if PREF_FLIP_NAMES:
				for i1, i2 in mirror_pairs_r2l:
					vWeightDict[i2], groupNames= BPyMesh.dictWeightFlipGroups(vWeightDict[i1], groupNames, PREF_CREATE_FLIP_NAMES)
			else:
				for i1, i2 in mirror_pairs_r2l:
					vWeightDict[i2]= vWeightDict[i1] # Warning, ditto above
		
		BPyMesh.dict2MeshWeight(me, groupNames, vWeightDict)
	
	me.update()
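
The location pass above boils down to: flip each positive-side vert across X, measure its distance to each negative-side vert, keep candidate pairs under PREF_MAX_DIST, then resolve them greedily from the closest pair outward so every vert is used at most once. A minimal sketch over plain (x, y, z) tuples (the function name, point lists and threshold are only illustrative):

def mirror_pairs(neg_pts, pos_pts, max_dist):
	'''Greedily pair negative-X points with their mirrored positive-X
	counterparts, closest matches first, each point used at most once.'''
	candidates = []
	for i, n in enumerate(neg_pts):
		for j, p in enumerate(pos_pts):
			flipped = (-p[0], p[1], p[2])
			d = sum((a - b) ** 2 for a, b in zip(n, flipped)) ** 0.5
			if d <= max_dist:
				candidates.append((d, i, j))

	candidates.sort()  # closest candidate pairs first
	used_n, used_p, pairs = set(), set(), []
	for d, i, j in candidates:
		if i not in used_n and j not in used_p:
			used_n.add(i)
			used_p.add(j)
			pairs.append((i, j))
	return pairs

print mirror_pairs([(-1.0, 0.0, 0.0)], [(1.02, 0.0, 0.0), (5.0, 0.0, 0.0)], 0.1)
# -> [(0, 0)]

mesh_mirror() does the same matching, then either averages each pair (middle mode) or copies one side onto the other, de-selecting both verts so they cannot be paired twice; the weight pass relaxes this to a simple nearest-mirrored-vert lookup because weights do not need a strict 1:1 pairing.
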