Code example #1
0
File: skinDat.py  Project: Italic-/maya-prefs
 def _refineProcessedData(self):
     """
     Normalize the processed per-vert weighting data and push it to
     lookup dicts for easier calling.

     Populates:
         self._l_processed        -- per-vert list of {jointIdx: weight}
         self._d_vertToWeighting  -- {vertIdx: {jointIdx: weight, ...}}
         self._d_jointToWeighting -- {jointIdx: {vertIdx: weight, ...}}

     Raises:
         ValueError -- if no joints to use were found
     """
     if not self.l_jointsToUse:
         raise ValueError("No joints to use found")

     #...normalize data
     _l_cleanData = []#...one {jointIdx:weight} dict per vert

     for i,_bfr_raw in enumerate(self._l_processed):#...for each vert
         #...coerce raw string keys/values to int/float, keeping an explicit
         #shared key order so values can be zipped back after normalization
         #(the original relied on two dicts iterating in the same order)
         _keys = [int(k) for k in _bfr_raw.keys()]
         _values = [float(_bfr_raw[k]) for k in _bfr_raw.keys()]

         #...normSumList scales the weights to sum to 1.0 (checked below)
         _bfr_normalized = cgmMath.normSumList(_values,1.0)
         if not cgmMath.isFloatEquivalent(1.0, sum(_bfr_normalized)):
             self.log_info("vert {0} not normalized".format(i))

         _l_cleanData.append(dict(zip(_keys,_bfr_normalized)))

     self._l_processed = _l_cleanData#...initial push data

     #...index the normalized weights by joint as well as by vert
     for i in range(len(self.l_jointsToUse)):
         self._d_jointToWeighting[i] = {}

     for i,d_pair in enumerate(self._l_processed):
         self._d_vertToWeighting[i] = d_pair

         for j_idx in d_pair.keys():
             self._d_jointToWeighting[j_idx][i] = d_pair[j_idx]
Code example #2
0
File: skinDat.py  Project: liudger/cgmTools
 def _refineProcessedData(self):
     """
     Normalize the processed per-vert weighting data and push it to
     lookup dicts for easier calling.

     Populates:
         self._l_processed        -- per-vert list of {jointIdx: weight}
         self._d_vertToWeighting  -- {vertIdx: {jointIdx: weight, ...}}
         self._d_jointToWeighting -- {jointIdx: {vertIdx: weight, ...}}

     Raises:
         ValueError -- if no joints to use were found
     """
     if not self.l_jointsToUse:
         raise ValueError("No joints to use found")

     #...normalize data
     _l_cleanData = []#...one {jointIdx:weight} dict per vert

     for i,_bfr_raw in enumerate(self._l_processed):#...for each vert
         #...coerce raw string keys/values to int/float, keeping an explicit
         #shared key order so values can be zipped back after normalization
         #(the original relied on two dicts iterating in the same order)
         _keys = [int(k) for k in _bfr_raw.keys()]
         _values = [float(_bfr_raw[k]) for k in _bfr_raw.keys()]

         #...normSumList scales the weights to sum to 1.0 (checked below)
         _bfr_normalized = cgmMath.normSumList(_values,1.0)
         if not cgmMath.isFloatEquivalent(1.0, sum(_bfr_normalized)):
             self.log_info("vert {0} not normalized".format(i))

         _l_cleanData.append(dict(zip(_keys,_bfr_normalized)))

     self._l_processed = _l_cleanData#...initial push data

     #...index the normalized weights by joint as well as by vert
     for i in range(len(self.l_jointsToUse)):
         self._d_jointToWeighting[i] = {}

     for i,d_pair in enumerate(self._l_processed):
         self._d_vertToWeighting[i] = d_pair

         for j_idx in d_pair.keys():
             self._d_jointToWeighting[j_idx][i] = d_pair[j_idx]
Code example #3
0
File: skinDat.py  Project: Italic-/maya-prefs
        def _fnc_processData(self):
            '''
            Sort out the components.

            Validates the target mesh, cleans and normalizes the raw per-vert
            component weights into self._l_processed, optionally remaps
            influence indices by name (nameMatch), and falls back to a
            closest-point retarget when source/target vert counts differ or
            self._b_forceClosestComponent is set.
            '''
            #...check if our vtx counts match...
            self.log_toDo("Remap dictionary argument")
            self.log_toDo("Non matching mesh types")
            self.mData.d_target = data.validateMeshArg(self.mData.d_target['mesh'])#...update

            _int_sourceCnt = int(self.mData.d_source['pointCount'])
            _int_targetCnt = int(self.mData.d_target['pointCount'])
            _type_source = self.mData.d_source['meshType']
            _type_target = self.mData.d_target['meshType']
            _target = self.mData.d_target['mesh']
            _component = self.mData.d_target['component']
            self.log_infoDict(self.mData.d_target,'target dict...')

            #if int(_int_sourceCnt) != int(_int_targetCnt):
                #return self._FailBreak_("Haven't implemented non matching component counts | source: {0} | target: {1}".format(_int_sourceCnt,_int_targetCnt))
            #...only matching mesh types are supported for now
            if not _type_source == _type_target:
                return self._FailBreak_("Haven't implemented non matching mesh types | source: {0} | target: {1}".format(_type_source,_type_target))

            #...generate a processed list...
            #[[jntIdx,v],[jntIdx,v]....] -- the count in the list is the vert count
            _raw_componentWeights = self.mData.d_sourceInfluences['componentWeights']
            _raw_blendweights = self.mData.d_sourceInfluences['blendWeights']

            _l_cleanData = []

            #...First loop is to only initially clean the data...
            for i in range(_int_sourceCnt):#...for each vert
                _str_i = str(i)
                _subL = []#NOTE(review): unused buffer

                _bfr_raw = _raw_componentWeights[_str_i]

                _bfr_toNormalize = []
                _bfr_clean = {}
                _d_normalized = {}
                #...coerce raw string keys/values to int/float
                for k,value in _bfr_raw.iteritems():
                    _bfr_clean[int(k)] = float(value)
                    _d_normalized[int(k)] = None

                #normalize the values...
                for k,value in _bfr_clean.iteritems():
                    _bfr_toNormalize.append(value)

                _bfr_normalized = cgmMath.normSumList(_bfr_toNormalize,1.0)
                #self.log_info("To Normalize: {0}".format(_bfr_toNormalize))
                #self.log_info("Normalized: {0}".format(_bfr_normalized))
                #self.log_info("{0} pre sum: {1}".format(i,sum(_bfr_toNormalize)))
                #self.log_info("{0} sum: {1}".format(i,sum(_bfr_normalized)))

                #...map normalized values back to joint indices.
                #NOTE(review): relies on _d_normalized and _bfr_clean sharing
                #key iteration order (they hold the same int keys) -- confirm
                for ii,k in enumerate(_d_normalized.keys()):
                    _d_normalized[k] = _bfr_normalized[ii]
                #self.log_info("clean: {0}".format(_bfr_clean))
                #self.log_info("norm:  {0}".format(_d_normalized))

                if not cgmMath.isFloatEquivalent(1.0, sum(_bfr_normalized) ):
                    self.log_info("vert {0} not normalized".format(i))
                #self.log_info("vert {0} base: {1}".format(i,_bfr_toNormalize))
                #self.log_info("vert {0} norm: {1}".format(i,_bfr_normalized))
                _l_cleanData.append(_d_normalized)
                #if i == 3:return self._FailBreak_("stop")
            self._l_processed = _l_cleanData#...initial push data


            #...nameMatch ------------------------------------------------------------------------
            if self._b_nameMatch:
                self.log_info("nameMatch attempt...")
                _l_configInfluenceList = self.l_configInfluenceList
                _l_jointsToUseBaseNames = [names.getBaseName(n) for n in self.l_jointsToUse]

                for n in _l_jointsToUseBaseNames:#...see if all our names are there
                    if not n in _l_configInfluenceList:
                        #return self._FailBreak_
                        self.log_warning("nameMatch... joint '{0}' from joints to use list not in config list".format(n))
                        #self._FailBreak_("nameMatch... joint '{0}' from joints to use list not in config list".format(n))
                        #return False
                        #return False

                _d_rewire = {}

                for i,n in enumerate(_l_configInfluenceList):
                    _idx_base = _l_jointsToUseBaseNames.index(n)#NOTE(review): raises ValueError if n is missing -- the loop above only warns

                    #self.log_error("Rewire. Name:{0} | config idx:{1} ===> currentIdx: {2}".format(n,_idx_config,i))
                    _d_rewire[i] = _idx_base

                """
                for i,n in enumerate(_l_configInfluenceList):
                    if _l_jointsToUseBaseNames[i] != n:
                        self.log_error("Name mismatch. idx:{0} | config:{1} | useJoint:{2}".format(i,n,_l_jointsToUseBaseNames[i]))
                        
                        #_d_rewire[i] = _l_configInfluenceList.index(_l_jointsToUseBaseNames[i])
                        _d_rewire[i] = _l_configInfluenceList.index(_l_jointsToUseBaseNames[_l_jointsToUseBaseNames.index(n)])
                        """
                self.log_infoDict(_d_rewire,"Rewire...")
                #...swap/move weights per the rewire map; _d_dup preserves the
                #pre-remap values so paired swaps stay symmetric
                for i,d in enumerate(self._l_processed):
                    _d_dup = copy.copy(d)
                    #self.log_info("{0} before remap: {1}".format(i,d))
                    for r1,r2 in _d_rewire.iteritems():#...{1:2, 2:1}
                        if r1 in _d_dup.keys():#...1,2
                            if r2 in _d_dup.keys():
                                _bfr1 = _d_dup[r1]
                                _bfr2 = _d_dup[r2]
                                d[r1] = _bfr2
                                d[r2] = _bfr1
                            else:
                                d[r2] = d.pop(r1)
                    #self.log_info("{0} after remap: {1}".format(i,d))


            if int(_int_sourceCnt) != int(_int_targetCnt) or self._b_forceClosestComponent:
                try:#closest to remap ------------------------------------------------------------------------
                    self.log_warning("Non matching component counts. Using closestTo method to remap")
                    _l_closestRetarget = []
                    #...generate a posList of the source data
                    l_source_pos = []
                    _d_pos = self.mData.d_source['d_vertPositions']
                    for i in range(_int_sourceCnt):
                        l_source_pos.append([float(v) for v in _d_pos[str(i)]])#...turn our strings to values

                    self.progressBar_start(stepMaxValue=_int_targetCnt, 
                                           statusMessage='Calculating....', 
                                           interruptableState=False)  

                    #...for each target vert, copy weights from the closest source vert
                    for i in range(_int_targetCnt):
                        _str_vert = "{0}.{1}[{2}]".format(_target,_component,i)
                        self.progressBar_iter(status = "Finding closest to '{0}'".format(_str_vert))

                        #self.log_info(_str_vert)
                        _pos = distance.returnWorldSpacePosition(_str_vert)#...get position
                        _closestPos = distance.returnClosestPoint(_pos, l_source_pos)#....get closest
                        _closestIdx = l_source_pos.index(_closestPos)
                        #self.log_info("target idx: {0} | Closest idx: {1} | value{2}".format(i,_closestIdx,_l_cleanData[_closestIdx]))
                        _l_closestRetarget.append(_l_cleanData[_closestIdx])
                    self.progressBar_end()

                    self._l_processed = _l_closestRetarget#...push it backs
                    self._b_smooth = True

                    #...smooth value depends on up/down-sampling direction --
                    #NOTE(review): intended semantics of the two magic values unverified
                    if _int_targetCnt >= _int_sourceCnt:
                        self._f_smoothWeightsValue = .00005
                    else:
                        self._f_smoothWeightsValue = .5

                    self.log_info("closestTo remap complete...")
                except Exception,error:
                    raise Exception,"closestTo remap failure | {0}".format(error)
Code example #4
0
#mode 2
# Scratch/REPL log: setting multiplyDivide inputs, then working through
# float "zero equivalency" (tiny values like -4.1e-07 treated as 0.0).
attributes.doSetAttr('multiplyDivide1','input2X',2)
attributes.doSetAttr('multiplyDivide2','input2X',2)


'multiplyDivide1'
#Working through zero equivalency 
'%f'%(-4.11241646134e-07)
round(4.11241646134e-07,1)
round(f1,places)# NOTE(review): f1/places are not defined in this snippet
round(.005,3)
number = .2
number = scientific# NOTE(review): 'scientific' is only assigned further below
number = .000065183
number = 0.0
# log rounding of `number` at increasing precision
for n in [1,2,3,4,5,6,7,8,9]:
    log.info(round(number,n))

from cgm.lib import cgmMath
reload(cgmMath)
cgmMath.test_isFloatEquivalent()
cgmMath.isFloatEquivalent(-4.11241646134e-07,0.0)
cgmMath.isFloatEquivalent(0,0.0)
cgmMath.isFloatEquivalent(-4.11241646134e-07,.00001)
round(-0.00000000)
scientific = -4.11241646134e-07
round(scientific)
type(scientific)
mc.xform ('spine_1_1_surfaceJoint', q=True, os=True, ro=True)
assert isFloatEquivalent(-4.11241646134e-07,0.0)
Code example #5
0
# Scratch/REPL log: manually stepping a cgm module rig build and poking
# at rig joints / match systems.
i_rig.buildModule.build_rigSkeleton(i_rig)
i_rig.buildModule.build_shapes(i_rig)
i_rig.buildModule.build_controls(i_rig)
i_rig.buildModule.build_FKIK(i_rig)
i_rig.buildModule.build_deformation(i_rig)
i_rig.buildModule.build_rig(i_rig)
i_rig.buildModule.__build__(i_rig)
from cgm.lib import distance
l_constrainTargetJoints = [u'l_left_index_1_blend_jnt', u'l_left_index_2_blend_jnt', u'l_left_index_3_blend_jnt', u'l_left_index_4_blend_jnt', u'l_left_index_5_blend_jnt']
distance.returnClosestObject('l_left_index_1_rig_jnt',l_constrainTargetJoints)
m1.rigNull.getMessage('blendJoints',False)
m1.moduleParent.rigNull.rigJoints[-1]
i_rig.buildModule.build_matchSystem(i_rig)
reload(Rig)
from cgm.lib import cgmMath
cgmMath.isFloatEquivalent(0.002,0,2)
rUtils.matchValue_iterator(drivenAttr='l_left_index_2_ik_jnt.rz',driverAttr='left_index_noFlip_ikH.twist',minIn = -179, maxIn = 179, maxIterations = 5,matchValue=0)
cgmMeta.cgmObject('l_ankle_ik_anim').scalePivotY = 0
i_rig._i_deformNull.controlsIK

# grab joint chains off the rigNull for inspection
ml_ikJoints = m1.rigNull.ikJoints
ml_fkJoints = m1.rigNull.fkJoints
ml_blendJoints = m1.rigNull.blendJoints
mi_settings = m1.rigNull.settings

#Queries	
m1.isSized()
m1.setState('skeleton',forceNew=True)
m1.skeletonDelete()
m1.doRig()
Code example #6
0
File: skinDat.py  Project: liudger/cgmTools
        def _fnc_processData(self):
            '''
            Sort out the components.

            Validates the target mesh, cleans and normalizes the raw per-vert
            component weights into self._l_processed, optionally remaps
            influence indices by name (nameMatch), and falls back to a
            closest-point retarget when source/target vert counts differ or
            self._b_forceClosestComponent is set.
            '''
            #...check if our vtx counts match...
            self.log_toDo("Remap dictionary argument")
            self.log_toDo("Non matching mesh types")
            self.mData.d_target = data.validateMeshArg(self.mData.d_target['mesh'])#...update

            _int_sourceCnt = int(self.mData.d_source['pointCount'])
            _int_targetCnt = int(self.mData.d_target['pointCount'])
            _type_source = self.mData.d_source['meshType']
            _type_target = self.mData.d_target['meshType']
            _target = self.mData.d_target['mesh']
            _component = self.mData.d_target['component']
            self.log_infoDict(self.mData.d_target,'target dict...')

            #if int(_int_sourceCnt) != int(_int_targetCnt):
                #return self._FailBreak_("Haven't implemented non matching component counts | source: {0} | target: {1}".format(_int_sourceCnt,_int_targetCnt))
            #...only matching mesh types are supported for now
            if not _type_source == _type_target:
                return self._FailBreak_("Haven't implemented non matching mesh types | source: {0} | target: {1}".format(_type_source,_type_target))

            #...generate a processed list...
            #[[jntIdx,v],[jntIdx,v]....] -- the count in the list is the vert count
            _raw_componentWeights = self.mData.d_sourceInfluences['componentWeights']
            _raw_blendweights = self.mData.d_sourceInfluences['blendWeights']

            _l_cleanData = []

            #...First loop is to only initially clean the data...
            for i in range(_int_sourceCnt):#...for each vert
                _str_i = str(i)
                _subL = []#NOTE(review): unused buffer

                _bfr_raw = _raw_componentWeights[_str_i]

                _bfr_toNormalize = []
                _bfr_clean = {}
                _d_normalized = {}
                #...coerce raw string keys/values to int/float
                for k,value in _bfr_raw.iteritems():
                    _bfr_clean[int(k)] = float(value)
                    _d_normalized[int(k)] = None

                #normalize the values...
                for k,value in _bfr_clean.iteritems():
                    _bfr_toNormalize.append(value)

                _bfr_normalized = cgmMath.normSumList(_bfr_toNormalize,1.0)
                #self.log_info("To Normalize: {0}".format(_bfr_toNormalize))
                #self.log_info("Normalized: {0}".format(_bfr_normalized))
                #self.log_info("{0} pre sum: {1}".format(i,sum(_bfr_toNormalize)))
                #self.log_info("{0} sum: {1}".format(i,sum(_bfr_normalized)))

                #...map normalized values back to joint indices.
                #NOTE(review): relies on _d_normalized and _bfr_clean sharing
                #key iteration order (they hold the same int keys) -- confirm
                for ii,k in enumerate(_d_normalized.keys()):
                    _d_normalized[k] = _bfr_normalized[ii]
                #self.log_info("clean: {0}".format(_bfr_clean))
                #self.log_info("norm:  {0}".format(_d_normalized))

                if not cgmMath.isFloatEquivalent(1.0, sum(_bfr_normalized) ):
                    self.log_info("vert {0} not normalized".format(i))
                #self.log_info("vert {0} base: {1}".format(i,_bfr_toNormalize))
                #self.log_info("vert {0} norm: {1}".format(i,_bfr_normalized))
                _l_cleanData.append(_d_normalized)
                #if i == 3:return self._FailBreak_("stop")
            self._l_processed = _l_cleanData#...initial push data


            #...nameMatch ------------------------------------------------------------------------
            if self._b_nameMatch:
                self.log_info("nameMatch attempt...")
                _l_configInfluenceList = self.l_configInfluenceList
                _l_jointsToUseBaseNames = [names.getBaseName(n) for n in self.l_jointsToUse]

                for n in _l_jointsToUseBaseNames:#...see if all our names are there
                    if not n in _l_configInfluenceList:
                        #return self._FailBreak_
                        self.log_warning("nameMatch... joint '{0}' from joints to use list not in config list".format(n))
                        #self._FailBreak_("nameMatch... joint '{0}' from joints to use list not in config list".format(n))
                        #return False
                        #return False

                _d_rewire = {}

                for i,n in enumerate(_l_configInfluenceList):
                    _idx_base = _l_jointsToUseBaseNames.index(n)#NOTE(review): raises ValueError if n is missing -- the loop above only warns
                    #self.log_error("Rewire. Name:{0} | config idx:{1} ===> currentIdx: {2}".format(n,_idx_config,i))
                    _d_rewire[i] = _idx_base

                """
                for i,n in enumerate(_l_configInfluenceList):
                    if _l_jointsToUseBaseNames[i] != n:
                        self.log_error("Name mismatch. idx:{0} | config:{1} | useJoint:{2}".format(i,n,_l_jointsToUseBaseNames[i]))
                        
                        #_d_rewire[i] = _l_configInfluenceList.index(_l_jointsToUseBaseNames[i])
                        _d_rewire[i] = _l_configInfluenceList.index(_l_jointsToUseBaseNames[_l_jointsToUseBaseNames.index(n)])
                        """
                self.log_infoDict(_d_rewire,"Rewire...")
                #...swap/move weights per the rewire map; _d_dup preserves the
                #pre-remap values so paired swaps stay symmetric
                for i,d in enumerate(self._l_processed):
                    _d_dup = copy.copy(d)
                    #self.log_info("{0} before remap: {1}".format(i,d))
                    for r1,r2 in _d_rewire.iteritems():#...{1:2, 2:1}
                        if r1 in _d_dup.keys():#...1,2
                            if r2 in _d_dup.keys():
                                _bfr1 = _d_dup[r1]
                                _bfr2 = _d_dup[r2]
                                d[r1] = _bfr2
                                d[r2] = _bfr1
                            else:
                                d[r2] = d.pop(r1)
                    #self.log_info("{0} after remap: {1}".format(i,d))


            if int(_int_sourceCnt) != int(_int_targetCnt) or self._b_forceClosestComponent:
                try:#closest to remap ------------------------------------------------------------------------
                    self.log_warning("Non matching component counts. Using closestTo method to remap")
                    _l_closestRetarget = []
                    #...generate a posList of the source data
                    l_source_pos = []
                    _d_pos = self.mData.d_source['d_vertPositions']
                    for i in range(_int_sourceCnt):
                        l_source_pos.append([float(v) for v in _d_pos[str(i)]])#...turn our strings to values

                    self.progressBar_start(stepMaxValue=_int_targetCnt, 
                                           statusMessage='Calculating....', 
                                           interruptableState=False)  

                    #...for each target vert, copy weights from the closest source vert
                    for i in range(_int_targetCnt):
                        _str_vert = "{0}.{1}[{2}]".format(_target,_component,i)
                        self.progressBar_iter(status = "Finding closest to '{0}'".format(_str_vert))

                        #self.log_info(_str_vert)
                        _pos = distance.returnWorldSpacePosition(_str_vert)#...get position
                        _closestPos = distance.returnClosestPoint(_pos, l_source_pos)#....get closest
                        _closestIdx = l_source_pos.index(_closestPos)
                        #self.log_info("target idx: {0} | Closest idx: {1} | value{2}".format(i,_closestIdx,_l_cleanData[_closestIdx]))
                        _l_closestRetarget.append(_l_cleanData[_closestIdx])
                    self.progressBar_end()

                    self._l_processed = _l_closestRetarget#...push it backs
                    self._b_smooth = True

                    #...smooth value depends on up/down-sampling direction --
                    #NOTE(review): intended semantics of the two magic values unverified
                    if _int_targetCnt >= _int_sourceCnt:
                        self._f_smoothWeightsValue = .00005
                    else:
                        self._f_smoothWeightsValue = .5

                    self.log_info("closestTo remap complete...")
                except Exception,error:
                    raise Exception,"closestTo remap failure | {0}".format(error)
Code example #7
0
File: curve_Utils.py  Project: GuidoPollini/MuTools
	def getCurveMirrorData(self,mi_crv):
	    d_return = {}
	    d_return['f_bbMin'] = mi_crv.boundingBoxMin[self.int_across]
	    d_return['f_bbMax'] = mi_crv.boundingBoxMax[self.int_across]
	    d_return['b_oneSided'] = False
	    d_return['b_balanced'] = False
	    d_return['b_weighted'] = None	    
	    	    
	    #> First see our push direction ----------------------------------------------------------
	    try:
		if cgmMath.isFloatEquivalent( d_return['f_bbMax'], d_return['f_bbMin']):
		    d_return['b_balanced'] = 1
		    
		if d_return['f_bbMax'] > d_return['f_bbMin']:
		    d_return['b_weighted'] = 1
		elif d_return['f_bbMax'] < d_return['f_bbMin']:
		    d_return['b_weighted'] = -1
	    except Exception,error:raise StandardError,"Push direction check | %s"%error

	    #> Check thresholds ----------------------------------------------------------------------
	    try:
		if -d_return['f_bbMin'] <= self.f_threshold and d_return['f_bbMax'] >= self.f_threshold or d_return['f_bbMin'] <= -self.f_threshold and d_return['f_bbMax'] <= -self.f_threshold:
		    d_return['b_oneSided'] = True		
		"""if abs(d_return['f_bbMin']) <= self.f_threshold or abs(d_return['f_bbMax']) <= self.f_threshold:
		    d_return['b_oneSided'] = True"""
	    except Exception,error:raise StandardError,"Threshholds check | %s"%error
	
	    #> Is ep --------------------------------------------------------------------
	    try:
		d_return['b_epState'] = isEP(mi_crv)
	    except Exception,error:raise StandardError,"ep check | %s"%error
	    
	    #> Get positions -------------------------------------------------------------------------
	    try:
		l_cvs = mi_crv.getComponents('cv')	    	    		
		l_cvPos = []
		l_epPos = []
		if d_return['b_epState']:
		    for ep in  mi_crv.getComponents('ep'):
			pos = mc.pointPosition(ep,w=True)
			l_epPos.append( pos )	
		    for cv in l_cvs:
			l_cvPos.append( mc.pointPosition(cv,w=True) )	
		else:
		    for cv in l_cvs:
			l_cvPos.append( mc.pointPosition(cv,w=True) )	 
			#Get an ep value
			locatorName = locators.locMeObject(cv)
			pos = distance.returnClosestUPosition(locatorName,mi_crv.mNode)
			mc.delete(locatorName)
			l_epPos.append( pos )	 
			
		d_return['l_cvPos'] = l_cvPos
		d_return['l_epPos'] = l_epPos	    
		d_return['l_cvs'] = l_cvs
	    except Exception,error:raise StandardError,"Get positions | %s"%error
	     
	    #> Is even --------------------------------------------------------------------
	    try:
		if len(l_cvs)%2==0:#even
		    d_return['b_even'] = True
		else: d_return['b_even'] = False
	    except Exception,error:"Even check | %s"%error
	    
	    #> Which end is bigger
	    try:
		if abs(l_cvPos[0][self.int_across]) <= self.f_threshold:
		    d_return['b_startInThreshold'] = True
		else:d_return['b_startInThreshold'] = False
		if abs(l_cvPos[-1][self.int_across]) <= self.f_threshold:
		    d_return['b_endInThreshold'] = True
		else:d_return['b_endInThreshold'] = False
	    except Exception,error:raise StandardError,"End check | %s"%error
	    
	    return d_return
Code example #8
0
# Scratch/REPL log: cgm meta setup, module reloads, and control-wiring tests.
from cgm.core import cgm_Meta as cgmMeta
from cgm.core import cgm_PuppetMeta as cgmPM
import Red9.core.Red9_Meta as r9Meta
import cgm.core
cgm.core._reload()
import maya.cmds as mc

from cgm.lib import locators
from cgm.lib import distance
reload(distance)
from cgm.core.classes import NodeFactory as NodeF
reload(NodeF)
from cgm.lib import cgmMath
reload(cgmMath)
cgmMath.isFloatEquivalent(3,3.0)
obj = mc.ls(sl=True)[0] or False
obj = ''
objList = []
objList = mc.ls(sl=True)
cgmMeta.cgmObject(obj).createTransformFromObj()


#>>> connect_controlWiring
#=======================================================
reload(NodeF)
_obj = 'face_attrHolder'

# simple driver wiring ('-ty' presumably inverts the driver -- confirm)
_wiringDict = {'mouth_up':{'driverAttr':'ty'},'mouth_dn':{'driverAttr':'-ty'},'mouth_right':{'driverAttr':'-tx'}}

_wiringDict = {'mouth_up':{'driverAttr':'ty','driverAttr2':'tx','mode':'cornerBlend'}}