def undoIt(self):
     '''
     Build up the undo command data
     '''
     sys.stdout.write("evalManager_switch.undoIt : mode=%s\n" %
                      self.undostate)
     cmds.evaluationManager(mode=self.undostate)
Example #2
    def wrap(*args, **kwargs):

        parallel = False
        if 'parallel' in cmds.evaluationManager(q=True, mode=True):
            cmds.evaluationManager(mode='off')
            parallel = True
            print "Turning off Parallel evaluation..."
        # Turn $gMainPane Off:
        mel.eval("paneLayout -e -manage false $gMainPane")
        cmds.refresh(suspend=True)
        # Hide the timeslider
        mel.eval("setTimeSliderVisible 0;")

        # The decorator runs the function inside try/finally so the viewport
        # is always turned back on at the end, even if the function raises.
        # This prevents leaving the Maya viewport off after a failure.
        try:
            return func(*args, **kwargs)
        except Exception:
            raise  # will raise original error
        finally:
            cmds.refresh(suspend=False)
            mel.eval("setTimeSliderVisible 1;")
            if parallel:
                cmds.evaluationManager(mode='parallel')
                print "Turning on Parallel evaluation..."
            mel.eval("paneLayout -e -manage true $gMainPane")
            cmds.refresh()
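The wrap function above is the inner closure of a viewport-toggling decorator; the enclosing definition is not part of the snippet. A minimal sketch of how such a decorator is usually assembled, where the outer name viewport_off and the use of functools.wraps are assumptions:

import functools

def viewport_off(func):
    @functools.wraps(func)
    def wrap(*args, **kwargs):
        # body as in the snippet above: switch off parallel evaluation,
        # hide the main pane and timeslider, run func, then always restore
        # everything in the finally block
        return func(*args, **kwargs)
    return wrap

# @viewport_off
# def bake_heavy_scene():
#     ...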
Example #3
    def prefFunction(self, pref, obj, *args):

        if pref == 'evaluation':
            cmds.evaluationManager(mode=obj)
            print 'Evaluation Mode: %s' % cmds.evaluationManager(q=True,
                                                                 mode=True)[0]

        elif pref == 'default tangent':
            cmds.keyTangent(g=True, itt=obj)
            cmds.keyTangent(g=True, ott=obj)
            print 'Default Tangents: %s, %s' % (cmds.keyTangent(
                q=True, g=True,
                itt=True)[0], cmds.keyTangent(q=True, g=True, ott=True)[0])

        elif pref == 'frames per second':
            # Keep Keys at Current Frames
            # cmds.currentUnit(time='', ua=True)

            cmds.currentUnit(t=obj)
            print 'Frames Per Second: %s' % cmds.currentUnit(
                q=True,
                t=True,
                ua=cmds.menuItem(self.keepFrames, q=True, checkBox=True))

        elif pref == 'playback speed':
            cmds.playbackOptions(ps=obj)
            print 'Playback Speed: %s' % cmds.playbackOptions(q=True, ps=True)

        elif pref == 'up axis':
            cmds.upAxis(ax=obj)
            print 'Up Axis: %s' % cmds.upAxis(q=True, ax=True)

        elif pref == 'working units':
            cmds.currentUnit(l=obj)
            print 'Working Units: %s' % cmds.currentUnit(q=True, l=True)
Example #4
def temp_DG_evaluation():
    initial_mode = mc.evaluationManager(query=True, mode=True)[0]
    mc.evaluationManager(mode='off')
    try:
        yield None
    finally:
        mc.evaluationManager(mode=initial_mode)
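The yield suggests this helper is meant to be used as a context manager, presumably via contextlib.contextmanager in the original module (the decorator is not captured in the snippet). A hedged usage sketch under that assumption:

# Sketch only: assumes @contextlib.contextmanager is applied to the
# generator above and that maya.cmds is imported as mc.
with temp_DG_evaluation():
    # hypothetical bake that is run with the evaluation manager off
    mc.bakeResults('pCube1', t=(1, 24), simulation=True)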
Example #5
 def rebuild(include_scheduling=False):
     '''
     Invalidate the EM and rebuild it.
     '''
     cmds.evaluationManager(invalidate=True)
     if include_scheduling:
         # Need to set the time to force the scheduling graph to rebuild too
         cmds.currentTime(cmds.currentTime(query=True))
Example #6
	def evalCallback(self, *args):
		value = args[0]
		if value == Component.dg:
			value = 'off'
		cmds.evaluationManager(mode=value)
		self.evaluation = value
		print 'Evaluation Mode: Set to {}.'.format(cmds.evaluationManager(q=True, mode=True)[0])
		savePrefs()
		return
Example #7
def toggle_parallel_evaluation():
    current_mode = mc.evaluationManager(query=True, mode=True)[0]
    if current_mode == 'off':
        new_name = new_mode = 'parallel'
    else:
        new_mode = 'off'
        new_name = 'DG'
    mc.warning('Switched evaluation manager to "%s"' % new_name.upper())
    mc.evaluationManager(mode=new_mode)
Example #8
def toggleAnimEval_2 ():

	mode = cmds.evaluationManager (q = 1, mode = 1)[0]
	if mode == 'off':
		cmds.evaluationManager (e = 1, mode = 'parallel')
		rslt = 'Parallel'
	elif mode == 'parallel' or mode == 'serial':
		cmds.evaluationManager (e = 1, mode = 'off')
		rslt = 'DG'
	return rslt
 def doIt(self, args):
     '''
     simple switch of the evalManager JUST to get it registered in the undo stack
     '''
     argData = OpenMaya.MArgDatabase(self.syntax(), args)
     if argData.isFlagSet(self.kModeFlag):
         mode=argData.flagArgumentString(self.kModeFlag, 0)
     cmds.evaluationManager(mode=mode)
     sys.stdout.write("evalManager_switch.doIt : setting mode=%s\n" % mode)
     OpenMayaMPx.MPxCommand.clearResult()
     OpenMayaMPx.MPxCommand.setResult(self.undostate)
Example #11
 def verifyScopeSetup(self):
     '''
     Meta-test to check that the scope was defined correctly
     :param unit_test: The test object from which this method was called
     '''
     self.unit_test.assertTrue( cmds.evaluationManager( mode=True, query=True )[0] == 'parallel' )
     if cmds.pluginInfo('cacheEvaluator', loaded=True, query=True):
         self.unit_test.assertFalse( cmds.evaluator( query=True, en=True, name='cache' ) )
Example #12
def evaluation(mode="off"):
    """Set the evaluation manager during context.

    Arguments:
        mode (str): The mode to apply during context.
            "off": The standard DG evaluation (stable)
            "serial": A serial DG evaluation
            "parallel": The Maya 2016+ parallel evaluation

    """

    original = cmds.evaluationManager(query=True, mode=1)[0]
    try:
        cmds.evaluationManager(mode=mode)
        yield
    finally:
        cmds.evaluationManager(mode=original)
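As with the other generator-based helpers in this listing, the function presumably carries a contextlib.contextmanager decorator in its original module. A hedged usage sketch:

# Sketch only: assumes @contextlib.contextmanager on evaluation() and the
# usual "from maya import cmds" import used in these snippets.
with evaluation(mode="parallel"):
    cmds.play(wait=True)  # e.g. time a playback run under parallel evaluation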
Example #13
	def setDefaults(self):
		# Anim Layers
		cmds.timeControl(mayaUI.timeControl, e=True,
						 animLayerFilterOptions='selected')
		mel.eval('outlinerEditor -edit -animLayerFilterOptions selected graphEditor1OutlineEd;')

		# Buffer Curves
		mel.eval('animCurveEditor -edit -showBufferCurves true graphEditor1GraphEd;')

		# Auto Frame
		mel.eval('animCurveEditor -edit -autoFit true graphEditor1GraphEd;optionVar -intValue graphEditorAutoFit true;')

		# Evaluation
		cmds.evaluationManager(mode=Component.parallel)

		mel.eval('generateAllUvTilePreviews;')

		return
Example #14
def evalManagerState(mode='off'):
    '''
    wrapper function for the evalManager so that its switching is recorded in
    the undo stack via the Red9.evalManager_switch plugin
    '''
    if r9Setup.mayaVersion() >= 2016:
        if not cmds.pluginInfo('evalManager_switch', q=True, loaded=True):
            try:
                cmds.loadPlugin('evalManager_switch')
            except:
                log.warning('Plugin Failed to load : evalManager_switch')
        try:
            # via the plug-in to register the switch to the undoStack
            cmds.evalManager_switch(mode=mode)
        except:
            log.debug('evalManager_switch plugin not found, running native Maya evalManager command')
            cmds.evaluationManager(mode=mode)  # run the default maya call instead
        log.debug('EvalManager - switching state : %s' % mode)
    else:
        log.debug("evalManager skipped as you're in an older version of Maya")
Example #16
    def __init__(self):
        self.bo = [[-1] * h for i in range(w)]  # screen board
        self.preBo = [[-2] * h for i in range(w)]  # off-screen board
        self.playerPos = list(spownPos)
        self.playerMino = [[None, None]] * 3
        self.playerMinoNum = 0
        self.minoRand = self.createMinoRand()
        self.timer = 0
        self.timerLevel = 20
        self.isContact = False
        self.gameOverCount = 100
        self.rec = {}
        self.recLen = 0

        cmds.undoInfo(st=False)
        cmds.currentUnit(t='ntsc')
        cmds.playbackOptions(e=1, playbackSpeed=0, maxPlaybackSpeed=1)
        cmds.evaluationManager(mode='off')
        #cmds.evaluationManager(inv=True)
        cmds.playbackOptions(min=1, max=5000)
        self.createObjs()
Example #17
	def __init__(self):
		self.evaluation = cmds.evaluationManager(q=True, mode=True)[0]
		self.tearOffScriptJob = None
		self.timelineScriptJob = None

		self.ui = cmds.menuBarLayout()

		cmds.menu(l='Scene')
		cmds.menuItem(d=True, dl='On New Scene')
		cmds.menuItem(l='Tear Off Copy', cb=True, c=self.tearOffCallback)
		cmds.menuItem(l='Set Timeline (1001-1200)', cb=True, c=self.timelineCallback)
		# cmds.menuItem(l='Playback: Every Frame', cb=True)

		cmds.menu(l='Evaluation')
		cmds.radioMenuItemCollection()
		cmds.menuItem(l='DG', rb=False, c=lambda *_: self.evalCallback(Component.dg), enable=False)
		cmds.menuItem(l='Serial', rb=False, c=lambda *_: self.evalCallback(Component.serial), enable=False)
		cmds.menuItem(l='Parallel', rb=True, c=lambda *_: self.evalCallback(Component.parallel))
		cmds.menuItem(d=True)
		cmds.menuItem(l='Print Debug Info', c=self.debugEvaluation)

		cmds.menu(l='Keys')
		cmds.menuItem(l='Delete Redundant', c=anim_mancer.tools.keys.deleteRedundant)
		cmds.menuItem(l='Delete All Static Channels', c=anim_mancer.tools.keys.deleteStaticAllChannels)

		cmds.menu(l='Tangents')
		prefsRadioMenu(pref='default tangent', )
		cmds.menuItem(l='', divider=True)
		cmds.menuItem(l='Weighted tangents', checkBox=(cmds.keyTangent(q=True, g=True, wt=True)),
					  c=lambda x: cmds.keyTangent(e=True, g=True, wt=x))

		cmds.menu(l='Time')
		prefsRadioMenu(pref='playback speed', )
		cmds.menuItem(d=True)
		cmds.menuItem(l='Snapping', cb=mel.eval('timeControl -q -snap $gPlayBackSlider;'), c=self.timeSnapCallback)

		gridMenu()

		cmds.menu(l='UI', hm=True)
		cmds.menuItem(l='Close', c=removeUI)
		cmds.setParent('..')

		# ScriptJobs
		cmds.scriptJob(p=self.ui, event=['SceneOpened', self.setDefaults])

		# Defaults
		self.setDefaults()
		# self.createTearOffScriptJob()
		self.createTimelineScriptJob()
		set_timeline()
		savePrefs()
Example #18
	def restore(self):
		'''
		Restore the evaluation manager to its original mode prior to enabling
		this one.  Not necessary to call this when using the "with emModeManager()"
		syntax. Only needed when you explicitly instantiate the mode manager.
		Then you have to call this if you want your original state restored.
		'''
		_dbg( '*** emModeManager::restore' )
		# Prevent multiple calls
		if not self.enabled:
			_dbg( '    Oops, nothing to restore' )
			return

		# Evaluation mode
		if self.restoreMode:
			_dbg( '     Restore mode to %s' % self.restoreMode )
			cmds.evaluationManager( mode=self.restoreMode )

		# Evaluators turned on
		for evaluator in self.evaluatorsTurnedOn:
			_dbg( '     Re-disable %s' % evaluator )
			cmds.evaluator( enable=False, name=evaluator )

		# Evaluators turned off
		for evaluator in self.evaluatorsTurnedOff:
			_dbg( '     Restore %s' % evaluator )
			cmds.evaluator( enable=True, name=evaluator )

		# Plugins we loaded
		for plugin in self.pluginsToUnload:
			try:
				_dbg( '     Unload %s' % plugin )
				cmds.unloadPlugin( plugin )
			except:
				# Just in case someone else already unloaded it
				pass

		self.enabled = False
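The docstring refers to two usage patterns; a hedged sketch of both, where setMode comes from the later setMode example and run_profile is a hypothetical workload:

# Sketch only: run_profile() is a placeholder for whatever should run
# under the temporary evaluation manager configuration.
with emModeManager() as mode_mgr:    # restore happens automatically on exit
    mode_mgr.setMode('emp')
    run_profile()

mode_mgr = emModeManager()           # explicit instantiation...
mode_mgr.setMode('ems+deformer')
run_profile()
mode_mgr.restore()                   # ...needs an explicit restore()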
Example #19
    def prefsRadioMenu(self, pref, *args):

        if pref:

            # Get pref type

            if pref == 'evaluation':
                list = ['off', 'serial', 'parallel']
                current = cmds.evaluationManager(q=True, mode=True)[0]

            elif pref == 'default tangent':
                list = ['auto', 'clamped', 'linear', 'spline']
                current = cmds.keyTangent(q=True, g=True, itt=True)[0]

            elif pref == 'frames per second':
                list = ['film', 'ntsc', 'ntscf']
                current = cmds.currentUnit(q=True, t=True)

            elif pref == 'playback speed':
                list = [0.0, 1.0]
                current = cmds.playbackOptions(q=True, ps=True)

            elif pref == 'up axis':
                list = ['y', 'z']
                current = cmds.upAxis(q=True, ax=True)

            elif pref == 'working units':
                list = ['mm', 'cm', 'm']
                current = cmds.currentUnit(q=True, l=True)

            # Build Menu

            # Divider

            cmds.menuItem(l=pref.capitalize(), divider=True)
            cmds.radioMenuItemCollection()

            # Radio Buttons

            for obj in list:

                if obj == current:
                    currentVar = True

                else:
                    currentVar = False

                item = cmds.menuItem(label=str(obj).capitalize(),
                                     radioButton=currentVar,
                                     c=partial(self.prefFunction, pref, obj))
Example #20
 def wrapper(*args, **kwargs):
     try:
         evalmode = None
         if r9Setup.mayaVersion() >= 2016:
             evalmode = cmds.evaluationManager(mode=True, q=True)[0]
             if evalmode == 'parallel':
                 evalManagerState(mode='off')
         res = func(*args, **kwargs)
     except:
         log.info('Failed on evalManager_DG decorator')
     finally:
         if evalmode:
             evalManagerState(mode=evalmode)
     return res
Example #23
    def __enter__(self):
        self.autoKeyState=cmds.autoKeyframe(query=True, state=True)
        self.timeStore['currentTime'] = cmds.currentTime(q=True)
        self.timeStore['minTime'] = cmds.playbackOptions(q=True, min=True)
        self.timeStore['maxTime'] = cmds.playbackOptions(q=True, max=True)
        self.timeStore['startTime'] = cmds.playbackOptions(q=True, ast=True)
        self.timeStore['endTime'] = cmds.playbackOptions(q=True, aet=True)
        self.timeStore['playSpeed'] = cmds.playbackOptions(query=True, playbackSpeed=True)

        if self.mangage_undo:
            cmds.undoInfo(openChunk=True)
        else:
            cmds.undoInfo(swf=False)
        if self.manage_em:
            if r9Setup.mayaVersion()>=2016:
                self.evalmode=cmds.evaluationManager(mode=True,q=True)[0]
                if self.evalmode=='parallel':
                    evalManagerState(mode='off')
Example #24
 def __save_state(self):
     '''
     Remember the current state of all EM related parameters so that they
     can be restored on exit.
     '''
     _dbg('*** emModeManager::__save_state')
     self.original_mode = cmds.evaluationManager(mode=True, query=True)[0]
     self.original_evaluators_enabled = as_list(
         cmds.evaluator(query=True, enable=True))
     self.original_evaluators_disabled = as_list(
         cmds.evaluator(query=True, enable=False))
     self.original_evaluator_node_types = {}
     for evaluator in self.original_evaluators_enabled + self.original_evaluators_disabled:
         node_types = cmds.evaluator(nodeType=True,
                                     query=True,
                                     name=evaluator)
         if node_types == None:
             node_types = []
         self.original_evaluator_node_types[evaluator] = node_types
     self.plugins_to_unload = []
     return self
 def setEvaluationMode(self, newMode):
     """
     Helper to switch evaluation modes. Handles DG evaluation and both
     serial and parallel evaluation manager modes. Degrades gracefully
     when the evaluation manager is not present.
     """
     if newMode == emPerformanceOptions.EVALUATION_MODE_DG or not self.hasEvaluationManager:
         if self.hasEvaluationManager:
             cmds.evaluationManager( mode='off' )
     elif newMode == emPerformanceOptions.EVALUATION_MODE_EM_SERIAL:
         cmds.evaluationManager( mode='serial' )
     elif newMode == emPerformanceOptions.EVALUATION_MODE_EM_PARALLEL:
         cmds.evaluationManager( mode='parallel' )
     else:
         raise NameError( 'Switching to unknown mode: %s' % str(newMode) )
     self.evalMode = newMode
Example #26
def export_rigs(nodes, clip_data, **kwargs):
    """Bake and export the given rigs as FBX animated skeletons.

    Args:
        nodes: Valid rig nodes
        clip_data: list of clip type data
        **kwargs: keyword arguments for `export_rig`

    Returns:
        None
    """
    if not nodes:
        raise ValueError('Could not run FBX export, needs at least 1 rig')
    # If clips, we're doing an animation export
    LOG.info(clip_data)
    if clip_data:
        for clip in clip_data:
            if not clip[3]:
                continue
            for node in nodes:
                LOG.info("FBX Animation Export: {}".format(node))
                kw = dict()
                kw.update(kwargs)
                kw['clipName'] = clip[0]
                remap_anim(-clip[1]+1)
                kw['bake_start'] = 1
                kw['bake_end'] = clip[2] - (clip[1] - 1)
                fbx_name = kw['user_filename']
                kw['output_path'] = os.path.abspath(os.path.join(kw['output_path'], fbx_name))
                kw['output_path'] = kw['output_path'].replace('\\', '/')
                kw['DeleteStaticChannels'] = kwargs['DeleteStaticChannels']

                # Check if this rig is a propIt rig
                if is_propIt_rig(node):
                    kw['ExportMeshes'] = True
                    kw['FBXExportSkins'] = True

                # Export optimizations for faster baking
                # hide all other rigs and isolateSelection
                hide_rigs()
                cmds.showHidden(node)
                cmds.refresh(suspend=True)
                # isolate select rig to export
                isolate_selected_nodes(node, state=True)

                # Turn parallel evaluation on
                current_eval_state = cmds.evaluationManager(query=True, mode=True)[0]
                cmds.evaluationManager(mode="off")
                cmds.evaluationManager(mode="parallel")

                # Do export
                export_rig(node, **kw)

                # Turn isolate select off
                isolate_selected_nodes(node, state=False)

                # Set evaluation mode back to original state
                cmds.evaluationManager(mode=current_eval_state)
                cmds.refresh(suspend=False)
    else:
        for node in nodes:
            LOG.info("FBX Rig Export: {}".format(node))
            kw = dict()
            kw.update(kwargs)
            kw['clipName'] = 'rig'
            kw['bake_start'] = 1
            kw['bake_end'] = 1
            fbx_name = kw['user_filename']
            kw['output_path'] = os.path.abspath(os.path.join(kw['output_path'], fbx_name))
            kw['output_path'] = kw['output_path'].replace('\\', '/')
            kw['DeleteStaticChannels'] = kwargs['DeleteStaticChannels']
            export_rig(node, **kw)
Example #27
    # Fail if evaluation manager is not available
    if not _hasEvaluationManager():
        print 'ERROR: Evaluation manager is not available.'
        return False

    # Fail if the fileName is not a valid Maya file.
    if fileName != None and not _isMayaFile(fileName):
        print 'ERROR: %s is not a Maya file' % fileName
        return False

    # Load the fileName if it was specified, otherwise the current scene will be tested
    if fileName != None:
        cmds.file(fileName, force=True, open=True)

    # Run the actual test
    oldMode = cmds.evaluationManager(query=True, mode=True)[0]

    if doParallel:
        cmds.evaluationManager(mode='parallel')
    else:
        cmds.evaluationManager(mode='serial')

    success = _testPlayback(outputFile=outputFile,
                            maxFrames=maxFrames,
                            resultsPath=resultsPath)

    cmds.evaluationManager(mode=oldMode)

    if resultsPath != None:
        print 'Result differences dumped to "%s"' % resultsFileName
 def undoIt(self):
     '''
     Build up the undo command data
     '''
     sys.stdout.write("evalManager_switch.undoIt : mode=%s\n" % self.undostate)
     cmds.evaluationManager(mode=self.undostate)
Example #29
	def debugEvaluation(self, *args):
		print cmds.evaluationManager(q=True, mode=True)[0],
		return
 def __init__(self):
     OpenMayaMPx.MPxCommand.__init__(self)  
     self.undostate=cmds.evaluationManager(mode=True,q=True)[0]
Example #31
def optimizePerformance():
    """Function to optimize performance by disabling some Maya functions"""
    cmds.evaluationManager(mode="off")  # set up animation evaluation to DG
Example #32
def emCorrectnessTest(fileName=None,
                      resultsPath=None,
                      verbose=False,
                      modes=['ems'],
                      maxFrames=EMCORRECTNESS_MAX_FRAMECOUNT,
                      dataTypes=['matrix', 'vertex', 'screen'],
                      emSetup=EMCORRECTNESS_NO_SETUP):
    """
    Evaluate the file in multiple modes and compare the results.

    fileName:    Name of file to load for comparison. None means use the current scene
    resultsPath: Where to store the results. None means don't store anything
    verbose:     If True then dump the differing values when they are encountered
    modes:       List of modes to run the tests in. 'ems' and 'emp' are the
                 only valid ones. A mode can optionally enable or disable an
                 evaluator as follows:
                     'ems+deformer': Run in EM Serial mode with the deformer evaluator turned on
                     'emp-dynamics': Run in EM Parallel mode with the dynamics evaluator turned off
                     'ems+deformer-dynamics': Run in EM Serial mode with the dynamics evaluator
                                              turned off and the deformer evaluator turned on
    maxFrames:   Maximum number of frames in the playback, to avoid long tests.
    dataTypes:   List of data types to include in the analysis. These are the possibilities:
                 matrix: Any attribute that returns a matrix
                 vertex: Attributes on the mesh shape that hold vertex positions
                 number: Any attribute that returns a number
                 screen: Screenshot after the animation runs
    emSetup:     What to do before running an EM mode test
                 EMCORRECTNESS_NO_SETUP        Do nothing, just run playback
                 EMCORRECTNESS_DOUBLE_PLAYBACK Run playback twice to ensure graph is valid
                 EMCORRECTNESS_INVALIDATE      Invalidate the graph to force rebuild

    Returns a list of value tuples indicating the run mode and the number of
    (additions,changes,removals) encountered in that mode. e.g. ['ems', (0,0,0)]

    If verbose is true then instead of counts return a list of actual changes.
    e.g. ['ems', ([], ["plug1,oldValue,newValue"], [])]

    Changed values are a CSV 3-tuple with "plug name", "value in DG mode", "value in the named EM mode"
    in most cases.

    In the special case of an image difference the plug name will be one
    of the special ones below and the values will be those generated by the
    comparison method used:
        SCREENSHOT_PLUG_MD5 : MD5 values when the image compare could not be done
        SCREENSHOT_PLUG_MAG : MD5 and image difference values from ImageMagick
        SCREENSHOT_PLUG_IMF : MD5 and image difference values from imf_diff
    """
    # Fail if the fileName is not a valid Maya file.
    if fileName != None and not __isMayaFile(fileName):
        print 'ERROR: %s is not a Maya file' % fileName
        return []

    # Load the fileName if it was specified, otherwise the current scene will be tested
    if fileName != None:
        cmds.file(fileName, force=True, open=True)

    dgResults = None
    dgResultsImage = None

    # Using lists allows me to do a comparison of two identical modes.
    # If resultsPath is given then the second and successive uses of the
    # same type will go into files with an incrementing suffix (X.dg.txt,
    # X.dg1.txt, X.dg2.txt...)
    modeResultsFiles = []
    modeResultsImageFiles = []
    results = []
    emPlugFileName = None

    # Create a list of unique mode suffixes, appending a count number whenever
    # the same mode appears more than once on the modes list.
    modeCounts = {}
    uniqueModes = []
    modeCounts['dg'] = 1
    for mode in modes:
        modeCounts[mode] = modeCounts.get(mode, 0) + 1
        suffix = ''
        if modeCounts[mode] > 1:
            suffix = str(modeCounts[mode])
        uniqueModes.append('%s%s' % (mode, suffix))

    if resultsPath != None:
        # Make sure the path exists
        if not os.path.isdir(resultsPath):
            os.makedirs(resultsPath)

        emPlugFileName = os.path.join(resultsPath, 'EMPlugs.txt')

        # Build the rest of the paths to the results files.
        # If no file was given default the results file prefix to "SCENE".
        if fileName != None:
            # Absolute paths cannot be appended to the results path. Assume
            # that in those cases just using the base name is sufficient.
            if os.path.isabs(fileName):
                resultsPath = os.path.join(resultsPath,
                                           os.path.basename(fileName))
            else:
                resultsPath = os.path.join(resultsPath, fileName)
        else:
            resultsPath = os.path.join(resultsPath, 'SCENE')

        dgResults = '%s.dg.txt' % resultsPath
        modeCounts['dg'] = 1

        for mode in uniqueModes:
            modeResultsFiles.append('%s.%s.txt' % (resultsPath, mode))
    else:
        # Still need the file args to pass in to DGState. None = don't output.
        for mode in modes:
            modeResultsFiles.append(None)

    # If the image comparison was requested figure out where to store the
    # file. Done separately because even if the files won't be saved the image
    # comparison needs to dump a file out for comparison.
    if 'screen' in dataTypes:
        if resultsPath == None:
            imageDir = tempfile.gettempdir()
            if fileName != None:
                # Absolute paths cannot be appended to the results path. Assume
                # that in those cases just using the base name is sufficient.
                if os.path.isabs(fileName):
                    imageDir = os.path.join(imageDir,
                                            os.path.basename(fileName))
                else:
                    imageDir = os.path.join(imageDir, fileName)
            else:
                imageDir = os.path.join(imageDir, 'SCENE')
        else:
            imageDir = resultsPath

        dgResultsImage = '%s.dg.png' % imageDir
        for mode in uniqueModes:
            modeResultsImageFiles.append('%s.%s.png' % (imageDir, mode))
    else:
        dgResultsImage = None
        for mode in uniqueModes:
            modeResultsImageFiles.append(None)

    # Fail if evaluation manager is not available. Should never happen.
    if not __hasEvaluationManager():
        print 'ERROR: Evaluation manager is not available.'
        return None

    emPlugs = None
    comparisons = {}
    # Record the DG evaluation version of the results
    with emModeManager() as modeMgr:
        modeMgr.setMode('dg')
        _playback(maxFrames)
        mDG = DGState(dgResults,
                      dgResultsImage,
                      doEval=True,
                      dataTypes=dataTypes)

        # Walk all of the modes requested and run the tests for them
        for modeNum in range(len(modes)):
            modeMgr.setMode(modes[modeNum])
            if emSetup == EMCORRECTNESS_DOUBLE_PLAYBACK:
                _playback(maxFrames)
            elif emSetup == EMCORRECTNESS_INVALIDATE:
                cmds.evaluationManager(invalidate=True)
            _playback(maxFrames)
            if emPlugs == None:
                emPlugs = {}
                __findEmPlugs(emPlugs)
                mDG.filterState(emPlugs)
                if emPlugFileName:
                    try:
                        emHandle = open(emPlugFileName, 'w')
                        for (node, plugList) in emPlugs.iteritems():
                            emHandle.write('%s\n' % node)
                            for plug in plugList.keys():
                                emHandle.write('\t%s\n' % plug)
                        emHandle.close()
                    except Exception, ex:
                        print 'ERROR: Could not write to EM plug file %s: "%s"' % (
                            emPlugFileName, str(ex))
            results.append(
                DGState(modeResultsFiles[modeNum],
                        modeResultsImageFiles[modeNum],
                        doEval=False,
                        dataTypes=dataTypes))
            results[modeNum].filterState(emPlugs)
            comparisons[modes[modeNum]] = mDG.compare(results[modeNum],
                                                      verbose=verbose)

            TODO(
                'REFACTOR',
                'Remove the unused first and third values once QATA is modified to accept it',
                'MAYA-45714')
            if verbose:
                comparisons[modes[modeNum]] = ([], comparisons[modes[modeNum]],
                                               [])
            else:
                comparisons[modes[modeNum]] = (0, comparisons[modes[modeNum]],
                                               0)
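A hedged sketch of a call matching the docstring above; the scene path and results directory are hypothetical:

# Sketch only: paths are illustrative.
diffs = emCorrectnessTest(fileName='scenes/walk_cycle.ma',
                          resultsPath='/tmp/emCorrectness',
                          verbose=True,
                          modes=['ems', 'emp-dynamics'],
                          dataTypes=['matrix', 'vertex'])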
Example #33
def Bake(assets, bakeSetName='bakeSet', startFrame=None, endFrame=None):
    _str_func = 'Bake'

    if startFrame is None:
        startFrame = mc.playbackOptions(q=True, min=True)
    if endFrame is None:
        endFrame = mc.playbackOptions(q=True, max=True)

    baked = False

    #if(mc.optionVar(exists='cgm_bake_set')):
    #bakeSetName = mc.optionVar(q='cgm_bake_set')

    # set tangent options to spline
    currentTangent = mc.keyTangent(q=True, g=True, ott=True)[0]
    mc.keyTangent(g=True, ott="spline")

    #Eval mode ----
    _evalMode = mc.evaluationManager(q=True, mode=True)
    mc.evaluationManager(mode='off')

    bakeTransforms = []
    bakeSets = []

    currentTime = mc.currentTime(q=True)
    log.debug("{0} ||currentTime: {1}".format(_str_func, currentTime))

    for asset in assets:
        #if ':' in assets:
        log.debug("{0} || asset: {1}".format(_str_func, asset))

        topNodeSN = asset.split(':')[-1]

        # gather data
        namespaces = asset.split(':')[:-1]

        if len(namespaces) > 0:
            ns = ':'.join(asset.split(':')[:-1]) + ':'
        else:
            ns = "%s_" % asset.split('|')[-1]

        # bake
        bakeSet = "%s%s" % (ns, bakeSetName)
        if mc.objExists(bakeSet):
            if bakeSet not in bakeSets:
                bakeSets.append(bakeSet)
                bakeTransforms += mc.sets(bakeSet, q=True)
        else:
            bakeTransforms.append(asset)
        #else:
        #    bakeTransforms.append(asset)
        log.debug("{0} || bakeSet: {1}".format(_str_func, bakeSet))

    if len(bakeTransforms) > 0:
        log.debug("{0} || baking transforms".format(_str_func))

        #pprint.pprint(bakeTransforms)
        log.debug("{0} || time | start: {1} | end: {2}".format(
            _str_func, startFrame, endFrame))

        mc.bakeResults(bakeTransforms,
                       simulation=True,
                       t=(startFrame, endFrame),
                       sampleBy=1,
                       disableImplicitControl=True,
                       preserveOutsideKeys=False,
                       sparseAnimCurveBake=False,
                       removeBakedAttributeFromLayer=False,
                       removeBakedAnimFromLayer=True,
                       bakeOnOverrideLayer=False,
                       minimizeRotation=True,
                       controlPoints=False,
                       shape=True)

        mc.setInfinity(bakeTransforms, pri='constant', poi='constant')

        baked = True
    else:
        baked = False

    mc.keyTangent(g=True, ott=currentTangent)

    #eval mode restore ----
    if _evalMode[0] != 'off':
        print "Eval mode restored: {0}".format(_evalMode[0])
        mc.evaluationManager(mode=_evalMode[0])

    mc.currentTime(currentTime)

    return baked
    def testPlayback(self):
        """
        Run a playback sequence, repeating 'iterationCount' times to get an overall average.
        Dump all of the results into the object's emPerformanceResults member.

            For EM modes returns a 3-tuple of emPerformanceResults object where timing will be stored
                1. Timing for evaluation graph creation (0 in DG mode) - 1 repetition
                2. Timing for scheduling graph creation (0 in DG mode) - 1 repetition
                3. Timing for playback - self.options.iterationCount repetitions
            For DG mode just returns the third element of the tuple.

        In the EM modes there is a bootstrapping process for evaluation that
        works as follows:
            - First frame of animation is used to build the evaluation graph
            - Second frame of animation is used to build the scheduling graph
            - Third frame of animation may be used to clean up any uninitialized caches in some scenes

        We want to separate the timing of the graph creation with the
        playback. The graph creation is a one-time cost whereas the playback
        is a repeating cost. Keeping these times separate lets us distinguish
        between per-frame evaluation speed and startup cost.
        """
        graphCreationResults = None
        graphSchedulingResults = None
        if 'off' != cmds.evaluationManager( query=True, mode=True )[0]:
            graphCreationResults = emPerformanceResults()
            graphSchedulingResults = emPerformanceResults()
        playbackResults = emPerformanceResults()
        self.startFrame = cmds.playbackOptions( query=True, minTime=True )
        actualEndFrame = cmds.playbackOptions( query=True, maxTime=True )
        self.endFrame = actualEndFrame

        # If you have a scene with hugely long playback you don't want to be
        # waiting forever for the runs so limit the frame length to something
        # reasonable. (Or this could be recoded to make this an option.)
        if actualEndFrame - self.startFrame >= EMPERFORMANCE_PLAYBACK_MAX:
            self.endFrame = self.startFrame + EMPERFORMANCE_PLAYBACK_MAX - 1
            cmds.playbackOptions( maxTime=self.endFrame )

        # Make sure it only plays a single loop
        oldLoopStyle = cmds.playbackOptions( query=True, loop=True )
        cmds.playbackOptions( loop="once" )

        # If using any of the evaluation manager modes collect the graph
        # building timing. A slight hack is used to collect this by nudging
        # the current time back and forth.
        if graphCreationResults:
            currentFrame = cmds.currentTime( query=True )
            graphCreationResults.startRep( 0 )
            cmds.currentTime( currentFrame + 1, edit=True )
            graphCreationResults.endRep( 0 )
            graphSchedulingResults.startRep( 0 )
            cmds.currentTime( currentFrame + 2, edit=True )
            graphSchedulingResults.endRep( 0 )
            # The currentTime call before playback will take care of any
            # uninitialized caches.

        self.progress.startPhase( '%s Play (%d times)' % (self.evalMode, self.options.iterationCount) )

        for idx in range(0, self.options.iterationCount):
            # Move to first frame to avoid slight variation in self.options.iterationCount
            # caused by the initialization of playback from different frames.
            cmds.currentTime( self.startFrame, edit=True )

            # Time a single runthrough of the entire animation sequence
            playbackResults.startRep( idx )
            cmds.play( wait=True )
            playbackResults.endRep( idx )

        # Restore the original options
        cmds.playbackOptions( loop=oldLoopStyle )
        cmds.playbackOptions( maxTime=actualEndFrame )

        # If the creation and scheduling results exist return them too
        if graphCreationResults:
            return (graphCreationResults, graphSchedulingResults, playbackResults)

        # Otherwise just the playback results go back. The caller has to
        # determine which set of results they have received.
        return playbackResults
Example #35
    def restore_state(self):
        '''
        Restore the evaluation manager to its original mode prior to creation
        of this object. Using the "with" syntax this will be called automatically.
        You only need to call it explicitly when you instantiate the mode manager
        as an object.

        For now the state is brute-force restored to what the original was without
        regard to current settings. The assumptions are that the states are
        independent, and the performance is good enough that it's not necessary to
        remember just the things that were changed.
        '''
        _dbg('*** emModeManager::restore_state')

        # Evaluation mode
        _dbg('     Restore mode to %s' % self.original_mode)
        cmds.evaluationManager(mode=self.original_mode)

        # Evaluators originally on
        for evaluator in self.original_evaluators_enabled:
            _dbg('     Enabling {}'.format(evaluator))
            cmds.evaluator(enable=True, name=evaluator)

        # Evaluators originally off
        for evaluator in self.original_evaluators_disabled:
            _dbg('     Disabling {}'.format(evaluator))
            cmds.evaluator(enable=False, name=evaluator)

        # Node types for evaluators
        for (evaluator, restored_node_types
             ) in self.original_evaluator_node_types.iteritems():
            # The list of node types is too long to just set/unset everything so instead
            # compare the current list with the original list and toggle on and off as
            # appropriate to restore back to the original.
            current_node_types = cmds.evaluator(name=evaluator,
                                                nodeType=True,
                                                query=True)
            if current_node_types == None:
                current_node_types = []
            for node_type in current_node_types:
                if node_type not in restored_node_types:
                    _dbg('     Enabling node type {} on {}'.format(
                        node_type, evaluator))
                    cmds.evaluator(name=evaluator,
                                   nodeType=node_type,
                                   enable=False)
            for node_type in restored_node_types:
                if node_type not in current_node_types:
                    _dbg('     Disabling node type {} on {}'.format(
                        node_type, evaluator))
                    cmds.evaluator(name=evaluator,
                                   nodeType=node_type,
                                   enable=True)

        # Plugins we loaded
        for plugin in self.plugins_to_unload:
            try:
                _dbg('     Unload %s' % plugin)
                cmds.unloadPlugin(plugin)
            except:
                # Just in case someone else already unloaded it
                pass
Example #36
    def setMode(self, modeName):
        '''
        Ensure the EM has a named mode set. See class docs for details on mode names.
        The changes are cumulative so long as they don't conflict, so this only sets
        the mode to serial:
            self.setMode('emp')
            self.setMode('ems')
        however this will enable both evaluators
            self.setMode('+deformer')
            self.setMode('+cache')

        Changes can also be put into one single string:
            self.setMode( 'ems+deformer+cache' )

        Lastly by using the '/' character as a separator the enabled node types on
        evaluators can also be manipulated:
            self.setMode( 'ems+deformer+cache/+expression-transform' )

        raises SyntaxError if the mode name is not legal
        '''
        _dbg('*** Setting mode to %s' % modeName)

        # To avoid partial setting the state isn't touched until all mode information
        # has been parsed.
        #
        evaluators_to_enable = []
        evaluators_to_disable = []
        node_types_to_enable = {}
        node_types_to_disable = {}

        match = RE_MODE.match(modeName)
        if match:
            if match.group(1) == 'ems':
                em_mode = 'serial'
            elif match.group(1) == 'emp':
                em_mode = 'parallel'
            elif match.group(1) == 'dg':
                em_mode = 'off'
            elif match.group(1) == '':
                em_mode = cmds.evaluationManager(query=True, mode=True)[0]
            else:
                raise SyntaxError('%s is not a recognized EM mode' %
                                  match.group(1))

            _dbg('    +++ Processing evaluator modes {}'.format(
                match.group(2)))

            # Separate the evaluators from the node types
            evaluator_split = match.group(2).split('/')
            node_types = evaluator_split[1:]
            node_types_to_add = []
            node_types_to_remove = []

            # Now handle the node type information
            for node_type in node_types:
                _dbg('       Raw Node type {}'.format(node_type))
                action = node_type[0]
                node_type_name = node_type[1:]
                _dbg('    ... Node type {} {}'.format(action, node_type_name))

                # Don't allow both '+' and '-', or even two the same
                if node_type_name in node_types_to_add or node_type_name in node_types_to_remove:
                    raise SyntaxError(
                        'Node type {} was specified twice'.format(
                            node_type_name))

                if action == '+':
                    _dbg('       Will turn on node type {}'.format(
                        node_type_name))
                    node_types_to_add.append(node_type_name)
                elif action == '-':
                    _dbg('       Will turn off node type {}'.format(
                        node_type_name))
                    node_types_to_remove.append(node_type_name)
                else:
                    raise SyntaxError(
                        '{} is not a recognized node type mode (+XX or -XX)'.
                        format(node_type))

            # Process the evaluator modes
            eval_matches = RE_EVALUATORS.findall(evaluator_split[0])
            for eval_match in as_list(eval_matches):
                _dbg('    ... Processing evaluator mode {}'.format(eval_match))
                action = eval_match[0]
                evaluator_info = eval_match[1:].split('/')
                evaluator = evaluator_info[0]
                node_types = evaluator_info[1:]

                # Don't allow both '+' and '-', or even two the same
                if evaluator in as_list(
                        evaluators_to_enable) or evaluator in as_list(
                            evaluators_to_disable):
                    raise SyntaxError('Evaluator %s was specified twice' %
                                      evaluator)

                if action == '+':
                    _dbg('       Will turn on %s' % evaluator)
                    evaluators_to_enable.append(evaluator)
                elif action == '-':
                    _dbg('       Will turn off %s' % evaluator)
                    evaluators_to_disable.append(evaluator)
                else:
                    raise SyntaxError(
                        '%s is not a recognized EM evaluator command (+XX or -XX)'
                        % eval_match)

                # Now handle the node type information
                for node_type in as_list(node_types_to_add):
                    node_types_to_enable[evaluator] = node_types_to_enable.get(
                        evaluator, []) + [node_type]
                for node_type in as_list(node_types_to_remove):
                    node_types_to_disable[
                        evaluator] = node_types_to_disable.get(
                            evaluator, []) + [node_type]
        else:
            raise SyntaxError(
                '%s is not a recognized EM command "{ems|emp|dg}{[+-]XX}*{/[+-]YY}*"'
                % modeName)

        # Now that the state is prepared switch to the new modes
        cmds.evaluationManager(mode=em_mode)

        # Check to see which evaluators had to be turned on and remember them.
        for turn_on in evaluators_to_enable:
            if turn_on in EVALUATOR_PLUGINS:
                # Check the loaded state first to prevent the warning message if it's already loaded
                if not cmds.pluginInfo(
                        EVALUATOR_PLUGINS[turn_on], query=True, loaded=True):
                    _dbg('    Loading plugin %s' % EVALUATOR_PLUGINS[turn_on])
                    loaded = cmds.loadPlugin(EVALUATOR_PLUGINS[turn_on])
                else:
                    loaded = None
                # We like to avoid perturbing state so if we loaded the
                # plug-in we'll unload it when done
                if loaded != None:
                    self.plugins_to_unload += loaded
            cmds.evaluator(enable=True, name=turn_on)
            _dbg('     Enable {}'.format(turn_on))

        # Check to see which evaluators had to be turned off and remember them.
        for turn_off in evaluators_to_disable:
            cmds.evaluator(enable=False, name=turn_off)
            _dbg('     Disable {}'.format(turn_off))

        # If any node type changes were specified do them now
        for (evaluator, node_types) in node_types_to_enable.iteritems():
            for node_type in node_types:
                cmds.evaluator(name=evaluator, enable=True, nodeType=node_type)
                _dbg('     Enable type {} on {}'.format(node_type, evaluator))
        for (evaluator, node_types) in node_types_to_disable.iteritems():
            for node_type in node_types:
                cmds.evaluator(name=evaluator,
                               enable=False,
                               nodeType=node_type)
                _dbg('     Disable type {} on {}'.format(node_type, evaluator))