Example #1
    def run(self):
        """
        Examine animated deformer nodes and check how they are used.

        If the 'details' option is set the CSV columns are:
            DeformerNode      : Name of the animated deformer node
            Type              : Type of this node
            SupportedGeometry : True if the geometry processed by the animated
                                deformer node is supported by the deformer evaluator

        otherwise the CSV columns are:
            DeformerMode      : Description of the usage for the animated deformer node
            Type              : Deformer type
            SupportedGeometry : True if the geometry processed by the animated
                                deformer nodes is supported by the deformer evaluator
            Count             : Number of animated deformer nodes in this mode

        See is_supported_geometry() for the criteria a geometry must meet to be supported.

        In 'details' mode one row is output for every animated deformer node;
        otherwise one row is output for each usage mode.

        Return True if the analysis succeeded, else False
        """
        with emModeManager() as em_manager:
            em_manager.setMode('ems')
            em_manager.rebuild()

            # Get all animated nodes.
            try:
                json_nodes = json.loads(cmds.dbpeek(op='graph', eg=True, all=True, a='nodes'))
                animated_nodes = set(json_nodes['nodes'])
            except Exception as ex:
                self.error('Graph examination failure ({0:s})'.format(str(ex)))
                return False
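
The try block above relies only on the 'nodes' key of the JSON payload returned by the dbpeek 'graph' operation. A minimal sketch of the assumed payload shape (node names are illustrative):

    import json

    # Hypothetical dbpeek payload; the analytic only reads the 'nodes' list.
    sample = '{"nodes": ["transform1", "pCubeShape1", "cluster1"]}'
    animated_nodes = set(json.loads(sample)['nodes'])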
Example #2
    def __enter__(self):
        '''Enter the scope, setting up the evaluator managers and initial states'''
        self.em_mgr = emModeManager()
        self.em_mgr.setMode('emp')
        self.em_mgr.setMode('+cache')
        # Enable idle build to make sure we can rebuild the graph when waiting.
        self.em_mgr.idle_action = emModeManager.idle_action_build

        # Set up caching options
        self.cache_mgr = CacheEvaluatorManager()
        self.cache_mgr.save_state()
        self.cache_mgr.plugin_loaded = True
        self.cache_mgr.enabled = True
        self.cache_mgr.cache_mode = CACHE_STANDARD_MODE_EVAL
        self.cache_mgr.resource_guard = False
        self.cache_mgr.fill_mode = 'syncAsync'

        # Set up autokey options
        self.auto_key_state = cmds.autoKeyframe(q=True, state=True)
        self.auto_key_chars = cmds.autoKeyframe(q=True, characterOption=True)
        cmds.autoKeyframe(e=True, state=False)

        self.waitForCache()

        return self
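
The matching __exit__ is not part of this excerpt. A minimal sketch of what it would need to undo, assuming restore_state() methods exist as counterparts to save_state() (the method names on both managers are assumptions here):

    def __exit__(self, exc_type, exc_value, traceback):
        '''Leave the scope, restoring the states captured in __enter__ (sketch)'''
        # Restore the autokey state queried in __enter__.
        cmds.autoKeyframe(e=True, state=self.auto_key_state)
        # Assumed counterpart of save_state() on CacheEvaluatorManager.
        self.cache_mgr.restore_state()
        # Assumed method restoring the previous evaluation manager mode.
        self.em_mgr.restore_state()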
Example #3
    def __enter__(self):
        '''Enter the scope, setting up the evaluation manager with the cache evaluator disabled'''
        self.em_mgr = emModeManager()
        self.em_mgr.setMode('emp')
        self.em_mgr.setMode('-cache')

        return self
Example #4
    def run(self):
        """
        Examine animated cluster nodes and check how they are used.  It checks
        whether they are used for fixed rigid transform, weighted rigid transform
        or per-vertex-weighted transform.

        When the 'details' option is set the CSV columns are:
            ClusterNode        : Name of the animated cluster node
            envelope_is_static : True if the envelope is not animated and its value is 1
            uses_weights       : True if weights are used in the node
            uses_same_weight   : True if the weight is the same for all vertices
            Mode               : Mode for this node
            supported_geometry : True if the geometry processed by the animated cluster
                                 node is supported by the deformer evaluator

        otherwise the CSV columns are:
            ClusterMode        : Description of the usage for the animated cluster node
            Mode               : Mode for animated cluster nodes meeting these criteria
            supported_geometry : True if the geometry processed by animated cluster nodes
                                 meeting these criteria is supported by the deformer evaluator
            Count              : Number of animated cluster nodes in this mode

        See is_supported_geometry() for the criteria a geometry must meet to be supported.

        In 'details' mode one row is output for every animated cluster node;
        otherwise one row is output for each mode.

        The "Mode" is an integer value with the following meaning:
        - 1 => Rigid transform          : cluster node only performs a rigid transform
        - 2 => Weighted rigid transform : cluster node performs a rigid transform, but it
                                          is weighted down by a factor
        - 3 => Per-vertex transform     : cluster node computes a different transform for
                                          each individually-weighted vertex

        Return True if the analysis succeeded, else False
        """
        with emModeManager() as em_manager:
            em_manager.setMode('ems')
            em_manager.rebuild()

            # Get all animated nodes.
            try:
                json_nodes = json.loads(
                    cmds.dbpeek(op='graph', eg=True, all=True, a='nodes'))
                animated_nodes = set(json_nodes['nodes'])
            except Exception as ex:
                self.error('Graph examination failure ({0:s})'.format(str(ex)))
                return False
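
The two weight-related boolean columns drive the Mode classification described above. A hypothetical helper showing that mapping (the function and its flags are illustrative, not the analytic's internals; envelope_is_static is reported separately and does not affect the mode):

    def classify_cluster_mode(uses_weights, uses_same_weight):
        '''Map cluster weight usage to the documented Mode values (sketch).'''
        if not uses_weights:
            return 1  # Rigid transform: no weighting at all
        if uses_same_weight:
            return 2  # Weighted rigid transform: one factor for every vertex
        return 3      # Per-vertex transform: individually-weighted vertices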
Example #5
    def run(self):
        """
        Examine animated tweak nodes and check how they are used.  It checks
        whether they use the relative or absolute mode and whether individual
        tweaks themselves are actually used.

        If the 'details' option is set the CSV columns are:
            TweakNode   : Name of the animated tweak node
            Relative    : Value of the relative_tweak attribute of the animated tweak node
            uses_tweaks : True if tweaks are used in the node
            UsesMesh    : True if some of the geometry processed by the animated tweak node is a mesh

        otherwise the CSV columns are:
            TweakType   : Description of the usage for the animated tweak node
            Relative    : Value of the relative_tweak attribute of the animated
                          tweak nodes meeting these criteria
            uses_tweaks : True if tweaks are used in the nodes meeting these criteria
            UsesMesh    : True if some of the geometry processed by the animated tweak
                          nodes meeting these criteria is a mesh
            Count       : Number of animated tweak nodes meeting these criteria

        In 'details' mode one row is output for every animated tweak node;
        otherwise one row is output for each usage type.

        Return True if the analysis succeeded, else False
        """
        with emModeManager() as em_manager:
            em_manager.setMode('ems')
            em_manager.rebuild()

            # Get all animated nodes.
            try:
                node_list = cmds.dbpeek(op='graph',
                                        eg=True,
                                        all=True,
                                        a='nodes')
                json_nodes = json.loads(node_list)
                animated_nodes = set(json_nodes['nodes'])
            except Exception as ex:
                self.error('Graph examination failure ({0:s})'.format(str(ex)))
                return False
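
The Relative column reflects the tweak node's relative/absolute switch. A small sketch of querying it per node, assuming the plug on the tweak node is named 'relativeTweak' (the plug name is an assumption here):

    # List every tweak node and report its relative/absolute mode.
    for node in cmds.ls(type='tweak'):
        is_relative = cmds.getAttr('%s.relativeTweak' % node)
        print '%s: relative=%s' % (node, is_relative)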
Example #6
    def run(self):
        """
        Generates a JSON structure containing the evaluation graph information

        If the 'details' option is set then include the extra information as described
        in the analytic help information.
        """
        node_data = {
            'BuildTime': 0,
            'Parallel': {},
            'Serial': {},
            'GloballySerial': {},
            'Untrusted': {}
        }
        node_counts = {
            scheduling_type: 0
            for scheduling_type in node_data.keys()
        }

        try:
            with emModeManager() as em_manager:

                # Rebuild the graph in parallel mode, then extract the schedulingGraph event
                # timing from it, which is the root level timing event for graph rebuilding.
                # (The rebuild also counts invalidation and redraw time so that can't be used as-is.)
                em_manager.setMode('emp')

                self.debug('Getting profiler information')
                cmds.profiler(sampling=True)
                em_manager.rebuild()
                cmds.profiler(sampling=False)
                node_data['BuildTime'] = self.__get_event_timing(
                    'GraphConstruction')
                self.debug('Got the sample time of {}'.format(
                    node_data['BuildTime']))

                em_json = None
                try:
                    graph_data = cmds.dbpeek(
                        operation='graph',
                        all=True,
                        evaluationGraph=True,
                        argument=['plugs', 'connections', 'scheduling'])
                    em_json = json.loads(graph_data)
                except Exception as ex:
                    self.warning(
                        'First evaluation failed, forcing time change for second attempt ({})'
                        .format(ex))
                    # If the first attempt to get the graph fails maybe the
                    # rebuild didn't work so force it the old ugly way for
                    # now.
                    now = cmds.currentTime(query=True)
                    cmds.currentTime(now + 1)
                    cmds.currentTime(now)

                if em_json is None:
                    # Second chance to get the graph data. This attempt is not
                    # protected by an inner try block because if it fails we
                    # want the outer exception handler to kick in.
                    graph_data = cmds.dbpeek(
                        operation='graph',
                        all=True,
                        evaluationGraph=True,
                        argument=['plugs', 'connections', 'scheduling'])
                    em_json = json.loads(graph_data)

            if self.option(OPTION_SUMMARY):
                # Gather node summary information if requested
                summary_info = self.__gather_summary_data(em_json)
                node_data['summary'] = summary_info

            # Gather node detail information if requested
            detailed_info = self.__gather_detail_data(em_json) if self.option(
                OPTION_DETAILS) else {}

            # Relies on the fact that the scheduling output keys match the
            # ones being put into the node_data dictionary, which they do by
            # design.
            for (scheduling_type,
                 scheduling_list) in em_json['scheduling'].iteritems():
                try:
                    node_counts[scheduling_type] = len(scheduling_list)
                    # Any extra scheduling information is for detailed output only
                    if scheduling_type not in node_data.keys():
                        if self.option(OPTION_DETAILS):
                            node_data[scheduling_type] = scheduling_list
                        if self.option(OPTION_SUMMARY):
                            node_counts[scheduling_type] = node_counts.get(
                                scheduling_type, 0) + len(scheduling_list)
                        continue

                    # The simplest output will just have the nodes of each
                    # type in a list.
                    if not self.option(OPTION_SUMMARY) and not self.option(
                            OPTION_DETAILS):
                        node_data[scheduling_type] = scheduling_list
                        continue

                    node_data[scheduling_type] = {}
                    for node in scheduling_list:
                        node_info = {}

                        # Add in the detailed information if requested
                        if node in detailed_info:
                            node_info.update(detailed_info[node])

                        # Attach the node data to its name
                        node_data[scheduling_type][self._node_name(
                            node)] = node_info

                except Exception as ex:
                    # There may be a formatting problem if scheduling types
                    # are not found since they will be dumped even if empty.
                    self.warning(
                        'Node information not available for type {} ({})'.
                        format(scheduling_type, ex))
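
For reference, the node_data structure assembled above has this general shape when both the summary and details options are off (values here are illustrative only):

    # Illustrative result shape; real runs contain actual node names.
    node_data = {
        'BuildTime': 1234,               # GraphConstruction event timing
        'Parallel': ['node1', 'node2'],  # plain node lists in basic mode
        'Serial': [],
        'GloballySerial': [],
        'Untrusted': [],
    }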
Example #7
def emCorrectnessTest(fileName=None,
                      resultsPath=None,
                      verbose=False,
                      modes=['ems'],
                      maxFrames=EMCORRECTNESS_MAX_FRAMECOUNT,
                      dataTypes=['matrix', 'vertex', 'screen'],
                      emSetup=EMCORRECTNESS_NO_SETUP):
    """
    Evaluate the file in multiple modes and compare the results.

    fileName:    Name of file to load for comparison. None means use the current scene
    resultsPath: Where to store the results. None means don't store anything
    verbose:     If True then dump the differing values when they are encountered
    modes:       List of modes to run the tests in. 'ems' and 'emp' are the
                 only valid ones. A mode can optionally enable or disable an
                 evaluator as follows:
                     'ems+deformer': Run in EM Serial mode with the deformer evaluator turned on
                     'emp-dynamics': Run in EM Parallel mode with the dynamics evaluator turned off
                     'ems+deformer-dynamics': Run in EM Serial mode with the dynamics evaluator
                                              turned off and the deformer evaluator turned on
    maxFrames:   Maximum number of frames in the playback, to avoid long tests.
    dataTypes:   List of data types to include in the analysis. These are the possibilities:
                 matrix: Any attribute that returns a matrix
                 vertex: Attributes on the mesh shape that hold vertex positions
                 number: Any attribute that returns a number
                 screen: Screenshot after the animation runs
    emSetup:     What to do before running an EM mode test
                 EMCORRECTNESS_NO_SETUP        Do nothing, just run playback
                 EMCORRECTNESS_DOUBLE_PLAYBACK Run playback twice to ensure graph is valid
                 EMCORRECTNESS_INVALIDATE      Invalidate the graph to force rebuild

    Returns a list of value tuples indicating the run mode and the number of
    (additions,changes,removals) encountered in that mode. e.g. ['ems', (0,0,0)]

    If verbose is true then instead of counts return a list of actual changes.
    e.g. ['ems', ([], ["plug1,oldValue,newValue"], [])]

    Changed values are a CSV 3-tuple with "plug name", "value in DG mode", "value in the named EM mode"
    in most cases.

    In the special case of an image difference the plug name will be one
    of the special ones below and the values will be those generated by the
    comparison method used:
        SCREENSHOT_PLUG_MD5 : MD5 values when the image compare could not be done
        SCREENSHOT_PLUG_MAG : MD5 and image difference values from ImageMagick
        SCREENSHOT_PLUG_IMF : MD5 and image difference values from imf_diff
    """
    # Fail if the fileName is not a valid Maya file.
    if fileName is not None and not __isMayaFile(fileName):
        print 'ERROR: %s is not a Maya file' % fileName
        return []

    # Load the fileName if it was specified, otherwise the current scene will be tested
    if fileName is not None:
        cmds.file(fileName, force=True, open=True)

    dgResults = None
    dgResultsImage = None

    # Using lists allows me to do a comparison of two identical modes.
    # If resultsPath is given then the second and successive uses of the
    # same type will go into files with an incrementing suffix (X.dg.txt,
    # X.dg1.txt, X.dg2.txt...)
    modeResultsFiles = []
    modeResultsImageFiles = []
    results = []
    emPlugFileName = None

    # Create a list of unique mode suffixes, appending a count number whenever
    # the same mode appears more than once on the modes list.
    modeCounts = {}
    uniqueModes = []
    modeCounts['dg'] = 1
    for mode in modes:
        modeCounts[mode] = modeCounts.get(mode, 0) + 1
        suffix = ''
        if modeCounts[mode] > 1:
            suffix = str(modeCounts[mode])
        uniqueModes.append('%s%s' % (mode, suffix))

    if resultsPath is not None:
        # Make sure the path exists
        if not os.path.isdir(resultsPath):
            os.makedirs(resultsPath)

        emPlugFileName = os.path.join(resultsPath, 'EMPlugs.txt')

        # Build the rest of the paths to the results files.
        # If no file was given default the results file prefix to "SCENE".
        if fileName is not None:
            # Absolute paths cannot be appended to the results path. Assume
            # that in those cases just using the base name is sufficient.
            if os.path.isabs(fileName):
                resultsPath = os.path.join(resultsPath,
                                           os.path.basename(fileName))
            else:
                resultsPath = os.path.join(resultsPath, fileName)
        else:
            resultsPath = os.path.join(resultsPath, 'SCENE')

        dgResults = '%s.dg.txt' % resultsPath
        modeCounts['dg'] = 1

        for mode in uniqueModes:
            modeResultsFiles.append('%s.%s.txt' % (resultsPath, mode))
    else:
        # Still need the file args to pass in to DGState. None = don't output.
        for mode in modes:
            modeResultsFiles.append(None)

    # If the image comparison was requested figure out where to store the
    # file. Done separately because even if the files won't be saved the image
    # comparison needs to dump a file out for comparison.
    if 'screen' in dataTypes:
        if resultsPath is None:
            imageDir = tempfile.gettempdir()
            if fileName is not None:
                # Absolute paths cannot be appended to the results path. Assume
                # that in those cases just using the base name is sufficient.
                if os.path.isabs(fileName):
                    imageDir = os.path.join(imageDir,
                                            os.path.basename(fileName))
                else:
                    imageDir = os.path.join(imageDir, fileName)
            else:
                imageDir = os.path.join(imageDir, 'SCENE')
        else:
            imageDir = resultsPath

        dgResultsImage = '%s.dg.png' % imageDir
        for mode in uniqueModes:
            modeResultsImageFiles.append('%s.%s.png' % (imageDir, mode))
    else:
        dgResultsImage = None
        for mode in uniqueModes:
            modeResultsImageFiles.append(None)

    # Fail if evaluation manager is not available. Should never happen.
    if not __hasEvaluationManager():
        print 'ERROR: Evaluation manager is not available.'
        return None

    emPlugs = None
    comparisons = {}
    # Record the DG evaluation version of the results
    with emModeManager() as modeMgr:
        modeMgr.setMode('dg')
        _playback(maxFrames)
        mDG = DGState(dgResults,
                      dgResultsImage,
                      doEval=True,
                      dataTypes=dataTypes)

        # Walk all of the modes requested and run the tests for them
        for modeNum in range(len(modes)):
            modeMgr.setMode(modes[modeNum])
            if emSetup == EMCORRECTNESS_DOUBLE_PLAYBACK:
                _playback(maxFrames)
            elif emSetup == EMCORRECTNESS_INVALIDATE:
                cmds.evaluationManager(invalidate=True)
            _playback(maxFrames)
            if emPlugs is None:
                emPlugs = {}
                __findEmPlugs(emPlugs)
                mDG.filterState(emPlugs)
                if emPlugFileName:
                    try:
                        with open(emPlugFileName, 'w') as emHandle:
                            for (node, plugList) in emPlugs.iteritems():
                                emHandle.write('%s\n' % node)
                                for plug in plugList.keys():
                                    emHandle.write('\t%s\n' % plug)
                    except Exception as ex:
                        print 'ERROR: Could not write to EM plug file %s: "%s"' % (
                            emPlugFileName, str(ex))
            results.append(
                DGState(modeResultsFiles[modeNum],
                        modeResultsImageFiles[modeNum],
                        doEval=False,
                        dataTypes=dataTypes))
            results[modeNum].filterState(emPlugs)
            comparisons[modes[modeNum]] = mDG.compare(results[modeNum],
                                                      verbose=verbose)

            TODO(
                'REFACTOR',
                'Remove the unused first and third values once QATA is modified to accept it',
                'MAYA-45714')
            if verbose:
                comparisons[modes[modeNum]] = ([], comparisons[modes[modeNum]],
                                               [])
            else:
                comparisons[modes[modeNum]] = (0, comparisons[modes[modeNum]],
                                               0)
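
A typical invocation, using only parameters documented above (the scene path and results directory are illustrative):

    # Compare EM Serial and EM Parallel (dynamics off) against DG evaluation.
    results = emCorrectnessTest(fileName='scenes/test.ma',
                                resultsPath='emResults',
                                modes=['ems', 'emp-dynamics'],
                                dataTypes=['matrix', 'vertex'])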
Example #8
def measureMPxTransformPerformance():
    '''
    Run two performance tests with 1000 transforms keyed randomly over 1000 frames
    for both the native Ttransform and the API leanTransformTest. Report the timing
    for playback of the two, and dump profile files for both for manual inspection.
    '''
    cmds.file(force=True, new=True)

    # Make sure the test plug-in is loaded and remember whether it was already
    # loaded or not so that the state can be restored after the test is finished.
    plugin_loaded = (cmds.loadPlugin(PLUGIN) is not None)

    # Do all profiling in parallel mode
    with emModeManager() as em_mgr:
        em_mgr.setMode('emp')

        #----------------------------------------------------------------------
        # Test 1: Simple node derived from MPxTransform
        print 'Testing plug-in transform...'
        animate(create_nodes(NODE_COUNT, NODE_NAME), KEY_COUNT)
        print '   playing back'
        with playbackModeManager() as play_mgr:
            play_mgr.setOptions(loop='once',
                                minTime=1.0,
                                maxTime=KEY_COUNT,
                                framesPerSecond=0.0)
            plugin_playback = play_mgr.playAll()
            # Sample enough of the playback range to get good results
            cmds.profiler(sampling=True)
            play_mgr.playLimitedRange(10)
            cmds.profiler(sampling=False)
            cmds.profiler(output=PLUGIN_PROFILE)
        cmds.file(force=True, new=True)

        #----------------------------------------------------------------------
        # Test 2: The native Ttransform
        print 'Testing internal transforms'
        animate(create_nodes(NODE_COUNT, 'transform'), KEY_COUNT)
        print '   playing back'
        with playbackModeManager() as play_mgr:
            play_mgr.setOptions(loop='once',
                                minTime=1.0,
                                maxTime=KEY_COUNT,
                                framesPerSecond=0.0)
            native_playback = play_mgr.playAll()
            # Sample enough of the playback range to get good results
            cmds.profiler(sampling=True)
            play_mgr.playLimitedRange(10)
            cmds.profiler(sampling=False)
            cmds.profiler(output=NATIVE_PROFILE)
        cmds.file(force=True, new=True)

    # If the test loaded the plug-in then unload it so that state is unchanged.
    if plugin_loaded:
        cmds.unloadPlugin(PLUGIN)

    # Report the results
    #
    print 'Native transform playback time = {}'.format(native_playback)
    print 'Plugin transform playback time = {}'.format(plugin_playback)
    print 'Profile outputs are in {} and {}'.format(NATIVE_PROFILE,
                                                    PLUGIN_PROFILE)
Example #9
def run_correctness_test(referenceMode,
                         modes,
                         fileName=None,
                         resultsPath=None,
                         verbose=False,
                         maxFrames=CORRECTNESS_MAX_FRAMECOUNT,
                         dataTypes=['matrix', 'vertex', 'screen'],
                         emSetup=CORRECTNESS_NO_SETUP):
    """
    Evaluate the file in multiple modes and compare the results.

    referenceMode: Mode to which other modes will be compared for correctness.
                   It's a string that can be passed to the emModeManager.setMode()
                   function.
    modes:         List of modes to run the tests in. They must have the following methods:
                   getTitle   : returns a string describing the mode
                   getEmMode  : returns a string to be passed to emModeManager.setMode()
                                before running the test.
                   getContext : returns a context object that can set extra state on enter
                                and reset it on exit (or None if not needed).
    fileName:      Name of file to load for comparison. None means use the current scene
    resultsPath:   Where to store the results. None means don't store anything
    verbose:       If True then dump the differing values when they are encountered
    maxFrames:     Maximum number of frames in the playback, to avoid long tests.
    dataTypes:     List of data types to include in the analysis. These are the possibilities:
                   matrix: Any attribute that returns a matrix
                   vector: Any attribute with type 3Double
                   vertex: Attributes on the mesh shape that hold vertex positions
                   number: Any attribute that returns a number
                   screen: Screenshot after the animation runs
    emSetup:       What to do before running an EM mode test, in bitfield combinations
                   CORRECTNESS_NO_SETUP        Do nothing, just run playback
                   CORRECTNESS_DOUBLE_PLAYBACK Run playback twice to ensure graph is valid
                   CORRECTNESS_INVALIDATE      Invalidate the graph to force rebuild
                   CORRECTNESS_LOAD            Load the file between every mode's run
                                               (Default is to just load once at the beginning.)

    Returns a list of value tuples indicating the run mode and the number of
    changes encountered in that mode. e.g. ['ems', 0]

    If verbose is true then instead of counts return a list of actual changes.
    e.g. ['ems', ["plug1,oldValue,newValue"]]

    Changed values are a CSV 3-tuple with "plug name", "value in reference mode", "value in the named test mode"
    in most cases.

    In the special case of an image difference the plug name will be one
    of the special ones below and the values will be those generated by the
    comparison method used:
        DGState.SCREENSHOT_PLUG_MD5 : md5 values when the image compare could not be done
        DGState.SCREENSHOT_PLUG_MAG : md5 and image difference values from ImageMagick
        DGState.SCREENSHOT_PLUG_IMF : md5 and image difference values from imf_diff
    """
    # Fail if the fileName is not a valid Maya file.
    if fileName is not None and not __is_maya_file(fileName):
        print 'ERROR: %s is not a Maya file' % fileName
        return {}

    # Load the fileName if it was specified, otherwise the current scene will be tested
    if fileName is not None:
        cmds.file(fileName, force=True, open=True)

    ref_results = None
    ref_results_image = None

    # Using lists allows me to do a comparison of two identical modes.
    # If resultsPath is given then the second and successive uses of the
    # same type will go into files with an incrementing suffix (X.ref.txt,
    # X.ref1.txt, X.ref2.txt...)
    mode_results_files = []
    mode_compare_files = []
    mode_results_image_files = []
    results = []
    em_plug_file_name = None

    # Create a list of unique mode suffixes, appending a count number whenever
    # the same mode appears more than once on the modes list.
    mode_counts = {}
    unique_modes = []
    mode_counts['ref'] = 1
    for modeObject in modes:
        mode = modeObject.getTitle()
        mode_counts[mode] = mode_counts.get(mode, 0) + 1
        suffix = ''
        if mode_counts[mode] > 1:
            suffix = str(mode_counts[mode])
        unique_modes.append('%s%s' % (mode, suffix))

    if resultsPath is not None:
        # Make sure the path exists
        if not os.path.isdir(resultsPath):
            os.makedirs(resultsPath)

        em_plug_file_name = os.path.join(resultsPath, 'EMPlugs.txt')

        # Build the rest of the paths to the results files.
        # If no file was given default the results file prefix to "SCENE".
        if fileName is not None:
            # Absolute paths cannot be appended to the results path. Assume
            # that in those cases just using the base name is sufficient.
            if os.path.isabs(fileName):
                resultsPath = os.path.join(resultsPath,
                                           os.path.basename(fileName))
            else:
                resultsPath = os.path.join(resultsPath, fileName)
        else:
            resultsPath = os.path.join(resultsPath, 'SCENE')

        ref_results = '%s.ref.txt' % resultsPath
        mode_counts['ref'] = 1

        for mode in unique_modes:
            # mode strings can have '/' which are illegal in filenames, replace with '='.
            mode = mode.replace('/', '=')
            mode_results_files.append('%s.%s.txt' % (resultsPath, mode))
            mode_compare_files.append('%s.DIFF.%s.txt' % (resultsPath, mode))
    else:
        # Still need the file args to pass in to DGState. None = don't output.
        for _ in modes:
            mode_results_files.append(None)
            mode_compare_files.append(None)

    # If the image comparison was requested figure out where to store the
    # file. Done separately because even if the files won't be saved the image
    # comparison needs to dump a file out for comparison.
    if 'screen' in dataTypes:
        if resultsPath is None:
            image_dir = tempfile.gettempdir()
            if fileName is not None:
                # Absolute paths cannot be appended to the results path. Assume
                # that in those cases just using the base name is sufficient.
                if os.path.isabs(fileName):
                    image_dir = os.path.join(image_dir,
                                             os.path.basename(fileName))
                else:
                    image_dir = os.path.join(image_dir, fileName)
            else:
                image_dir = os.path.join(image_dir, 'SCENE')
        else:
            image_dir = resultsPath

        ref_results_image = '%s.ref.png' % image_dir
        for mode in unique_modes:
            # mode strings can have '/' which are illegal in filenames, replace with '='.
            mode = mode.replace('/', '=')
            mode_results_image_files.append('%s.%s.png' % (image_dir, mode))
    else:
        ref_results_image = None
        for _ in unique_modes:
            mode_results_image_files.append(None)

    # The IK multi-chain solver is known to create inconsistent results so remove
    # any joints that are being controlled by it from the list being compared.
    TODO(
        'REFACTOR',
        'Is this still needed now that we have an evaluator that handles it and disable EM if they are found?',
        None)
    ignored_nodes = []
    for node in cmds.ls(type='ikHandle'):
        try:
            solver_type = None
            solver_type = cmds.nodeType(
                cmds.ikHandle(node, query=True, solver=True))
        except Exception:
            pass

        # Any other kind of IK solver is fine
        if solver_type != 'ikMCsolver':
            continue

        multi_chain_joints = cmds.ikHandle(node, query=True, jointList=True)
        if multi_chain_joints is not None:
            ignored_nodes += multi_chain_joints
        multi_chain_effector = cmds.ikHandle(node,
                                             query=True,
                                             endEffector=True)
        if multi_chain_effector is not None:
            ignored_nodes += [multi_chain_effector]

    em_plugs = None
    comparisons = {}

    TODO(
        'FEATURE',
        'Could modify the verbose input to allow dumping of JSON instead of CSV',
        None)
    comparison_mode = DGState.OUTPUT_CSV

    # Record the reference evaluation version of the results
    with emModeManager() as em_mode:
        em_mode.setMode(referenceMode)

        with playbackModeManager() as play_mode:
            # Set to free running but hit every frame
            play_mode.setOptions(framesPerSecond=0.0,
                                 maxPlaybackSpeed=0.0,
                                 loop='once')
            play_mode.setLimitedRange(maxFrames=maxFrames, fromStart=True)

            # If no model panel is visible the refresh command won't trigger any evaluation
            if model_panel_visible():
                cmds.refresh()
            else:
                cmds.dgdirty(allPlugs=True)

            if (emSetup & CORRECTNESS_DOUBLE_PLAYBACK) != 0:
                play_mode.playAll()
            play_mode.playAll()

            mDG = DGState()
            mDG.scan_scene(do_eval=(referenceMode == 'dg'),
                           data_types=dataTypes)
            mDG.store_state(ref_results, ref_results_image)

            # Walk all of the modes requested and run the tests for them
            for mode_num in range(len(modes)):
                test_mode = modes[mode_num]
                with emModeManager() as test_em_mode:
                    test_em_mode.setMode(test_mode.getEmMode())
                    extra_context = test_mode.getContext()
                    if not extra_context:
                        extra_context = EmptyContext()
                    with extra_context:
                        if (emSetup & CORRECTNESS_LOAD) != 0 and fileName is not None:
                            cmds.file(fileName, force=True, open=True)
                        if (emSetup & CORRECTNESS_DOUBLE_PLAYBACK) != 0:
                            play_mode.playAll()
                        if (emSetup & CORRECTNESS_INVALIDATE) != 0:
                            cmds.evaluationManager(invalidate=True)
                        play_mode.playAll()
                        if em_plugs is None:
                            em_plugs = __find_em_plugs(ignored_nodes)
                            mDG.filter_state(em_plugs)
                            if em_plug_file_name:
                                try:
                                    with open(em_plug_file_name, 'w') as em_handle:
                                        for (node,
                                             plug_list) in em_plugs.iteritems():
                                            em_handle.write('%s\n' % node)
                                            for plug in plug_list.keys():
                                                em_handle.write('\t%s\n' % plug)
                                except Exception as ex:
                                    print 'ERROR: Could not write to EM plug file %s: "%s"' % (
                                        em_plug_file_name, str(ex))
                        mode_state = DGState()
                        # Catch the case when the EM has been disabled due to unsupported areas in the graph.
                        # When that happens the evaluation has to be forced or the values will be wrong.
                        em_still_enabled = cmds.evaluationManager(
                            query=True,
                            mode=True) != 'dg' and cmds.evaluationManager(
                                query=True, enabled=True)

                        mode_state.scan_scene(do_eval=not em_still_enabled,
                                              data_types=dataTypes)
                        mode_state.store_state(
                            mode_results_files[mode_num],
                            mode_results_image_files[mode_num])

                        results.append(mode_state)
                        results[mode_num].filter_state(em_plugs)
                        mode_title = test_mode.getTitle()
                        (comparison, error_count,
                         _) = mDG.compare(results[mode_num],
                                          output_mode=comparison_mode)
                        if verbose:
                            comparisons[mode_title] = comparison
                        else:
                            comparisons[mode_title] = error_count
                        if mode_compare_files[mode_num] is not None:
                            with open(mode_compare_files[mode_num],
                                      'w') as compare_file:
                                compare_file.write(str(comparison))
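
A minimal mode object satisfying the documented getTitle/getEmMode/getContext interface, with an illustrative call (the class, scene path, and results directory are hypothetical):

    class EmpMode(object):
        '''Bare-bones test mode running EM Parallel with no extra context (sketch).'''
        def getTitle(self):
            return 'emp'
        def getEmMode(self):
            return 'emp'
        def getContext(self):
            return None  # no extra state to manage

    comparisons = run_correctness_test('dg', [EmpMode()],
                                       fileName='scenes/test.ma',
                                       resultsPath='refResults')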