def scan_scene(self, do_eval, data_types):
    """
    Read in the state information from the current scene. Create a new
    state object, potentially saving results offline if requested.

    do_eval    : True means force evaluation of the plugs before checking
                 state. Used in DG mode since not all outputs used for
                 (e.g.) drawing will be in the datablock after evaluation.
    data_types : Type of data to look for - {mesh, vertex, number, matrix,
                 vector, screen}
                 If screen is in the list the 'image_file' argument must
                 also be specified.
    """
    self.results_file = None
    self.image_file = None
    self.state = []
    self.md5_value = MD5_DEFAULT

    # Translate the method args into arguments for the dbpeek operations
    data_args = []
    mesh_args = ['vertex', 'verbose']
    if do_eval:
        data_args += ['eval']
        mesh_args += ['eval']
    if 'number' in data_types:
        data_args += ['number']
    if 'matrix' in data_types:
        data_args += ['matrix']
    if 'vector' in data_types:
        data_args += ['vector']

    # The two dbpeek operations both generate CSV data with similar
    # formatting (PLUG,#,#,#...) so a simple join is good enough.
    #
    # More complex data might warrant a more complex algorithm,
    # such as splitting the state data into separate objects and
    # comparing them that way.
    #
    # The "[1:]" is to skip the title lines since those are irrelevant.
    #
    self.state = [
        line for line in cmds.dbpeek(
            op='data', a=data_args, all=True).strip().split('\n')
        if line != '\n'
    ][1:]

    if 'mesh' in data_types:
        self.state += [
            line for line in cmds.dbpeek(
                op='mesh', a=mesh_args, all=True).strip().split('\n')
            if line != '\n'
        ][1:]
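# A minimal sketch (the helper name is hypothetical, not part of the
# original API) of how the captured state lines could be reduced to a
# checksum, mirroring the md5_value/MD5_DEFAULT bookkeeping in
# scan_scene() above.
import hashlib

def state_checksum(state_lines):
    """Return an MD5 hex digest over the sorted state CSV lines."""
    digest = hashlib.md5()
    # Sort first so the digest is stable even if dbpeek changes its
    # output order between runs.
    for line in sorted(state_lines):
        digest.update(line)
    return digest.hexdigest()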
def run(self):
    """
    Examine animated deformer nodes and check how they are used.

    If the 'details' option is set the CSV columns are:
        DeformerNode      : Name of the animated deformer node
        Type              : Type for this node
        SupportedGeometry : True if the geometry processed by the animated
                            deformer node is supported by the deformer
                            evaluator

    otherwise the CSV columns are:
        DeformerMode      : Description of the usage for the animated
                            deformer node
        Type              : Deformer type
        SupportedGeometry : True if the geometry processed by the animated
                            deformer nodes is supported by the deformer
                            evaluator
        Count             : Number of animated deformer nodes in this mode

    See is_supported_geometry() for the criteria a geometry must meet to
    be supported.

    One row is output for every animated deformer node.

    Return True if the analysis succeeded, else False
    """
    with emModeManager() as em_manager:
        em_manager.setMode('ems')
        em_manager.rebuild()

        # Get all animated nodes.
        try:
            json_nodes = json.loads(
                cmds.dbpeek(op='graph', eg=True, all=True, a='nodes'))
            animated_nodes = set(json_nodes['nodes'])
        except Exception, ex:
            self.error('Graph examination failure ({0:s})'.format(str(ex)))
            return
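# Sketch of a typical follow-up step (illustrative only, not part of the
# original analytic): narrow the animated nodes down to deformers by
# checking each node's inherited types for geometryFilter.
def filter_deformers(animated_nodes):
    deformers = []
    for node in animated_nodes:
        inherited = cmds.nodeType(node, inherited=True) or []
        if 'geometryFilter' in inherited:
            deformers.append(node)
    return deformers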
def list_frozen_in_scheduling():
    '''
    Returns a list of all nodes that were frozen either directly or
    indirectly as a result of the frozen evaluator settings.

    If no cluster information is available a TypeError is raised.
    If the frozen evaluator is not enabled an AttributeError is raised.
    '''
    if 'frozen' not in cmds.evaluator(enable=True, query=True):
        raise AttributeError('Frozen evaluator is not active')

    frozen_nodes = []
    try:
        clusters = json.loads(
            cmds.dbpeek(op='graph', evaluationGraph=True, all=True,
                        a='scheduling'))['scheduling']['Clusters']
        for cluster_name, cluster_members in clusters.iteritems():
            if RE_FROZEN_CLUSTER.match(cluster_name):
                frozen_nodes += cluster_members
    except Exception:
        # If an exception was raised it was probably due to the dbpeek
        # command not returning scheduling information, which only happens
        # when the graph is not available.
        raise TypeError(
            'Cluster information is not available, evaluation graph needs rebuilding'
        )

    return frozen_nodes
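# Usage sketch (the node name is illustrative): enable the frozen
# evaluator, freeze a node, nudge the time to trigger an evaluation graph
# rebuild, then query the frozen cluster members.
import maya.cmds as cmds

cmds.evaluator(name='frozen', enable=True)
cmds.setAttr('pCube1.frozen', True)  # 'frozen' attribute assumed present
now = cmds.currentTime(query=True)
cmds.currentTime(now + 1)
cmds.currentTime(now)
try:
    print list_frozen_in_scheduling()
except TypeError:
    print 'Evaluation graph not available; play back a frame and retry'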
def __find_em_plugs(ignored_nodes):
    """
    Find all of the root level plugs that the EM will be marking dirty.

    Returns a dictionary of dictionaries keyed by node:

        em_plugs[NODE] = {DIRTY_PLUG_IN_NODE:True}

    ignored_nodes = [NODES_TO_SKIP]
    """
    em_plugs = {}
    try:
        json_plugs = json.loads(
            cmds.dbpeek(op='graph', eg=True, all=True, a='plugs'))
    except ValueError:
        print 'WARNING: No output from plug list'
        return em_plugs

    if not json_plugs or 'plugs' not in json_plugs:
        print 'WARNING: No output from plug list'
        return em_plugs

    for node, per_node_list in json_plugs['plugs'].iteritems():
        if node in ignored_nodes:
            continue
        input_plugs = per_node_list['input']
        output_plugs = per_node_list['output']
        world_plugs = per_node_list['affectsWorld']
        attribute_plugs = per_node_list['attributes']
        for attribute in (input_plugs + output_plugs + world_plugs
                          + attribute_plugs):
            if node in em_plugs:
                em_plugs[node][attribute] = True
            else:
                em_plugs[node] = {attribute: True}

    return em_plugs
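# Illustrative consumer of the returned mapping (the helper name is
# hypothetical): flatten it back into plain 'node.attribute' plug strings.
def flatten_em_plugs(em_plugs):
    return sorted('%s.%s' % (node, attribute)
                  for node, attributes in em_plugs.iteritems()
                  for attribute in attributes)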
def _initFromScene(self):
    """
    Create a graph structure object from the current Maya scene.
    """
    if not checkMaya():
        return
    self._name = '__SCENE__'
    self._nodes = []
    self._plugsOut = []
    self._plugsIn = []
    self._plugsWorld = []
    self._connections = []

    nodeTypes = ['nodes']
    connectionTypes = ['connections']
    plugTypes = None
    if self.evaluationGraph:
        nodeTypes.append('evaluationGraph')
        plugTypes = ['plugs', 'evaluationGraph']
        connectionTypes.append('evaluationGraph')

    for node in [
            node.rstrip() for node in cmds.dbpeek(
                op='graph', all=True, argument=nodeTypes).split('\n')
    ]:
        # Skip blank lines
        if len(node) == 0:
            continue
        self._nodes.append(node)

    if plugTypes is not None:
        for plugInfo in cmds.dbpeek(op='graph', all=True,
                                    argument=plugTypes).split('\n'):
            if len(plugInfo.strip()) == 0:
                continue
            try:
                (plugType, plugName) = plugInfo.split('\t')
                if plugType == PLUG_OUT_TAG:
                    self._plugsOut.append(plugName)
                elif plugType == PLUG_WORLD_TAG:
                    self._plugsWorld.append(plugName)
                elif plugType == PLUG_IN_TAG:
                    self._plugsIn.append(plugName)
                else:
                    # Unrecognized plug type tag
                    raise ValueError(plugType)
            except Exception:
                print 'WARN: Could not parse plug "%s"' % plugInfo
def sort_by_evaluation_order(nodes, minimal=False):
    """Return `nodes` sorted by the order in which they are evaluated

    Reach into Maya's evaluation graph, accessible via cmds.dbpeek, for
    hints about the execution order. This won't work for DG evaluation,
    however.

    Arguments:
        nodes (list): Of any kind of DG or DagNode
        minimal (bool, optional): Only look at `nodes`, default False

    """

    peek_args = {
        "op": "graph",
        "evaluationGraph": True,
        "argument": ["scheduling", "verbose"]
    }

    data = cmds.dbpeek(
        [node.shortest_path() for node in nodes] if minimal else [],
        **peek_args
    )

    if data.startswith("\nERROR"):
        # This only works in Parallel/Serial modes
        raise RuntimeError("No valid graph")

    scheduling = json.loads(data)["scheduling"]
    keys = {node.shortest_path().encode("ascii"): node for node in nodes}
    nodes = {key: 0 for key in keys.keys()}

    def walk(key, value, depth=0):
        # Include evaluators, e.g. CycleLayer[2,_:R_leftFoot_ctl]
        # and e.g. pruneRoots|CustomEvaluatorLayer[2,_:L_hand_ctl]
        key = key.rsplit(",", 1)[-1].rstrip("]")

        if key in nodes:
            nodes[key] += depth

        for key, value in value.items():
            walk(key, value, depth + 1)

    with Timer() as t:
        # The execution order is a depth-first dictionary
        # of the order in which nodes execute.
        walk("", scheduling["executionOrder"])

    log.debug("sort_by_evaluation_order: %.2fms" % t.ms)

    # Turn back into objects
    items = sorted(nodes.items(), key=lambda item: item[1])
    return list(keys[item[0]] for item in items)
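# Usage sketch (the helper is hypothetical): assumes `nodes` are wrapper
# objects exposing shortest_path(), as this module expects, and that Maya
# is currently in Parallel or Serial evaluation mode.
def print_evaluation_order(nodes):
    for node in sort_by_evaluation_order(nodes):
        print node.shortest_path()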
def __init_from_scene(self):
    """
    Create a graph structure object from the current Maya scene.
    """
    if not checkMaya():
        return
    self.name = '__SCENE__'
    # Copy the inclusion list so that appending the evaluation graph
    # argument does not modify self.inclusions as a side effect.
    args = list(self.inclusions)
    if self.evaluation_graph:
        args.append('evaluationGraph')
    self.__init_from_json(
        cmds.dbpeek(operation='graph', all=True, argument=args))
def run(self):
    """
    Generates a JSON structure containing the node type hierarchy.

    If the 'details' option is set include the list of attributes
    attached to each node type.
    """
    self.attribute_json = None
    if self.option(OPTION_DETAILS):
        try:
            self.attribute_json = json.loads(
                cmds.dbpeek(op='attributes', a='nodeType',
                            all=True))['nodeTypes']
        except Exception, ex:
            self.error('Could not find node type attributes : "{0:s}"'
                       .format(str(ex)))
def run(self):
    """
    Examine animated cluster nodes and check how they are used. It checks
    whether they are used for fixed rigid transform, weighted rigid
    transform or per-vertex-weighted transform.

    When the 'details' option is set the CSV columns are:
        ClusterNode        : Name of the animated cluster node
        envelope_is_static : True if the envelope is not animated and its
                             value is 1
        uses_weights       : True if weights are used in the node
        uses_same_weight   : True if weight is the same for all vertices
        Mode               : Mode for this node
        supported_geometry : True if the geometry processed by the
                             animated cluster node is supported by the
                             deformer evaluator

    otherwise the CSV columns are:
        ClusterMode        : Description of the usage for the animated
                             cluster node
        Mode               : Mode for animated cluster nodes meeting this
                             criteria
        supported_geometry : True if the geometry processed by animated
                             cluster nodes meeting this criteria is
                             supported by the deformer evaluator
        Count              : Number of animated cluster nodes in this mode

    See is_supported_geometry() for the criteria a geometry must meet to
    be supported.

    One row is output for every animated cluster node.

    The "Mode" is an integer value with the following meaning:
        - 1 => Rigid transform : cluster node only performs a rigid
               transform
        - 2 => Weighted rigid transform : cluster node performs a rigid
               transform, but it is weighted down by a factor
        - 3 => Per-vertex transform : cluster node computes a different
               transform for each individually-weighted vertex

    Return True if the analysis succeeded, else False
    """
    with emModeManager() as em_manager:
        em_manager.setMode('ems')
        em_manager.rebuild()

        # Get all animated nodes.
        try:
            json_nodes = json.loads(
                cmds.dbpeek(op='graph', eg=True, all=True, a='nodes'))
            animated_nodes = set(json_nodes['nodes'])
        except Exception, ex:
            self.error('Graph examination failure ({0:s})'.format(str(ex)))
            return
def __findEmPlugs(emPlugs):
    """
    Find all of the root level plugs that the EM will be marking dirty.
    The passed-in dictionary will be populated by a list of dictionaries.

        emPlugs[NODE] = {DIRTY_PLUG_IN_NODE:True}
    """
    for nodeLine in cmds.dbpeek(op='graph', all=True,
                                a=['evaluationGraph', 'plugs']).split('\n'):
        try:
            (_, plug) = nodeLine.split('\t')
            # Split only on the first '.' so attribute paths with multiple
            # levels are kept intact.
            (node, attribute) = plug.split('.', 1)
            if node in emPlugs:
                emPlugs[node][attribute] = True
            else:
                emPlugs[node] = {attribute: True}
        except Exception:
            # Skip lines that cannot be processed
            pass
def run(self):
    """
    Examine animated tweak nodes and check how they are used. It checks
    whether they use the relative or absolute mode and whether individual
    tweaks themselves are actually used.

    If the 'details' option is set the CSV columns are:
        TweakNode   : Name of the animated tweak node
        Relative    : Value of the relative_tweak attribute of the
                      animated tweak node
        uses_tweaks : True if tweaks are used in the node
        UsesMesh    : True if some of the geometry processed by the
                      animated tweak node is a mesh

    otherwise the CSV columns are:
        TweakType   : Description of the usage for the animated tweak node
        Relative    : Value of the relative_tweak attribute of the
                      animated tweak nodes meeting this criteria
        uses_tweaks : True if tweaks are used in the nodes meeting this
                      criteria
        UsesMesh    : True if some of the geometry processed by animated
                      tweak nodes meeting this criteria is a mesh
        Count       : Number of animated tweak nodes meeting this criteria

    One row is output for every animated tweak node.

    Return True if the analysis succeeded, else False
    """
    with emModeManager() as em_manager:
        em_manager.setMode('ems')
        em_manager.rebuild()

        # Get all animated nodes.
        try:
            node_list = cmds.dbpeek(op='graph', eg=True, all=True,
                                    a='nodes')
            json_nodes = json.loads(node_list)
            animated_nodes = set(json_nodes['nodes'])
        except Exception, ex:
            self.error('Graph examination failure ({0:s})'.format(str(ex)))
            return
def run(self):
    """
    Generates a JSON structure containing the evaluation graph information.

    If the 'details' option is set then include the extra information as
    described in the analytic help information.
    """
    node_data = {
        'BuildTime': 0,
        'Parallel': {},
        'Serial': {},
        'GloballySerial': {},
        'Untrusted': {}
    }
    node_counts = {
        scheduling_type: 0
        for scheduling_type in node_data.keys()
    }
    try:
        with emModeManager() as em_manager:
            # Rebuild the graph in parallel mode, then extract the
            # schedulingGraph event timing from it, which is the root
            # level timing event for graph rebuilding. (The rebuild also
            # counts invalidation and redraw time so that can't be used
            # as-is.)
            em_manager.setMode('emp')
            self.debug('Getting profiler information')
            cmds.profiler(sampling=True)
            em_manager.rebuild()
            cmds.profiler(sampling=False)
            node_data['BuildTime'] = self.__get_event_timing(
                'GraphConstruction')
            self.debug('Got the sample time of {}'.format(
                node_data['BuildTime']))

            em_json = None
            try:
                graph_data = cmds.dbpeek(
                    operation='graph',
                    all=True,
                    evaluationGraph=True,
                    argument=['plugs', 'connections', 'scheduling'])
                em_json = json.loads(graph_data)
            except Exception, ex:
                self.warning(
                    'First evaluation failed, forcing time change for '
                    'second attempt ({})'.format(ex))
                # If the first attempt to get the graph fails maybe the
                # rebuild didn't work, so force it the old ugly way for
                # now.
                now = cmds.currentTime(query=True)
                cmds.currentTime(now + 1)
                cmds.currentTime(now)

            if em_json is None:
                # Second chance to get the graph data. This one is not
                # protected by an inner try() because if this fails we
                # want the outer exception handler to kick in.
                graph_data = cmds.dbpeek(
                    operation='graph',
                    all=True,
                    evaluationGraph=True,
                    argument=['plugs', 'connections', 'scheduling'])
                em_json = json.loads(graph_data)

            if self.option(OPTION_SUMMARY):
                # Gather node summary information if requested
                summary_info = self.__gather_summary_data(em_json)
                node_data['summary'] = summary_info

            # Gather node detail information if requested
            detailed_info = (self.__gather_detail_data(em_json)
                             if self.option(OPTION_DETAILS) else {})

            # Relies on the fact that the scheduling output keys match
            # the ones being put into the node_data dictionary, which
            # they do by design.
            for (scheduling_type,
                 scheduling_list) in em_json['scheduling'].iteritems():
                try:
                    node_counts[scheduling_type] = len(scheduling_list)

                    # Any extra scheduling information is for detailed
                    # output only.
                    if scheduling_type not in node_data.keys():
                        if self.option(OPTION_DETAILS):
                            node_data[scheduling_type] = scheduling_list
                        if self.option(OPTION_SUMMARY):
                            node_counts[scheduling_type] = \
                                node_counts.get(scheduling_type, 0) + \
                                len(scheduling_list)
                        continue

                    # The simplest output will just have the nodes of
                    # each type in a list.
                    if (not self.option(OPTION_SUMMARY)
                            and not self.option(OPTION_DETAILS)):
                        node_data[scheduling_type] = scheduling_list
                        continue

                    node_data[scheduling_type] = {}
                    for node in scheduling_list:
                        node_info = {}
                        # Add in the detailed information if requested
                        if node in detailed_info:
                            node_info.update(detailed_info[node])
                        # Attach the node data to its name
                        node_data[scheduling_type][
                            self._node_name(node)] = node_info
                except Exception, ex:
                    # There may be a formatting problem if scheduling
                    # types are not found since they will be dumped even
                    # if empty.
                    self.warning(
                        'Node information not available for type {} ({})'
                        .format(scheduling_type, ex))
    except Exception, ex:
        # Handler assumed to close the outer try; it mirrors the failure
        # reporting used by the other analytics in this module.
        self.error('Evaluation graph analysis failed ({0:s})'.format(
            str(ex)))
        return
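# Standalone version of the fallback used in run() above (the helper name
# is hypothetical): bump the current time forward and back to force the
# evaluation manager to rebuild and repopulate its graph.
import maya.cmds as cmds

def force_graph_rebuild():
    now = cmds.currentTime(query=True)
    cmds.currentTime(now + 1)
    cmds.currentTime(now)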
def __init__(self, resultsFile=None, imageFile=None, doEval=False,
             dataTypes=None):
    """
    Create a new state object, potentially saving results offline if
    requested.

    resultsFile : Name of file in which to save the results.
                  Do not save anything if None.
    imageFile   : Name of file in which to save the current viewport
                  screenshot. Do not save anything if None.
    doEval      : True means force evaluation of the plugs before checking
                  state. Used in DG mode since not all outputs used for
                  (e.g.) drawing will be in the datablock after evaluation.
    dataTypes   : Type of data to look for - {mesh, vertex, number, matrix,
                  screen}
                  If screen is in the list the 'imageFile' argument must
                  also be specified.
    """
    self.md5Value = MD5_DEFAULT
    self.resultsFile = resultsFile
    self.imageFile = imageFile

    dataArgs = []
    meshArgs = ['vertex', 'verbose']
    if doEval:
        dataArgs += ['eval']
        meshArgs += ['eval']
    if 'number' in dataTypes:
        dataArgs += ['number']
    if 'matrix' in dataTypes:
        dataArgs += ['matrix']

    # The two dbpeek operations both generate CSV data with similar
    # formatting (PLUG,#,#,#...) so a simple join is good enough.
    #
    # More complex data might warrant a more complex algorithm,
    # such as splitting the state data into separate objects and
    # comparing them that way.
    #
    # The "[1:]" is to skip the title lines since those are irrelevant.
    #
    self.state = [
        line for line in cmds.dbpeek(
            op='data', a=dataArgs, all=True).strip().split('\n')
        if line != '\n'
    ][1:]

    if 'mesh' in dataTypes:
        self.state += [
            line for line in cmds.dbpeek(
                op='mesh', a=meshArgs, all=True).strip().split('\n')
            if line != '\n'
        ][1:]

    if resultsFile is not None:
        try:
            rfHandle = open(resultsFile, 'w')
            for line in sorted(self.state):
                if line != '\n':
                    rfHandle.write('%s\n' % line)
            rfHandle.close()
        except Exception, ex:
            print 'ERROR: Could not write to results file %s: "%s"' % (
                resultsFile, str(ex))
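# Minimal comparison sketch (the helper is hypothetical, not part of the
# original class): diff the CSV state lines captured by two instances of
# the state object built above, e.g. before and after an evaluation mode
# switch.
def state_diff(before, after):
    """Return the state CSV lines that differ between two captures."""
    return sorted(set(before.state) ^ set(after.state))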
class graphStructure:
    """
    Provides access and manipulation on graph structure data that has
    been produced by the 'dbpeek -op graph' or 'dbpeek -op evaluationGraph'
    commands.
    """

    #======================================================================
    def __init__(self, structureFileName=None, longNames=False,
                 evaluationGraph=False):
        """
        Create a graph structure object from a file or the current scene.

        If structureFileName is None then the current scene will be used,
        otherwise the file will be read.

        The graph data is read in and stored internally in a format that
        makes formatting and comparison easy.

        If longNames is True then don't attempt to shorten the node names
        by removing namespaces and DAG path elements.

        If evaluationGraph is True then get the structure of the
        evaluation manager graph, not the DG. This requires that the graph
        has already been created of course, e.g. by playing back a frame
        or two in EM serial or EM parallel mode.
        """
        self.useLongNames = longNames
        self.evaluationGraph = evaluationGraph
        if structureFileName is None:
            self._initFromScene()
        else:
            self._initFromFile(structureFileName)

    def _initFromScene(self):
        """
        Create a graph structure object from the current Maya scene.
        """
        if not checkMaya():
            return
        self._name = '__SCENE__'
        self._nodes = []
        self._plugsOut = []
        self._plugsIn = []
        self._plugsWorld = []
        self._connections = []

        nodeTypes = ['nodes']
        connectionTypes = ['connections']
        plugTypes = None
        if self.evaluationGraph:
            nodeTypes.append('evaluationGraph')
            plugTypes = ['plugs', 'evaluationGraph']
            connectionTypes.append('evaluationGraph')

        for node in [
                node.rstrip() for node in cmds.dbpeek(
                    op='graph', all=True, argument=nodeTypes).split('\n')
        ]:
            # Skip blank lines
            if len(node) == 0:
                continue
            self._nodes.append(node)

        if plugTypes is not None:
            for plugInfo in cmds.dbpeek(op='graph', all=True,
                                        argument=plugTypes).split('\n'):
                if len(plugInfo.strip()) == 0:
                    continue
                try:
                    (plugType, plugName) = plugInfo.split('\t')
                    if plugType == PLUG_OUT_TAG:
                        self._plugsOut.append(plugName)
                    elif plugType == PLUG_WORLD_TAG:
                        self._plugsWorld.append(plugName)
                    elif plugType == PLUG_IN_TAG:
                        self._plugsIn.append(plugName)
                    else:
                        # Unrecognized plug type tag
                        raise ValueError(plugType)
                except Exception:
                    print 'WARN: Could not parse plug "%s"' % plugInfo

        connection = cmds.dbpeek(op='graph', all=True,
                                 argument=connectionTypes)
        for connectionLine in connection.split('\n'):
            # Skip blank lines
            if len(connectionLine) == 0:
                continue
            connectionList = connectionLine.split('\t')
            if len(connectionList) == 2:
                self._connections.append(
                    (connectionList[0], connectionList[1]))
            else:
                # Report the offending line, not the entire dbpeek output.
                print 'WARN: Could not parse connection %s' % connectionLine
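# Usage sketch (the file name is illustrative and _nodes is an internal
# attribute, so treat this as a sketch rather than a supported API):
# compare the nodes in the live evaluation graph against a previously
# saved structure file.
sceneGraph = graphStructure(evaluationGraph=True)
fileGraph = graphStructure(structureFileName='/tmp/graph_before.txt')
onlyInScene = set(sceneGraph._nodes) - set(fileGraph._nodes)
for node in sorted(onlyInScene):
    print 'Only in scene: %s' % node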