def process(self, instance):
    """Collect the fur settings of every pgYetiMaya shape in the instance.

    Stores the result on ``instance.data["fursettings"]`` as::

        {"nodes": [{"name", "cbId", "transform", "attrs"}, ...]}

    Args:
        instance: the pyblish instance holding the Yeti nodes
    """
    settings = {"nodes": []}

    for shape in cmds.ls(instance, type="pgYetiMaya"):
        # Gather the values of the attributes we track for publishing
        attr_data = {attr: cmds.getAttr("%s.%s" % (shape, attr))
                     for attr in SETTINGS}

        # Gather data about the shape's parent transform
        parent = cmds.listRelatives(shape, parent=True)[0]

        settings["nodes"].append({
            "transform": {"name": parent, "cbId": lib.get_id(parent)},
            "name": shape,
            "cbId": lib.get_id(shape),
            "attrs": attr_data,
        })

    instance.data["fursettings"] = settings
def process(self, instance):
    """Collect the Yeti rig's input connections, resources and frame range.

    Stores ``rigsettings`` (incoming connections on the input_SET meshes),
    ``resources`` (Yeti textures) and a forced single-frame range on the
    instance data.
    """
    assert "input_SET" in cmds.sets(
        instance.name, query=True), ("Yeti Rig must have an input_SET")

    # Collect the meshes connected through the input_SET, including
    # their full hierarchy when they have descendants
    set_members = cmds.sets("input_SET", query=True)
    hierarchy = cmds.listRelatives(set_members,
                                   allDescendents=True,
                                   fullPath=True) or set_members

    # Only consider non-intermediate shapes
    input_shapes = cmds.ls(hierarchy, long=True, noIntermediate=True)

    # Query all incoming connections. The flat result alternates between
    # the queried plug (the destination side, since the shape was used to
    # query) and the plug it is connected from (the source side).
    connections = cmds.listConnections(input_shapes,
                                       source=True,
                                       destination=False,
                                       connections=True,
                                       plugs=True) or []

    inputs = []
    for index in range(0, len(connections), 2):
        dest = connections[index]
        src = connections[index + 1]
        source_node, source_attr = src.split(".", 1)
        dest_node, dest_attr = dest.split(".", 1)
        inputs.append({"connections": [source_attr, dest_attr],
                       "sourceID": lib.get_id(source_node),
                       "destinationID": lib.get_id(dest_node)})

    # Collect any textures if used
    # TODO: referenced files in Yeti Graph
    yeti_resources = []
    for node in cmds.ls(instance[:], type="pgYetiMaya"):
        yeti_resources.extend(self.get_yeti_resources(node))

    instance.data["rigsettings"] = {"inputs": inputs}
    instance.data["resources"] = yeti_resources

    # Force frame range for export
    instance.data["startFrame"] = 1
    instance.data["endFrame"] = 1
def get_invalid(cls, instance):
    """Return the member nodes that are invalid

    A member is invalid when its `cbId` is also present on at least one
    other (non-intermediate) member of the instance.
    """
    # Check only non intermediate shapes
    # todo: must the instance itself ensure to have no intermediates?
    # todo: how come there are intermediates?
    from maya import cmds
    instance_members = cmds.ls(instance, noIntermediate=True, long=True)

    # Collect each id with their members
    ids = defaultdict(list)
    for member in instance_members:
        object_id = lib.get_id(member)
        if not object_id:
            continue
        ids[object_id].append(member)

    # Take only the ids with more than one member.
    # Use `items()` instead of the Python 2-only `iteritems()` so this
    # also runs on Python 3 based Maya versions.
    invalid = list()
    for _id, members in ids.items():
        if len(members) > 1:
            cls.log.error("ID found on multiple nodes: '%s'" % members)
            invalid.extend(members)

    return invalid
def get_invalid(cls, instance):
    """Get all nodes which do not match the criteria"""
    invalid = []
    types_to_skip = ["locator"]

    # get asset id
    nodes = instance.data.get("out_hierarchy", instance[:])
    for node in nodes:
        # Referenced nodes are not checked
        if cmds.referenceQuery(node, isNodeReferenced=True):
            continue

        # Deformers only work on shapes; anything else is skipped
        if not cmds.objectType(node, isAType="shape"):
            continue

        # Skip specific shape types
        if cmds.objectType(node) in types_to_skip:
            continue

        # A node without an id is invalid straight away
        node_id = lib.get_id(node)
        if not node_id:
            invalid.append(node)
            continue

        # An id differing from the id found in its history is invalid too
        history_id = lib.get_id_from_history(node)
        if history_id is not None and node_id != history_id:
            invalid.append(node)

    return invalid
def get_invalid(cls, instance):
    """Return the member nodes that are invalid"""
    invalid = list()

    # Look up the database id of the instance's asset
    asset = instance.data['asset']
    asset_data = io.find_one({"name": asset, "type": "asset"},
                             projection={"_id": True})
    asset_id = str(asset_data['_id'])

    # We do want to check the referenced nodes as they might be
    # part of the end product
    for node in instance:
        _id = lib.get_id(node)
        if not _id:
            continue

        # The asset id is the part of the cbId before the first colon
        if _id.split(":", 1)[0] != asset_id:
            invalid.append(node)

    return invalid
def get_invalid(cls, instance):
    """Return members whose asset id does not exist in the database."""
    invalid = []

    # Get all id required nodes
    id_required_nodes = lib.get_id_required_nodes(referenced_nodes=True,
                                                  nodes=instance[:])

    # Build the set of known asset ids once for fast membership testing
    db_asset_ids = {str(i)
                    for i in io.find({"type": "asset"}).distinct("_id")}

    for node in id_required_nodes:
        cb_id = lib.get_id(node)

        # Ignore nodes without id, those are validated elsewhere
        if not cb_id:
            continue

        if cb_id.split(":", 1)[0] not in db_asset_ids:
            cls.log.error("`%s` has unassociated asset ID" % node)
            invalid.append(node)

    return invalid
def collect_member_data(self, member, instance_members):
    """Get all information of the node

    Args:
        member (str): the name of the node to check, optionally with a
            component suffix (e.g. "pCube1.f[0:3]")
        instance_members (set): the collected instance members

    Returns:
        dict
    """
    # Split off a trailing component selection, if present
    if "." in member:
        node, components = member.rsplit(".", 1)
    else:
        node, components = member, None

    # Only include valid members of the instance
    if node not in instance_members:
        return

    node_id = lib.get_id(node)
    if not node_id:
        self.log.error("Member '{}' has no attribute 'cbId'".format(node))
        return

    member_data = {"name": node, "uuid": node_id}
    if components:
        member_data["components"] = components

    return member_data
def collect_sets(self, instance):
    """Collect all objectSets which are of importance for publishing

    It checks if all nodes in the instance are related to any objectSet
    which need to be

    Args:
        instance (list): all nodes to be published

    Returns:
        dict
    """
    sets = dict()
    for node in instance:
        # Each related set is registered once, with its id and an empty
        # member list to be filled later
        for objset in lib.get_related_sets(node) or []:
            if objset not in sets:
                sets[objset] = {"uuid": lib.get_id(objset),
                                "members": list()}

    return sets
def collect_input_connections(self, instance): """Collect the inputs for all nodes in the input_SET""" # Get the input meshes information input_content = cmds.ls(cmds.sets("input_SET", query=True), long=True) # Include children input_content += cmds.listRelatives( input_content, allDescendents=True, fullPath=True) or [] # Ignore intermediate objects input_content = cmds.ls(input_content, long=True, noIntermediate=True) if not input_content: return [] # Store all connections connections = cmds.listConnections( input_content, source=True, destination=False, connections=True, # Only allow inputs from dagNodes # (avoid display layers, etc.) type="dagNode", plugs=True) or [] connections = cmds.ls(connections, long=True) # Ensure long names inputs = [] for dest, src in pairwise(connections): source_node, source_attr = src.split(".", 1) dest_node, dest_attr = dest.split(".", 1) # Ensure the source of the connection is not included in the # current instance's hierarchy. If so, we ignore that connection # as we will want to preserve it even over a publish. if source_node in instance: self.log.debug("Ignoring input connection between nodes " "inside the instance: %s -> %s" % (src, dest)) continue inputs.append({ "connections": [source_attr, dest_attr], "sourceID": lib.get_id(source_node), "destinationID": lib.get_id(dest_node) }) return inputs
def get_invalid(cls, instance):
    """Return the member nodes that are invalid"""
    # We do want to check the referenced nodes as it might be
    # part of the end product.
    id_nodes = lib.get_id_required_nodes(referenced_nodes=True,
                                         nodes=instance[:])

    # Any node lacking a cbId is invalid
    return [node for node in id_nodes if not lib.get_id(node)]
def create_id_hash(nodes):
    """Create a hash based on cbId attribute value

    Args:
        nodes (list): a list of nodes

    Returns:
        dict
    """
    # Group the nodes by their cbId; nodes without an id are ignored
    node_id_hash = defaultdict(list)
    for node in nodes:
        value = cb.get_id(node)
        if value is not None:
            node_id_hash[value].append(node)

    return dict(node_id_hash)
def get_asset_id_item(item):
    """Return the asset id embedded in the item's `cbId`.

    For an objectSet the id is taken from its first shape member,
    assuming all shapes in the container are from the same asset.

    Args:
        item (str): a node name, or an objectSet whose members to check

    Returns:
        str: the asset id, or None when the item has no `cbId`
    """
    if cmds.objectType(item) == "objectSet":
        content = cmds.sets(item, query=True)
        shapes = cmds.ls(content, long=True, type="shape")
        assert len(shapes) != 0, "Container has no shapes, this is an error"

        # Take the first shape, assuming all shapes in the container are
        # from the same asset
        item = shapes[0]

    cb_id = cblib.get_id(item)
    if not cb_id:
        return

    # A cbId is "<assetId>:<uniqueId>". Use split with an explicit
    # maxsplit for consistency with the other id checks in this codebase
    # (the previous `rsplit(":")` without maxsplit gave the same result
    # but read as if it took the *last* segment).
    asset_id = cb_id.split(":", 1)[0]

    return asset_id
def create_asset_id_hash(nodes):
    """Create a hash based on cbId attribute value

    Args:
        nodes (list): a list of nodes

    Returns:
        dict
    """
    # Group the nodes by the asset part of their cbId ("<assetId>:<uuid>")
    node_id_hash = defaultdict(list)
    for node in nodes:
        value = cblib.get_id(node)
        if value is None:
            continue
        node_id_hash[value.split(":")[0]].append(node)

    return dict(node_id_hash)
def get_invalid(cls, instance):
    """Get all nodes which do not match the criteria"""
    shapes = cmds.ls(instance[:],
                     dag=True,
                     leaf=True,
                     shapes=True,
                     long=True,
                     noIntermediate=True)

    # A shape is invalid when the id found in its history differs from
    # the id on the shape itself
    invalid = []
    for shape in shapes:
        history_id = lib.get_id_from_history(shape)
        if not history_id:
            continue
        if lib.get_id(shape) != history_id:
            invalid.append(shape)

    return invalid
def collect_attributes_changed(self, instance):
    """Collect all userDefined attributes which have changed

    Each node gets checked for user defined attributes which have been
    altered during development. Each changes gets logged in a dictionary

    [{name: node, uuid: uuid, attributes: {attribute: value}}]

    Args:
        instance (list): all nodes which will be published

    Returns:
        list
    """
    attributes = []
    for node in instance:
        # Collect changes to "custom" attributes
        node_attrs = get_look_attrs(node)

        # Only include if there are any properties we care about
        if not node_attrs:
            continue

        values = {}
        for attr in node_attrs:
            # Skip attributes that no longer exist on the node
            if not cmds.attributeQuery(attr, node=node, exists=True):
                continue
            values[attr] = cmds.getAttr("{}.{}".format(node, attr))

        attributes.append({"name": node,
                           "uuid": lib.get_id(node),
                           "attributes": values})

    return attributes
def get_invalid(cls, instance):
    """Get all nodes which do not match the criteria"""
    # Gather all non-intermediate leaf shapes below the out_SET members
    out_set = next(x for x in instance if x.endswith("out_SET"))
    members = cmds.sets(out_set, query=True)
    shapes = cmds.ls(members,
                     dag=True,
                     leaf=True,
                     shapes=True,
                     long=True,
                     noIntermediate=True)

    # A shape is invalid when its id differs from the id in its history
    invalid = []
    for shape in shapes:
        history_id = lib.get_id_from_history(shape)
        if history_id and lib.get_id(shape) != history_id:
            invalid.append(shape)

    return invalid
def update(self, container, representation):
    """Update the loaded Yeti cache container to another representation.

    Reads the representation's .fursettings sidecar, deletes scene Yeti
    nodes whose cbId no longer appears in it, creates nodes for new ids,
    and renames/re-attributes the nodes that match.
    """
    namespace = container["namespace"]
    container_node = container["objectName"]

    path = api.get_representation_path(representation)

    # Get all node data from the .fursettings file next to the cache
    fname, ext = os.path.splitext(path)
    settings_fname = "{}.fursettings".format(fname)
    with open(settings_fname, "r") as fp:
        settings = json.load(fp)

    # Collect scene information of asset
    set_members = cmds.sets(container["objectName"], query=True)
    container_root = lib.get_container_transforms(container,
                                                 members=set_members,
                                                 root=True)
    scene_nodes = cmds.ls(set_members, type="pgYetiMaya", long=True)

    # Build lookup with cbId as keys
    scene_lookup = defaultdict(list)
    for node in scene_nodes:
        cb_id = lib.get_id(node)
        scene_lookup[cb_id].append(node)

    # Re-assemble metadata with cbId as keys
    meta_data_lookup = {n["cbId"]: n for n in settings["nodes"]}

    # Compare look ups and get the nodes which are not relevant any more
    to_delete_lookup = {cb_id for cb_id in scene_lookup.keys() if
                        cb_id not in meta_data_lookup}
    if to_delete_lookup:
        # Get nodes and remove entry from lookup
        to_remove = []
        for _id in to_delete_lookup:
            # Get all related nodes
            shapes = scene_lookup[_id]

            # Get the parents of all shapes under the ID
            transforms = cmds.listRelatives(shapes,
                                            parent=True,
                                            fullPath=True) or []
            to_remove.extend(shapes + transforms)

            # Remove id from lookup
            scene_lookup.pop(_id, None)

        cmds.delete(to_remove)

    for cb_id, data in meta_data_lookup.items():
        # Update cache file name to point at the new representation path
        file_name = data["name"].replace(":", "_")
        cache_file_path = "{}.%04d.fur".format(file_name)
        data["attrs"]["cacheFileName"] = os.path.join(path,
                                                      cache_file_path)

        if cb_id not in scene_lookup:
            # Create new nodes
            self.log.info("Creating new Yeti nodes ..")
            new_nodes = self.create_nodes(namespace, [data])
            cmds.sets(new_nodes, addElement=container_node)
            cmds.parent(new_nodes, container_root)
        else:
            # Update existing matching nodes
            scene_nodes = scene_lookup[cb_id]
            lookup_result = meta_data_lookup[cb_id]["name"]

            # Remove namespace if any (e.g.: "character_01_:head_YNShape")
            node_name = lookup_result.rsplit(":", 1)[-1]

            for scene_node in scene_nodes:
                # Get transform node, this makes renaming easier
                transforms = cmds.listRelatives(scene_node,
                                                parent=True,
                                                fullPath=True) or []
                assert len(transforms) == 1, "This is a bug!"

                # Get scene node's namespace and rename the transform node
                lead = scene_node.rsplit(":", 1)[0]
                namespace = ":{}".format(lead.rsplit("|")[-1])

                new_shape_name = "{}:{}".format(namespace, node_name)
                # The transform name is the shape name without the
                # trailing "Shape" suffix
                new_trans_name = new_shape_name.rsplit("Shape", 1)[0]

                transform_node = transforms[0]
                cmds.rename(transform_node,
                            new_trans_name,
                            ignoreShape=False)

                # Get the newly named pgYetiMaya shape node
                yeti_nodes = cmds.listRelatives(new_trans_name,
                                                children=True,
                                                type="pgYetiMaya")
                yeti_node = yeti_nodes[0]

                attributes = data["attrs"]
                self._set_attributes(yeti_node, attributes)

    # Point the container at the new representation id
    cmds.setAttr("{}.representation".format(container_node),
                 str(representation["_id"]),
                 typ="string")