def check_inventory():
    if not lib.any_outdated():
        return

    host = pyblish.registered_host()
    outdated_containers = []
    for container in host.ls():
        representation = container['representation']
        representation_doc = io.find_one(
            {
                "_id": io.ObjectId(representation),
                "type": "representation"
            },
            projection={"parent": True})
        if representation_doc and not lib.is_latest(representation_doc):
            outdated_containers.append(container)

    # Warn about outdated containers.
    print("Starting new QApplication..")
    app = Qt.QtWidgets.QApplication(sys.argv)

    message_box = Qt.QtWidgets.QMessageBox()
    message_box.setIcon(Qt.QtWidgets.QMessageBox.Warning)
    msg = "There are outdated containers in the scene."
    message_box.setText(msg)
    message_box.exec_()

    # Garbage collect QApplication.
    del app
def process(self, context):

    # Sort by displayOrder
    def sort_by_display_order(layer):
        return cmds.getAttr("%s.displayOrder" % layer)

    host = api.registered_host()
    asset = api.Session["AVALON_ASSET"]
    work_dir = context.data["workspaceDir"]

    # Get VRay Scene instance
    vray_scenes = host.lsattr("family", "vrayscene")
    if not vray_scenes:
        self.log.info("Skipping vrayScene collection, no "
                      "vrayscene instance found..")
        return

    assert len(vray_scenes) == 1, "Multiple vrayscene instances found!"
    vray_scene = vray_scenes[0]

    vrscene_data = host.read(vray_scene)

    assert cmds.ls("vraySettings", type="VRaySettingsNode"), (
        "VRay Settings node does not exist. "
        "Please ensure V-Ray is the current renderer.")

    # Output data
    start_frame = int(cmds.getAttr("defaultRenderGlobals.startFrame"))
    end_frame = int(cmds.getAttr("defaultRenderGlobals.endFrame"))

    # Create output file path with template
    file_name = context.data["currentFile"].replace("\\", "/")

    vrscene = ("vrayscene", "<Scene>", "<Scene>_<Layer>", "<Layer>")
    vrscene_output = os.path.join(work_dir, *vrscene)

    # Check and create render output template for render job
    # outputDir is required for submit_publish_job
    if not vrscene_data.get("suspendRenderJob", False):
        renders = ("renders", "<Scene>", "<Scene>_<Layer>", "<Layer>")
        output_renderpath = os.path.join(work_dir, *renders)
        vrscene_data["outputDir"] = output_renderpath

    # Get resolution
    resolution = (cmds.getAttr("defaultResolution.width"),
                  cmds.getAttr("defaultResolution.height"))

    # Get format extension
    extension = cmds.getAttr("vraySettings.imageFormatStr")

    # Get renderable, non-referenced render layers
    render_layers = [
        i for i in cmds.ls(type="renderLayer")
        if cmds.getAttr("{}.renderable".format(i)) and
        not cmds.referenceQuery(i, isNodeReferenced=True)
    ]
    render_layers = sorted(render_layers, key=sort_by_display_order)

    for layer in render_layers:
        subset = layer
        if subset == "defaultRenderLayer":
            subset = "masterLayer"

        data = {
            "subset": subset,
            "setMembers": layer,
            "startFrame": start_frame,
            "endFrame": end_frame,
            "renderer": "vray",
            "resolution": resolution,
            "ext": ".{}".format(extension),

            # instance subset
            "family": "VRay Scene",
            "families": ["vrayscene"],

            "asset": asset,
            "time": api.time(),
            "author": context.data["user"],

            # Add source to allow tracing back to the scene from
            # which it was submitted originally
            "source": file_name,

            # Store VRay Scene additional data
            "vrsceneOutput": vrscene_output
        }

        data.update(vrscene_data)

        instance = context.create_instance(subset)
        self.log.info("Created: %s" % instance.name)
        instance.data.update(data)
def _publish(self, rig_source, rig_subsets):
    import os
    import re
    import pyblish.api
    import pyblish.util
    import maya.cmds as cmds
    from avalon import api
    from reveries.maya import lib

    # Switch task
    api.update_current_task(task="rigging", asset=self.asset_name)

    # Open rig source file
    cmds.file(rig_source, open=True, force=True)

    # Update every loaded model container whose subset name matches
    _updated = False
    host = api.registered_host()
    for _container in host.ls():
        if _container["name"] == self.model_subset:
            api.update(_container)
            _updated = True

    if not _updated:
        # Not likely to happen, but just in case
        raise Exception("No matched model subset, this is a bug.")

    # Configure rig instances' activity:
    # activate rig instances that need to be published and deactivate
    # the rest.
    for instance_set in lib.lsAttr("id", "pyblish.avalon.instance"):
        active = cmds.getAttr(instance_set + ".subset") in rig_subsets
        cmds.setAttr(instance_set + ".active", active)

    # Save as file
    _tmp_dir = os.path.join(os.path.dirname(rig_source), "_auto_update")
    if not os.path.exists(_tmp_dir):
        os.mkdir(_tmp_dir)
        os.chmod(_tmp_dir, 0o777)  # Octal permission bits

    # Compose a good file name
    basename, ext = os.path.splitext(os.path.basename(rig_source))
    if "auto_model_update" not in basename:
        _new_fname = "{}.auto_model_update.001{}".format(basename, ext)
    else:
        current_v = re.findall(r"\.auto_model_update\.(\d+)\.",
                               rig_source)[0]
        new_v = "{:03d}".format(int(current_v) + 1)
        _new_fname = "{}{}".format(basename, ext)
        _new_fname = _new_fname.replace(".{}.published.".format(current_v),
                                        ".{}.".format(new_v))

    _save_to = os.path.join(_tmp_dir, _new_fname)
    cmds.file(rename=_save_to)
    cmds.file(force=True, save=True)
    print("Saved to: {}".format(_save_to))

    # Publish
    pyblish.api.register_target("localhost")

    # Fix AvalonUUID before validation
    ValidateAvalonUUID = next(p for p in pyblish.api.discover()
                              if p.__name__ == "ValidateAvalonUUID")
    for instance in pyblish.util.collect():
        try:
            ValidateAvalonUUID.fix_invalid_missing(instance)
        except Exception as e:
            print("Fix uuid failed: {}.".format(e))

    context = pyblish.util.collect()
    context.data["comment"] = "Auto update model to latest version."
    context = pyblish.util.validate(context=context)
    context = pyblish.util.extract(context=context)

    if not all(result["success"] for result in context.data["results"]):
        raise RuntimeError("Atomicity not held, aborting.")

    # Will run integration later..
    self.contexts.append(context)