def _on_publish(self, publish_form): """ Slot called when publish signal is emitted from the UI """ # get list of tasks from UI: selected_tasks = publish_form.selected_tasks # stop if can't actually do the publish! if not selected_tasks: # TODO - replace with tank dialog QtGui.QMessageBox.information( publish_form, "Publish", "Nothing selected to publish - unable to continue!") return # split tasks into primary and secondary: primary_task = None secondary_tasks = [] for ti, task in enumerate(selected_tasks): if task.output == self._primary_output: if primary_task: raise TankError("Found multiple primary tasks to publish!") primary_task = task secondary_tasks = selected_tasks[:ti] + selected_tasks[(ti + 1):] if not primary_task: raise TankError("Couldn't find primary task to publish!") # pull rest of info from UI sg_task = publish_form.shotgun_task thumbnail = publish_form.thumbnail comment = publish_form.comment # create progress reporter and connect to UI: progress = TaskProgressReporter(selected_tasks) publish_form.set_progress_reporter(progress) # show pre-publish progress: publish_form.show_publish_progress("Doing Pre-Publish") progress.reset() # make dialog modal whilst we're doing work: """ (AD) - whilst this almost works, returning from modal state seems to completely mess up the window parenting in Maya so may need to have another way to do this or (more likely) move it to a separate dialog! geom = publish_form.window().geometry() publish_form.window().setWindowModality(QtCore.Qt.ApplicationModal) publish_form.window().hide() publish_form.window().show() publish_form.window().setGeometry(geom) """ # do pre-publish: try: self._do_pre_publish(primary_task, secondary_tasks, progress.report) except TankError, e: QtGui.QMessageBox.information(publish_form, "Pre-publish Failed", "Pre-Publish Failed!\n\n%s" % e) publish_form.show_publish_details() return
def _nuke_execute(self, tasks, work_template, comment, thumbnail_path,
                  sg_task, primary_task, primary_publish_path, progress_cb,
                  **kwargs):
    """
    The Nuke specific secondary publish routine.

    Groups the secondary tasks by output name and processes them in a fixed
    order ("render" before "quicktime") so that rendered sequences are
    registered in Shotgun before the review submission that depends on them.

    :param tasks:                   List of secondary task dictionaries, each
                                    with "item" and "output" keys.
    :param work_template:           Template for the current work file.
    :param comment:                 The publish comment/description.
    :param thumbnail_path:          Path to the publish thumbnail.
    :param sg_task:                 The Shotgun task to publish for.
    :param primary_task:            The primary task that was published.
    :param primary_publish_path:    Path of the primary publish.
    :param progress_cb:             Callback to report progress to the UI.
    """
    # per-task results; NOTE(review): nothing is appended to this list within
    # this excerpt - presumably the (not shown) continuation appends
    # {"task": ..., "errors": errors} entries and returns it.
    results = []

    # it's important that tasks for render output are processed
    # before tasks for quicktime output, so let's group the
    # task list by output. This can be controlled through the
    # configuration but we shouldn't rely on that being set up
    # correctly!
    output_order = ["render", "quicktime"]
    tasks_by_output = {}
    for task in tasks:
        output_name = task["output"]["name"]
        tasks_by_output.setdefault(output_name, list()).append(task)
        # unknown outputs are processed after the known ones, in the order
        # they are first encountered
        if output_name not in output_order:
            output_order.append(output_name)

    # make sure we have any apps required by the publish process:
    if "render" in tasks_by_output or "quicktime" in tasks_by_output:
        # we will need the write node app if we have any render outputs to validate
        if not self.__write_node_app:
            raise TankError(
                "Unable to publish Shotgun Write Nodes without the tk-nuke-writenode app!"
            )

    if "quicktime" in tasks_by_output:
        # If we have the tk-multi-reviewsubmission app we can create versions
        if not self.__review_submission_app:
            raise TankError(
                "Unable to publish Review Versions without the tk-multi-reviewsubmission app!"
            )

    # Keep of track of what has been published in shotgun
    # this is needed as input into the review creation code...
    render_publishes = {}

    # process outputs in order:
    for output_name in output_order:

        # process each task for this output:
        for task in tasks_by_output.get(output_name, []):

            # keep track of our errors for this task
            errors = []

            # report progress:
            progress_cb(0.0, "Publishing", task)

            if output_name == "render":
                # Publish the rendered output for a Shotgun Write Node

                # each publish task is connected to a nuke write node
                # this value was populated via the scan scene hook
                write_node = task["item"].get("other_params",
                                              dict()).get("node")
                if not write_node:
                    raise TankError(
                        "Could not determine nuke write node for item '%s'!" %
                        str(task))

                # publish write-node rendered sequence
                try:
                    (sg_publish,
                     thumbnail_path) = self._publish_write_node_render(
                         task, write_node, primary_publish_path, sg_task,
                         comment, progress_cb)

                    # keep track of our publish data so that we can pick it up later in review
                    render_publishes[write_node.name()] = (sg_publish,
                                                           thumbnail_path)
                except Exception, e:
                    errors.append("Publish failed - %s" % e)

            elif output_name == "quicktime":
                # Publish the reviewable quicktime movie for a Shotgun Write Node

                # each publish task is connected to a nuke write node
                # this value was populated via the scan scene hook
                write_node = task["item"].get("other_params",
                                              dict()).get("node")
                if not write_node:
                    raise TankError(
                        "Could not determine nuke write node for item '%s'!" %
                        str(task))

                # Submit published sequence to Screening Room
                try:
                    # pick up sg data from the render dict we are maintianing
                    # note: we assume that the rendering tasks always happen
                    # before the review tasks inside the publish...
                    (sg_publish,
                     thumbnail_path) = render_publishes[write_node.name()]

                    self._send_to_screening_room(write_node, sg_publish,
                                                 sg_task, comment,
                                                 thumbnail_path, progress_cb)
                except Exception, e:
                    errors.append("Submit to Screening Room failed - %s" % e)
def _publish_prop_for_item(self, item, output, work_template,
                           primary_publish_path, sg_task, comment,
                           thumbnail_path, progress_cb):
    """
    Export an asset from the scene and publish it to Shotgun.

    Builds work/publish paths from a hard-coded field set (Step "mod",
    resolution "lay"), version-bumps against the existing files, detaches the
    prop's meshes into a temporary group, exports that group to both the
    publish and the work file paths as Maya ASCII, then re-parents the meshes
    and deletes the temporary group.

    :param item:                    The item to publish; reads "name", "type"
                                    and other_params "propName"/"selectionDict".
    :param output:                  The output definition ("tank_type" and
                                    "publish_template" are read).
    :param work_template:           Template for the current work file.
    :param primary_publish_path:    Path to the primary published file.
    :param sg_task:                 The Shotgun task we are publishing for.
    :param comment:                 The publish comment/description.
    :param thumbnail_path:          The path to the publish thumbnail.
    :param progress_cb:             Callback used to report progress.
    :raises:                        TankError if the publish path already
                                    exists or the export fails.
    """
    assetName = item["name"]
    objectName = item["other_params"]["propName"]
    # NOTE(review): groupSelection and taskTemplate below are not used within
    # this excerpt - confirm they are needed further down or remove.
    groupSelection = item["other_params"]["selectionDict"]
    assetType = item["type"]
    tank_type = output["tank_type"]
    publish_template = output["publish_template"]

    filters = [['code', 'is', 'Prop Template']]
    taskTemplate = self.parent.shotgun.find_one('TaskTemplate', filters)

    # get the current scene path and extract fields from it
    # using the work template:
    scene_path = os.path.abspath(cmds.file(query=True, sn=True))
    # mainFields = work_template.get_fields(scene_path)
    version = 1
    resolution = "lay"

    # fields needs : '@asset_root_step/work/maya/{Asset}_{Step}_v{version}.ma'
    fields = {
        "sg_asset_type": assetType,
        "Asset": assetName,
        "Step": "mod",
        "version": version,
        "resolution": resolution
    }

    #TEST TD RESOLUTION
    if 'Resolution' not in item:
        fields["Resolution"] = "lay"

    # create the publish path by applying the fields
    # with the publish template:
    work_version = fields["version"]
    publish_version = fields["version"]
    model_publish_path = publish_template.apply_fields(fields)
    model_workfile_path = work_template.apply_fields(fields)
    progress_cb(10)
    print "### model_publish_path = %s ###" % model_publish_path

    # determine the next available version numbers for work and publish files
    # if os.path.exists(model_workfile_path):
    work_version = self.versionUpAsset(model_workfile_path, work_template)
    # if os.path.exists(model_publish_path):
    publish_version = self.versionUpAsset(model_publish_path,
                                          publish_template)

    # take whichever side is further ahead so work/publish stay in sync
    if work_version > publish_version:
        fields["version"] = work_version
    else:
        fields["version"] = publish_version + 1
    publish_version = fields["version"]
    model_publish_path = publish_template.apply_fields(fields)

    # NOTE(review): this is an alias, not a copy - mutating
    # workfileFields["version"] on the next line also bumps
    # fields["version"].  Looks unintentional; confirm and use
    # fields.copy() if the publish fields must stay untouched.
    workfileFields = fields
    workfileFields["version"] = fields["version"] + 1
    model_workfile_path = work_template.apply_fields(workfileFields)

    if os.path.exists(model_publish_path):
        raise TankError("The published file named '%s' already exists!" %
                        model_publish_path)

    # Do all the resetting transformations/saving/exporting and other stuff...
    uninstance()
    deleteUnusedGroups()
    RenameUnistanced()

    # gather the prop's children (excluding the prop transform itself) and
    # select only their non-transform nodes for the export
    tempChildren = cmds.listRelatives(objectName, allDescendents=False,
                                      children=True, shapes=False, path=True)
    if objectName in tempChildren:
        tempChildren.remove(objectName)
    cmds.select(tempChildren, hierarchy=True, add=False)
    sel = cmds.ls(selection=True, excludeType="transform")
    cmds.select(sel)

    # zero the prop's transform for the export; original values captured so
    # they can presumably be restored later - TODO confirm (restore not
    # visible in this excerpt)
    tempPos, tempRot, tempScl = getTransform(objectName)
    setTransform(objectName)
    progress_cb(25)

    try:
        print 'TRY to publish model : %s' % objectName
        publish_folder = os.path.dirname(model_publish_path)
        workfile_folder = os.path.dirname(model_workfile_path)
        self.parent.ensure_folder_exists(publish_folder)
        self.parent.ensure_folder_exists(workfile_folder)
        tk = self.parent.tank

        print 'implement the export of object :', assetName
        print 'Make group and put meshes in it...'
        # groupName = 'GRP_%s' %(objectName)
        groupName = 'GRP_%s' % (assetName)
        # group the current selection under a temporary export group
        groupName = cmds.group(name=groupName, world=True)

        print 'Unparenting meshes for export'
        # tempChildObjects = cmds.parent(groupName, world = True)

        print 'exporting meshes'
        cmds.select(groupName, hierarchy=True, add=False)
        # export the group to both the publish and the work file locations
        returnName = cmds.file(model_publish_path, type='mayaAscii',
                               exportSelected=True)
        returnWorkName = cmds.file(model_workfile_path, type='mayaAscii',
                                   exportSelected=True)
        progress_cb(45)

        print 'parenting meshes again...'
        print sel
        print objectName
        # re-parent the exported meshes back under the original prop
        for objChild in sel:
            objChildSelName = objChild.split('|')[-1]
            cmds.parent(groupName + '|' + objChildSelName, objectName)
        print groupName
        # cmds.parent(sel, objectName)
        # cmds.parent(groupName, objectName)
        cmds.delete(groupName)
        print 'export done...'
    except Exception, e:
        raise TankError("Failed to export model: %s" % e)
def execute_folder_creation_proxy(self):
    """
    Runs the actual folder execution.

    Synchronizes the local path cache against Shotgun (unless running in
    preview mode), validates the new folder/entity mappings, registers them
    in the path cache and returns every path that was computed for creation.
    Also stores that list in the module-level ``g_paths_created`` global.

    Reads instance state: ``self._tk``, ``self._preview_mode``,
    ``self._items``, ``self._secondary_cache_entries``,
    ``self._entity_type`` and ``self._entity_ids``.

    :returns: A list of paths which were calculated to be created
    :raises:  TankError if validation of the mappings fails
    """
    path_cache = PathCache(self._tk)
    try:
        # because the sync can make changes to the path cache, do not run in preview mode
        remote_items = []
        if not self._preview_mode:
            # request that the path cache is synced against shotgun
            # new items that were not locally available are returned
            # as a list of dicts with keys id, type, name, configuration and path
            rd = path_cache.synchronize()

            # for each item we get back from the path cache synchronization,
            # issue a remote entity folder request and pass that down to
            # the folder creation hook. This way, folders can be auto created
            # across multiple locations if desirable.
            for i in rd:
                remote_items.append({
                    "action": "remote_entity_folder",
                    "path": i["path"],
                    "metadata": i["metadata"],
                    "entity": i["entity"],
                })

        # put together a list of entries we should pass to the database
        db_entries = []

        for i in self._items:
            if i.get("action") == "entity_folder":
                db_entries.append({
                    "entity": i["entity"],
                    "path": i["path"],
                    "primary": True,
                    "metadata": i["metadata"],
                })

        for i in self._secondary_cache_entries:
            db_entries.append({
                "entity": i["entity"],
                "path": i["path"],
                "primary": False,
                "metadata": i["metadata"],
            })

        # now that we are synced up with all remote sites,
        # validate the data before we push it into the databse.
        # to properly cover some edge cases
        try:
            path_cache.validate_mappings(db_entries)
        except TankError as e:
            # ok folders created for synced stuff. Now re-raise validation error
            raise TankError("Folder creation aborted: %s" % e)

        # validation passed!
        # now request the IO operations to take place
        # note that we pass both the items that were created from syncing with remote
        # and the new folders that have been computed
        folder_creation_items = remote_items + self._items

        # database data was validated, folders on disk created
        # finally store all our new data in the path cache and in shotgun
        if not self._preview_mode:
            path_cache.add_mappings(db_entries, self._entity_type,
                                    self._entity_ids)

        # return all folders that were computed
        folders = []
        for i in folder_creation_items:
            action = i.get("action")
            if action in [
                    "entity_folder",
                    "create_file",
                    "folder",
                    "remote_entity_folder",
            ]:
                folders.append(i["path"])
            elif action == "copy":
                # copy items record their destination under "target_path"
                folders.append(i["target_path"])
    finally:
        # always release the path cache connection, even on failure
        path_cache.close()

    # expose the created paths to other code via the module-level global
    global g_paths_created
    g_paths_created = folders

    return folders
def execute(self, task, work_template, comment, thumbnail_path, sg_task, progress_cb, **kwargs):
    """
    Main hook entry point.

    Routes the primary publish to the engine-specific implementation for
    whichever engine is currently running.

    :param task:            Primary task to be published.  A dictionary with an
                            "item" key (the item returned by the scan hook:
                            name, description, type, other_params) and an
                            "output" key (the primary output definition from
                            the configuration: name, publish_template,
                            tank_type).  The primary output is always named
                            'primary'.
    :param work_template:   Template defined in the config that represents the
                            current work file.
    :param comment:         The comment provided for the publish.
    :param thumbnail_path:  Path to the default thumbnail for the publish.
    :param sg_task:         The Shotgun task (entity dictionary) to use for
                            the publish.
    :param progress_cb:     Progress callback - call
                            progress_cb(percentage, msg) to report progress
                            to the UI.
    :returns:               Path of the primary publish so it can be passed
                            as a dependency to all secondary publishes.
    :raises:                TankError if the publish of the primary task
                            fails, or if the current engine is not handled.
    """
    # map each supported engine to the name of its publish routine; the
    # method itself is looked up lazily so only the matched handler is
    # ever resolved on self.
    handler_names = {
        "tk-maya": "_do_maya_publish",
        "tk-nuke": "_do_nuke_publish",
        "tk-3dsmax": "_do_3dsmax_publish",
        "tk-hiero": "_do_hiero_publish",
        "tk-houdini": "_do_houdini_publish",
        "tk-softimage": "_do_softimage_publish",
        "tk-photoshop": "_do_photoshop_publish",
        "tk-motionbuilder": "_do_motionbuilder_publish",
    }

    # get the engine name from the parent object (app/engine/etc.)
    engine_name = self.parent.engine.name

    method_name = handler_names.get(engine_name)
    if method_name is None:
        raise TankError(
            "Unable to perform publish for unhandled engine %s" % engine_name)

    publish_fn = getattr(self, method_name)
    return publish_fn(task, work_template, comment, thumbnail_path, sg_task,
                      progress_cb)
def __publish_geocache(self, item, output, work_template,
                       primary_publish_path, sg_task, comment,
                       thumbnail_path, progress_cb):
    """
    Publish an Alembic cache file for the scene and publish it to Shotgun.

    :param item:                    The item to publish
    :param output:                  The output definition to publish with
    :param work_template:           The work template for the current scene
    :param primary_publish_path:    The path to the primary published file
    :param sg_task:                 The Shotgun task we are publishing for
    :param comment:                 The publish comment/description
    :param thumbnail_path:          The path to the publish thumbnail
    :param progress_cb:             A callback that can be used to report progress
    """
    # determine the publish info to use
    #
    progress_cb(10, "Determining publish details")

    # get the current scene path and extract fields from it
    # using the work template:
    scene_path = os.path.abspath(cmds.file(query=True, sn=True))
    fields = work_template.get_fields(scene_path)
    publish_version = fields["version"]
    tank_type = output["tank_type"]

    # create the publish path by applying the fields
    # with the publish template:
    publish_template = output["publish_template"]
    publish_path = publish_template.apply_fields(fields)

    # doCreateGeometryCache expects forward slashes
    # NOTE(review): geo_publish_path is not referenced within this excerpt -
    # confirm it is used further down or remove.
    geo_publish_path = publish_path.replace("\\", "/")

    # ensure the publish folder exists:
    publish_folder = os.path.dirname(publish_path)
    self.parent.ensure_folder_exists(publish_folder)

    # determine the publish name:
    publish_name = fields.get("name")
    if not publish_name:
        publish_name = os.path.basename(publish_path)

    # Find additional info from the scene:
    #
    progress_cb(10, "Analysing scene")

    # find the animated frame range to use:
    frame_start = int(cmds.playbackOptions(q=True, min=True))
    frame_end = int(cmds.playbackOptions(q=True, max=True))

    # collect the geometry to cache from the item's namespaced cache set
    namespace = item["name"]
    setName = namespace + ":cache_SET"
    members = pymel.core.sets(setName, q=True)
    # expand every set member to its descendant transforms, unless the
    # member is already a *_GEO node, in which case keep it as-is.
    # NOTE(review): listRelatives can return None for leaf members, which
    # would break the flattening below - confirm set contents guarantee
    # descendants.
    transforms = map(
        lambda m: pymel.core.listRelatives(
            m, type="transform", allDescendents=True)
        if not m.endswith("_GEO") else [m],
        members)
    # flatten and keep only the *_GEO transforms, then select them so the
    # exporter operates on the right nodes
    geos = [
        geo for geoList in transforms for geo in geoList
        if geo.endswith("_GEO")
    ]
    pymel.core.select(geos)

    # run the command:
    progress_cb(30, "Exporting GeoCache")
    try:
        # do it
        self.parent.log_debug("Executing command: aaPCGen.doExport(%s,%s,%s)"\
            % (publish_path, frame_start, frame_end))
        aaPCGen.doExport(publish_path, frame_start, frame_end)
    except Exception, e:
        raise TankError("Failed to export GeoCache: %s" % e)
def execute(self, **kwargs):
    """
    Main hook entry point
    :returns:   A list of any items that were found to be published.
                Each item in the list should be a dictionary containing
                the following keys:
                {
                    type:   String
                            This should match a scene_item_type defined in
                            one of the outputs in the configuration and is
                            used to determine the outputs that should be
                            published for the item

                    name:   String
                            Name to use for the item in the UI

                    description:    String
                                    Description of the item to use in the UI

                    selected:       Bool
                                    Initial selected state of item in the UI.
                                    Items are selected by default.

                    required:       Bool
                                    Required state of item in the UI. If True
                                    then item will not be deselectable. Items
                                    are not required by default.

                    other_params:   Dictionary
                                    Optional dictionary that will be passed to
                                    the pre-publish and publish hooks
                }
    :raises:    TankError if the scene is unsaved or fails any of the hard
                validation checks below.
    """
    items = []

    # get the main scene:
    scene_name = cmds.file(query=True, sn=True)
    if not scene_name:
        raise TankError("Please Save your file before Publishing")

    scene_path = os.path.abspath(scene_name)
    name = os.path.basename(scene_path)

    # create the primary item - this will match the primary output 'scene_item_type':
    items.append({"type": "work_file", "name": name})

    ### CLEANUP ################################################################################
    ### NOW DO SCENE CRITICAL CHECKS LIKE DUPLICATE OBJECT NAMES ETC AND FAIL HARD IF THESE FAIL!
    ############################################################################################

    #############################
    ## INITAL HARD FAILS
    ## Do a quick check for geo_hrc and rig_hrc
    ## geo_hrc
    if not cmds.objExists('geo_hrc'):
        raise TankError(
            "Please Group all your geo under a geo_hrc group under the root node."
        )
    ## rig_hrc
    ## UNCOMMENT FOR MDL STEP
    if cleanup.rigGroupCheck():
        raise TankError(
            'Rig group found!! Please use the RIG menus to publish rigs...'
        )
    ## UNCOMMENT FOR RIG STEP
    # if not cleanup.rigGroupCheck():
    #     raise TankError('No rig group found!! Please make sure your animation controls are under rig_hrc.')
    ## Now check it's the right KIND of asset eg CHAR or PROP
    cleanup.assetCheckAndTag(type='BLD', customTag='staticBLD')

    #############################
    ## SECONDARIES FOR PUBLISHING
    ## WE NEED TO FIND THE MAIN GROUP THAT HAS MESHES IN IT NOW AND PUSH THIS INTO THE ITEMS LIST FOR SECONDARY PUBLISHING
    ## Look for root level groups that have meshes as children:
    for grp in cmds.ls(assemblies=True, long=True):
        if cmds.ls(grp, dag=True, type="mesh"):
            # include this group as a 'mesh_group' type
            ### UNCOMMENT FOR PROP CHAR LND ASSETS
            # items.append({"type":"mesh_group", "name":grp})
            ### UNCOMMENT FOR BLD MLD STEP
            if cleanup.BLDTransformCheck(
                    grp
            ):  ## Check for BLD step only to make sure the transforms are not frozen on the BLD grps
                items.append({"type": "mesh_group", "name": grp})

    #############################
    ## HARD FAILS
    ## Duplicate name check
    if not cleanup.duplicateNameCheck():
        raise TankError(
            "Duplicate names found please fix before publishing.\nCheck the outliner for the duplicate name set."
        )
    ## Incorrect Suffix check
    checkSceneGeo = cleanup._geoSuffixCheck(items)
    if not checkSceneGeo:
        raise TankError(
            "Incorrect Suffixes found! Fix suffixes before publishing.\nCheck the outliner for the duplicate name set."
        )
    ## Incorrect root name
    if not utils.checkRoot_hrc_Naming(items):
        # NOTE(review): assumes the asset name is the 5th component of the
        # scene path - TODO confirm against the project folder schema
        assetName = cmds.file(query=True, sn=True).split('/')[4]
        raise TankError(
            "YOUR ASSET IS NAMED INCORRECTLY! Remember it is CASE SENSITIVE!\nIt should be %s_hrc"
            % assetName)

    #############################
    ## NOW PREP THE GEO FOR EXPORT!!!
    ## THESE CLEANUPS SHOULD NOT FAIL THEY SHOULD JUST BE PERFORMED
    ## UNCOMMENT FOR MDL STEP
    ## PEFORM MDL CLEANUP
    cleanup.cleanUp(items=items, checkShapes=True, history=True, pivots=True,
                    freezeXFRM=True, smoothLvl=True, tagSmoothed=True,
                    checkVerts=True, renderflags=True,
                    deleteIntermediate=True, turnOffOpposite=True,
                    instanceCheck=True, shaders=True)
    ## UNCOMMENT FOR RIG STEP
    ## PEFORM RIG CLEANUP
    # cleanup.cleanUp(items = items, checkShapes = False, history = False, pivots = False, freezeXFRM = False, smoothLvl = True, tagSmoothed = True, checkVerts = False,
    # renderflags = True, deleteIntermediate = False, turnOffOpposite = True, instanceCheck = False, shaders = True)

    #############################################################################
    ## NOW MOVE ON TO PUBLISHING
    return items
def _publish_camera(self, item, publish_template, fields, comment, sg_task,
                    primary_publish_path, progress_cb, thumbnail_path):
    """
    Publishes the selected camera.

    Bakes the camera if it is grouped, unlocks it, normalises its scale to 1
    and exports it as an Alembic cache to the path built from
    ``publish_template``.

    NOTE(review): early failures (existing publish, selection error) are
    appended to the returned ``errors`` list, but an export failure raises
    TankError instead - confirm this mixed error reporting is intentional.

    :param item:                    The camera item to publish ("name" is read).
    :param publish_template:        Template used to build the publish path.
    :param fields:                  Template fields; "name" is overwritten here.
    :param comment:                 The publish comment/description.
    :param sg_task:                 The Shotgun task we are publishing for.
    :param primary_publish_path:    Path to the primary published file.
    :param progress_cb:             Callback used to report progress.
    :param thumbnail_path:          The path to the publish thumbnail.
    :returns:                       List of error strings (empty on the paths
                                    visible in this excerpt that reach export).
    """
    errors = []
    print "<publish> publish camera called"

    # set publish path and publish name based on publish template and item name respectively.
    # namespace separators are not valid in file names, so flatten them
    fields['name'] = item['name'].replace(":", "_")
    secondary_publish_path = publish_template.apply_fields(fields)
    secondary_publish_name = fields.get(
        "name").upper()  # we want an uppercase name
    if not secondary_publish_name:
        secondary_publish_name = os.path.basename(secondary_publish_path)
    print "<publish> using name for publish: %s" % secondary_publish_name

    # check if camera has already been published.
    if os.path.exists(secondary_publish_path):
        print "<publish> The published camera named '%s' already exists!" % secondary_publish_path
        errors.append("The published camera named '%s' already exists!" %
                      secondary_publish_path)
        return errors

    # select the camera
    try:
        cmds.select(item['name'], visible=True, hierarchy=True, replace=True)
        print "<publish> selected %s" % item['name']
    except ValueError as e:
        errors.append('Unable to select camera [%s]' % item['name'])
        return errors

    # find the animated frame range to use:
    start_frame, end_frame = self._find_scene_animation_range()

    # bake out camera if grouped - a parented camera carries inherited
    # transforms, so it is baked to a world-space duplicate first
    if not cmds.listRelatives(item['name'], allParents=True) is None:
        dup_name = self._bake_camera(item, start_frame, end_frame)
    else:
        dup_name = item['name']

    self._unlock_camera(dup_name)

    # force uniform scale of 1 before export (only checks .sx, then resets
    # all three axes)
    if not cmds.getAttr(''.join([dup_name, '.sx'])) == 1:
        cmds.setAttr(''.join([dup_name, '.sx']), 1)
        cmds.setAttr(''.join([dup_name, '.sy']), 1)
        cmds.setAttr(''.join([dup_name, '.sz']), 1)

    # export only the camera's hierarchy root
    alembic_args_string = ''.join(["-root |", dup_name])
    alembic_args = [alembic_args_string]
    if start_frame and end_frame:
        alembic_args.append("-fr %d %d" % (start_frame, end_frame))

    # Set the output path:
    # Note: The AbcExport command expects forward slashes!
    alembic_args.append("-file %s" %
                        secondary_publish_path.replace("\\", "/"))

    # build the export command. Note, use AbcExport -help in Maya for
    # more detailed Alembic export help
    abc_export_cmd = ("AbcExport -j \"%s\"" % " ".join(alembic_args))

    # ...and execute it:
    progress_cb(30, "Exporting Alembic cache")
    try:
        self.parent.log_debug("Executing command: %s" % abc_export_cmd)
        mel.eval(abc_export_cmd)
    except Exception, e:
        raise TankError("Failed to export Alembic Cache: %s" % e)
def _publish_geo(self, item, publish_template, fields, comment, sg_task,
                 primary_publish_path, progress_cb, thumbnail_path):
    """
    Publishes the mesh group as an alembic.

    Builds the publish path from ``publish_template``, selects the item's
    hierarchy and exports it with AbcExport.

    NOTE(review): early failures (existing publish, selection error) are
    appended to the returned ``errors`` list, but an export failure raises
    TankError instead - confirm this mixed error reporting is intentional.

    :param item:                    The mesh group item to publish ("name" is read).
    :param publish_template:        Template used to build the publish path.
    :param fields:                  Template fields; "name" is overwritten here.
    :param comment:                 The publish comment/description.
    :param sg_task:                 The Shotgun task we are publishing for.
    :param primary_publish_path:    Path to the primary published file.
    :param progress_cb:             Callback used to report progress.
    :param thumbnail_path:          The path to the publish thumbnail.
    :returns:                       List of error strings on early failure.
    """
    errors = []
    print "<publish> publish model called"
    # print item['name']
    # fields['name'] = item['name'].split('|')[-1]
    # strip any namespace from the item name for the publish name
    fields['name'] = item['name'].split(':')[-1]
    # print fields['name']
    secondary_publish_path = publish_template.apply_fields(fields)
    secondary_publish_name = fields.get("name")
    if not secondary_publish_name:
        secondary_publish_name = os.path.basename(secondary_publish_path)

    # never overwrite an existing publish
    if os.path.exists(secondary_publish_path):
        print "<publish> The geoPublish named '%s' already exists!" % secondary_publish_path
        errors.append("The geoPublish named '%s' already exists!" %
                      secondary_publish_path)
        return errors

    progress_cb(10, "Analysing scene")

    alembic_args = [
        "-renderableOnly",  # only renderable objects (visible and not templated)
        "-worldSpace",  # root nodes will be stored in world space
        "-uvWrite",  # write uv's (only the current uv set gets written)
        "-wholeFrameGeo",
        "-selection"
    ]

    # find the animated frame range to use:
    start_frame, end_frame = self._find_scene_animation_range()
    if start_frame and end_frame:
        alembic_args.append("-fr %d %d" % (start_frame, end_frame))

    # Set the output path:
    # Note: The AbcExport command expects forward slashes!
    alembic_args.append("-file %s" %
                        secondary_publish_path.replace("\\", "/"))

    # build the export command. Note, use AbcExport -help in Maya for
    # more detailed Alembic export help
    abc_export_cmd = ("AbcExport -j \"%s\"" % " ".join(alembic_args))

    # select geo
    try:
        cmds.select(item['name'], hierarchy=True)
    except Exception as e:
        print e
        errors.append('Unable to select transform [%s]' % item['name'])
        return errors

    # export selection
    progress_cb(30, "Exporting Alembic cache")
    try:
        self.parent.log_debug("Executing command: %s" % abc_export_cmd)
        mel.eval(abc_export_cmd)
    except Exception, e:
        raise TankError("Failed to export Alembic Cache: %s" % e)
def __publish_rendered_images(self, item, output, work_template,
                              primary_publish_path, sg_task, comment,
                              thumbnail_path, progress_cb):
    """
    Publish rendered images and register with Shotgun.

    The images have already been rendered; this only resolves the single
    rendered path recorded by the scan-scene hook and registers it as a
    published file in Shotgun, with the primary publish as a dependency.

    :param item:                    The item to publish
    :param output:                  The output definition to publish with
    :param work_template:           The work template for the current scene
    :param primary_publish_path:    The path to the primary published file
    :param sg_task:                 The Shotgun task we are publishing for
    :param comment:                 The publish comment/description
    :param thumbnail_path:          The path to the publish thumbnail
    :param progress_cb:             A callback that can be used to report progress
    :raises:                        TankError if no rendered images, or more
                                    than one candidate path, were found.
    """
    # determine the publish info to use
    #
    progress_cb(10, "Determining publish details")

    # get the current scene path and extract fields from it
    # using the work template:
    scene_name = str(hou.hipFile.name())
    scene_path = os.path.abspath(scene_name)
    fields = work_template.get_fields(scene_path)
    publish_version = fields["version"]
    tank_type = output["tank_type"]

    # this is pretty straight forward since the publish file(s) have
    # already been created (rendered). We're really just populating the
    # arguments to send to the sg publish file registration below.
    publish_name = "%s_%s" % (fields["name"], item["name"])

    # we already determined the path in the scan_scene code. so just
    # pull it from that dictionary.
    other_params = item["other_params"]
    paths = other_params["paths"]
    node = other_params["node"]

    if not paths:
        raise TankError(
            "No rendered images found for node '%s'." % (node.path(),)
        )
    elif len(paths) > 1:
        # NOTE(review): the two adjacent literals are implicitly
        # concatenated with no separator, producing "...node '%s'.Skipping
        # these paths..." - confirm whether a space/newline was intended.
        raise TankError(
            "Found multiple potential rendered image paths for node '%s'."
            "Skipping these paths:\n '%s'" %
            (node.path(), "\n ".join(paths))
        )

    # exactly one rendered path - this is what gets registered
    publish_path = paths[0]

    # register the publish:
    progress_cb(75, "Registering the publish")
    args = {
        "tk": self.parent.tank,
        "context": self.parent.context,
        "comment": comment,
        "path": publish_path,
        "name": publish_name,
        "version_number": publish_version,
        "thumbnail_path": thumbnail_path,
        "task": sg_task,
        "dependency_paths": [primary_publish_path],
        "published_file_type": tank_type
    }
    tank.util.register_publish(**args)
def execute(self, **kwargs):
    """
    Main hook entry point

    Scans the current Maya scene, returning the primary work-file item plus
    (when any objects were gathered) a single "poslist" secondary item
    carrying the gathered objects.

    :returns:   A list of any items that were found to be published.
                Each item in the list is a dictionary containing:
                {
                    type:           String - matches a scene_item_type defined
                                    in one of the configured outputs; used to
                                    determine the outputs published for the item
                    name:           String - name to use for the item in the UI
                    description:    String - description shown in the UI
                    selected:       Bool - initial selected state in the UI
                                    (items are selected by default)
                    required:       Bool - if True the item cannot be
                                    deselected (not required by default)
                    other_params:   Dictionary passed to the pre-publish and
                                    publish hooks
                }
    :raises:    TankError if the scene has not been saved yet.
    """
    items = []

    # get the main scene:
    scene_name = cmds.file(query=True, sn=True)
    if not scene_name:
        raise TankError("Please Save your file before Publishing")

    scene_path = os.path.abspath(scene_name)
    name = os.path.basename(scene_path)

    # gather the scene objects; presumably populates self.content, which is
    # read below - TODO confirm
    self.getAllObjects()

    # create the primary item - this will match the primary output 'scene_item_type':
    items.append({"type": "work_file", "name": name})

    # offer a single position-list secondary item when anything was gathered
    other_params = self.content
    if self.content:  # idiomatic truthiness check (was: len(self.content) > 0)
        PoslistItem = {
            "type": 'poslist',
            "name": 'Position List',
            "description": "Positionlist for %s" % (scene_path),
            "selected": True,
            "required": False,
            "other_params": other_params
        }
        items.append(PoslistItem)
    return items
def _update_flame_clip(self, clip_path, write_node, sg_publish, progress_cb): """ Update the Flame open clip file for this shot with the published render. When a shot has been exported from flame, a clip file is available for each shot. We load that up, parse the xml and add a new entry to it. For docs on the clip format, see: http://knowledge.autodesk.com/support/flame-products/troubleshooting/caas/sfdcarticles/sfdcarticles/Creating-clip-Open-Clip-files-from-multi-EXR-assets.html http://docs.autodesk.com/flamepremium2015/index.html?url=files/GUID-1A051CEB-429B-413C-B6CA-256F4BB5D254.htm,topicNumber=d30e45343 When the clip file is updated, a new <version> tag and a new <feed> tag are inserted: <feed type="feed" vuid="v002" uid="DA62F3A2-BA3B-4939-8089-EC7FC603AC74"> <spans type="spans" version="4"> <span type="span" version="4"> <path encoding="pattern">/nuke/publish/path/mi001_scene_output_v001.[0100-0150].dpx</path> </span> </spans> </feed> <version type="version" uid="v002"> <name>Comp, scene.nk, v003</name> <creationDate>2014/12/09 22:30:04</creationDate> <userData type="dict"> </userData> </version> An example clip XML file would look something like this: <?xml version="1.0" encoding="UTF-8"?> <clip type="clip" version="4"> <handler type="handler"> ... </handler> <name type="string">mi001</name> <sourceName type="string">F004_C003_0228F8</sourceName> <userData type="dict"> ... 
</userData> <tracks type="tracks"> <track type="track" uid="video"> <trackType>video</trackType> <dropMode type="string">NDF</dropMode> <duration type="time" label="00:00:07+02"> <rate type="rate"> <numerator>24000</numerator> <denominator>1001</denominator> </rate> <nbTicks>170</nbTicks> <dropMode>NDF</dropMode> </duration> <name type="string">mi001</name> <userData type="dict"> <GATEWAY_NODE_ID type="binary">/mnt/projects/arizona_adventure/sequences/Mirage/mi001/editorial/flame/mi001.clip@TRACK(5)video</GATEWAY_NODE_ID> <GATEWAY_SERVER_ID type="binary">10.0.1.8:Gateway</GATEWAY_SERVER_ID> <GATEWAY_SERVER_NAME type="string">xxx</GATEWAY_SERVER_NAME> </userData> <feeds currentVersion="v002"> <feed type="feed" vuid="v000" uid="5E21801C-41C2-4B47-90B6-C1E25235F032"> <storageFormat type="format"> <type>video</type> <channelsDepth type="uint">10</channelsDepth> <channelsEncoding type="string">Integer</channelsEncoding> <channelsEndianess type="string">Big Endian</channelsEndianess> <fieldDominance type="int">2</fieldDominance> <height type="uint">1080</height> <nbChannels type="uint">3</nbChannels> <pixelLayout type="string">RGB</pixelLayout> <pixelRatio type="float">1</pixelRatio> <width type="uint">1920</width> </storageFormat> <sampleRate type="rate" version="4"> <numerator>24000</numerator> <denominator>1001</denominator> </sampleRate> <spans type="spans" version="4"> <span type="span" version="4"> <duration>170</duration> <path encoding="pattern">/mnt/projects/arizona_adventure/sequences/Mirage/mi001/editorial/dpx_plates/v000/F004_C003_0228F8/F004_C003_0228F8_mi001.v000.[0100-0269].dpx</path> </span> </spans> </feed> <feed type="feed" vuid="v001" uid="DA62F3A2-BA3B-4939-8089-EC7FC602AC74"> <storageFormat type="format"> <type>video</type> <channelsDepth type="uint">10</channelsDepth> <channelsEncoding type="string">Integer</channelsEncoding> <channelsEndianess type="string">Little Endian</channelsEndianess> <fieldDominance type="int">2</fieldDominance> <height 
type="uint">1080</height> <nbChannels type="uint">3</nbChannels> <pixelLayout type="string">RGB</pixelLayout> <pixelRatio type="float">1</pixelRatio> <rowOrdering type="string">down</rowOrdering> <width type="uint">1920</width> </storageFormat> <userData type="dict"> <recordTimecode type="time" label="00:00:00+00"> <rate type="rate">24</rate> <nbTicks>0</nbTicks> <dropMode>NDF</dropMode> </recordTimecode> </userData> <sampleRate type="rate" version="4"> <numerator>24000</numerator> <denominator>1001</denominator> </sampleRate> <startTimecode type="time"> <rate type="rate">24</rate> <nbTicks>1391414</nbTicks> <dropMode>NDF</dropMode> </startTimecode> <spans type="spans" version="4"> <span type="span" version="4"> <path encoding="pattern">/mnt/projects/arizona_adventure/sequences/Mirage/mi001/editorial/dpx_plates/v001/F004_C003_0228F8/F004_C003_0228F8_mi001.v001.[0100-0269].dpx</path> </span> </spans> </feed> </feeds> </track> </tracks> <versions type="versions" currentVersion="v002"> <version type="version" uid="v000"> <name>v000</name> <creationDate>2014/12/09 22:22:48</creationDate> <userData type="dict"> <batchSetup type="binary">/mnt/projects/arizona_adventure/sequences/Mirage/mi001/editorial/flame/batch/mi001.v000.batch</batchSetup> <versionNumber type="uint64">0</versionNumber> </userData> </version> <version type="version" uid="v001"> <name>v001</name> <creationDate>2014/12/09 22:30:04</creationDate> <userData type="dict"> <batchSetup type="binary">/mnt/projects/arizona_adventure/sequences/Mirage/mi001/editorial/flame/batch/mi001.v001.batch</batchSetup> <versionNumber type="uint64">1</versionNumber> </userData> </version> </versions> </clip> :param clip_path: path to the clip xml file to add the publish to :param write_node: current write node object :param sg_publish: shotgun publish :param progress_cb: progress callback """ progress_cb(1, "Updating Flame clip file...") # get the fields from the work file render_path = 
self.__write_node_app.get_node_render_path(write_node) render_template = self.__write_node_app.get_node_render_template( write_node) render_path_fields = render_template.get_fields(render_path) publish_template = self.__write_node_app.get_node_publish_template( write_node) # append extra fields needed by the publish template tank_type = self.__write_node_app.get_node_tank_type(write_node) render_path_fields["TankType"] = tank_type # set up the sequence token to be Flame friendly # e.g. mi001_scene_output_v001.[0100-0150].dpx # note - we cannot take the frame ranges from the write node - # because those values indicate the intended frame range rather # than the rendered frame range! In order for Flame to pick up # the media properly, it needs to contain the actual frame data # get all paths for all frames and all eyes paths = self.parent.sgtk.paths_from_template(publish_template, render_path_fields, skip_keys=["SEQ", "eye"]) # for each of them, extract the frame number. Track the min and the max min_frame = None max_frame = None for path in paths: fields = publish_template.get_fields(path) frame_number = fields["SEQ"] if min_frame is None or frame_number < min_frame: min_frame = frame_number if max_frame is None or frame_number > max_frame: max_frame = frame_number if min_frame is None or max_frame is None: # shouldn't really end up here - the validation checks that # stuff has actually been rendered. raise TankError( "Couldn't extract min and max frame from the published sequence! " "Will not update Flame clip xml.") # now when we have the real min/max frame, we can apply a proper sequence marker for the # Flame xml. Note that we cannot use the normal FORMAT: token in the template system, because # the Flame frame format is not totally "abstract" (e.g. %04d, ####, etc) but contains the frame # ranges themselves. # # the format spec is something like "04" sequence_key = publish_template.keys["SEQ"] # now compose the format string, eg. 
[%04d-%04d] format_str = "[%%%sd-%%%sd]" % (sequence_key.format_spec, sequence_key.format_spec) # and lastly plug in the values render_path_fields["SEQ"] = format_str % (min_frame, max_frame) # contruct the final path - because flame doesn't have any windows support and # because the "hub" platform is always linux (with potential flame assist and flare # satellite setups on macosx), request that the paths are written out on linux form # regardless of the operating system currently running. publish_path_flame = publish_template.apply_fields( render_path_fields, "linux2") # open up and update our xml file xml = minidom.parse(clip_path) # find first <track type="track" uid="video"> first_video_track = None for track in xml.getElementsByTagName("track"): if track.attributes["uid"].value == "video": first_video_track = track break if first_video_track is None: raise TankError( "Could not find <track type='track' uid='video'> in clip file!" ) # now contruct our feed xml chunk we want to insert # # this is the xml structure we want to insert: # # <feed type="feed" vuid="%s" uid="%s"> # <spans type="spans" version="4"> # <span type="span" version="4"> # <path encoding="pattern">%s</path> # </span> # </spans> # </feed> unique_id = str(uuid.uuid4()) # <feed type="feed" vuid="%s" uid="%s"> feed_node = xml.createElement("feed") feed_node.setAttribute("type", "feed") feed_node.setAttribute("uid", unique_id) feed_node.setAttribute("vuid", unique_id) # <spans type="spans" version="4"> spans_node = xml.createElement("spans") spans_node.setAttribute("type", "spans") spans_node.setAttribute("version", "4") feed_node.appendChild(spans_node) # <span type="span" version="4"> span_node = xml.createElement("span") span_node.setAttribute("type", "span") span_node.setAttribute("version", "4") spans_node.appendChild(span_node) # <path encoding="pattern">%s</path> path_node = xml.createElement("path") path_node.setAttribute("encoding", "pattern") 
path_node.appendChild(xml.createTextNode(publish_path_flame)) span_node.appendChild(path_node) # add new feed to first list of feeds inside of our track track.getElementsByTagName("feeds")[0].appendChild(feed_node) # now add same to the versions structure # # <version type="version" uid="%s"> # <name>%s</name> # <creationDate>%s</creationDate> # <userData type="dict"> # </userData> # </version> date_str = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") formatted_name = self._generate_flame_clip_name(render_path_fields) # <version type="version" uid="%s"> version_node = xml.createElement("version") version_node.setAttribute("type", "version") version_node.setAttribute("uid", unique_id) # <name>v003 Comp</name> child_node = xml.createElement("name") child_node.appendChild(xml.createTextNode(formatted_name)) version_node.appendChild(child_node) # <creationDate>1229-12-12 12:12:12</creationDate> child_node = xml.createElement("creationDate") child_node.appendChild(xml.createTextNode(date_str)) version_node.appendChild(child_node) # <userData type="dict"> child_node = xml.createElement("userData") child_node.setAttribute("type", "dict") version_node.appendChild(child_node) # add new feed to first list of versions xml.getElementsByTagName("versions")[0].appendChild(version_node) xml_string = xml.toxml(encoding="UTF-8") # make a backup of the clip file before we update it # # note - we are not using the template system here for simplicity # (user requiring customization can always modify this hook code themselves). # There is a potential edge case where the backup file cannot be written at this point # because you are on a different machine or running with different permissions. # backup_path = "%s.bak_%s" % ( clip_path, datetime.datetime.now().strftime("%Y%m%d_%H%M%S")) try: shutil.copy(clip_path, backup_path) except Exception, e: raise TankError( "Could not create backup copy of the Flame clip file '%s': %s" % (clip_path, e))
def __publish_alembic_cache(self, item, output, work_template, primary_publish_path, sg_task, comment, thumbnail_path, progress_cb): """ Publish an Alembic cache file for the scene and publish it to Shotgun. :param item: The item to publish :param output: The output definition to publish with :param work_template: The work template for the current scene :param primary_publish_path: The path to the primary published file :param sg_task: The Shotgun task we are publishing for :param comment: The publish comment/description :param thumbnail_path: The path to the publish thumbnail :param progress_cb: A callback that can be used to report progress """ # determine the publish info to use # progress_cb(10, "Determining publish details") # get the current scene path and extract fields from it # using the work template: scene_path = os.path.abspath(cmds.file(query=True, sn=True)) fields = work_template.get_fields(scene_path) publish_version = fields["version"] tank_type = output["tank_type"] # create the publish path by applying the fields # with the publish template: publish_template = output["publish_template"] publish_path = publish_template.apply_fields(fields) # ensure the publish folder exists: publish_folder = os.path.dirname(publish_path) self.parent.ensure_folder_exists(publish_folder) # determine the publish name: publish_name = fields.get("name") if not publish_name: publish_name = os.path.basename(publish_path) # Find additional info from the scene: # progress_cb(10, "Analysing scene") # set the alembic args that make the most sense when working with Mari. These flags # will ensure the export of an Alembic file that contains all visible geometry from # the current scene together with UV's and face sets for use in Mari. 
alembic_args = [ "-renderableOnly", # only renderable objects (visible and not templated) "-writeFaceSets", # write shading group set assignments (Maya 2015+) "-uvWrite" # write uv's (only the current uv set gets written) ] # find the animated frame range to use: start_frame, end_frame = self._find_scene_animation_range() if start_frame and end_frame: alembic_args.append("-fr %d %d" % (start_frame, end_frame)) # Set the output path: # Note: The AbcExport command expects forward slashes! alembic_args.append("-file %s" % publish_path.replace("\\", "/")) # build the export command. Note, use AbcExport -help in Maya for # more detailed Alembic export help abc_export_cmd = ("AbcExport -j \"%s\"" % " ".join(alembic_args)) # ...and execute it: progress_cb(30, "Exporting Alembic cache") try: self.parent.log_debug("Executing command: %s" % abc_export_cmd) mel.eval(abc_export_cmd) except Exception, e: raise TankError("Failed to export Alembic Cache: %s" % e)
def execute(self, **kwargs):
    """
    Main hook entry point.  Scans the active Photoshop document for
    publishable items.

    :returns: A list of dictionaries, one per item found.  Each dictionary
              may contain the keys: type (String - matches a scene_item_type
              from the configuration), name (String - UI name), description
              (String - UI description), selected (Bool - initial selection
              state, True by default), required (Bool - whether the item can
              be deselected, False by default) and other_params (Dictionary
              passed through to the pre-publish and publish hooks).
    """
    # a document must exist and be saved before anything can be published:
    doc = photoshop.app.activeDocument
    if doc is None:
        raise TankError("There is no currently active document!")
    if not doc.saved:
        raise TankError("Please Save your file before Publishing")

    doc_path = doc.fullName.nativePath
    doc_name = os.path.basename(doc_path)

    # the primary item matches the primary output 'scene_item_type';
    # the flattened tif export is always offered as a secondary item:
    found = [
        {"type": "work_file", "name": doc_name},
        {"type": "merged_layers",
         "name": doc_name,
         "description": "Export as %s.tif" % doc_name},
    ]

    # finally, offer every visible top-level layer group as a texture export:
    layer_sets = doc.layerSets
    for idx in xrange(layer_sets.length):
        layer_group = layer_sets.index(idx)
        # ignore layers that aren't visible:
        if not layer_group.visible:
            continue
        texture_name = layer_group.name.encode("utf8")
        found.append({"type": "group",
                      "name": texture_name,
                      "description": "Export as the %s texture" % texture_name})

    return found
# NOTE(review): fragment - the enclosing function's definition starts
# before this chunk and the 'args' dict below is cut off mid-literal.
try:
    # do it - export the point cache via the studio's aaPCGen helper
    self.parent.log_debug("Executing command: aaPCGen.doExport(%s,%s,%s)"\
        % ( publish_path, frame_start, frame_end ) )
    aaPCGen.doExport(publish_path, frame_start, frame_end)
except Exception, e:
    raise TankError("Failed to export GeoCache: %s" % e)

# build the Maya geometry-cache MEL command: a single .mcc file covering
# the full frame range, written to <geo_publish_path>/<namespace>
geo_export_cmd = 'doCreateGeometryCache 6 {{ "0", "{}", "{}", "OneFile", "0", "{}/{}", "1", "", "0", "export", "0", "1", "1", "0", "1", "mcc", "1" }} ;'.format(
    frame_start, frame_end, geo_publish_path, namespace)
try:
    # do it
    self.parent.log_debug("Executing command: " + geo_export_cmd)
    mel.eval(geo_export_cmd)
except Exception, e:
    raise TankError("Failed to export GeoCache: %s" % e)

# code will be the basename of path (017)
# register the publish:
progress_cb(75, "Registering the publish")
args = {
    "tk": self.parent.tank,
    "context": self.parent.context,
    "comment": comment,
    "path": publish_path,
    "name": publish_name,
    "version_number": publish_version,
    "thumbnail_path": thumbnail_path,
    "task": sg_task,
    "dependency_paths": [primary_publish_path],
    "published_file_type": tank_type,
# NOTE(review): fragment - this 'except' belongs to a try block (deadline
# submission) that starts before this chunk, and _ass_render below is cut
# off at the end of the chunk.
except Exception, e:
    raise TankError("Failed to submit arnold to deadline: %s" % e)

# Finally, register this publish with Shotgun
tank_type = 'CG Render'
name = item["name"]
# one publish per rendered output file, each linked back to the primary publish:
for outputfile in publishFiles:
    try:
        self._register_publish(outputfile, name, sg_task,
                               shotgunFields['version'], tank_type, comment,
                               thumbnail_path, [primary_publish_path])
    except Exception, e:
        raise TankError("Failed to register publish: %s" % e)

def _ass_render(self, item, output, work_template, primary_publish_path,
                sg_task, comment, thumbnail_path, progress_cb):
    # Gathers scene/context info for an Arnold .ass render submission.
    # (definition continues beyond this chunk)
    filePath = cmds.file(q=True, sn=True)
    name = os.path.basename(filePath)
    # frame range comes from the render globals, not the playback range:
    start = int(cmds.getAttr('defaultRenderGlobals.startFrame'))
    end = int(cmds.getAttr('defaultRenderGlobals.endFrame'))
    pluginArgs = ['']
    submitArgs = ['Comment=Shotgun Publish submit']
    shotgunContext = self.parent.context

    #getting fields for version
    shot_temp = self.parent.sgtk.templates["maya_shot_work"]
def __publish_obj(self, item, output, work_template, primary_publish_path,
                  sg_task, comment, thumbnail_path, progress_cb):
    """
    Export an OBJ geo file for the scene and publish it to Shotgun.

    :param item:                 The item to publish
    :param output:               The output definition to publish with
    :param work_template:        The work template for the current scene
    :param primary_publish_path: The path to the primary published file
    :param sg_task:              The Shotgun task we are publishing for
    :param comment:              The publish comment/description
    :param thumbnail_path:       The path to the publish thumbnail
    :param progress_cb:          A callback that can be used to report progress
    """
    # determine the publish info to use
    #
    progress_cb(10, "Determining publish details")

    # get the current scene path and extract fields from it
    # using the work template:
    scene_path = os.path.abspath(cmds.file(query=True, sn=True))
    fields = work_template.get_fields(scene_path)
    publish_version = fields["version"]
    tank_type = output["tank_type"]

    # create the publish path by applying the fields
    # with the publish template:
    publish_template = output["publish_template"]
    publish_path = publish_template.apply_fields(fields)

    # ensure the publish folder exists:
    publish_folder = os.path.dirname(publish_path)
    self.parent.ensure_folder_exists(publish_folder)

    # determine the publish name: 'name' field when present, otherwise the
    # file name of the publish path
    publish_name = fields.get("name")
    if not publish_name:
        publish_name = os.path.basename(publish_path)

    # Find additional info from the scene:
    #
    progress_cb(20, "Analysing scene")

    # build the export command.
    obj_export_cmd = "file -force -es -pr -typ \"OBJexport\""
    obj_export_cmd += " -options \"groups=1;ptgroups=1;materials=0;smoothing=1;normals=1\""
    obj_export_cmd += " \"%s\"" % (publish_path.replace("\\", "/"))

    # ...and execute it:
    progress_cb(30, "Exporting OBJ file")
    try:
        self.parent.log_debug("Executing command: %s" % obj_export_cmd)

        # make sure plugin is loaded
        if not cmds.pluginInfo('objExport', query=True, loaded=True):
            cmds.loadPlugin('objExport')

        # clear selection, select what's in the set.
        # export-selection only writes the current selection, so swap it
        # for the contents of 'publish_SET' for the duration of the export.
        sel = cmds.ls(sl=True)
        set_contents = cmds.sets('publish_SET', q=True)
        cmds.select(clear=True)
        for obj in set_contents:
            cmds.select(obj, add=True)

        # do the actual export
        mel.eval(obj_export_cmd)

        # then restore the selection
        cmds.select(clear=True)
        for obj in sel:
            cmds.select(obj, add=True)

    except Exception, e:
        raise TankError("Failed to export OBJ file: %s" % e)
def _arnold_render(self, item, output, work_template, primary_publish_path,
                   sg_task, comment, thumbnail_path, progress_cb):
    """
    Submit an Arnold export of the current scene plus a per-render-layer
    Arnold render of the exported .ass files to Deadline (via the external
    'cdu' submission helper - presumably a studio Deadline utility; confirm).

    :param item:                 The item to publish
    :param output:               The output definition to publish with
    :param work_template:        The work template for the current scene
    :param primary_publish_path: The path to the primary published file
    :param sg_task:              The Shotgun task we are publishing for
    :param comment:              The publish comment/description
    :param thumbnail_path:       The path to the publish thumbnail
    :param progress_cb:          A callback that can be used to report progress
    """
    filePath = cmds.file(q=True, sn=True)
    name = os.path.basename(filePath)
    # frame range comes from the render globals, not the playback range:
    start = int(cmds.getAttr('defaultRenderGlobals.startFrame'))
    end = int(cmds.getAttr('defaultRenderGlobals.endFrame'))
    pluginArgs = ['']
    submitArgs = ['Comment=Shotgun Publish submit']
    shotgunContext = self.parent.context

    #getting fields for version
    shot_temp = self.parent.sgtk.templates["maya_shot_work"]
    shotgunFields = shot_temp.get_fields(filePath)

    #getting output path
    area_temp = self.parent.sgtk.templates['maya_shot_render_area']
    outputPath = area_temp.apply_fields(shotgunFields).replace('\\', '/')

    #getting ass file path
    area_temp = self.parent.sgtk.templates['maya_ass_export']
    inputFilepath = area_temp.apply_fields(shotgunFields).replace(
        '\\', '/')

    #getting ass file path (NOTE(review): comment says 'ass file path' but
    # this is the maya publish scene path - confirm)
    area_temp = self.parent.sgtk.templates['maya_shot_publish']
    mayaFile = area_temp.apply_fields(shotgunFields).replace('\\', '/')

    #hardcoded replace of sequence string---
    inputFilepath = inputFilepath.replace('%05d', str(start).zfill(5))

    # build the list of .ass input files, one per renderable render layer:
    inputFiles = []
    for layer in cmds.ls(type='renderLayer'):
        #discarding referenced layers
        if ':' not in layer:
            #checking whether layer needs to be rendered
            if cmds.getAttr(layer + '.renderable') == 1:
                if layer == 'defaultRenderLayer':
                    layer = 'masterLayer'
                #hardcoded replace of name string---
                p = inputFilepath.replace('/data/', '/data/%s/' % layer)
                inputFiles.append(p)

    #getting output fields
    render_temp = self.parent.sgtk.templates['maya_shot_render']
    outputFiles = render_temp.apply_fields(shotgunFields)
    outputFields = render_temp.get_fields(outputFiles)

    #replacing name with file name
    outputFields['name'] = '.'.join(
        filePath.split('/')[-1].split('.')[0:-2])

    #generate outputFiles
    shotgunFiles = render_temp.apply_fields(outputFields)

    # build the per-layer output (Deadline '?' padding) and publish paths:
    outputFiles = []
    publishFiles = []
    for layer in cmds.ls(type='renderLayer'):
        #discarding referenced layers
        if ':' not in layer:
            #checking whether layer needs to be rendered
            if cmds.getAttr(layer + '.renderable') == 1:
                if layer == 'defaultRenderLayer':
                    layer = 'masterLayer'

                #clunky code to replace seq format with ?
                # (e.g. '%05d' becomes '?????' - one '?' per padding digit)
                cmd = ''
                maxCount = int(
                    shotgunFiles.split('%')[-1].split('.')[0].replace(
                        'd', ''))
                for count in xrange(0, maxCount):
                    cmd += '?'
                path = shotgunFiles.split('%')[0][0:-1]
                ext = shotgunFiles.split('%')[-1].split('.')[-1]
                outputFile = '.'.join([path, cmd, ext]).replace('\\', '/')

                #adding renderlayer to outputfiles
                filename = os.path.basename(outputFile)
                dirpath = os.path.dirname(outputFile)
                outputFiles.append(
                    os.path.join(dirpath, layer + '_' + filename))

                #adding renderlayer to shotgunfiles
                filename = os.path.basename(shotgunFiles)
                dirpath = os.path.dirname(shotgunFiles)
                publishFiles.append(
                    os.path.join(dirpath, layer + '_' + filename))

    #getting login for user and replacing with user in shotgunContext
    shotgunUser = sgtk.util.get_current_user(self.parent.sgtk)

    #creating the folders for rendering
    for outputfile in outputFiles:
        dirpath = os.path.dirname(outputfile)
        if not os.path.exists(dirpath):
            os.makedirs(dirpath)

    #submit to deadline: first the .ass export job...
    jobname = '.'.join(os.path.basename(mayaFile).split('.')[0:-1])
    exportId = cdu.submit('arnoldExport', jobname, start, end, mayaFile,
                          outputPath, [outputFiles[0]],
                          ['ProjectPath=%s' % outputPath], submitArgs,
                          shotgunContext='', shotgunFields='',
                          shotgunUser='', mayaGUI=True)

    # ...then one render job per layer, each depending on the export job:
    for f in inputFiles:
        count = inputFiles.index(f)

        #generate jobname
        basename = '.'.join(os.path.basename(f).split('.')[0:-2])
        layer = f.split('/')[-2]
        jobname = basename + '.' + layer

        #execute deadline submittal
        try:
            cdu.submit('arnold', jobname, start, end, f, outputPath,
                       [outputFiles[count]], pluginArgs,
                       submitArgs=['JobDependencies=%s' % exportId],
                       shotgunContext=shotgunContext,
                       shotgunFields=shotgunFields,
                       shotgunUser=shotgunUser,
                       mayaGUI=True)
        except Exception, e:
            raise TankError("Failed to submit arnold to deadline: %s" % e)
def __publish_mayacamera(self, item, output, work_template, primary_publish_path,
                         sg_task, comment, thumbnail_path, progress_cb):
    """
    Export a Maya file for the camera and publish it to Shotgun.

    :param item:                 The item to publish
    :param output:               The output definition to publish with
    :param work_template:        The work template for the current scene
    :param primary_publish_path: The path to the primary published file
    :param sg_task:              The Shotgun task we are publishing for
    :param comment:              The publish comment/description
    :param thumbnail_path:       The path to the publish thumbnail
    :param progress_cb:          A callback that can be used to report progress
    """
    # determine the publish info to use
    #
    progress_cb(10, "Determining publish details")

    # get the current scene path and extract fields from it
    # using the work template:
    scene_path = os.path.abspath(cmds.file(query=True, sn=True))
    fields = work_template.get_fields(scene_path)
    # NOTE(review): publish_version and tank_type are computed but not used
    # in the visible part of this method - confirm against the full hook.
    publish_version = fields["version"]
    tank_type = output["tank_type"]

    # extract entity from camera node name
    # handle full paths, trim off everything after the _
    # e.g. |pivot_GRP|master_CAM -> master
    fields["name"] = item["name"].split("|")[-1].split("_")[0]

    # create the publish path by applying the fields
    # with the publish template:
    fields["Step"] = "cam"  # first force step to be camera
    publish_template = output["publish_template"]
    publish_path = publish_template.apply_fields(fields)

    # ensure the publish folder exists:
    publish_folder = os.path.dirname(publish_path)
    self.parent.ensure_folder_exists(publish_folder)

    # determine the publish name: 'name' field when present, otherwise the
    # file name of the publish path
    publish_name = fields.get("name")
    if not publish_name:
        publish_name = os.path.basename(publish_path)

    progress_cb(50.0, "Exporting from scene")
    try:
        publish_folder = os.path.dirname(publish_path)
        self.parent.ensure_folder_exists(publish_folder)
        self.parent.log_debug("Exporting to %s..." % (publish_path))

        # stash the selection so we can restore it after the export
        sel = cmds.ls(sl=True)
        # clear it
        cmds.select(clear=True)
        # select just the specific camera we are processing
        cmds.select(item["name"], add=True)

        # do export selection once camera selected
        cmds.file(
            publish_path,
            type='mayaBinary',
            exportSelected=True,
            force=True,
        )

        # reset the selection to what it was prior
        cmds.select(clear=True)
        for obj in sel:
            cmds.select(obj, add=True)

    except Exception, e:
        raise TankError("Failed to export to %s - %s" % (publish_path, e))
def _publish_alembic_cache_for_item(self, item, output, work_template,
                                    primary_publish_path, sg_task, comment,
                                    thumbnail_path, progress_cb):
    """
    Export an Alembic cache for the specified item and publish it
    to Shotgun.

    :param item:                 The item to publish
    :param output:               The output definition to publish with
    :param work_template:        The work template for the current scene
    :param primary_publish_path: The path to the primary published file
    :param sg_task:              The Shotgun task we are publishing for
    :param comment:              The publish comment/description
    :param thumbnail_path:       The path to the publish thumbnail
    :param progress_cb:          A callback that can be used to report progress
    """
    #loading plugin
    cmds.loadPlugin('AbcExport.mll', quiet=True)

    group_name = item["name"].strip("|")
    publish_template = output["publish_template"]

    # get the current scene path and extract fields from it
    # using the work template:
    scene_path = os.path.abspath(cmds.file(query=True, sn=True))
    fields = work_template.get_fields(scene_path)
    publish_version = fields["version"]

    # update fields with the group name:
    fields["Asset"] = group_name

    # the published file type depends on the pipeline step.
    # NOTE(review): tank_type stays unbound for any other step - confirm
    # this hook only ever runs for Anim/Sim/FX steps.
    sg_step = fields["Step"]
    if sg_step == 'Anim':
        tank_type = 'Alembic Animation'
    elif sg_step == 'Sim':
        tank_type = 'Alembic Simulation'
    elif sg_step == 'FX':
        tank_type = 'Alembic FX'

    # create the publish path by applying the fields
    # with the publish template:
    publish_path = publish_template.apply_fields(fields)

    #export with assets attribute
    attrstring = '-a asset -a sim -a abcStep'

    # build and execute the Alembic export command for this item:
    frame_start = int(cmds.playbackOptions(q=True, min=True))
    frame_end = int(cmds.playbackOptions(q=True, max=True))
    sampling = 1.0
    nodesString = ''
    rootSet = set([])

    #The extras logic should eventually be removed. It's kept only for backwards compatibility---
    if group_name != 'extras':
        # node loop - collect the unique root of every node in the item:
        for node in item["other_params"]:
            root = node.root()
            rootSet.add(root)
        for root in rootSet:
            nodesString += '-root ' + root + ' '
    elif group_name == 'extras':
        for node in item["other_params"]:
            nodesString += '-root ' + node + ' '
        fields["Asset"] = 'extras'
        publish_path = publish_template.apply_fields(fields)
        # NOTE(review): relies on the leaked loop variable 'node' - the
        # abcStep of the *last* node wins; confirm this is intended.
        sampling = node.abcStep.get()

    print('exporting: ' + nodesString)
    try:
        #self.parent.log_debug("Executing command: %s" % abc_export_cmd)
        #mel.eval(abc_export_cmd)
        print 'exporting ABC'
        # note: nodesString ends with a trailing space, so '%s-file' is
        # correctly separated in the resulting job string
        pm.AbcExport(
            j='-step %s -frameRange %s %s %s -stripNamespaces -uvWrite -worldSpace -writeVisibility %s-file %s'
            % (sampling, frame_start, frame_end, attrstring, nodesString,
               publish_path))
    except Exception, e:
        raise TankError("Failed to export Alembic Cache: %s" % e)
def execute(self, tasks, work_template, comment, thumbnail_path, sg_task,
            primary_task, primary_publish_path, progress_cb, **kwargs):
    """
    Main hook entry point

    :param tasks:                   List of secondary tasks to be published.
                                    Each task is a dictionary containing the
                                    following keys:
                                    {
                                        item:   Dictionary
                                                This is the item returned by
                                                the scan hook
                                                {
                                                    name:           String
                                                    description:    String
                                                    type:           String
                                                    other_params:   Dictionary
                                                }
                                        output: Dictionary
                                                This is the output as defined
                                                in the configuration - the
                                                primary output will always be
                                                named 'primary'
                                                {
                                                    name:             String
                                                    publish_template: template
                                                    tank_type:        String
                                                }
                                    }
    :param work_template:           template
                                    This is the template defined in the config
                                    that represents the current work file
    :param comment:                 String
                                    The comment provided for the publish
    :param thumbnail:               Path string
                                    The default thumbnail provided for the
                                    publish
    :param sg_task:                 Dictionary (shotgun entity description)
                                    The shotgun task to use for the publish
    :param primary_publish_path:    Path string
                                    This is the path of the primary published
                                    file as returned by the primary publish
                                    hook
    :param progress_cb:             Function
                                    A progress callback to log progress during
                                    pre-publish.  Call:
                                        progress_cb(percentage, msg)
                                    to report progress to the UI
    :param primary_task:            The primary task that was published by
                                    the primary publish hook.  Passed in here
                                    for reference.  This is a dictionary in
                                    the same format as the secondary tasks
                                    above.
    :returns:                       A list of any tasks that had problems that
                                    need to be reported in the UI.  Each item
                                    in the list should be a dictionary
                                    containing the following keys:
                                    {
                                        task:   Dictionary
                                                This is the task that was
                                                passed into the hook and
                                                should not be modified
                                                {
                                                    item:...
                                                    output:...
                                                }
                                        errors: List
                                                A list of error messages
                                                (strings) to report
                                    }
    """
    # NOTE(review): the tail of this method (appending task errors to
    # 'results' and returning it) is not visible in this chunk.
    results = []

    # it's important that tasks for render output are processed
    # before tasks for quicktime output, so let's group the
    # task list by output.  This can be controlled through the
    # configuration but we shouldn't rely on that being set up
    # correctly!
    output_order = ["render", "quicktime", 'AvidQuicktime']
    tasks_by_output = {}
    for task in tasks:
        output_name = task["output"]["name"]
        tasks_by_output.setdefault(output_name, list()).append(task)
        if output_name not in output_order:
            output_order.append(output_name)

    # make sure we have any apps required by the publish process:
    write_node_app = None
    if tasks_by_output.get("render") or tasks_by_output.get(
            "quicktime") or tasks_by_output.get("AvidQuicktime"):
        # we will need the write node app if we have any render outputs to validate
        write_node_app = self.parent.engine.apps.get("tk-nuke-writenode")
        if not write_node_app:
            raise TankError(
                "Unable to publish Shotgun Write Nodes without the tk-nuke-writenode app!"
            )

    review_submission_app = None
    if tasks_by_output.get("quicktime"):
        # If we have the tk-multi-reviewsubmission app we can create versions
        review_submission_app = self.parent.engine.apps.get(
            "tk-multi-reviewsubmission")
        if not review_submission_app:
            raise TankError(
                "Unable to publish Review Versions without the tk-multi-reviewsubmission app!"
            )

    # Keep of track of what has been published in shotgun
    # this is needed as input into the review creation code...
    render_publishes = {}

    # process outputs in order:
    for output_name in output_order:

        # process each task for this output:
        for task in tasks_by_output.get(output_name, []):

            # keep track of our errors for this task
            errors = []

            # report progress:
            progress_cb(0.0, "Publishing", task)

            if output_name == "render":
                # Publish the rendered output for a Shotgun Write Node

                # each publish task is connected to a nuke write node
                # this value was populated via the scan scene hook
                write_node = task["item"].get("other_params",
                                              dict()).get("node")
                if not write_node:
                    raise TankError(
                        "Could not determine nuke write node for item '%s'!"
                        % str(task))

                # publish write-node rendered sequence
                try:
                    (sg_publish,
                     thumbnail_path) = self._publish_write_node_render(
                         task, write_node, write_node_app,
                         primary_publish_path, sg_task, comment, progress_cb)

                    # keep track of our publish data so that we can pick it up later in review
                    render_publishes[write_node.name()] = (sg_publish,
                                                           thumbnail_path)
                except Exception, e:
                    errors.append("Publish failed - %s" % e)

            elif output_name == "quicktime":
                # Publish the reviewable quicktime movie for a Shotgun Write Node

                # each publish task is connected to a nuke write node
                # this value was populated via the scan scene hook
                write_node = task["item"].get("other_params",
                                              dict()).get("node")
                if not write_node:
                    raise TankError(
                        "Could not determine nuke write node for item '%s'!"
                        % str(task))

                # Submit published sequence to Screening Room
                try:
                    # pick up sg data from the render dict we are maintianing
                    # note: we assume that the rendering tasks always happen
                    # before the review tasks inside the publish...
                    (sg_publish,
                     thumbnail_path) = render_publishes[write_node.name()]

                    self._send_to_screening_room(write_node, write_node_app,
                                                 review_submission_app,
                                                 sg_publish, sg_task, comment,
                                                 thumbnail_path, progress_cb)
                except Exception, e:
                    errors.append("Submit to Screening Room failed - %s" % e)
def _do_hiero_publish(self, task, work_template, comment, thumbnail_path,
                      sg_task, progress_cb, user_data):
    """
    Publish the currently selected hiero project.

    :param task:            The primary task to publish
    :param work_template:   The primary work template to use
    :param comment:         The publish description/comment
    :param thumbnail_path:  The path to the thumbnail to associate with the published file
    :param sg_task:         The Shotgun task that this publish should be associated with
    :param progress_cb:     A callback to use when reporting any progress
                            to the UI
    :param user_data:       A dictionary containing any data shared by other hooks run prior
                            to this hook. Additional data may be added to this dictionary that
                            will then be accessible from user_data in any hooks run after this
                            one.

    :returns:               The path to the file that has been published
    """
    import hiero.core

    # first find which the current project is. Hiero is a multi project
    # environment so we can ask the engine which project was clicked in order
    # to launch this publish.
    selection = self.parent.engine.get_menu_selection()

    # these values should in theory already be validated, but just in case...
    if len(selection) != 1:
        raise TankError("Please select a single Project!")
    if not isinstance(selection[0], hiero.core.Bin):
        raise TankError("Please select a Hiero Project!")
    project = selection[0].project()
    if project is None:
        # apparently bins can be without projects (child bins I think)
        raise TankError("Please select a Hiero Project!")

    progress_cb(0.0, "Finding scene dependencies", task)
    # NOTE(review): the result is currently unused here — presumably kept for
    # parity with the other publish routines; confirm before removing.
    dependencies = self._hiero_find_additional_scene_dependencies()

    # get scene path
    scene_path = os.path.abspath(project.path().replace("/", os.path.sep))

    if not work_template.validate(scene_path):
        raise TankError(
            "File '%s' is not a valid work path, unable to publish!"
            % scene_path)

    # use templates to convert to publish path:
    output = task["output"]
    fields = work_template.get_fields(scene_path)
    fields["TankType"] = output["tank_type"]
    publish_template = output["publish_template"]
    publish_path = publish_template.apply_fields(fields)

    if os.path.exists(publish_path):
        raise TankError("The published file named '%s' already exists!"
                        % publish_path)

    # save the scene:
    progress_cb(10.0, "Saving the scene")
    self.parent.log_debug("Saving the scene...")
    project.save()

    # copy the file:
    progress_cb(50.0, "Copying the file")
    try:
        publish_folder = os.path.dirname(publish_path)
        self.parent.ensure_folder_exists(publish_folder)
        self.parent.log_debug("Copying %s --> %s..."
                              % (scene_path, publish_path))
        self.parent.copy_file(scene_path, publish_path, task)
    except Exception as e:
        raise TankError("Failed to copy file from %s to %s - %s"
                        % (scene_path, publish_path, e))

    # Bug fix: the published path must be handed back to the caller — the
    # primary publish driver raises "Primary publish didn't return a path!"
    # when a hook returns None.
    return publish_path
def execute(self, **kwargs):
    """
    Main hook entry point
    :returns:       A list of any items that were found to be published.
                    Each item in the list should be a dictionary containing
                    the following keys:
                    {
                        type:   String
                                This should match a scene_item_type defined in
                                one of the outputs in the configuration and is
                                used to determine the outputs that should be
                                published for the item

                        name:   String
                                Name to use for the item in the UI

                        description:    String
                                        Description of the item to use in the UI

                        selected:       Bool
                                        Initial selected state of item in the UI.
                                        Items are selected by default.

                        required:       Bool
                                        Required state of item in the UI.  If True then
                                        item will not be deselectable.  Items are not
                                        required by default.

                        other_params:   Dictionary
                                        Optional dictionary that will be passed to the
                                        pre-publish and publish hooks
                    }
    """
    ctx = self.parent.context
    items = []

    # get the main scene:
    scene_name = cmds.file(query=True, sn=True)
    if not scene_name:
        raise TankError("Please Save your file before Publishing")

    scene_path = os.path.abspath(scene_name)
    name = os.path.basename(scene_path)

    # create the primary item - this will match the primary output 'scene_item_type':
    items.append({"type": "work_file", "name": name})

    # create alembic items - one per tagged asset, plus a catch-all 'extras'
    # bucket for nodes carrying an 'abcStep' attribute (skipped on Light step)
    assets = {}
    nodes = []
    if ctx.step['name'] != 'Light':
        for node in pm.ls(type='transform'):
            py_node = pm.PyNode(node)
            has_asset = py_node.hasAttr('asset')
            has_abc_step = py_node.hasAttr('abcStep')
            if has_asset:
                assets[cmds.getAttr(node + '.asset')] = []
            if has_abc_step:
                assets['extras'] = []
            # Bug fix: collect each tagged node only once - the original
            # appended a node twice when it carried both attributes, which
            # produced duplicate entries in the asset lists below.
            if has_asset or has_abc_step:
                nodes.append(node)

        for node in nodes:
            py_node = pm.PyNode(node)
            # Bug fix: only read the 'asset' attribute when it exists -
            # nodes tagged only with 'abcStep' made the original
            # unconditional getAttr raise.
            if py_node.hasAttr('asset'):
                assets[cmds.getAttr(node + '.asset')].append(node)
            if py_node.hasAttr('abcStep'):
                assets['extras'].append(node)

        for asset in assets:
            items.append({
                "type": "asset",
                "name": asset,
                "other_params": assets[asset]
            })

    # create Preview items - one per camera, skipping the default
    # viewport cameras
    cameras = pm.ls(type='camera')
    for node in cameras:
        if node != 'frontShape' and node != 'sideShape' and node != 'topShape' and node != 'perspShape':
            items.append({
                "type": "shotcam",
                "name": node.getParent().name().split(':')[-1],
                "other_params": [node.getParent()]
            })

    # adding Render item
    if ctx.step['name'] == 'Light':
        items.append({"type": "render", "name": 'render'})

    return items
def _do_nuke_publish(self, task, work_template, comment, thumbnail_path,
                     sg_task, progress_cb, user_data):
    """
    Publish the main Nuke script

    :param task:            The primary task to publish
    :param work_template:   The primary work template to use
    :param comment:         The publish description/comment
    :param thumbnail_path:  The path to the thumbnail to associate with the published file
    :param sg_task:         The Shotgun task that this publish should be associated with
    :param progress_cb:     A callback to use when reporting any progress
                            to the UI
    :param user_data:       A dictionary containing any data shared by other hooks run prior
                            to this hook. Additional data may be added to this dictionary that
                            will then be accessible from user_data in any hooks run after this
                            one.

    :returns:               The path to the file that has been published
    """
    # If we're in Nuke Studio or Hiero, run those publish routines.
    engine = self.parent.engine
    if hasattr(engine, "studio_enabled") and engine.studio_enabled:
        return self._do_nukestudio_publish(
            task,
            work_template,
            comment,
            thumbnail_path,
            sg_task,
            progress_cb,
            user_data,
        )
    elif hasattr(engine, "hiero_enabled") and engine.hiero_enabled:
        return self._do_hiero_publish(
            task,
            work_template,
            comment,
            thumbnail_path,
            sg_task,
            progress_cb,
            user_data,
        )

    import nuke

    progress_cb(0.0, "Finding dependencies", task)
    # NOTE(review): result currently unused in this routine; confirm before removing.
    dependencies = self._nuke_find_script_dependencies()

    # get scene path - an unsaved Nuke script reports its name as "Root",
    # so treat that as "no path" (it will then fail template validation)
    script_path = nuke.root().name().replace("/", os.path.sep)
    if script_path == "Root":
        script_path = ""
    script_path = os.path.abspath(script_path)

    if not work_template.validate(script_path):
        raise TankError(
            "File '%s' is not a valid work path, unable to publish!"
            % script_path)

    # use templates to convert to publish path:
    output = task["output"]
    fields = work_template.get_fields(script_path)
    fields["TankType"] = output["tank_type"]
    publish_template = output["publish_template"]
    publish_path = publish_template.apply_fields(fields)

    if os.path.exists(publish_path):
        raise TankError("The published file named '%s' already exists!"
                        % publish_path)

    # save the scene:
    progress_cb(25.0, "Saving the script")
    self.parent.log_debug("Saving the Script...")
    nuke.scriptSave()

    # copy the file:
    progress_cb(50.0, "Copying the file")
    try:
        publish_folder = os.path.dirname(publish_path)
        self.parent.ensure_folder_exists(publish_folder)
        self.parent.log_debug("Copying %s --> %s..."
                              % (script_path, publish_path))
        self.parent.copy_file(script_path, publish_path, task)
    except Exception as e:
        raise TankError("Failed to copy file from %s to %s - %s"
                        % (script_path, publish_path, e))

    # Bug fix: hand the published path back to the caller - the primary
    # publish driver raises "Primary publish didn't return a path!" when a
    # hook returns None.
    return publish_path
def _do_hiero_publish(self, task, work_template, comment, thumbnail_path,
                      sg_task, progress_cb):
    """
    Publish the currently selected hiero project.

    :param task:            The primary task to publish
    :param work_template:   The primary work template to use
    :param comment:         The publish description/comment
    :param thumbnail_path:  The path to the thumbnail to associate with the published file
    :param sg_task:         The Shotgun task that this publish should be associated with
    :param progress_cb:     A callback to use when reporting any progress to the UI

    :returns:               The path to the file that has been published
    """
    import hiero.core

    # first find which the current project is. Hiero is a multi project
    # environment so we can ask the engine which project was clicked in order
    # to launch this publish.
    selection = self.parent.engine.get_menu_selection()

    # these values should in theory already be validated, but just in case...
    if len(selection) != 1:
        raise TankError("Please select a single Project!")
    if not isinstance(selection[0], hiero.core.Bin):
        raise TankError("Please select a Hiero Project!")
    project = selection[0].project()
    if project is None:
        # apparently bins can be without projects (child bins I think)
        raise TankError("Please select a Hiero Project!")

    progress_cb(0.0, "Finding scene dependencies", task)
    # NOTE(review): result currently unused in this routine; confirm before removing.
    dependencies = self._hiero_find_additional_scene_dependencies()

    # get scene path
    scene_path = os.path.abspath(project.path().replace("/", os.path.sep))

    if not work_template.validate(scene_path):
        raise TankError(
            "File '%s' is not a valid work path, unable to publish!"
            % scene_path)

    # use templates to convert to publish path:
    output = task["output"]
    fields = work_template.get_fields(scene_path)
    fields["TankType"] = output["tank_type"]
    publish_template = output["publish_template"]
    publish_path = publish_template.apply_fields(fields)

    if os.path.exists(publish_path):
        raise TankError("The published file named '%s' already exists!"
                        % publish_path)

    # save the scene:
    progress_cb(10.0, "Saving the scene")
    self.parent.log_debug("Saving the scene...")
    project.save()

    # copy the file:
    progress_cb(50.0, "Copying the file")
    try:
        publish_folder = os.path.dirname(publish_path)
        self.parent.ensure_folder_exists(publish_folder)
        self.parent.log_debug("Copying %s --> %s..."
                              % (scene_path, publish_path))
        self.parent.copy_file(scene_path, publish_path, task)
    except Exception as e:
        raise TankError("Failed to copy file from %s to %s - %s"
                        % (scene_path, publish_path, e))

    # Bug fix: hand the published path back to the caller - the primary
    # publish driver raises "Primary publish didn't return a path!" when a
    # hook returns None.
    return publish_path
def _do_softimage_publish(self, task, work_template, comment, thumbnail_path,
                          sg_task, progress_cb, user_data):
    """
    Publish the main Softimage scene

    :param task:            The primary task to publish
    :param work_template:   The primary work template to use
    :param comment:         The publish description/comment
    :param thumbnail_path:  The path to the thumbnail to associate with the published file
    :param sg_task:         The Shotgun task that this publish should be associated with
    :param progress_cb:     A callback to use when reporting any progress
                            to the UI
    :param user_data:       A dictionary containing any data shared by other hooks run prior
                            to this hook. Additional data may be added to this dictionary that
                            will then be accessible from user_data in any hooks run after this
                            one.

    :returns:               The path to the file that has been published
    """
    # Windows-only COM bridge to the running Softimage instance;
    # imported locally so this module still loads on other platforms.
    import win32com
    from win32com.client import Dispatch, constants
    from pywintypes import com_error
    Application = Dispatch("XSI.Application").Application

    progress_cb(0.0, "Finding scene dependencies", task)
    # NOTE(review): result currently unused in this routine; confirm before removing.
    dependencies = self._softimage_find_additional_scene_dependencies()

    # get scene path
    scene_path = os.path.abspath(
        Application.ActiveProject.ActiveScene.filename.value)

    if not work_template.validate(scene_path):
        raise TankError(
            "File '%s' is not a valid work path, unable to publish!"
            % scene_path)

    # use templates to convert to publish path:
    output = task["output"]
    fields = work_template.get_fields(scene_path)
    fields["TankType"] = output["tank_type"]
    publish_template = output["publish_template"]
    publish_path = publish_template.apply_fields(fields)

    if os.path.exists(publish_path):
        raise TankError("The published file named '%s' already exists!"
                        % publish_path)

    # save the scene:
    progress_cb(10.0, "Saving the scene")
    self.parent.log_debug("Saving the scene...")
    Application.SaveScene()

    # copy the file:
    progress_cb(50.0, "Copying the file")
    try:
        publish_folder = os.path.dirname(publish_path)
        self.parent.ensure_folder_exists(publish_folder)
        self.parent.log_debug("Copying %s --> %s..."
                              % (scene_path, publish_path))
        self.parent.copy_file(scene_path, publish_path, task)
    except Exception as e:
        raise TankError("Failed to copy file from %s to %s - %s"
                        % (scene_path, publish_path, e))

    # Bug fix: hand the published path back to the caller - the primary
    # publish driver raises "Primary publish didn't return a path!" when a
    # hook returns None.
    return publish_path
def execute(self, **kwargs):
    """
    Main hook entry point

    NOTE: unlike a typical scan-scene hook, this one also MUTATES the scene:
    it runs the studio cleanup pass, deletes unused shading nodes, repaths
    file nodes and repairs ramp/remapValue plugs before returning the items.

    :returns:       A list of any items that were found to be published.
                    Each item in the list should be a dictionary containing
                    the following keys:
                    {
                        type:   String
                                This should match a scene_item_type defined in
                                one of the outputs in the configuration and is
                                used to determine the outputs that should be
                                published for the item

                        name:   String
                                Name to use for the item in the UI

                        description:    String
                                        Description of the item to use in the UI

                        selected:       Bool
                                        Initial selected state of item in the UI.
                                        Items are selected by default.

                        required:       Bool
                                        Required state of item in the UI.  If True then
                                        item will not be deselectable.  Items are not
                                        required by default.

                        other_params:   Dictionary
                                        Optional dictionary that will be passed to the
                                        pre-publish and publish hooks
                    }
    """
    items = []

    # get the main scene:
    scene_name = cmds.file(query=True, sn= True)
    if not scene_name:
        raise TankError("Please Save your file before Publishing")

    scene_path = os.path.abspath(scene_name)
    name = os.path.basename(scene_path)

    # create the primary item - this will match the primary output 'scene_item_type':
    items.append({"type": "work_file", "name": name})

    ## DO MAIN CHECKING NOW
    #############################
    ## HARD FAILS
    ## Duplicate name check
    ## geo_hrc - all geometry must live under a 'geo_hrc' group
    if not cmds.objExists('geo_hrc'):
        raise TankError("Please Group all your geo under a geo_hrc group under the root node.")
    # rig_hrc
    # UNCOMMENT FOR MDL STEP - the MDL variant of this hook must reject rigs
    if cleanup.rigGroupCheck():
        raise TankError('Rig group found!! Please use the RIG menus to publish rigs...')
    # UNCOMMENT FOR RIG STEP
    # if not cleanup.rigGroupCheck():
    #     raise TankError('No rig group found!! Please make sure your animation controls are under rig_hrc.')
    ## Now check it's the right KIND of asset eg CHAR or PROP
    #cleanup.assetCheckAndTag(type = 'LIB', customTag = 'staticLIB')

    #############################
    ## SECONDARIES FOR PUBLISHING
    ## WE NEED TO FIND THE MAIN GROUP THAT HAS MESHES IN IT NOW AND PUSH THIS
    ## INTO THE ITEMS LIST FOR SECONDARY PUBLISHING
    ## Look for root level groups that have meshes as children:
    for grp in cmds.ls(assemblies = True, long = True):
        if cmds.ls(grp, dag=True, type="mesh"):
            ### UNCOMMENT FOR PROP CHAR LND ASSETS
            # include this group as a 'mesh_group' type
            # (only groups following the '*_hrc' naming, excluding surfacing 'SRF' groups)
            if '_hrc' in grp and 'SRF' not in grp:
                items.append({"type":"mesh_group", "name":grp})
            ### UNCOMMENT FOR BLD MLD STEP
            # if cleanup.BLDTransformCheck(grp): ## Check for BLD step only to make sure the transforms are not frozen on the BLD grps
            #     items.append({"type":"mesh_group", "name":grp})
            #     cleanup.assetCheckAndTag(type = 'BLD', customTag = 'staticBLD')

    if not cleanup.duplicateNameCheck():
        raise TankError("Duplicate names found please fix before publishing.\nCheck the outliner for the duplicate name set.")
    ## Incorrect Suffix check
    checkSceneGeo = cleanup._geoSuffixCheck(items)
    if not checkSceneGeo:
        raise TankError("Incorrect Suffixes found! Fix suffixes before publishing.\nCheck the outliner for the duplicate name set.")
    #############################

    if shd.sceneCheck():
        raise TankError("You have errors in your scene, please fix.. check the script editor for details.")

    # NOTE(review): from here on the scene is modified in place and the order
    # of the calls below matters (cleanup before shader repathing before ramp
    # fixes) — confirm with the pipeline team before reordering.
    cleanup.cleanUp(items = items, checkShapes = True, history = False, pivots = False, freezeXFRM = False, smoothLvl = False, tagSmoothed = False, checkVerts = False, renderflags = False, deleteIntermediate = False, turnOffOpposite = False, instanceCheck = False, shaders = False, removeNS = False)

    # delete unused shading nodes (Maya's Hypershade "Delete Unused Nodes")
    mel.eval("MLdeleteUnused();")
    ## Fix pathing from work to publish for export
    shd.repathFileNodesForPublish()
    ## Now do the smartConn
    shd.smartConn()
    ## Fix remap and ramps color entry plugs and any incorrect ordering
    ## Leads to bad plugs being inserted when the XML recreates all the values.
    ## Querying also creates which makes black colour entry plugs.
    shd.fixRamps(cmds.ls(type = 'remapValue'))
    shd.fixRamps(cmds.ls(type = 'ramp'))

    ## NOW MOVE ON TO PUBLISHING
    return items
def execute_command(self, cmd_key, args):
    """
    Executes a given command.

    Looks up the command registered under ``cmd_key`` in ``self.commands``,
    validates that the supplied argument list matches the callback's
    signature, then runs the callback - either directly (when Qt is not
    available) or routed through a Qt application event loop.

    :param cmd_key: Key identifying the command in the ``self.commands`` registry
    :param args:    List of positional arguments to pass to the command callback
    :returns:       The callback's return value when run without Qt; otherwise
                    None (the callback runs inside the Qt event loop)
    :raises TankError: If the wrong number of arguments was supplied
    """
    cb = self.commands[cmd_key]["callback"]

    # make sure the number of parameters to the command are correct
    # NOTE(review): inspect.getargspec was removed in Python 3.11 - fine for
    # the Python 2 runtime this file targets, but flagging for future ports.
    cb_arg_spec = inspect.getargspec(cb)
    cb_arg_list = cb_arg_spec[0]   # positional parameter names
    cb_var_args = cb_arg_spec[1]   # name of the *args parameter, or None
    if hasattr(cb, "__self__"):
        # first argument to cb will be class instance:
        cb_arg_list = cb_arg_list[1:]

    # ensure the correct/minimum number of arguments have been passed:
    # with a *args parameter any surplus is accepted; otherwise the count
    # must match exactly.
    have_expected_args = False
    if cb_var_args:
        have_expected_args = (len(args) >= len(cb_arg_list))
    else:
        have_expected_args = (len(args) == len(cb_arg_list))
    if not have_expected_args:
        expected_args = list(cb_arg_list)
        if cb_var_args:
            expected_args.append("*%s" % cb_var_args)
        raise TankError("Cannot run command! Expected command arguments (%s)" % ", ".join(expected_args))

    if not self._has_qt:
        # QT not available - just run the command straight
        return cb(*args)
    else:
        from tank.platform.qt import QtCore, QtGui

        # we got QT capabilities. Start a QT app and fire the command into the app
        tk_shell = self.import_module("tk_shell")
        t = tk_shell.Task(self, cb, args)

        # start up our QApp now, if none is already running
        qt_application = None
        if not QtGui.qApp:
            QtGui.QApplication.setLibraryPaths([])
            qt_application = QtGui.QApplication([])
            qt_application.setWindowIcon(QtGui.QIcon(self.icon_256))
            self._initialize_dark_look_and_feel()

        # if we didn't start the QApplication here, leave the responsibility
        # to run the exec loop and quit to the initial creator of the QApplication
        if qt_application:
            # when the QApp starts, initialize our task code
            QtCore.QTimer.singleShot(0, t.run_command )
            # and ask the main app to exit when the task emits its finished signal
            t.finished.connect(qt_application.quit)

            # start the application loop. This will block the process until the task
            # has completed - this is either triggered by a main window closing or
            # by the finished signal being called from the task class above.
            qt_application.exec_()
        else:
            # we can run the command now, as the QApp is already started
            t.run_command()
def _do_maya_publish(self, task, work_template, comment, thumbnail_path, sg_task, progress_cb): """ Publish the main Maya scene """ import maya.cmds as cmds progress_cb(0.0, "Finding scene dependencies", task) dependencies = self._maya_find_additional_scene_dependencies() ## Get scene path scene_path = os.path.abspath(cmds.file(query=True, sn= True)) ## Test if it's a valid scene path if not work_template.validate(scene_path): raise TankError("File '%s' is not a valid work path, unable to publish!" % scene_path) ## Use templates to convert to publish path: output = task["output"] fields = work_template.get_fields(scene_path) fields["TankType"] = output["tank_type"] ## Now update the name to be the actual assetName from shotgun and remove the _ for saving fields['name'] = fields['Asset'].replace('_', '') publish_template = output["publish_template"] publish_path = publish_template.apply_fields(fields) if os.path.exists(publish_path): ## If it already exists version up one. ## We should never fail a publish just because a published asset already exists cmds.warning('Found existing publish_path: %s' % publish_path) cmds.warning('Adjusting publish_path now...') path = '\\'.join(publish_path.split('\\')[0:-1]) getfiles = os.listdir(path) if 'Keyboard' in getfiles: getfiles.remove('Keyboard') ## legacy check remove any ma files from the list as we're now publishing only to mb! for each in getfiles: if not each.endswith('mb'): getfiles.remove(each) ## Now process the rest of the list.. ## Get the max of the list highestVersFile = max(getfiles).split('.')[1].split('v')[-1] ## Update the fields with a new version number fields["version"] = int(highestVersFile) + 1 ## Apply the fields to the templates paths.. 
publish_path = publish_template.apply_fields(fields) ## Output the new publish path to the scripteditor cmds.warning('NewPublishPath: %s' % publish_path) ## PROCESS FOR SAVE progress_cb(10.0, "Saving the current working scene") self.parent.log_debug("Saving the current working scene...") publish_name = self._get_publish_name(publish_path, publish_template, fields) if fields['version'] < 10: padding = '00' elif fields['version'] < 100: padding = '0' else: padding = '' ## Save the current working file just in case cmds.file(save=True, force= True) ## Now rename and save the working file with the new version number for the publish ## Change the file type to mb for publish progress_cb(50.0, "Publishing the file to publish area") try: ## Now rename the file to the correct name and version number... cmds.file(rename = '%s.v%s%s' % (publish_name, padding, fields['version'])) ## Check for blendshapes group and delete it if cmds.objExists('blendshapes_hrc'): cmds.delete('blendshapes_hrc') ## Now save the file cmds.file(save=True, force=True, type = 'mayaBinary') ## Now put the published file into the publish folder publish_folder = os.path.dirname(publish_path) ## Make sure the folder exists self.parent.ensure_folder_exists(publish_folder) ## Find current scene path and rename the saved file using os.rename to move it into the publish folder getCurrentScenePath = os.path.abspath(cmds.file(query=True, sn= True)) os.rename(getCurrentScenePath, publish_path) self.parent.log_debug("Publishing %s --> %s..." % (getCurrentScenePath, publish_path)) progress_cb(65.0, "Moved the publish") except Exception, e: raise TankError("Failed to copy file: \n%s \nto\n%s\nError: %s" % (getCurrentScenePath, publish_path, e))
temp_file, thumbnail_path = tempfile.mkstemp(suffix=".png", prefix="tanktmp") if temp_file: os.close(temp_file) thumbnail.save(thumbnail_path) # do the publish publish_errors = [] do_post_publish = False try: # do primary publish: primary_path = self._do_primary_publish( primary_task, sg_task, thumbnail_path, comment, progress.report) if not primary_path: raise TankError("Primary publish didn't return a path!") do_post_publish = True # do secondary publishes: self._do_secondary_publish(secondary_tasks, primary_task, primary_path, sg_task, thumbnail_path, comment, progress.report) except TankError, e: self._app.log_exception("Publish Failed") publish_errors.append("%s" % e) except Exception, e: self._app.log_exception("Publish Failed") publish_errors.append("%s" % e) finally: