def create_change(p4, description):
    """
    Helper method to create a new Perforce changelist.

    :param p4: An open, connected P4 instance.
    :param description: Description text for the new changelist.
    :returns: The new change id as a string, or None if the change could
              not be created.
    :raises: TankError if saving the change fails or the returned change id
             cannot be parsed.
    """
    new_change = None
    try:
        # fetch a new change, update the description, and save it:
        change_spec = p4.fetch_change()
        change_spec._description = str(description)
        # have to clear the file list as otherwise it would contain everything
        # in the default changelist!
        change_spec._files = []
        p4_res = p4.save_change(change_spec)

        if p4_res:
            try:
                # p4_res should be like: ["Change 25 created."]
                new_change_id = int(p4_res[0].split()[1])
                new_change = str(new_change_id)
            except ValueError:
                raise TankError("Perforce: Failed to extract new change id from '%s'" % p4_res)
    # 'as' form is valid on python 2.6+ and python 3 (the old 'except X, e'
    # form is python-2-only syntax):
    except P4Exception as e:
        raise TankError("Perforce: %s" % (p4.errors[0] if p4.errors else e))

    # return the id of the newly created change (the original chunk computed
    # this without using it - presumably truncated; confirm against full source)
    return new_change
def resolve_multiple_files(self, published_file_type, published_files):
    """
    Decide which published file to return, or raise a TankError.

    Return the first published file matching one of the valid_extensions,
    otherwise raise a TankError.

    :param str published_file_type: PublishedFile or TankPublishedFile.
    :param list published_files: The published files.
    :returns: The first valid published file entity dict with the required
        fields.
    :raises: TankError
    """
    valid_extensions = self.parent.get_setting("valid_extensions")
    if not valid_extensions:
        raise TankError("Missing required value for setting 'valid_extensions'.")

    # re-query the publishes so that we have all the required fields:
    ids = [publish["id"] for publish in published_files]
    published_files = self.parent.shotgun.find(
        published_file_type,
        [["id", "in", ids]],
        self.PUBLISHED_FILE_FIELDS,
    )

    # extensions are checked in priority order - every publish is tested
    # against the first extension before moving on to the next:
    for extension in valid_extensions:
        suffix = ".%s" % extension
        for publish in published_files:
            try:
                # call base Hook implementation method.
                resolved_path = self.get_publish_path(publish)
            except (PublishPathNotDefinedError, PublishPathNotSupported):
                # if the path is invalid, just continue to the next
                # published file.
                continue
            if resolved_path and resolved_path.endswith(suffix):
                return publish

    raise TankError(
        "Could not find a published file matching valid extensions %s. "
        "Published files: %s" % (valid_extensions, published_files))
def _prepare_houdini_launch(context):
    """
    Houdini specific pre-launch environment setup.

    Locates the tk-houdini engine for the given context and runs its
    bootstrap script so the engine can initialize itself inside Houdini.

    :param context: The context that the application is being launched in
    :raises: TankError if the engine path cannot be found or the bootstrap
             script fails.
    """
    # Retrieve the TK Application instance from the current bundle
    tk_app = sgtk.platform.current_bundle()

    engine_path = sgtk.platform.get_engine_path("tk-houdini", tk_app.sgtk, context)
    if engine_path is None:
        raise TankError("Path to houdini engine (tk-houdini) could not be found.")

    # let the houdini engine take care of initializing itself
    sys.path.append(os.path.join(engine_path, "python"))
    try:
        import tk_houdini
        tk_houdini.bootstrap.bootstrap(tk_app.sgtk, context)
    # catch Exception rather than a bare 'except:' so that system-exiting
    # exceptions (KeyboardInterrupt, SystemExit) are not swallowed:
    except Exception:
        tk_app.log_exception("Error executing engine bootstrap script.")
        raise TankError("Error executing bootstrap script. Please see log for details.")
def resolve_single_file(self, published_file_type, published_files):
    """
    Decide whether or not to return the published file, or raise a TankError.

    Returns the published file only if its path's extension matches one of
    the valid_extensions.

    :param str published_file_type: PublishedFile or TankPublishedFile.
    :param list published_files: A list of published file entity dicts,
        typically containing only one element.
    :returns: The published file entity dict with the required fields.
    :raises: TankError, PublishPathNotDefinedError, PublishPathNotSupported
    """
    # guard against an empty list so callers get a meaningful error rather
    # than an IndexError:
    if not published_files:
        raise TankError("No published files were supplied to resolve.")
    published_file = published_files[0]

    valid_extensions = self.parent.get_setting("valid_extensions")
    if not valid_extensions:
        raise TankError("Missing required value for setting 'valid_extensions'.")

    sg_published_file = self.get_published_file(published_file_type, published_file["id"])

    # call base Hook implementation method.
    path_on_disk = self.get_publish_path(sg_published_file)
    if path_on_disk:
        for app_extension in valid_extensions:
            if path_on_disk.endswith(".%s" % app_extension):
                return sg_published_file

    raise TankError(
        "PublishedFile path %s does not match valid extensions %s" %
        (path_on_disk, valid_extensions))
def sync_published_file(p4, published_file_entity, latest=True):
    """
    Sync the specified published file to the current workspace.

    :param p4: An open, connected P4 instance.
    :param published_file_entity: Shotgun PublishedFile entity dict whose
        'path' field holds a perforce url pointing at the depot file.
    :param latest: If True, sync the head revision; otherwise sync the
        revision recorded in the publish's 'version_number' field.
    :raises: TankError if the url is invalid/for a different server, or if
        the sync itself fails.
    """
    # depot path is stored in the path as a url:
    p4_url = published_file_entity.get("path", {}).get("url")

    # convert from perforce url, validating server:
    path_and_revision = depot_path_from_url(p4_url)
    depot_path = path_and_revision[0] if path_and_revision else None
    if not depot_path:
        # either an invalid path or different server so skip
        raise TankError("Failed to find Perforce file revision for %s" % p4_url)

    revision = None
    if not latest:
        revision = published_file_entity.get("version_number")

    sync_args = []
    sync_path = depot_path
    if revision:
        # pin the sync to a specific revision, e.g. //depot/file#3
        sync_path = "%s#%d" % (depot_path, revision)

    # sync file:
    try:
        p4.run_sync(sync_args, sync_path)
    # 'as' form is valid on python 2.6+ and python 3 (the old 'except X, e'
    # form is python-2-only syntax):
    except P4Exception as e:
        raise TankError("Perforce: Failed to sync file %s - %s" % (sync_path, p4.errors[0] if p4.errors else e))
def load_geometry(self, sg_publish, options, objects_to_load):
    """
    Wraps the Mari GeoManager.load() method and additionally tags newly loaded
    geometry with Shotgun specific metadata. See Mari API documentation for
    more information on GeoManager.load().

    :param sg_publish: The shotgun publish to load. This should be a Shotgun
                       entity dictionary containing at least the entity
                       "type" and "id".
    :param options: [Mari arg] - Options to be passed to the file loader when
                    loading the geometry
    :param objects_to_load: [Mari arg] - A list of objects to load from the file
    :returns: A list of the loaded GeoEntity instances that were created
    :raises: TankError if the publish can't be found on disk or loading fails.
    """
    # ensure that sg_publish contains the information we need:
    update_publish_records([sg_publish])

    # extract the file path for the publish
    publish_path = self.__get_publish_path(sg_publish)
    if not publish_path or not os.path.exists(publish_path):
        raise TankError("Publish '%s' couldn't be found on disk!" % publish_path)

    # load everything:
    new_geo = []
    try:
        # (AD) Note - passing options as a named parameter (e.g. options=options) seems to
        # stop any channels specified in the options list from being created so just pass
        # as indexed parameters instead!
        new_geo = mari.geo.load(publish_path, options, objects_to_load)
    # 'as' form is valid on python 2.6+ and python 3 (the old 'except X, e'
    # form is python-2-only syntax):
    except Exception as e:
        raise TankError("Failed to load published geometry from '%s': %s" % (publish_path, e))

    # NOTE(review): the docstring promises the loaded GeoEntity list but the
    # original chunk ended without returning it - confirm against full source.
    return new_geo
def __get_wiretap_central_binary(self, binary_name):
    """
    Returns the path to a binary in the wiretap central binary collection.
    This is standard on all Flame installations.

    :param binary_name: Name of desired binary
    :returns: Absolute path as a string
    :raises: TankError if the platform is unsupported or the binary is missing.
    """
    if sys.platform == "darwin":
        # wiretap central moved location in the 2018 release:
        if int(self.flame_major_version) <= 2017:
            wtc_path = "/Library/WebServer/CGI-Executables/WiretapCentral"
        else:
            wtc_path = "/Library/WebServer/Documents/WiretapCentral/cgi-bin"
    # use startswith() so this matches both the python 2 value "linux2" and
    # the python 3 value "linux":
    elif sys.platform.startswith("linux"):
        if int(self.flame_major_version) <= 2017:
            wtc_path = "/var/www/cgi-bin/WiretapCentral"
        else:
            wtc_path = "/var/www/html/WiretapCentral/cgi-bin"
    else:
        raise TankError("Your operating system does not support wiretap central!")

    path = os.path.join(wtc_path, binary_name)
    if not os.path.exists(path):
        raise TankError("Cannot find binary '%s'!" % path)

    return path
def _update_geometry_items(self, items):
    """
    Update specified geo items in the current project

    :param items: List of geometry items to update
    """
    mari_engine = self.parent.engine

    # default (None) load options mean a new version is loaded using the same
    # options as the current version of the geometry:
    options = None

    # first pass - look up publish details for every path we were asked to
    # update, in a single Shotgun query:
    paths_to_update = set(item["path"] for item in items)
    try:
        found_publishes = sgtk.util.find_publish(
            self.parent.sgtk,
            paths_to_update,
            fields=["id", "path", "version_number"])
    except TankError as e:
        raise TankError("Failed to query publishes from Shotgun: %s" % e)

    # second pass - apply the update to each geometry item:
    for item in items:
        publish_path = item["path"]
        geo_name = item["node"]

        sg_publish_data = found_publishes.get(publish_path)
        if not sg_publish_data:
            raise TankError("Failed to find Shotgun publish record for '%s'" % publish_path)

        # locate the geometry in the open project:
        geo = mari.geo.find(geo_name)
        if not geo:
            raise TankError("Failed to find geometry '%s' in the current project" % geo_name)

        # if this version is already loaded then just make it current:
        version_already_loaded = False
        for version_info in mari_engine.list_geometry_versions(geo):
            if version_info.get("path") == publish_path:
                geo.setCurrentVersion(version_info["geo_version"].name())
                version_already_loaded = True
                break

        if not version_already_loaded:
            # load the publish as a brand new version and make it current:
            new_version = mari_engine.add_geometry_version(geo, sg_publish_data, options)
            if new_version:
                geo.setCurrentVersion(new_version.name())
def _launch_callback(self, menu_name, app_engine, app_path, app_args, version=None, file_to_open=None):
    """
    Default method to launch DCC application command based on the current context.

    :param menu_name: Menu name displayed to launch this DCC.
    :param app_engine: The TK engine associated with the DCC to be launched.
    :param app_path: Full path to the DCC. May contain environment variables
                     and/or the locally supported {version}, {v0}, {v1}, ...
                     variables.
    :param app_args: Args string to pass to the DCC at launch time.
    :param version: (Optional) Specific version of DCC to launch. Used to
                    parse {version}, {v0}, {v1}, ... information from.
    :param file_to_open: (Optional) File to open when the DCC launches.
    :raises: TankError if the context has no project or folder creation fails.
    """
    # Verify a Project is defined in the context.
    if self._tk_app.context.project is None:
        raise TankError("Your context does not have a project defined. Cannot continue.")

    # Extract an entity type and id from the context.
    entity_type = self._tk_app.context.project["type"]
    entity_id = self._tk_app.context.project["id"]
    # if there is an entity then that takes precedence
    if self._tk_app.context.entity:
        entity_type = self._tk_app.context.entity["type"]
        entity_id = self._tk_app.context.entity["id"]
    # and if there is a task that is even better
    if self._tk_app.context.task:
        entity_type = self._tk_app.context.task["type"]
        entity_id = self._tk_app.context.task["id"]

    if len(self._tk_app.sgtk.roots) == 0:
        # configuration doesn't have any filesystem roots defined
        self._tk_app.log_debug(
            "Configuration does not have any filesystem roots defined. "
            "Skipping folder creation.")
    else:
        # Do the folder creation. If there is a specific defer keyword,
        # this takes precedence. Otherwise, use the engine name for the
        # DCC application by default.
        defer_keyword = self._tk_app.get_setting("defer_keyword") or app_engine
        try:
            self._tk_app.log_debug(
                "Creating folders for %s %s. Defer keyword: '%s'" %
                (entity_type, entity_id, defer_keyword))
            self._tk_app.sgtk.create_filesystem_structure(
                entity_type, entity_id, engine=defer_keyword)
        # 'as' form is valid on python 2.6+ and python 3 (the old
        # 'except X, err' form is python-2-only syntax):
        except sgtk.TankError as err:
            raise TankError("Could not create folders on disk. Error reported: %s" % err)
def connect(self, allow_ui=True, user=None, password=None, workspace=None):
    """
    Utility method that returns a connection using the current configuration. If a
    connection can't be established and the user is in ui mode then they will be
    prompted to edit the connection details.

    :param allow_ui: If True and the engine can show UI then we can prompt the
                     user through dialogs if needed. If False then UI is not
                     allowed so this method will just raise if it can't connect
                     for some reason.
    :param user: The username of the user to use when connecting with the
                 Perforce server
    :param password: The password for the specified user to use when connecting
                     to the server
    :param workspace: The name of the workspace/client-spec to use for this user
                      when connecting to the server
    :returns: A new connected P4 instance if successful or None if the user
              cancels.
    :raises: TankError if connecting failed for some reason other than the user
             cancelling.

    NOTE(review): this chunk appears truncated - the outer try below has no
    visible except/finally clause (which presumably releases the connection
    lock); confirm against the full source before editing.
    """
    server = self._fw.get_setting("server")

    if not user:
        # no user supplied - resolve the Perforce user for the current
        # Shotgun user via the configured hook:
        sg_user = sgtk.util.get_current_user(self._fw.sgtk)
        user = self._fw.execute_hook("hook_get_perforce_user", sg_user=sg_user)
        if not user:
            raise TankError(
                "Perforce: Failed to find Perforce user for Shotgun user '%s'" %
                (sg_user if sg_user else "<unknown>"))

    # fall back to the currently configured workspace when none was supplied:
    workspace = workspace if workspace != None else self._get_current_workspace()

    # lock around attempting to connect so that only one thread will attempt
    # to connect at a time.
    global _g_connection_lock
    _g_connection_lock.acquire()
    try:
        # first, attempt to connect to the server:
        try:
            self.connect_to_server()
        except SgtkP4Error, e:
            raise TankError(
                "Perforce: Failed to connect to perforce server '%s' - %s" %
                (server, e))

        # then ensure that the connection is trusted:
        try:
            is_trusted, show_details = self._ensure_connection_is_trusted(allow_ui)
            if show_details:
                # switch to connection dialog - raise a TankError here which will get
                # raised if we aren't able to show the connection details dialog
                raise TankError("Perforce: Failed to establish trust with server!")
            elif not is_trusted:
                # user decided not to trust!:
                return None
        except SgtkP4Error, e:
            raise TankError(
                "Perforce: Connection to server '%s' is not trusted: %s" %
                (server, e))
def _generate_path(self, env, name, version, use_next_version, ext, require_path=False):
    """
    Generate the path to save to from the work area and file details.

    :param env: The work area environment to generate the path for.
    :param name: The (optional) name to embed in the generated path.
    :param version: The version number to use.
    :param use_next_version: True if the next available version should be used.
    :param ext: The file extension to use, or None if not applicable.
    :param require_path: If True, a failure to resolve the context fields
        raises a TankError rather than returning an empty result.
    :returns: Tuple containing (path, min_version)
    :raises: Error if something goes wrong!
    """
    app = sgtk.platform.current_bundle()

    # first make sure the environment is complete:
    if not env or not env.context:
        raise TankError("Please select a work area to save into.")
    templates = env.get_missing_templates()
    if templates:
        raise MissingTemplatesError(templates)

    # build the fields dictionary from the environment:
    fields = {}

    name_is_used = "name" in env.work_template.keys
    if name_is_used:
        if not env.work_template.is_optional("name") and not name:
            raise TankError("Name is required, please enter a valid name!")
        if name:
            if not env.work_template.keys["name"].validate(name):
                raise TankError("Name contains illegal characters!")
            fields["name"] = name

    ext_is_used = "extension" in env.work_template.keys
    # identity comparison with None is the idiomatic form:
    if ext_is_used and ext is not None:
        fields["extension"] = ext

    # query the context fields:
    ctx_fields = {}
    try:
        ctx_fields = env.context.as_template_fields(env.work_template, validate=True)
        # items() works on both python 2 and 3 (iteritems() is python-2-only):
        fields = dict(chain(fields.items(), ctx_fields.items()))
    # 'as' form is valid on python 2.6+ and python 3:
    except TankError as e:
        app.log_debug("Unable to generate preview path: %s" % e)
        if require_path:
            # log the original exception (useful for tracking down the problem)
            app.log_exception("Unable to resolve template fields!")
            # and raise a new, clearer exception for this specific use case:
            raise TankError(
                "Unable to resolve template fields! This could mean there is a mismatch "
                "between your folder schema and templates. Please email "
                "[email protected] if you need help fixing this.")
        # it's ok not to have a path preview at this point!
        return {}
    # NOTE(review): the chunk ends here; the remainder of the path generation
    # (building the final path/version result) appears to be outside this
    # view - confirm against full source.
def create_project(self, name, sg_publishes, channels_to_create, channels_to_import, project_meta_options, objects_to_load):
    """
    Wraps the Mari ProjectManager.create() method and additionally tags newly
    created project and all loaded geometry & versions with Shotgun specific
    metadata. See Mari API documentation for more information on
    ProjectManager.create().

    :param name: [Mari arg] - The name to use for the new project
    :param sg_publishes: A list of publishes to load into the new project. At
                         least one publish must be specified!
    :param channels_to_create: [Mari arg] - A list of channels to create for
                               geometry in the new project
    :param channels_to_import: [Mari arg] - A list of channels to import for
                               geometry in the new project
    :param project_meta_options: [Mari arg] - A dictionary of project creation
                                 meta options - these are typically the mesh
                                 options used when loading the geometry
    :param objects_to_load: [Mari arg] - A list of objects to load from the files
    :returns: The newly created Project instance
    :raises: TankError if the name is taken, no publishes are given, the
             first publish is missing on disk, or project creation fails.
    """
    engine = sgtk.platform.current_bundle()

    # make sure that a project with this name doesn't already exist:
    if name in mari.projects.names():
        raise TankError("A project called '%s' already exists!" % name)

    # ensure at least one publish was specified:
    if not sg_publishes:
        raise TankError("Must specify at least one valid geometry publish to create a new project with!")

    # ensure that all sg_publishes contain the information we need:
    update_publish_records(sg_publishes)

    # extract the file path for the first publish:
    # (TODO) - move this to use a centralized method in core
    publish_path = sg_publishes[0].get("path", {}).get("local_path")
    if not publish_path or not os.path.exists(publish_path):
        raise TankError("Publish '%s' couldn't be found on disk!" % publish_path)

    # close existing project if it's open:
    if mari.projects.current():
        mari.projects.close()
        if mari.projects.current():
            # the user cancelled and the project wasn't closed
            return

    # create the project with the first geometry specified:
    try:
        engine.log_debug("Creating a new project called: %s" % name)
        mari.projects.create(name, publish_path, channels_to_create,
                             channels_to_import, project_meta_options,
                             objects_to_load)
    # 'as' form is valid on python 2.6+ and python 3 (the old 'except X, e'
    # form is python-2-only syntax):
    except Exception as e:
        raise TankError("Failed to create new project: %s" % e)
    # NOTE(review): the docstring promises the new Project instance and the
    # metadata tagging, but the chunk ends here - confirm against full source.
def _prepare_flame_flare_launch(engine_name, context, app_path, app_args):
    """
    Flame specific pre-launch environment setup.

    :param engine_name: The name of the engine being launched (tk-flame or tk-flare)
    :param context: The context that the application is being launched in
    :param app_path: Path to DCC executable or launch script
    :param app_args: External app arguments
    :returns: Tuple (app_path, app_args) Potentially modified app_path or
        app_args value, depending on preparation requirements for flame.
    """
    # Retrieve the TK Application instance from the current bundle
    tk_app = sgtk.platform.current_bundle()

    # find the path to the engine on disk where the startup script can be found:
    engine_path = sgtk.platform.get_engine_path(engine_name, tk_app.sgtk, context)
    if engine_path is None:
        raise TankError("Path to '%s' engine could not be found." % engine_name)

    # find bootstrap file located in the engine and load that up
    startup_path = os.path.join(engine_path, "python", "startup", "bootstrap.py")
    if not os.path.exists(startup_path):
        raise Exception("Cannot find bootstrap script '%s'" % startup_path)

    # make the bootstrap module importable for the duration of the call:
    bootstrap_dir = os.path.dirname(startup_path)
    sys.path.insert(0, bootstrap_dir)
    try:
        import bootstrap
        app_path, new_args = bootstrap.bootstrap(engine_name, context, app_path, app_args)
    except Exception as e:
        tk_app.log_exception("Error executing engine bootstrap script.")
        if tk_app.engine.has_ui:
            # got UI support. Launch dialog with nice message
            not_found_dialog = tk_app.import_module("not_found_dialog")
            not_found_dialog.show_generic_error_dialog(tk_app, str(e))
        raise TankError("Error executing bootstrap script. Please see log for details.")
    finally:
        # always restore sys.path, even when bootstrap failed:
        sys.path.pop(0)

    return (app_path, new_args)
def execute(self, **kwargs):
    """
    Main hook entry point

    :returns: A list of any items that were found to be published. Each item
              in the list is a dictionary with the following keys:

              type:         String - must match a scene_item_type defined in
                            one of the configured outputs; determines which
                            outputs are published for the item.
              name:         String - name shown for the item in the UI.
              description:  String - description shown for the item in the UI.
              selected:     Bool - initial selected state (default True).
              required:     Bool - if True the item can't be deselected
                            (default False).
              other_params: Dictionary - optional data passed through to the
                            pre-publish and publish hooks.
    """
    adobe = self.parent.engine.adobe

    # there must be an active, saved document for publishing to proceed:
    try:
        doc = adobe.app.activeDocument
    except RuntimeError:
        raise TankError("There is no active document!")

    try:
        scene_path = doc.fullName.fsName
    except RuntimeError:
        raise TankError("Please save your file before publishing!")

    # the primary item matches the primary output 'scene_item_type':
    return [{"type": "work_file", "name": os.path.basename(scene_path)}]
def _get_publish_symlink_path(self, item, task_settings):
    """
    Get a publish symlink path for the supplied item.

    :param item: The item to determine the publish type for
    :return: A string representing the symlink path to supply when
        registering a publish for the supplied item, or None if no
        publish_symlink_template is configured.

    Extracts the publish symlink path via the configured publish templates
    if possible.
    """
    publisher = self.parent

    # Start with the item's fields
    fields = copy.copy(item.properties.get("fields", {}))

    publish_symlink_template = task_settings.get("publish_symlink_template")
    publish_symlink_path = None

    # If a template is defined, get the publish symlink path from it
    if publish_symlink_template:
        pub_symlink_tmpl = publisher.get_template_by_name(publish_symlink_template)
        if not pub_symlink_tmpl:
            # this template was not found in the template config!
            raise TankError("The Template '%s' does not exist!" % publish_symlink_template)

        # First get the fields from the context
        try:
            fields.update(item.context.as_template_fields(pub_symlink_tmpl))
        # catch without binding - the exception object was never used (and
        # the old 'except X, e' form is python-2-only syntax). Context
        # fields are optional here, so the failure is only logged:
        except TankError:
            self.logger.debug("Unable to get context fields for publish_symlink_template.")

        missing_keys = pub_symlink_tmpl.missing_keys(fields, True)
        if missing_keys:
            raise TankError(
                "Cannot resolve publish_symlink_template (%s). Missing keys: %s" %
                (publish_symlink_template, pprint.pformat(missing_keys)))

        # Apply fields to publish_symlink_template to get publish symlink path
        publish_symlink_path = pub_symlink_tmpl.apply_fields(fields)
        self.logger.debug(
            "Used publish_symlink_template to determine the publish path: %s" %
            (publish_symlink_path,))

    # the original chunk ended without a visible return - returning the
    # computed value matches the docstring's contract
    return publish_symlink_path
def _get_publish_linked_entity_name(self, item, task_settings):
    """
    Get the linked entity name for the supplied item.

    :param item: The item to determine the publish version for
    :return: The resolved linked entity name string, or None if no
        publish_linked_entity_name_template is configured.
    """
    publisher = self.parent

    # Start with the item's fields
    fields = copy.copy(item.properties.get("fields", {}))

    publish_linked_entity_name_template = task_settings.get("publish_linked_entity_name_template")
    publish_linked_entity_name = None

    # check if we have a publish_linked_entity_name_template defined
    if publish_linked_entity_name_template:
        pub_linked_entity_name_tmpl = publisher.get_template_by_name(publish_linked_entity_name_template)
        if not pub_linked_entity_name_tmpl:
            # this template was not found in the template config!
            raise TankError("The Template '%s' does not exist!" % publish_linked_entity_name_template)

        # First get the fields from the context
        try:
            fields.update(item.context.as_template_fields(pub_linked_entity_name_tmpl))
        # catch without binding - the exception object was never used (and
        # the old 'except X, e' form is python-2-only syntax). Context
        # fields are optional here, so the failure is only logged:
        except TankError:
            self.logger.debug("Unable to get context fields for publish_linked_entity_name_template.")

        missing_keys = pub_linked_entity_name_tmpl.missing_keys(fields, True)
        if missing_keys:
            raise TankError(
                "Cannot resolve publish_linked_entity_name_template (%s). Missing keys: %s" %
                (publish_linked_entity_name_template, pprint.pformat(missing_keys)))

        publish_linked_entity_name = pub_linked_entity_name_tmpl.apply_fields(fields)
        self.logger.debug(
            "Retrieved publish_linked_entity_name via publish_linked_entity_name_template.")

    # the original chunk ended without a visible return - returning the
    # computed value matches the docstring's contract
    return publish_linked_entity_name
def _get_hook_value(self, method_name, hook_key):
    """
    Validate that value is correct and return it

    :param method_name: Name of the shotgun_fields hook method the data
        came from.
    :param hook_key: Key that must be present in that method's data dict.
    :returns: The value stored under hook_key for the given method.
    :raises: TankError if the method or the key is unknown.
    """
    try:
        data = self._hook_data[method_name]
    except KeyError:
        raise TankError("Unknown shotgun_fields hook method %s" % method_name)

    try:
        return data[hook_key]
    except KeyError:
        raise TankError("Hook shotgun_fields.%s does not return "
                        "required dictionary key '%s'!" % (method_name, hook_key))
def _on_save(self):
    """
    Triggered when the user presses Save: gathers the file details from the
    UI, creates any required folders and generates the path to save to.

    NOTE(review): this chunk appears truncated - the outer try below has no
    visible except/finally clause and the actual save step is not in view;
    confirm against the full source before editing.
    """
    app = sgtk.platform.current_bundle()
    if not self._current_env:
        # no current work area selected - nothing to do:
        return

    # generate the path to save to and do any pre-save preparation:
    path_to_save = ""
    try:
        # create folders if needed:
        try:
            SaveAsFileAction.create_folders_if_needed(
                self._current_env.context, self._current_env.work_template)
        except TankError, e:
            app.log_exception(
                "File Save - failed to create folders for context '%s'!" %
                self._current_env.context)
            raise TankError(
                "Failed to create folders for context '%s' - %s" %
                (self._current_env.context, e))

        # get the name, version and extension from the UI:
        name = value_to_str(self._ui.name_edit.text())
        version = self._ui.version_spinner.value()
        use_next_version = self._ui.use_next_available_cb.isChecked()
        ext_idx = self._ui.file_type_menu.currentIndex()
        # ext_idx is -1 when no file type is selected in the menu:
        ext = self._extension_choices[ext_idx] if ext_idx >= 0 else ""

        # now attempt to generate the path to save to:
        version_to_save = None
        try:
            # try to generate a path from these details:
            result = self._generate_path(self._current_env, name, version,
                                         use_next_version, ext,
                                         require_path=True)
            path_to_save = result.get("path")
            if not path_to_save:
                raise TankError("Path generation returned an empty path!")
            version_to_save = result.get("version")
        except TankError, e:
            app.log_exception(
                "File Save - failed to generate path to save to!")
            raise TankError("Failed to generate a path to save to - %s" % e)
def execute(self, path, context, associated_entity, **kwargs):
    """
    Launches the associated app and starts tank.

    :param path: full path to the published file
    :param context: context object representing the publish
    :param associated_entity: same as context.entity
    :returns: True if a DCC launch was triggered for the file, False if the
        file extension was not recognized.
    """
    if context is None:
        raise TankError("Context cannot be None!")

    ########################################################################
    # Example implementation below:

    # map file extensions to the launch command and engine to use:
    launch_table = [
        ((".nk",), "launchnuke", "tk-nuke"),                      # Nuke
        ((".ma", ".mb"), "launchmaya", "tk-maya"),                # Maya
        ((".fbx",), "launchmotionbuilder", "tk-motionbuilder"),   # MotionBuilder
        ((".hrox",), "launchhiero", "tk-hiero"),                  # Hiero
        ((".max",), "launch3dsmax", "tk-3dsmaxplus"),             # 3ds Max
        ((".psd", ".psb"), "launchphotoshop", "tk-photoshopcc"),  # Photoshop
        ((".aep",), "launchaftereffects", "tk-aftereffects"),     # After Effects
        ((".hip",), "launchhoudini", "tk-houdini"),               # Houdini
    ]

    status = False
    for extensions, command, engine in launch_table:
        # str.endswith accepts a tuple of suffixes:
        if path.endswith(extensions):
            status = True
            self._do_launch(command, engine, path, context)
            break

    # return an indication to the app whether we launched or not
    # if we return True here, the app will just exit
    # if we return False, the app may try other ways to launch the file.
    return status
def get_export_template(self, settings, item):
    """
    Retrieves and validates the export template from the settings.

    :param settings: Dictionary of Settings. The keys are strings, matching
        the keys returned in the settings property. The values are `Setting`
        instances.
    :param item: Item to process
    :returns: A template representing the export path of the item or
        None if no template could be identified.
    """
    # a previously resolved template cached on the item wins:
    cached_template = item.get_property("export_template")
    if cached_template:
        return cached_template

    resolved_template = None
    template_setting = settings.get("Export Template")
    if template_setting and template_setting.value:
        resolved_template = self.parent.engine.get_template_by_name(template_setting.value)
        if not resolved_template:
            raise TankError("Missing Export Template in templates.yml: %s " % template_setting.value)

    # cache it for later use
    item.properties["export_template"] = resolved_template
    return resolved_template
def execute(self, operation, file_path, **kwargs):
    """
    Main hook entry point

    :operation:     String
                    Scene operation to perform

    :file_path:     String
                    File path to use if the operation requires it (e.g. open)

    :returns:       Depends on operation:
                    'current_path' - Return the current scene file path as a String
                    all others     - None
    :raises:        TankError if 'current_path' is requested but the active
                    document has never been saved.
    """
    adobe = self.parent.engine.adobe

    if operation == "current_path":
        file_obj = adobe.app.project.file
        # use identity comparison with None rather than '!=':
        if file_obj is not None:
            return file_obj.fsName
        raise TankError("The active document must be saved!")

    elif operation == "open":
        # close the current project without saving, then open the new file:
        adobe.app.project.close(adobe.CloseOptions.DO_NOT_SAVE_CHANGES)
        adobe.app.open(adobe.File(file_path))

    elif operation == "save":
        # save the current script
        adobe.app.project.save()
def _submit(self):
    """
    Creates items in Shotgun and clears the widget.
    """
    # don't allow an empty note to be submitted:
    if self.ui.text_entry.toPlainText() == "":
        QtGui.QMessageBox.information(self, "Please Add Note",
                                      "Please add some content before submitting.")
        return

    # Call our pre-submit callback if we have one registered.
    if self.pre_submit_callback:
        self.pre_submit_callback(self)

    # hide hint label for better ux.
    self.ui.hint_label.hide()
    self.__overlay.start_spin()

    # gather everything the async submission needs in a single payload:
    note_data = {
        "pixmap": self._pixmap,
        "text": self.ui.text_entry.toPlainText(),
        "recipient_links": self.ui.text_entry.get_recipient_links(),
        "entity": {"id": self._entity_id, "type": self._entity_type},
        "project": self._bundle.context.project,
        "attachments": self._attachments,
    }

    # ask the data retriever to execute an async callback
    if not self.__sg_data_retriever:
        raise TankError("Please associate this class with a background task processor.")
    self._processing_id = self.__sg_data_retriever.execute_method(self._async_submit, note_data)
def _on_browse_for_publishes(self, new_project_form):
    """
    Called when the user clicks the 'Add Publishes' button in the new project
    form. Opens the loader so that the user can select a publish to be loaded
    into the new project.

    :param new_project_form: The new project form that the button was clicked in
    :raises: TankError if the tk-multi-loader2 app is not available.
    """
    loader_app = self._app.engine.apps.get("tk-multi-loader2")
    if not loader_app:
        raise TankError("The tk-multi-loader2 app needs to be available to browse for publishes!")

    # browse for publishes:
    publish_types = self._app.get_setting("publish_types")
    selected_publishes = loader_app.open_publish("Select Published Geometry", "Select", publish_types)

    # make sure we keep this list of publishes unique:
    current_ids = set([p["id"] for p in self.__new_project_publishes])
    for sg_publish in selected_publishes:
        publish_id = sg_publish.get("id")
        # use 'is not None' (identity) rather than '!= None' for the check:
        if publish_id is not None and publish_id not in current_ids:
            current_ids.add(publish_id)
            self.__new_project_publishes.append(sg_publish)

    # update new project form with selected geometry:
    new_project_form.update_publishes(self.__new_project_publishes)
def _validate_hiero_export_template(self, template_str):
    """
    Validate that a template_str only contains Hiero substitution keywords or
    custom keywords created via the resolve_custom_strings hook.

    :param template_str: The configured export template string to validate.
    :raises: TankError if unknown {tokens} remain after stripping all known
        keywords.
    """
    # everything Hiero resolves itself plus any custom keywords contributed
    # via the resolve_custom_strings hook:
    custom_keywords = [entry["keyword"] for entry in self.get_setting("custom_template_fields")]
    known_keywords = HIERO_SUBSTITUTION_KEYWORDS + custom_keywords

    # strip every known {keyword} token out of the template
    stripped = template_str
    for keyword in known_keywords:
        stripped = stripped.replace("{%s}" % keyword, "")

    # anything that still looks like a {token} is unrecognized
    leftover_tokens = re.findall(r"(?<={)[a-zA-Z_ 0-9]+(?=})", stripped)
    if leftover_tokens:
        raise TankError(
            "The configuration template '%s' contains keywords %s which are "
            "not recognized by Hiero. Either remove them from the sgtk template "
            "or adjust the hook that converts a template to a hiero export "
            "path to convert these fields into fixed strings or hiero "
            "substitution tokens." % (stripped, ",".join(leftover_tokens)))
def backburner_upload_quicktime(self, full_path, sg_version_id):
    """
    This method is called via backburner and therefore runs in the background.
    It uploads the quicktime to the version.

    :param full_path: Path on disk to the quicktime file to upload.
    :param sg_version_id: Id of the Version entity to attach the movie to.
    :raises: TankError if the quicktime cannot be found on disk.
    """
    if not os.path.exists(full_path):
        raise TankError("Cannot find quicktime '%s'! Aborting upload." % full_path)

    self.log_debug("Begin Shotgun processing for %s..." % full_path)
    self.log_debug("File size is %s bytes." % os.path.getsize(full_path))

    # upload quicktime to Shotgun - the target field depends on whether
    # server-side transcoding is bypassed:
    if self.get_setting("bypass_shotgun_transcoding"):
        self.log_debug("Begin upload of explicit mp4 quicktime to Shotgun...")
        field_name = "sg_uploaded_movie_mp4"
    else:
        self.log_debug("Begin upload of quicktime to Shotgun...")
        field_name = "sg_uploaded_movie"

    self.shotgun.upload("Version", sg_version_id, full_path, field_name)
    self.log_debug("Upload complete!")

    # clean up - best-effort only; a failure to delete is just a warning:
    try:
        self.log_debug("Trying to remove temporary quicktime file...")
        os.remove(full_path)
        self.log_debug("Temporary quicktime file successfully deleted.")
    # 'as' form is valid on python 2.6+ and python 3 (the old 'except X, e'
    # form is python-2-only syntax):
    except Exception as e:
        self.log_warning("Could not remove temporary file '%s': %s" % (full_path, e))
def _get_work_file_path(self, item, task_settings):
    """
    Get a work file path for the supplied item.

    :param item: The item to determine the work file path for
    :param task_settings: Task settings dict providing the
        "work_path_template" setting.
    :return: A string representing the output path to supply when
        registering a work file for the supplied item

    Extracts the work file path via the configured work_path_template.
    """
    publisher = self.parent

    fields = {}
    work_path_template = task_settings.get("work_path_template")
    if not work_path_template:
        # no template configured - fall back to the item's existing path
        self.logger.info(
            "work_path_template not defined. Skipping conform.")
        return item.properties.path

    work_tmpl = publisher.get_template_by_name(work_path_template)
    if not work_tmpl:
        # this template was not found in the template config!
        raise TankError("The Template '%s' does not exist!" % work_path_template)

    # First get the fields from the context
    try:
        fields = item.context.as_template_fields(work_tmpl, validate=True)
    except TankError:
        # fix: py3-compatible "except" syntax; the bound exception object
        # was never used in the original ("except TankError, e")
        self.logger.debug(
            "Unable to get context fields from work_path_template.")
    # NOTE(review): as visible here, "fields" is computed but never used and
    # the method falls through (returning None) whenever a template IS
    # configured - confirm against the full file whether trailing lines
    # (e.g. work_tmpl.apply_fields(fields)) are missing.
def _accept_work_path(self, item, work_path_template):
    """
    Compare the item's path against the supplied work_path_template.

    The plugin is accepted only when a template is defined, exists in the
    template config, and the item's path does NOT already match it. If no
    template is set, or the path matches the template, we do not accept.

    :raises: TankError if the named template is not in the template config.
    """
    item_path = item.properties.path

    if not work_path_template:
        self.logger.error("No work_path_template defined for item: '%s'" % item.name)
        return False

    template = self.parent.get_template_by_name(work_path_template)
    if not template:
        # this template was not found in the template config!
        raise TankError("The Template '%s' does not exist!" % work_path_template)

    # accept only when the path does not already conform to the template
    return not template.validate(item_path)
def _login_user(self, user, parent_widget=None):
    """
    Log-in the specified Perforce user if required.

    :param user: Name of the Perforce user to log in.
    :param parent_widget: Optional widget to parent any login UI to.
    :raises: TankError if there is no open connection, or the login
        could not be completed.
    """
    if not self._p4 or not self._p4.connected():
        raise TankError(
            "Unable to log user in without an open Perforce connection!")

    self._p4.user = str(user)

    # only run the interactive login flow when the server reports that a
    # login is actually needed
    if self._login_required():
        succeeded, _ = self._do_login(True, parent_widget)
        if not succeeded:
            raise TankError("Unable to login user %s without a password!" % user)
def quicktime_path_from_render_path(self, render_path):
    """
    Given a render path, generate a quicktime path. This will break up the
    render path in template fields given by the render template and then
    use those fields to create a quicktime path.

    Note! This method means that the fields of the quicktime path need to
    be a subset of the fields available via the render path. This is
    because we don't always have access to the raw metadata fields that
    were originally used to compose the render path; for example when the
    batch render hooks trigger, all we have access to is the raw render
    path.

    :path render_path: A render path associated with this preset
    :returns: Path to a quicktime, resolved via the quicktime template
    :raises: TankError if no quicktime template has been defined.
    """
    # fix: fetch the quicktime template once instead of calling
    # get_quicktime_template() both for the None-check and the resolve
    quicktime_template = self.get_quicktime_template()
    if quicktime_template is None:
        raise TankError("%s: Cannot evaluate quicktime path because no "
                        "quicktime template has been defined." % self)

    # extract the template fields from the render path
    render_template = self.get_render_template()
    fields = render_template.get_fields(render_path)

    # plug in the fields into the quicktime template
    return quicktime_template.apply_fields(fields)
def _show_in_fs(self, path): """ """ # find the deepest path that actually exists: while path and not os.path.exists(path): path = os.path.dirname(path) if not path: return # ensure the slashes are correct: path = path.replace("/", os.path.sep) # build the command: if sys.platform == "linux2": # TODO - figure out how to open the parent and select the file/path if os.path.isfile(path): path = os.path.dirname(path) cmd = "xdg-open \"%s\"" % path elif sys.platform.startswith("darwin"): cmd = "open -R \"%s\"" % path elif sys.platform == "win32": # TODO - figure out how to open the parent and select the file/path if os.path.isfile(path): path = os.path.dirname(path) cmd = "cmd.exe /C start \"Folder\" \"%s\"" % path else: raise TankError("Platform '%s' is not supported." % system) # run the command: exit_code = os.system(cmd) if exit_code != 0: self._app.log_error("Failed to launch '%s'!" % cmd)