def get_output_nodes(self):
    """Return the ROP nodes under /out, excluding unwanted driver types."""
    out_context = hou.node('/out')

    # Driver types we never want to report (currently just wedge ROPs).
    excluded_types = [
        hou.nodeType(hou.nodeTypeCategories()["Driver"], "wedge")
    ]

    kept = []
    for child in out_context.children():
        if child.type() not in excluded_types:
            kept.append(child)
    return kept
def test_typeSetDefaultIcon(self):
    """setDefaultIcon() must restore the built-in icon after an override."""
    geo_type = hou.nodeType(hou.objNodeTypeCategory(), "geo")
    # Override the icon, then ask for the default back.
    geo_type.setIcon("SOP_box")
    geo_type.setDefaultIcon()
    self.assertEqual(geo_type.icon(), "OBJ_geo")
def get_all_tk_alembic_nodes(cls):
    """
    Returns a list of all tk-houdini-alembicnode instances in the current
    session.
    """
    type_name = TkAlembicNodeHandler.TK_ALEMBIC_NODE_TYPE

    # The toolkit node exists in both SOP and ROP flavours; gather the
    # instances of each into one flat list.
    found = []
    for category in (hou.sopNodeTypeCategory(), hou.ropNodeTypeCategory()):
        found.extend(hou.nodeType(category, type_name).instances())
    return found
def get_nodes():
    """
    Returns a list of all SGTK Mantra nodes.

    :returns: All SGTK Mantra nodes.
    :rtype: List
    """
    sg_class = ToolkitMantraNodeHandler.SG_NODE_CLASS
    mantra_type = hou.nodeType(hou.ropNodeTypeCategory(), sg_class)
    return mantra_type.instances()
def collect_node_outputs(self, parent_item):
    """
    Creates items for known output nodes

    :param parent_item: Parent Item instance
    """
    for node_category, type_map in _HOUDINI_OUTPUTS.items():
        for node_type, path_parm_name in type_map.items():

            # skip builtin types that were already picked up as toolkit
            # nodes during an earlier collection pass
            if node_type == "alembic" and self._alembic_nodes_collected:
                self.logger.debug(
                    "Skipping regular alembic node collection since tk "
                    "alembic nodes were collected. "
                )
                continue
            if node_type == "ifd" and self._mantra_nodes_collected:
                self.logger.debug(
                    "Skipping regular mantra node collection since tk "
                    "mantra nodes were collected. "
                )
                continue

            # every instance of this node type in the session
            for node in hou.nodeType(node_category, node_type).instances():

                # evaluated output path; nodes that have not written
                # anything to disk yet are ignored
                path = node.parm(path_parm_name).eval()
                if not os.path.exists(path):
                    continue

                self.logger.info(
                    "Processing %s node: %s" % (node_type, node.path()))

                # let the base class build the file item for this path;
                # it knows how to handle frame sequences
                item = super(HoudiniSessionCollector, self)._collect_file(
                    parent_item,
                    path,
                    frame_sequence=True
                )

                # include the node path in the display name so the user
                # can see where the item came from in this session
                item.name = "%s (%s)" % (item.name, node.path())
def process(self, context):
    """Create a 'cache' instance for every Alembic ROP in the scene."""
    alembic_type = hou.nodeType(hou.ropNodeTypeCategory(), 'alembic')
    for rop in list(alembic_type.instances()):
        instance = context.create_instance(name=rop.name())
        instance.set_data('family', value='cache')
        instance.set_data('path', value=rop.path())
        instance.add(rop)
def process(self, context):
    """Create a 'camera' instance for every non-IPR camera node."""
    cam_type = hou.nodeType('Object/cam')
    for cam in list(cam_type.instances()):
        # skip IPR preview cameras
        if 'ipr' in cam.name():
            continue
        instance = context.create_instance(name=cam.name())
        instance.set_data('family', value='camera')
        instance.set_data('path', value=cam.path())
        instance.add(cam)
def findSelectedMantraNodes():
    """Find any currently selected Mantra (ifd) nodes."""
    mantra_type = hou.nodeType("Driver/ifd")
    selected = [
        node for node in hou.selectedNodes()
        if node.type() == mantra_type
    ]
    # Warn the user when nothing suitable is selected.
    if not selected:
        hou.ui.displayMessage(
            "No mantra nodes selected.", severity=hou.severityType.Error
        )
    return tuple(selected)
def _get_alembic_items(self):
    """Scan the file for tk alembic nodes to potentially publish.

    :returns: list of dicts describing each tk alembic node found, with
        keys ``name``, ``type``, ``description``, ``selected`` and
        ``other_params`` (output path + node).
    """
    app = self.parent

    # see if the alembicnode app is installed
    alembic_app = app.engine.apps.get("tk-houdini-alembicnode", None)
    if not alembic_app:
        app.log_info(
            "Will not attempt to scan for alembic caches."
            "The 'tk-houdini-alembicnode' app is not installed."
        )
        return []

    # get all the tk alembic nodes in the scene
    tk_alembic_nodes = hou.nodeType(hou.ropNodeTypeCategory(),
        "sgtk_alembic").instances()

    alembic_items = []

    # add all tk alembic nodes to the list of secondary publish items.
    for tk_alembic_node in tk_alembic_nodes:
        is_bypassed = tk_alembic_node.isBypassed()

        # the output parm is menu-driven: eval() yields the selected
        # index and menuLabels() the full paths, so index into the labels
        out_path_parm = tk_alembic_node.parm("filename")
        out_path = out_path_parm.menuLabels()[out_path_parm.eval()]

        # normalize the path
        out_path = os.path.normpath(out_path)

        # only select the item if the path exists and the node is not
        # bypassed
        should_select = out_path and os.path.exists(out_path) and \
            not is_bypassed

        alembic_items.append({
            "name": tk_alembic_node.name(),
            "type": "alembic_cache",
            "description": "Full Path: %s" % (tk_alembic_node.path(),),
            "selected": should_select,
            "other_params": {
                "path": out_path,
                "node": tk_alembic_node,
            },
        })

    return alembic_items
def _get_hdadef(table, scopeop, ns, name, version):
    """Look up the HDA definition for the given node-type-name components.

    Returns the hou.HDADefinition, or None when no matching type exists.
    """
    import hou

    # HOM doesn't like unicode :(
    scopeop = str(scopeop) if scopeop else None
    ns = str(ns) if ns else None
    name = str(name)
    version = str(version) if version else ''

    fullname = hou.hda.fullNodeTypeNameFromComponents(
        scopeop, ns, name, version
    )
    category = hou.nodeTypeCategories()[table]
    found = hou.nodeType(category, str(fullname))
    return found.definition() if found else None
def applyLocalOverrides(objnode): """ Promote all material parameters from the Material SHOP to the node. """ # Get the path to the material. material_path = objnode.evalParm("shop_materialpath") # Try to find the node. material_node = objnode.node(material_path) # The node type for the SHOP material node. material_type = hou.nodeType(hou.shopNodeTypeCategory(), "vopmaterial") # If the material node doesn't exist we need to throw an error. if not material_node: msg = "{0} is not a valid node.".format(material_path) raise hou.OperationFailed(msg) # If the material node is not actually a material we need to throw # an error. if material_node.type() != material_type: msg = "{0} is not a material.".format(material_path) raise hou.OperationFailed(msg) # Grab the parm template groups for the material and object. material_template_group = material_node.parmTemplateGroup() template_group = objnode.parmTemplateGroup() # We need to loop over every entry in the material group and apply the # 'material_spare' tag with a value of '1'. for template in material_template_group.entriesWithoutFolders(): # Grab any existing tags. tags = template.tags() # Apply the tag to signify it is a material spare property. tags["material_spare"] = "1" # Set the tags back to the template. template.setTags(tags) # Replace the original template with the modified one. material_template_group.replace(template.name(), template) # Find the folder to place the parameters into. # DOESN'T WORK with appendToFolder for some reason #material_folder = template_group.findFolder("Material") # For each entry in the template group, append it to the material # folder. for template in material_template_group.entries(): template_group.appendToFolder(template_group.findFolder("Material"), template) # Set the parm template group with the new parms to the object. objnode.setParmTemplateGroup(template_group)
def get_node_instances(self, type_name = ADDITIONAL_AOVS_NODE_TYPE):
    """
    Return list of all node instances in the scene with type type_name.
    """
    vop_type = hou.nodeType(hou.vopNodeTypeCategory(), type_name)

    # unknown type name: log and bail out with an empty result
    if not vop_type:
        self.logger.debug('Wrong node_type {0}. Could not set aov attributes.'.format(type_name))
        return []

    return list(vop_type.instances())
def update_all_node_login(node_type):
    """Updates login data in all instances of every known Zync node type.

    NOTE: despite the signature, the original implementation immediately
    overwrote ``node_type`` inside its loop, so the argument has never had
    any effect.  It is kept (and ignored) for backward compatibility.

    Args:
      node_type: hou.NodeType, ignored (kept for API compatibility).
    """
    # zync node type name -> name of the category it lives in
    zync_node_types = dict(
        zync_render='Driver',
        zync_sim='Dop',
        zync_sim_sop='Sop'
    )
    categories = hou.nodeTypeCategories()
    # iterate with items() and a distinct local name so we no longer
    # shadow the function parameter
    for zync_node_name, category_name in zync_node_types.items():
        found_type = hou.nodeType(categories[category_name], zync_node_name)
        if found_type:
            for node in found_type.instances():
                update_node_login(node)
def scan_scene(self):
    """
    The scan scene method is executed once at startup and its purpose is
    to analyze the current scene and return a list of references that are
    to be potentially operated on.

    The return data structure is a list of dictionaries. Each scene
    reference that is returned should be represented by a dictionary with
    three keys:

    - "node": The name of the 'node' that is to be operated on. Most DCCs
      have a concept of a node, path or some other way to address a
      particular object in the scene.
    - "type": The object type that this is. This is later passed to the
      update method so that it knows how to handle the object.
    - "path": Path on disk to the referenced object.

    Toolkit will scan the list of items, see if any of the objects
    matches any templates and try to determine if there is a more recent
    version available. Any such versions are then displayed in the UI as
    out of date.
    """
    items = []

    # get a list of all regular alembic nodes in the file
    alembic_nodes = hou.nodeType(hou.sopNodeTypeCategory(),
        "alembic").instances()

    # return an item for each alembic node found. the breakdown app will check
    # the paths of each looking for a template match and a newer version
    for alembic_node in alembic_nodes:
        # "fileName" is the alembic SOP's cache-path parameter
        file_parm = alembic_node.parm("fileName")
        file_path = os.path.normpath(file_parm.eval())

        items.append({
            "node": alembic_node.path(),
            "type": "alembic",
            "path": file_path,
        })

    return items
def test_getMetaSource(self):
    """The explodedview SOP definition must come from the scanned asset
    library directories meta source."""
    expected = "Scanned Asset Library Directories"
    sop_type = hou.nodeType(hou.sopNodeTypeCategory(), "explodedview")
    self.assertEqual(sop_type.definition().metaSource(), expected)
import hou
import os
import shutil

# Collect geometry containers from the current selection and find their
# time-dependent File SOPs.
nodes = hou.selectedNodes()
geoList = []
fileList = []
fileListDepend = []
alembicList = []
alembicPath = []
alembicArchives = []
filePath = []

geo_type = hou.nodeType("Object/geo")
file_type = hou.nodeType("Sop/file")

geoList = [i for i in nodes if i.type() == geo_type]

# BUG FIX: the original called .type() on i.children(), but children()
# returns a tuple, which has no type() method (AttributeError).  Flatten
# the children and test each SOP individually instead.
fileList = [
    child
    for geo in geoList
    for child in geo.children()
    if child.type() == file_type and child.isTimeDependent()
]

for i in nodes:
    # BUG FIX: hou.NodeType does not support the 'in' membership test
    # (TypeError in the original); compare against its string form, which
    # contains "<category> <typename>".
    typeData = str(i.type())
    if "Object geo" in typeData:
        # NOTE: geo nodes were already gathered above; this second pass
        # mirrors the original script's (duplicating) behavior.
        geoList.append(i)
        for a in i.children():
            typeData = str(a.type())
            if "Sop file" in typeData:
                if a.isTimeDependent() == True:
                    fileListDepend.append(a)
def convert_mantra_to_sg_nodes(self): """ Utility function to convert all Mantra nodes to Shotgun Mantra nodes (only converts Mantra nodes that were previously Shotgun Mantra nodes) # Example use: import sgtk eng = sgtk.platform.current_engine() app = eng.apps["tk-houdini-mantranode"] # Convert previously converted Mantra nodes back to # Shotgun Mantra nodes: app.convert_from_write_nodes() """ # get write nodes: nodes = hou.nodeType(hou.ropNodeTypeCategory(), 'ifd').instances() for n in nodes: user_dict = n.userDataDict() profile = user_dict.get('tk_profile_name') if not profile: # can't convert to a Shotgun Mantra Node # as we have missing parameters! continue node_name = n.name() node_pos = n.position() # create new Shotgun Write node: node_class = ToolkitMantraNodeHandler.SG_NODE_CLASS new_sg_n = n.parent().createNode(node_class) # set the profile try: parm = new_sg_n.parm(ToolkitMantraNodeHandler.PARM_CONFIG) index = parm.menuLabels().index(profile) parm.set(index) except ValueError: pass # copy across and knob values from the internal write node. exclude = [] self.__copy_parm_values(n, new_sg_n, exclude) # explicitly copy some settings to the new Shotgun Mantra Node: # AOV Names nums = self.__get_all_extra_plane_numbers(n) for num in nums: parm_name = 'sgtk__aov_name{0}'.format(num) user_data_name = 'tk_aov_name{0}'.format(num) aov_name = user_dict.get(user_data_name) new_sg_n.parm(parm_name).set(aov_name) # Copy inputs and move outputs self.__copy_inputs_to_node(n, new_sg_n) self.__move_outputs_to_node(n, new_sg_n) self.__copy_color(n, new_sg_n) # delete original node: n.destroy() # rename new node: new_sg_n.setName(node_name) new_sg_n.setPosition(node_pos)
def test_typeIsSubnet(self):
    """The obj-level subnet type must report itself as a subnet type."""
    subnet_type = hou.nodeType(hou.objNodeTypeCategory(), "subnet")
    self.assertTrue(subnet_type.isSubnetType())
def _all_job_nodes():
    """Return all instances of the conductor job ROP in the scene."""
    job_type = hou.nodeType(hou.ropNodeTypeCategory(), "conductor::job::0.1")
    return job_type.instances()
import hou

# Shelf tool: list the current selection, then add the selected objects'
# names to the matte_objects parm of user-chosen mantra ROPs.

selection = hou.selectedNodes()
names = [n.name() for n in selection]

# Build a human-readable summary of the selection for the dialog.
my_list = ""
if len(selection) < 1:
    my_list = "nothing selected, please select at least one object"
else:
    # BUG FIX: replaced the manual counter with enumerate().
    for a, sel in enumerate(selection, start=1):
        my_list = my_list + "\n" + "selected object " + str(a) + ":" + sel.name()
hou.ui.displayMessage(my_list, buttons=('OK',))

length = len(selection)
# BUG FIX: Python 2 print statements converted to print() calls so the
# script runs under Python 3 (the rest of this file uses Python 3 syntax).
print("Number of nodes selected: " + str(length))

# Let the user pick mantra ROPs from a tree of all /out drivers.
paths = [n.path() for n in hou.node('/out').allSubChildren()
         if n.type() == hou.nodeType('Driver/ifd')]
selected = hou.ui.selectFromTree(choices=paths)
mantras = [hou.node(path) for path in selected]

for m in mantras:
    # CHANGE PARAMETER NAME AS REQURIED
    m.parm('matte_objects').set(' '.join(n.name() for n in hou.selectedNodes()))
    print("Selected mantra nodes : " + m.name())

new_name = ' '.join(names)
hou.ui.displayMessage("( " + (new_name) + " ) - added to selected mantra(s) matte objects.", buttons=('*Claps*',))
def convert_to_regular_alembic_nodes(cls, app):
    """Convert Toolkit Alembic nodes to regular Alembic nodes.

    :param app: The calling Toolkit Application
    """
    tk_node_type = TkAlembicNodeHandler.TK_ALEMBIC_NODE_TYPE

    # determine the surface operator type for this class of node
    sop_types = hou.sopNodeTypeCategory().nodeTypes()
    sop_type = sop_types[tk_node_type]

    # determine the render operator type for this class of node
    rop_types = hou.ropNodeTypeCategory().nodeTypes()
    rop_type = rop_types[tk_node_type]

    # get all instances of tk alembic rop/sop nodes
    tk_alembic_nodes = []
    tk_alembic_nodes.extend(
        hou.nodeType(hou.sopNodeTypeCategory(), tk_node_type).instances())
    tk_alembic_nodes.extend(
        hou.nodeType(hou.ropNodeTypeCategory(), tk_node_type).instances())

    if not tk_alembic_nodes:
        app.log_debug("No Toolkit Alembic Nodes found for conversion.")
        return

    # iterate over all the tk alembic nodes and attempt to convert them
    for tk_alembic_node in tk_alembic_nodes:

        # determine the corresponding, built-in operator type
        if tk_alembic_node.type() == sop_type:
            alembic_operator = cls.HOU_SOP_ALEMBIC_TYPE
        elif tk_alembic_node.type() == rop_type:
            alembic_operator = cls.HOU_ROP_ALEMBIC_TYPE
        else:
            app.log_warning(
                "Unknown type for node '%s': %s'" %
                (tk_alembic_node.name(), tk_alembic_node.type()))
            continue

        # create a new, regular Alembic node
        alembic_node = tk_alembic_node.parent().createNode(
            alembic_operator)

        # copy the file parms value to the new node
        filename = _get_output_menu_label(
            tk_alembic_node.parm(cls.NODE_OUTPUT_PATH_PARM))
        alembic_node.parm(cls.NODE_OUTPUT_PATH_PARM).set(filename)

        # copy across knob values
        _copy_parm_values(tk_alembic_node, alembic_node,
            excludes=[cls.NODE_OUTPUT_PATH_PARM])

        # store the alembic output profile name in the user data so that we
        # can retrieve it later.
        output_profile_parm = tk_alembic_node.parm(
            cls.TK_OUTPUT_PROFILE_PARM)
        tk_output_profile_name = \
            output_profile_parm.menuLabels()[output_profile_parm.eval()]
        alembic_node.setUserData(cls.TK_OUTPUT_PROFILE_NAME_KEY,
            tk_output_profile_name)

        # copy the inputs and move the outputs
        _copy_inputs(tk_alembic_node, alembic_node)
        if alembic_operator == cls.HOU_SOP_ALEMBIC_TYPE:
            _save_outputs_to_user_data(tk_alembic_node, alembic_node)
        elif alembic_operator == cls.HOU_ROP_ALEMBIC_TYPE:
            _move_outputs(tk_alembic_node, alembic_node)

        # make the new node the same color
        alembic_node.setColor(tk_alembic_node.color())

        # remember the name and position of the original tk alembic node
        tk_alembic_node_name = tk_alembic_node.name()
        tk_alembic_node_pos = tk_alembic_node.position()

        # destroy the original tk alembic node
        tk_alembic_node.destroy()

        # name and reposition the new, regular alembic node to match the
        # original
        alembic_node.setName(tk_alembic_node_name)
        alembic_node.setPosition(tk_alembic_node_pos)

        app.log_debug("Converted: Tk Alembic node '%s' to Alembic node."
            % (tk_alembic_node_name, ))
def process(self, context):
    """Create a Deadline render instance for every Mantra (ifd) ROP.

    For each mantra node this records the evaluated output path, builds
    the Deadline job/plugin data (padding the trailing frame number in the
    output filename with '#'), sets the frame range, and attaches ftrack
    metadata so downstream processing activates.
    """
    # storing plugin data
    plugin_data = {}

    node_type = hou.nodeType("Driver/ifd")
    render_nodes = node_type.instances()

    # creating instances per mantra node
    for node in list(render_nodes):
        instance = context.create_instance(name=node.name())
        instance.set_data("family", value="deadline.render")
        instance.set_data("outputPathExpanded",
                          value=node.parm("vm_picture").eval())
        # instance.set_data('outputPath', value=node.parm('vm_picture').unexpandedString())
        instance.add(node)
        output = node.parm("vm_picture").eval()
        output_path = os.path.dirname(output)

        # setting job data
        job_data = {}
        if instance.has_data("deadlineJobData"):
            job_data = instance.data("deadlineJobData").copy()

        paddedNumberRegex = re.compile("([0-9]+)", re.IGNORECASE)
        paddedOutputFile = ""

        # Check the output file: replace the last run of digits in the
        # filename with a '#' padding of the same width, as Deadline
        # expects for frame sequences.
        output_file = output
        matches = paddedNumberRegex.findall(os.path.basename(output_file))
        # BUG FIX: findall() never returns None, so the '!= None' guard was
        # dead; plain truthiness covers the empty case.  The manual while
        # loop is replaced by string multiplication.
        if matches:
            paddingString = matches[-1]
            padding = "#" * len(paddingString)
            paddedOutputFile = self.right_replace(
                output_file, paddingString, padding, 1)

        job_data["OutputFilename0"] = paddedOutputFile
        job_data["Plugin"] = "Houdini"

        # frame range
        start_frame = int(node.parm("f1").eval())
        end_frame = int(node.parm("f2").eval())
        frames = "{}-{}\n".format(start_frame, end_frame)
        instance.set_data("deadlineFrames", value=frames)

        # setting plugin data
        plugin_data = plugin_data.copy()
        plugin_data["OutputDriver"] = node.path()
        plugin_data["Version"] = "14"
        plugin_data["IgnoreInputs"] = "0"

        # setting data
        data = {"job": job_data, "plugin": plugin_data}
        instance.set_data("deadlineData", value=data)

        # adding ftrack data to activate processing
        instance.set_data("ftrackComponents", value={})
        instance.set_data("ftrackAssetType", value="img")
        instance.add(node)
def getAllCameraNames():
    '''
    Returns a list of all camera names
    '''
    obj_category = hou.nodeTypeCategories()['Object']
    cam_type = hou.nodeType(obj_category, 'cam')
    return [cam.name() for cam in cam_type.instances()]
def onStateManagerOpen(self, origin):
    """Configure the Prism State Manager window for the Houdini plugin.

    Applies Houdini-flavored styling, sizes the creation buttons, seeds
    the frame range from the playbar, and registers optional output
    formats (.usd / .rs) depending on which ROP types are installed.

    :param origin: the State Manager window/widget being opened
    """
    # Native menu bars on macOS detach from the window; keep it embedded.
    if platform.system() == "Darwin":
        origin.menubar.setNativeMenuBar(False)
    origin.enabledCol = QColor(204, 204, 204)
    # Reuse Houdini's QLabel styling for the scroll area.
    origin.scrollArea.setStyleSheet(hou.qt.styleSheet().replace(
        "QLabel", "QScrollArea"))
    origin.f_import.setStyleSheet("QFrame { border: 0px; }")
    origin.f_export.setStyleSheet("QFrame { border: 0px; }")
    # Custom checkbox indicator icons for the export tree.
    ssheet = ""
    ssheet += "QTreeWidget::indicator::checked\n{\n image: url(%s/Plugins/Apps/Houdini/UserInterfaces/checkbox_on.svg);\n}" % self.core.prismRoot
    ssheet += "QTreeWidget::indicator::unchecked\n{\n image: url(%s/Plugins/Apps/Houdini/UserInterfaces/checkbox_off.svg);\n}" % self.core.prismRoot
    ssheet += "QTreeWidget::indicator { width: 16px; height: 16px;}"
    origin.tw_export.setStyleSheet(ssheet)
    origin.b_stateFromNode.setVisible(True)
    # origin.b_createDependency.setVisible(True)
    origin.layout().setContentsMargins(0, 0, 0, 0)
    origin.b_createExport.setText("Exp")
    origin.b_createRender.setText("Rnd")
    origin.b_createPlayblast.setText("Pb")
    # Fixed button widths, scaled by the user's UI scale factor.
    origin.b_createImport.setMinimumWidth(80 * self.core.uiScaleFactor)
    origin.b_createImport.setMaximumWidth(80 * self.core.uiScaleFactor)
    origin.b_createExport.setMinimumWidth(55 * self.core.uiScaleFactor)
    origin.b_createExport.setMaximumWidth(55 * self.core.uiScaleFactor)
    origin.b_createRender.setMinimumWidth(55 * self.core.uiScaleFactor)
    origin.b_createRender.setMaximumWidth(55 * self.core.uiScaleFactor)
    origin.b_createPlayblast.setMinimumWidth(50 * self.core.uiScaleFactor)
    origin.b_createPlayblast.setMaximumWidth(50 * self.core.uiScaleFactor)
    origin.b_createDependency.setMinimumWidth(50 * self.core.uiScaleFactor)
    origin.b_createDependency.setMaximumWidth(50 * self.core.uiScaleFactor)
    origin.b_stateFromNode.setMinimumWidth(130 * self.core.uiScaleFactor)
    origin.b_stateFromNode.setMaximumWidth(130 * self.core.uiScaleFactor)
    origin.b_getRange.setMaximumWidth(200 * self.core.uiScaleFactor)
    origin.b_setRange.setMaximumWidth(200 * self.core.uiScaleFactor)
    # Seed the range spinboxes from the current playbar range.
    startframe = hou.playbar.playbackRange()[0]
    endframe = hou.playbar.playbackRange()[1]
    origin.sp_rangeStart.setValue(startframe)
    origin.sp_rangeEnd.setValue(endframe)
    # Offer .usd output only when the pixar usdrop SOP is installed.
    usdType = hou.nodeType(hou.sopNodeTypeCategory(), "pixar::usdrop")
    if usdType is not None and ".usd" not in self.plugin.outputFormats:
        self.plugin.outputFormats.insert(-2, ".usd")
    elif usdType is None and ".usd" in self.plugin.outputFormats:
        self.plugin.outputFormats.pop(
            self.plugin.outputFormats.index(".usd"))
    # Offer .rs output only when the Redshift proxy SOP is installed.
    rsType = hou.nodeType(hou.sopNodeTypeCategory(),
                          "Redshift_Proxy_Output")
    if rsType is not None and ".rs" not in self.plugin.outputFormats:
        self.plugin.outputFormats.insert(-2, ".rs")
    elif rsType is None and ".rs" in self.plugin.outputFormats:
        self.plugin.outputFormats.pop(
            self.plugin.outputFormats.index(".rs"))
def getOutputDriverNames():
    '''
    Returns a list of all output driver names
    '''
    driver_category = hou.nodeTypeCategories()['Driver']
    arnold_type = hou.nodeType(driver_category, 'arnold')
    return [rop.name() for rop in arnold_type.instances()]
def stringToNodes(s: str, hou_parent=None, ne=None, ignore_hdas_if_already_defined=None, force_prefer_hdas=None, override_network_position=None, key=None):
    """Decode an hpaste snippet string and paste its nodes into a network.

    The snippet is a base64+bz2-compressed JSON envelope carrying the
    serialized nodes, optional embedded HDAs, version/context metadata and
    a checksum.  After validation the nodes are loaded into ``hou_parent``
    (or the current network editor's network) and optionally repositioned.

    :param s: the encoded snippet string
    :param hou_parent: network to paste into; inferred from ``ne`` or the
        open network editors when None
    :param ne: network editor pane to paste into (used for cursor position)
    :param ignore_hdas_if_already_defined: skip embedded HDAs whose type is
        already installed; defaults to True / the hpaste option
    :param force_prefer_hdas: mark embedded HDA definitions as preferred
    :param override_network_position: hou.Vector2 - position in networkview pane
    :param key: decryption key forwarded to the deserializer
    :raises RuntimeError: on corrupt input, unsupported format/algorithm
        version or checksum mismatch
    :raises InvalidContextError: when the snippet context does not match
        the target network
    :raises RuntimeWarning: when loading succeeded but produced warnings
    """
    # Resolve option defaults from the hpaste options store when present.
    if ignore_hdas_if_already_defined is None:
        ignore_hdas_if_already_defined = True
        if opt is not None:
            ignore_hdas_if_already_defined = opt.getOption(
                'hpaste.ignore_hdas_if_already_defined',
                ignore_hdas_if_already_defined)
    if force_prefer_hdas is None:
        force_prefer_hdas = False
        if opt is not None:
            force_prefer_hdas = opt.getOption('hpaste.force_prefer_hdas',
                                              force_prefer_hdas)

    s = s.encode(
        'UTF-8')  # ununicode. there should not be any unicode in it anyways
    try:
        data = json.loads(
            bz2.decompress(base64.urlsafe_b64decode(s)).decode('UTF-8'))
    except Exception as e:
        raise RuntimeError(
            "input data is either corrupted or just not a nodecode: " +
            repr(e))

    houver1 = hou.applicationVersion()

    paste_to_position = ne is not None or override_network_position is not None
    if hou_parent is None:
        if ne is None:
            # prefer a network editor whose context matches the snippet
            nes = [
                x for x in hou.ui.paneTabs()
                if x.type() == hou.paneTabType.NetworkEditor
                and getChildContext(x.pwd(), houver1) == data['context']
            ]
            if len(nes) == 0:
                # fall back to any open network editor
                nes = [
                    x for x in hou.ui.paneTabs()
                    if x.type() == hou.paneTabType.NetworkEditor
                ]
                if len(nes) == 0:
                    raise RuntimeError(
                        "this snippet has '{0}' context. cannot find opened network editor with context '{0}' to paste in"
                        .format(data['context']))
            ne = nes[0]
        hou_parent = ne.pwd()

    # check version
    formatVersion = data['version']
    if formatVersion > current_format_version[0]:
        raise RuntimeError(
            "unsupported version of data format. Try updating hpaste to the latest version"
        )
    if data.get('version.minor', 0) > current_format_version[1]:
        print(
            'HPaste: Warning!! snippet has later format version than hpaste. Consider updating hpaste to the latest version'
        )

    if data.get('signed', False):
        print(
            'HPaste: Warning!! this snippet seem to be signed, but this version of HPaste has no idea how to check signatures! so signature check will be skipped!'
        )

    # check accepted algtypes
    supportedAlgs = set()
    if houver1[0] == 15:
        supportedAlgs.add(0)
        supportedAlgs.add(1)
        supportedAlgs.add(2)  # WITH BIG WARNING!!!
    if houver1[0] >= 16:
        supportedAlgs.add(0)
        supportedAlgs.add(1)
        supportedAlgs.add(2)
    algtype = data['algtype']
    if algtype not in supportedAlgs:
        raise RuntimeError(
            "algorithm type is not supported by this houdini version, :( ")

    # check hou version
    houver2 = data['houver']
    if not opt.getOption('hpaste.ignore_houversion_warning', False) and (
            houver1[0] != houver2[0] or houver1[1] != houver2[1]):
        print(
            "HPaste: WARNING!! nodes were copied from a different houdini version: "
            + str(houver2))

    # check context
    context = getChildContext(hou_parent, houver1)
    if context != data['context']:
        raise InvalidContextError(
            hou_parent, data['context']
        )  # RuntimeError("this snippet has '%s' context" % data['context'])

    # check sum
    code = data['code'].encode('UTF-8')
    if hashlib.sha1(code).hexdigest() != data['chsum']:
        raise RuntimeError("checksum failed!")

    # remember what already exists so the pasted items can be found later
    if paste_to_position:
        if houver1[0] >= 16:
            olditems = hou_parent.allItems()
        else:
            olditems = hou_parent.children()

    deserialize = getDeserializer(enctype=data.get('encryptionType', None),
                                  key=key,
                                  **(data.get('encryptionData', None) or {}))

    # do the work
    for hdaitem in data.get('hdaList', []):  # added in version 2.1
        hdacode = deserialize(hdaitem['code'])
        ntype = hdaitem['type']
        ncategory = hdaitem['category']
        if ignore_hdas_if_already_defined:
            nodeType = hou.nodeType(hou.nodeTypeCategories()[ncategory],
                                    ntype)
            if nodeType is not None:
                # well, that's already a bad sign, means it is installed
                continue

        # write the HDA payload to a temp file and install it Embedded
        fd, temppath = tempfile.mkstemp()
        try:
            with open(temppath, 'wb') as f:
                f.write(hdacode)
            for hdadef in hou.hda.definitionsInFile(temppath):
                hdadef.copyToHDAFile('Embedded')
                # hdadef.save('Embedded')
        finally:
            os.close(fd)

        if force_prefer_hdas:
            embhdas = [
                x for x in hou.hda.definitionsInFile("Embedded")
                if (x.nodeType().name() == ntype
                    and x.nodeTypeCategory().name() == ncategory)
            ]
            if len(embhdas) == 1:
                embhdas[0].setIsPreferred(True)

    # now nodes themselves
    if formatVersion == 1:
        code = binascii.a2b_qp(code)
    elif formatVersion >= 2:
        code = deserialize(code)
    else:
        raise RuntimeError(
            "Very unexpected format version in a very unexpected place!")

    load_warnings = []
    if algtype == 0:
        # high security risk!!  algtype 0 executes arbitrary python code.
        if hou.isUiAvailable():
            ok = hou.ui.displayMessage(
                "WARNING! The algorithm type used by the pasted snipped is legacy and present HIGH SECURITY RISK!\n be sure you TRUST THE SOURCE of the snippet!",
                ("CANCEL", "ok"),
                severity=hou.severityType.Warning,
                close_choice=0,
                title="SECURITY WARNING")
        else:
            ok = 0
            print(
                "for now u cannot paste SECURITY RISK snippets in non-interactive mode"
            )
        if ok != 1:
            return
        exec(code, {}, {'hou': hou, 'hou_parent': hou_parent})
    elif algtype == 1 or algtype == 2:
        # get temp file
        fd, temppath = tempfile.mkstemp()
        try:
            with open(temppath, "wb") as f:
                f.write(code)
            try:
                if algtype == 1:
                    hou_parent.loadChildrenFromFile(temppath)
                if algtype == 2:
                    try:
                        hou_parent.loadItemsFromFile(temppath)
                    except AttributeError:
                        # loadItemsFromFile only exists in newer houdini
                        print(
                            "WARNING!!! your hou version does not support algorithm used for copying, TRYING possibly partly backward-INCOMPATIBLE method!"
                        )
                        print("CHECK SCENE INTEGRITY")
                        hou_parent.loadChildrenFromFile(temppath)
            except hou.LoadWarning as e:
                msg = e.instanceMessage()
                print(msg)
                # truncate just for display with random number 253
                msgtrunc = False
                if len(msg) > 253:
                    msgtrunc = True
                    msg = msg[:253] + "..."
                load_warnings.append("There were warnings during load" + (
                    "(see console for full message)" if msgtrunc else "") +
                    "\n" + msg)
        finally:
            os.close(fd)
    else:
        raise RuntimeError(
            "algorithm type is not supported. Try updating hpaste to the latest version"
        )

    if paste_to_position:
        # now collect pasted nodes
        if houver1[0] >= 16:
            newitems = [x for x in hou_parent.allItems() if x not in olditems]
        else:
            newitems = [x for x in hou_parent.children() if x not in olditems]

        if len(newitems) == 0:
            return
        # calc center
        cpos = hou.Vector2()
        bbmin = hou.Vector2()
        bbmax = hou.Vector2()
        cnt = 0
        for item in newitems:
            cnt += 1
            pos = item.position()
            cpos += pos
            for i in [0, 1]:
                if pos[i] > bbmax[i] or cnt == 1:
                    bbmax[i] = pos[i]
                if pos[i] < bbmin[i] or cnt == 1:
                    bbmin[i] = pos[i]
        cpos = cpos / cnt
        # anchor the paste offset to the top of the pasted bounding box
        cpos[1] = bbmax[1]
        if override_network_position is None:
            offset = ne.cursorPosition() - cpos
        else:
            offset = override_network_position - cpos
        for item in newitems:
            # items inside a pasted network box move with the box itself
            if houver1[0] >= 16 and item.parentNetworkBox() in newitems:
                continue
            item.move(offset)

    if len(load_warnings) > 0:
        raise RuntimeWarning('snippet loaded with following warnings:\n' +
                             '\n'.join(load_warnings))
def _all_submitter_nodes():
    """Return all instances of the conductor submitter ROP in the scene."""
    submitter_type = hou.nodeType(
        hou.ropNodeTypeCategory(), "conductor::submitter::0.1")
    return submitter_type.instances()
def process(self, context):
    """Create a Deadline render instance for every Mantra (ifd) ROP.

    For each mantra node this records the evaluated output path, builds
    the Deadline job/plugin data (padding the trailing frame number in the
    output filename with '#'), sets the frame range, and attaches ftrack
    metadata so downstream processing activates.
    """
    # storing plugin data
    plugin_data = {}

    node_type = hou.nodeType('Driver/ifd')
    render_nodes = node_type.instances()

    # creating instances per mantra node
    for node in list(render_nodes):
        instance = context.create_instance(name=node.name())
        instance.set_data('family', value='deadline.render')
        instance.set_data('outputPathExpanded',
                          value=node.parm('vm_picture').eval())
        # instance.set_data('outputPath', value=node.parm('vm_picture').unexpandedString())
        instance.add(node)
        output = node.parm('vm_picture').eval()
        output_path = os.path.dirname(output)

        # setting job data
        job_data = {}
        if instance.has_data('deadlineJobData'):
            job_data = instance.data('deadlineJobData').copy()

        paddedNumberRegex = re.compile("([0-9]+)", re.IGNORECASE)
        paddedOutputFile = ""

        # Check the output file: replace the last run of digits in the
        # filename with a '#' padding of the same width, as Deadline
        # expects for frame sequences.
        output_file = output
        matches = paddedNumberRegex.findall(os.path.basename(output_file))
        # BUG FIX: findall() never returns None, so the '!= None' guard was
        # dead; plain truthiness covers the empty case.  The manual while
        # loop is replaced by string multiplication.
        if matches:
            paddingString = matches[-1]
            padding = "#" * len(paddingString)
            paddedOutputFile = self.right_replace(
                output_file, paddingString, padding, 1)

        job_data['OutputFilename0'] = paddedOutputFile
        job_data['Plugin'] = 'Houdini'

        # frame range
        start_frame = int(node.parm('f1').eval())
        end_frame = int(node.parm('f2').eval())
        frames = '{}-{}\n'.format(start_frame, end_frame)
        instance.set_data('deadlineFrames', value=frames)

        # setting plugin data
        plugin_data = plugin_data.copy()
        plugin_data['OutputDriver'] = node.path()
        plugin_data['Version'] = '14'
        plugin_data['IgnoreInputs'] = '0'

        # setting data
        data = {'job': job_data, 'plugin': plugin_data}
        instance.set_data('deadlineData', value=data)

        # adding ftrack data to activate processing
        instance.set_data('ftrackComponents', value={})
        instance.set_data('ftrackAssetType', value='img')
        instance.add(node)
def importObject(self, taskName=None, objMerge=True):
    """Import the file referenced by the e_file widget into /obj.

    Supports .abc, .fbx, .usd, .rs, .hda and generic files (loaded via a
    File SOP). Either builds a fresh import network or, when possible and
    "update only" is checked, just refreshes the file path on the existing
    nodes.

    :param taskName: task name used to name the created nodes; derived
        from the file path when None.
    :param objMerge: when True (and a new OUT null was created), call
        self.objMerge() afterwards.
    :return: True on completion; False when the user cancels on an FPS
        mismatch (.rs without Redshift returns None).
    """
    fileName = self.core.getCurrentFileName()
    impFileName = self.e_file.text().replace("\\", "/")

    if self.e_file.text() != "":
        # warn when the exported scene FPS differs from the current scene
        versionInfoPath = os.path.join(
            os.path.dirname(os.path.dirname(self.e_file.text())),
            "versioninfo.ini")
        if os.path.exists(versionInfoPath):
            vConfig = ConfigParser()
            vConfig.read(versionInfoPath)
            if vConfig.has_option("information", "fps"):
                impFPS = float(vConfig.get("information", "fps"))
                curFPS = self.core.getFPS()
                if impFPS != curFPS:
                    fString = "The FPS of the import doesn't match the FPS of the current scene:\n\nCurrent scene FPS:\t%s\nImport FPS:\t\t%s" % (
                        curFPS, impFPS)
                    msg = QMessageBox(QMessageBox.Warning, "FPS mismatch",
                                      fString, QMessageBox.Cancel)
                    msg.addButton("Continue", QMessageBox.YesRole)
                    self.core.parentWindow(msg)
                    action = msg.exec_()
                    # button index 0 is "Continue"; anything else aborts
                    if action != 0:
                        return False

        if taskName is None:
            # derive the task name from the directory structure, skipping
            # the unit folder ("centimeter"/"meter") when present
            vPath = os.path.dirname(self.e_file.text())
            if os.path.basename(vPath) in ["centimeter", "meter"]:
                vName = os.path.basename(os.path.dirname(vPath))
                vPath = os.path.dirname(vPath)
            else:
                vName = os.path.basename(vPath)

            if len(vName.split(self.core.filenameSeperator)) == 3 and (
                    os.path.join(
                        self.core.projectPath,
                        self.core.getConfig(
                            'paths', "scenes",
                            configPath=self.core.prismIni)).replace(
                        "\\", "/") in self.e_file.text().replace("\\", "/")
                    or (self.core.useLocalFiles and os.path.join(
                        self.core.localProjectPath,
                        self.core.getConfig(
                            'paths', "scenes",
                            configPath=self.core.prismIni)).replace(
                        "\\", "/") in self.e_file.text().replace("\\", "/"))):
                taskName = os.path.basename(os.path.dirname(vPath))
                if taskName == "_ShotCam":
                    taskName = "ShotCam"
            else:
                taskName = vName

        # "$" would be interpreted by Houdini as a variable reference
        taskName = taskName.replace("$", "_")
        self.taskName = taskName

        # prefer the file in the preferred unit folder when it exists
        parDirName = os.path.basename(os.path.dirname(impFileName))
        if parDirName in ["centimeter", "meter"]:
            prefFile = os.path.join(
                os.path.dirname(os.path.dirname(impFileName)),
                self.preferredUnit, os.path.basename(impFileName))
            if parDirName == self.unpreferredUnit and os.path.exists(
                    prefFile):
                impFileName = prefFile
                self.e_file.setText(impFileName)

    self.core.callHook("preImport", args={
        "prismCore": self.core,
        "scenefile": fileName,
        "importfile": impFileName
    })

    # probe whether the previously stored node still exists in the scene
    try:
        self.node.path()
    except:
        self.node = None
        self.fileNode = None

    if os.path.splitext(impFileName)[1] == ".hda":
        try:
            self.node.destroy()
        except:
            pass
        if os.path.exists(impFileName):
            hou.hda.installFile(impFileName)
    # rebuild from scratch when there is no usable existing network, when
    # "update only" is off, or when the file format changed (abc <-> other)
    elif self.node is None or self.fileNode is None or not self.chb_updateOnly.isChecked(
    ) or (self.fileNode is not None and
          (self.fileNode.type().name() == "alembic") ==
          (os.path.splitext(impFileName)[1] != ".abc")
          ) or self.node.type().name() == "subnet":
        if self.node is not None:
            try:
                self.node.destroy()
            except:
                pass

        nwBox = hou.node("/obj").findNetworkBox("Import")
        if nwBox is None:
            nwBox = hou.node("/obj").createNetworkBox("Import")
            nwBox.setComment("Imports")
            #nwBox.setMinimized(True)

        if os.path.splitext(impFileName)[
                1] == ".abc" and "_ShotCam_" in impFileName:
            # shot cameras load as a top-level alembicarchive OBJ
            self.fileNode = None
            self.node = hou.node("/obj").createNode(
                "alembicarchive", "IMPORT_ShotCam")
            self.node.parm("fileName").set(impFileName)
            self.node.parm("buildHierarchy").pressButton()
            self.node.moveToGoodPosition()
        else:
            self.node = hou.node("/obj").createNode(
                "geo", "IMPORT_" + taskName)
            self.node.moveToGoodPosition()

            # drop the default file SOP Houdini creates inside a new geo
            if len(self.node.children()) > 0:
                self.node.children()[0].destroy()

            if os.path.splitext(impFileName)[1] == ".abc":
                self.fileNode = self.node.createNode("alembic")
                self.fileNode.moveToGoodPosition()
                self.fileNode.parm("fileName").set(impFileName)
                self.fileNode.parm("loadmode").set(1)
                self.fileNode.parm("groupnames").set(3)
            elif os.path.splitext(impFileName)[1] == ".fbx":
                # FBX import creates its own OBJ network; remember and
                # restore the playbar since importFBX changes it
                self.node.destroy()
                tlSettings = [hou.frame()]
                tlSettings += hou.playbar.playbackRange()
                self.node = hou.hipFile.importFBX(impFileName)[0]
                setGobalFrangeExpr = "tset `(%d-1)/$FPS` `%d/$FPS`" % (
                    tlSettings[1], tlSettings[2])
                hou.hscript(setGobalFrangeExpr)
                hou.playbar.setPlaybackRange(tlSettings[1], tlSettings[2])
                hou.setFrame(tlSettings[0])
                self.node.setName("IMPORT_" + taskName, unique_name=True)
                fbxObjs = [
                    x for x in self.node.children()
                    if x.type().name() == "geo"
                ]
                # merge all imported geo OBJs into one SOP network via
                # object_merge nodes
                mergeGeo = self.node.createNode("geo", "FBX_Objects")
                mergeGeo.moveToGoodPosition()
                if len(mergeGeo.children()) > 0:
                    mergeGeo.children()[0].destroy()
                self.fileNode = mergeGeo.createNode(
                    "merge", "Merged_Objects")
                self.fileNode.moveToGoodPosition()
                for i in fbxObjs:
                    i.setDisplayFlag(False)
                    objmerge = mergeGeo.createNode(
                        "object_merge", i.name())
                    objmerge.moveToGoodPosition()
                    objmerge.parm("objpath1").set(i.path())
                    objmerge.parm("xformtype").set(1)
                    self.fileNode.setNextInput(objmerge)
                mergeGeo.layoutChildren()
                self.node.layoutChildren()
            elif os.path.splitext(impFileName)[1] == ".usd":
                self.fileNode = self.node.createNode(
                    "pixar::usdimport")
                self.fileNode.moveToGoodPosition()
                self.fileNode.parm("import_file").set(impFileName)
                self.fileNode.parm("import_primpath").set("/")
                self.fileNode.parm("import_time").setExpression("$F")
            elif os.path.splitext(impFileName)[1] == ".rs":
                # Redshift proxy; only works when the RS plugin is loaded
                if hou.nodeType(hou.sopNodeTypeCategory(),
                                "Redshift_Proxy_Output") is None:
                    QMessageBox.warning(
                        self.core.messageParent, "ImportFile",
                        "Format is not supported, because Redshift is not available in Houdini."
                    )
                    if nwBox is not None:
                        if len(nwBox.nodes()) == 0:
                            nwBox.destroy()
                    try:
                        self.node.destroy()
                    except:
                        pass
                    self.fileNode = None
                    return
                self.fileNode = self.node.createNode(
                    "redshift_proxySOP")
                self.fileNode.moveToGoodPosition()
                self.node.setCurrent(True, clear_all_selected=True)
                hou.hscript("Redshift_objectSpareParameters")
                self.node.parm("RS_objprop_proxy_enable").set(True)
                self.node.parm("RS_objprop_proxy_file").set(impFileName)
            else:
                # generic geometry formats load through a File SOP
                self.fileNode = self.node.createNode("file")
                self.fileNode.moveToGoodPosition()
                self.fileNode.parm("file").set(impFileName)

            outNode = self.fileNode.createOutputNode(
                "null", "OUT_" + taskName)
            outNode.setDisplayFlag(True)
            outNode.setRenderFlag(True)

        nwBox.addNode(self.node)
        self.node.moveToGoodPosition()
        nwBox.fitAroundContents()

        self.node.setDisplayFlag(False)
        self.node.setColor(hou.Color(0.451, 0.369, 0.796))

        if self.chb_autoNameSpaces.isChecked():
            self.removeNameSpaces()

        # only run objMerge when an OUT null was actually created above
        if objMerge and "outNode" in locals():
            self.objMerge()
    else:
        # update-only path: rename existing nodes and refresh file parms
        prevData = self.node.name().split("IMPORT_")
        if len(prevData) > 1:
            prevTaskName = prevData[1]
        else:
            prevTaskName = self.node.name()

        self.node.setName("IMPORT_" + taskName, unique_name=True)

        for i in self.node.children():
            if prevTaskName in i.name():
                i.setName(i.name().replace(prevTaskName, taskName),
                          unique_name=True)

        if os.path.splitext(impFileName)[
                1] == ".abc" and "_ShotCam_" in impFileName:
            self.node.parm("fileName").set(impFileName)
            self.node.parm("buildHierarchy").pressButton()
        else:
            if os.path.splitext(impFileName)[1] == ".abc":
                self.fileNode.parm("fileName").set(impFileName)
            elif os.path.splitext(impFileName)[1] == ".usd":
                self.fileNode.parm("import_file").set(impFileName)
            else:
                self.fileNode.parm("file").set(impFileName)

    # collect the paths of whatever nodes survived, for the post hook
    impNodes = []
    try:
        curNode = self.node.path()
        impNodes.append(curNode)
    except:
        pass
    try:
        fNode = self.fileNode.path()
        impNodes.append(fNode)
    except:
        pass

    self.core.callHook("postImport", args={
        "prismCore": self.core,
        "scenefile": fileName,
        "importfile": impFileName,
        "importedObjects": impNodes
    })

    self.stateManager.saveImports()
    self.updateUi()
    self.stateManager.saveStatesToScene()

    return True
def importDataFromXmlToAbc(): # select Alembic archive # abc = lastSelectedNode() if abc is None: hou.ui.setStatusMessage("Select one root Alembic Archive node.") hou.ui.displayMessage("No root Alembic Archive node has been selected. Select one.", title="Attention, monsieur!") return None if abc.type() == hou.nodeType(hou.objNodeTypeCategory(), 'alembicarchive'): print "Alembic Archive found" else: print "No Alembic Archive selected" # XML stuff # #file_name = "/home/max/exported_data.xml" hou.ui.setStatusMessage("Choose XML file to import textures from.") file_name = hou.ui.selectFile(start_directory="/mnt/karramba/", title="Choose XML", pattern="*.xml") if file_name == "": return None file_name = os.path.expandvars(file_name) xml = parse(file_name) xmlobjects = xml.getElementsByTagName("object") # parse and assign textures # stat_assigned = 0 stat_noshader = 0 stat_newshader = 0 i = 0 for obj in xmlobjects: object_name = obj.getElementsByTagName("object_name")[0] object_path = object_name.childNodes[0].data #print object_path object_path = object_path.replace('|', '/') print object_path object_fullpath = abc.path() + object_path print object_fullpath #get texture data from XML object_texture_color = obj.getElementsByTagName("texture_color")[0] texture_path_color = object_texture_color.childNodes[0].data object_texture_spec = obj.getElementsByTagName("texture_spec")[0] texture_path_spec = object_texture_spec.childNodes[0].data object_texture_bump = obj.getElementsByTagName("texture_bump")[0] texture_path_bump = object_texture_bump.childNodes[0].data #create shader if needed if hou.parm(str(object_fullpath) + "/shop_materialpath") is None: shader_template = hou.galleries.galleryEntries("mantrasurface")[0] shader[i] = shader_template.createChildNode(hou.node("shop")) shader[i].setName(object_name) stat_newshader = stat_newshader + 1 if hou.parm(str(object_fullpath) + "/shop_materialpath") is not None: object_shader = hou.parm(str(object_fullpath) + 
"/shop_materialpath").evalAsString() #object_shader = hou.node(str(object_fullpath)).evalParm("shop_materialpath") if hou.node(object_shader) is not None: if hou.node(object_shader).parm("baseColorMap") is not None: hou.node(object_shader).parm("baseColorMap").set(str(texture_path_color)) if hou.node(object_shader).parm("baseSpecMap") is not None: hou.node(object_shader).parm("baseSpecMap").set(str(texture_path_spec)) if hou.node(object_shader).parm("baseDispMap") is not None: hou.node(object_shader).parm("baseDispMap").set(str(texture_path_bump)) stat_assigned = stat_assigned + 1 else: print object_fullpath + ": Shader assigned, but doesn't exist." stat_noshader = stat_noshader + 1 i+=1 print str(stat_assigned) + " textures assigned successfully." print str(stat_newshader) + " shaders created." print str(stat_noshader) + " shaders assigned, but not found."
def convert_back_to_tk_alembic_nodes(cls, app):
    """Convert Alembic nodes back to Toolkit Alembic nodes.

    :param app: The calling Toolkit Application

    Note: only converts nodes that had previously been Toolkit Alembic
        nodes, identified by the output profile name stored in their
        user data.
    """
    # get all rop/sop alembic nodes in the session
    alembic_nodes = []
    alembic_nodes.extend(
        hou.nodeType(hou.sopNodeTypeCategory(),
                     cls.HOU_SOP_ALEMBIC_TYPE).instances())
    alembic_nodes.extend(
        hou.nodeType(hou.ropNodeTypeCategory(),
                     cls.HOU_ROP_ALEMBIC_TYPE).instances())

    if not alembic_nodes:
        app.log_debug("No Alembic Nodes found for conversion.")
        return

    # the tk node type we'll be converting to
    tk_node_type = TkAlembicNodeHandler.TK_ALEMBIC_NODE_TYPE

    # iterate over all the alembic nodes and attempt to convert them
    for alembic_node in alembic_nodes:

        # get the user data dictionary stored on the node
        user_dict = alembic_node.userDataDict()

        # get the output_profile from the dictionary
        tk_output_profile_name = user_dict.get(
            cls.TK_OUTPUT_PROFILE_NAME_KEY)

        if not tk_output_profile_name:
            # NOTE(review): "Almbic" typo is in the runtime log string;
            # left untouched here since this edit only adds comments.
            app.log_warning(
                "Almbic node '%s' does not have an output profile name. "
                "Can't convert to Tk Alembic node. Continuing."
                % (alembic_node.name(), ))
            continue

        # create a new, Toolkit Alembic node:
        tk_alembic_node = alembic_node.parent().createNode(tk_node_type)

        # find the index of the stored name on the new tk alembic node
        # and set that item in the menu.
        try:
            output_profile_parm = tk_alembic_node.parm(
                TkAlembicNodeHandler.TK_OUTPUT_PROFILE_PARM)
            output_profile_index = output_profile_parm.menuLabels().index(
                tk_output_profile_name)
            output_profile_parm.set(output_profile_index)
        except ValueError:
            app.log_warning("No output profile found named: %s" %
                            (tk_output_profile_name, ))

        # copy over all parameter values except the output path
        _copy_parm_values(alembic_node, tk_alembic_node,
                          excludes=[cls.NODE_OUTPUT_PATH_PARM])

        # copy the inputs and move the outputs
        _copy_inputs(alembic_node, tk_alembic_node)

        # determine the built-in operator type and restore/move outputs
        # accordingly (sop outputs were stashed in user data on convert)
        if alembic_node.type().name() == cls.HOU_SOP_ALEMBIC_TYPE:
            _restore_outputs_from_user_data(alembic_node, tk_alembic_node)
        elif alembic_node.type().name() == cls.HOU_ROP_ALEMBIC_TYPE:
            _move_outputs(alembic_node, tk_alembic_node)

        # make the new node the same color. the profile will set a color,
        # but do this just in case the user changed the color manually
        # prior to the conversion.
        tk_alembic_node.setColor(alembic_node.color())

        # remember the name and position of the original alembic node
        alembic_node_name = alembic_node.name()
        alembic_node_pos = alembic_node.position()

        # destroy the original alembic node
        alembic_node.destroy()

        # name and reposition the new, regular alembic node to match the
        # original
        tk_alembic_node.setName(alembic_node_name)
        tk_alembic_node.setPosition(alembic_node_pos)

        app.log_debug("Converted: Alembic node '%s' to TK Alembic node."
                      % (alembic_node_name, ))
def importDataFromXmlToAbc():
    """Assign color texture paths from an exported XML file to the shaders
    already assigned on a selected Alembic Archive hierarchy.

    Unlike the other variant of this function, this one only sets
    baseColorMap (spec/bump handling is commented out) and never creates
    new shaders.
    """
    # select Alembic archive #
    abc = lastSelectedNode()
    if abc is None:
        hou.ui.setStatusMessage("Select one root Alembic Archive node.")
        hou.ui.displayMessage("No root Alembic Archive node has been selected. Select one.", title="Attention, monsieur!")
        return None
    # NOTE(review): only reports the type; execution continues even when
    # the selection is not an Alembic Archive.
    if abc.type() == hou.nodeType(hou.objNodeTypeCategory(), 'alembicarchive'):
        print "Alembic Archive found"
    else:
        print "No Alembic Archive selected"

    # XML stuff #
    #file_name = "/home/max/exported_data.xml"
    hou.ui.setStatusMessage("Choose XML file to import textures from.")
    file_name = hou.ui.selectFile(start_directory="/mnt/karramba/", title="Choose XML", pattern="*.xml")
    if file_name == "":
        return None
    file_name = os.path.expandvars(file_name)
    xml = parse(file_name)
    xmlobjects = xml.getElementsByTagName("object")

    # parse and assign textures #
    stat_assigned = 0
    stat_noshader = 0
    for obj in xmlobjects:
        object_name = obj.getElementsByTagName("object_name")[0]
        object_path = object_name.childNodes[0].data
        print object_path
        # '|' separators (Maya-style DAG paths) become '/' node paths
        object_path = object_path.replace('|', '/')
        print object_path
        object_fullpath = abc.path() + object_path
        print object_fullpath

        # NOTE(review): spec/bump paths are read but currently unused --
        # only the commented-out code below would apply them.
        object_texture_color = obj.getElementsByTagName("texture_color")[0]
        texture_path_color = object_texture_color.childNodes[0].data
        object_texture_spec = obj.getElementsByTagName("texture_spec")[0]
        texture_path_spec = object_texture_spec.childNodes[0].data
        object_texture_bump = obj.getElementsByTagName("texture_bump")[0]
        texture_path_bump = object_texture_bump.childNodes[0].data

        if hou.parm(str(object_fullpath) + "/shop_materialpath") is not None:
            object_shader = hou.parm(str(object_fullpath) + "/shop_materialpath").evalAsString()
            #object_shader = hou.node(str(object_fullpath)).evalParm("shop_materialpath")
            if hou.node(object_shader) is not None:
                hou.node(object_shader).parm("baseColorMap").set(str(texture_path_color))
                # if hou.node(object_shader).parm("baseSpecMap") is not None:
                #     hou.node(object_shader).parm("baseSpecMap").set(str(texture_path_color))
                # else:
                #     continue
                # if hou.node(object_shader).parm("baseDispMap") is not None:
                #     hou.node(object_shader).parm("baseDispMap").set(str(texture_path_bump))
                # else:
                #     continue
                stat_assigned = stat_assigned + 1
            else:
                print object_fullpath + ": No shader found. Could not assign the texture."
                stat_noshader = stat_noshader + 1
        else:
            #print "Could not find assigned shader on " + object_fullpath
            continue

    print str(stat_assigned) + " textures assigned successfully."
    print str(stat_noshader) + " destination shaders not found."
def convert_geometry_to_sg_nodes(self): """ Utility function to convert all Geometry nodes to Shotgun Geometry nodes (only converts Geometry nodes that were previously Shotgun Geometry nodes) # Example use: import sgtk eng = sgtk.platform.current_engine() app = eng.apps["tk-houdini-geometrynode"] # Convert previously converted Geometry nodes back to # Shotgun Geometry nodes: app.convert_from_geometry_nodes() """ # get geometry nodes: sop_nodes = hou.nodeType(hou.sopNodeTypeCategory(), 'rop_geometry').instances() rop_nodes = hou.nodeType(hou.ropNodeTypeCategory(), 'geometry').instances() nodes = sop_nodes + rop_nodes for n in nodes: try: user_dict = n.userDataDict() profile = user_dict.get('tk_profile_name') if not profile: # can't convert to a Shotgun Geometry Node # as we have missing parameters! continue # set as selected: # wn.setSelected(True) node_name = n.name() node_pos = n.position() self._app.log_debug('Converting node: {0}'.format(n.name())) self._app.log_debug('path: {0}'.format(n.path())) # create new Shotgun Geometry node: node_class = ToolkitGeometryNodeHandler.SG_NODE_CLASS new_sg_n = n.parent().createNode(node_class) # set the profile try: parm = new_sg_n.parm(ToolkitGeometryNodeHandler.PARM_CONFIG) index = parm.menuLabels().index(profile) parm.set(index) except ValueError: pass # copy across and knob values from the internal geometry node. exclude = ['sopoutput'] self.__copy_parm_values(n, new_sg_n, exclude) # Copy inputs and move outputs self.__copy_inputs_to_node(n, new_sg_n) self.__move_outputs_to_node(n, new_sg_n) self.__move_outputs_from_user_data_to_node(n, new_sg_n) self.__copy_color(n, new_sg_n) # delete original node: n.destroy() # rename new node: new_sg_n.setName(node_name) new_sg_n.setPosition(node_pos) except Exception as err: self._app.log_warning(err) msg = 'Problems converting node: {0}'.format(n.path()) self._app.log_warning(msg)
def convert_to_regular_alembic_nodes(cls, app):
    """Convert Toolkit Alembic nodes to regular Alembic nodes.

    :param app: The calling Toolkit Application

    The selected output profile name is stashed in the regular node's
    user data so convert_back_to_tk_alembic_nodes can restore it later.
    """
    tk_node_type = TkAlembicNodeHandler.TK_ALEMBIC_NODE_TYPE

    # determine the surface operator type for this class of node
    sop_types = hou.sopNodeTypeCategory().nodeTypes()
    sop_type = sop_types[tk_node_type]

    # determine the render operator type for this class of node
    rop_types = hou.ropNodeTypeCategory().nodeTypes()
    rop_type = rop_types[tk_node_type]

    # get all instances of tk alembic rop/sop nodes
    tk_alembic_nodes = []
    tk_alembic_nodes.extend(
        hou.nodeType(hou.sopNodeTypeCategory(), tk_node_type).instances())
    tk_alembic_nodes.extend(
        hou.nodeType(hou.ropNodeTypeCategory(), tk_node_type).instances())

    if not tk_alembic_nodes:
        app.log_debug("No Toolkit Alembic Nodes found for conversion.")
        return

    # iterate over all the tk alembic nodes and attempt to convert them
    for tk_alembic_node in tk_alembic_nodes:

        # determine the corresponding, built-in operator type
        if tk_alembic_node.type() == sop_type:
            alembic_operator = cls.HOU_SOP_ALEMBIC_TYPE
        elif tk_alembic_node.type() == rop_type:
            alembic_operator = cls.HOU_ROP_ALEMBIC_TYPE
        else:
            # NOTE(review): stray trailing quote in this runtime string;
            # left untouched since this edit only adds comments.
            app.log_warning("Unknown type for node '%s': %s'" %
                            (tk_alembic_node.name(),
                             tk_alembic_node.type()))
            continue

        # create a new, regular Alembic node
        alembic_node = tk_alembic_node.parent().createNode(alembic_operator)

        # copy the file parms value to the new node
        filename = _get_output_menu_label(
            tk_alembic_node.parm(cls.NODE_OUTPUT_PATH_PARM))
        alembic_node.parm(cls.NODE_OUTPUT_PATH_PARM).set(filename)

        # copy across knob values
        _copy_parm_values(tk_alembic_node, alembic_node,
                          excludes=[cls.NODE_OUTPUT_PATH_PARM])

        # store the alembic output profile name in the user data so that we
        # can retrieve it later.
        output_profile_parm = tk_alembic_node.parm(
            cls.TK_OUTPUT_PROFILE_PARM)
        tk_output_profile_name = \
            output_profile_parm.menuLabels()[output_profile_parm.eval()]
        alembic_node.setUserData(cls.TK_OUTPUT_PROFILE_NAME_KEY,
                                 tk_output_profile_name)

        # copy the inputs and move the outputs; sop outputs are saved to
        # user data (sop nodes can't simply be rewired like rops)
        _copy_inputs(tk_alembic_node, alembic_node)
        if alembic_operator == cls.HOU_SOP_ALEMBIC_TYPE:
            _save_outputs_to_user_data(tk_alembic_node, alembic_node)
        elif alembic_operator == cls.HOU_ROP_ALEMBIC_TYPE:
            _move_outputs(tk_alembic_node, alembic_node)

        # make the new node the same color
        alembic_node.setColor(tk_alembic_node.color())

        # remember the name and position of the original tk alembic node
        tk_alembic_node_name = tk_alembic_node.name()
        tk_alembic_node_pos = tk_alembic_node.position()

        # destroy the original tk alembic node
        tk_alembic_node.destroy()

        # name and reposition the new, regular alembic node to match the
        # original
        alembic_node.setName(tk_alembic_node_name)
        alembic_node.setPosition(tk_alembic_node_pos)

        app.log_debug("Converted: Tk Alembic node '%s' to Alembic node."
                      % (tk_alembic_node_name,))
def process(self, context):
    """Collect render/cache ROP nodes (mantra, alembic, dop, geometry)
    into pyblish instances with expected output file collections and
    local/remote family tags.

    :param context: pyblish context the instances are added to.
    """
    import os
    import clique
    import hou

    # Find nodes by class.
    nodes = []
    node_type = hou.nodeType("Driver/ifd")
    nodes.extend(node_type.instances())
    node_type = hou.nodeType(hou.ropNodeTypeCategory(), "alembic")
    nodes.extend(node_type.instances())
    node_type = hou.nodeType(hou.ropNodeTypeCategory(), "dop")
    nodes.extend(node_type.instances())
    node_type = hou.nodeType(hou.ropNodeTypeCategory(), 'geometry')
    nodes.extend(node_type.instances())

    # Categorize nodes based on whether they are in a network box starting
    # with "remote".
    # NOTE(review): "out" is a relative node path (other snippets use
    # "/out") -- presumably resolves to /out here; verify against caller.
    nodes_local = list(nodes)
    for box in hou.node("out").networkBoxes():
        if box.name().lower().startswith("remote"):
            for node in box.nodes():
                if node in nodes_local:
                    nodes_local.remove(node)

    # Creating instances per node.
    for node in nodes:
        # Haven't figured out distributed simulation yet, so ignoring it as
        # a special case.
        if node.type().name() == "dop" and node not in nodes_local:
            continue

        instance = context.create_instance(name=node.name())
        # bypassed nodes are collected but marked as not-to-publish
        instance.data["publish"] = not node.isBypassed()
        instance.add(node)

        # Determine node type specifics.
        node_type = ""
        category = ""
        output_parm = ""

        if node.type().name() == "ifd":
            node_type = "mantra"
            category = "img"
            output_parm = "vm_picture"

            # Rendering *.ifd files.
            if node.parm("soho_outputmode").eval():
                category = "render"
                output_parm = "soho_diskfile"

        if node.type().name() == "alembic":
            node_type = "alembic"
            category = "cache"
            output_parm = "filename"

        if node.type().name() == "dop":
            node_type = "dynamics"
            category = "cache"
            output_parm = "dopoutput"

        if node.type().name() == "geometry":
            node_type = "geometry"
            category = "cache"
            output_parm = "sopoutput"

        # Get expected output files: single current frame when the ROP
        # renders the current frame (trange == 0), otherwise one file per
        # frame of the f1..f2 range with step f3.
        files = []
        if node.parm("trange").eval() == 0:
            frame = int(hou.frame())
            files.append(node.parm(output_parm).evalAtFrame(frame))
        else:
            start = node.parm("f1").eval()
            end = node.parm("f2").eval()
            step = node.parm("f3").eval()
            for frame in range(int(start), int(end) + 1, int(step)):
                files.append(node.parm(output_parm).evalAtFrame(frame))

        # Except for alembic output that only ever outputs to a single file
        if node_type == "alembic":
            files = [files[0]]

        # Get extension
        ext = os.path.splitext(files[0])[1]
        # Special case for *.bgeo.sc files since it was two "extensions".
        if files[0].endswith(".bgeo.sc"):
            ext = ".bgeo.sc"

        # Create output collection; keep the collection whose tail matches
        # the extension (clique may split files into several collections).
        collections = clique.assemble(files, minimum_items=1)[0]
        collection = None
        for col in collections:
            if col.format("{tail}") == ext:
                collection = col

        instance.data["collection"] = collection

        # Assigning families.
        families = [node_type, category, ext[1:]]
        label = node.name() + " - " + category

        if node in nodes_local:
            families += ["local"]
            instance.data["label"] = label + " - local"
        else:
            families += ["remote"]
            instance.data["label"] = label + " - remote"

        instance.data["families"] = families
        instance.data["family"] = category
def convert_back_to_tk_alembic_nodes(cls, app):
    """Turn regular Alembic nodes back into Toolkit Alembic nodes.

    :param app: The calling Toolkit Application

    Only nodes that were previously Toolkit Alembic nodes are converted,
    identified by the output profile name stored in their user data.
    """
    # collect every sop/rop alembic instance currently in the session
    candidates = []
    for category, hou_type in (
            (hou.sopNodeTypeCategory(), cls.HOU_SOP_ALEMBIC_TYPE),
            (hou.ropNodeTypeCategory(), cls.HOU_ROP_ALEMBIC_TYPE)):
        candidates.extend(hou.nodeType(category, hou_type).instances())

    if not candidates:
        app.log_debug("No Alembic Nodes found for conversion.")
        return

    # the tk node type being converted to
    target_type = TkAlembicNodeHandler.TK_ALEMBIC_NODE_TYPE

    for source in candidates:

        # only nodes carrying a stored output profile were tk nodes before
        profile_name = source.userDataDict().get(
            cls.TK_OUTPUT_PROFILE_NAME_KEY)
        if not profile_name:
            app.log_warning(
                "Almbic node '%s' does not have an output profile name. "
                "Can't convert to Tk Alembic node. Continuing." %
                (source.name(),)
            )
            continue

        # build the replacement tk node next to the original
        converted = source.parent().createNode(target_type)

        # re-select the remembered output profile on the new node
        try:
            profile_parm = converted.parm(
                TkAlembicNodeHandler.TK_OUTPUT_PROFILE_PARM)
            profile_parm.set(profile_parm.menuLabels().index(profile_name))
        except ValueError:
            app.log_warning("No output profile found named: %s" %
                            (profile_name,))

        # carry over every parameter except the output path
        _copy_parm_values(source, converted,
                          excludes=[cls.NODE_OUTPUT_PATH_PARM])

        # rewire inputs
        _copy_inputs(source, converted)

        # restore/move outputs depending on the built-in operator flavor
        source_type_name = source.type().name()
        if source_type_name == cls.HOU_SOP_ALEMBIC_TYPE:
            _restore_outputs_from_user_data(source, converted)
        elif source_type_name == cls.HOU_ROP_ALEMBIC_TYPE:
            _move_outputs(source, converted)

        # keep any manual color override the user applied
        converted.setColor(source.color())

        # swap the new node into the original's name and position
        original_name = source.name()
        original_pos = source.position()
        source.destroy()
        converted.setName(original_name)
        converted.setPosition(original_pos)

        app.log_debug("Converted: Alembic node '%s' to TK Alembic node." %
                      (original_name,))
def test_typeIsPython(self):
    # The table import SOP is implemented in Python, so its node type
    # must report isPython() == True.
    table_import_type = hou.nodeType(
        hou.sopNodeTypeCategory(), "tableimport")
    self.assertTrue(table_import_type.isPython())
def camImport():
    """Rebuild camera animation inside the current node from a JSON file.

    Reads resolution, frame list, transforms, lens and stereo settings
    from the file referenced by this node's "json" parm, then keyframes
    the child cameras ('cam', 'persp', stereocamrig), the origin/lookat
    helper nulls and an optional miDOF lens shader.
    """
    node = hou.pwd()
    clearKeys(node)
    # NOTE: 'file' shadows the Python builtin of the same name
    file = node.parm("json").eval()
    if os.path.isfile(file):
        # NOTE(review): file handle is never closed; consider a 'with'
        jsonData = open(file, "r")
        camData = json.load(jsonData)
        # all settings live in the second element of the JSON array
        res = camData[1]["resolution"]
        fr = camData[1]["frames"]
        pos = camData[1]["camPos"]
        look = camData[1]["camCentInt"]
        up = camData[1]["camUpVector"]
        focal = camData[1]["camFocal"]
        aperture = camData[1]["camAperture"]
        clip = camData[1]["camClip"]
        fdist = camData[1]["camFocusDistance"]
        fstop = camData[1]["camFStop"]
        mblur = camData[1]["motionBlurBy"]
        p_dist = camData[1]["p_distance"]
        p_rad = camData[1]["p_radius"]
        camType = "stereocamrig"
        paralax = camData[1]["camStereoParallax"]
        ditance = camData[1]["camStereoDistance"]
        # an empty parallax list means this is a mono camera export
        if not paralax:
            camType = "cam"
        origin = node.node("origin")
        lookat = node.node("lookat")
        # cams[0] = 'cam', cams[1] = 'persp', cams[2] = stereocamrig
        # NOTE(review): entries stay None when a child is missing, which
        # would make cam.parm(...) below raise -- verify scene structure.
        cams = [None, None, None]
        for child in node.children():
            if child.type() == hou.nodeType(hou.objNodeTypeCategory(), "cam") and child.name() == 'cam':
                cams[0] = child
            if child.type() == hou.nodeType(hou.objNodeTypeCategory(), "cam") and child.name() == 'persp':
                cams[1] = child
            if child.type() == hou.nodeType(hou.objNodeTypeCategory(), "stereocamrig"):
                cams[2] = child
        for cam in cams:
            cam.parm("resx").set(res[0])
            cam.parm("resy").set(res[1])
            # shutter derived from the export's motion blur setting
            cam.parm("shutter").set(mblur[0] * 0.4)
            cam.parm("lookatpath").set("../lookat")
            lens = cam.node('./shopnet1/miDOF_lens')
            if lens:
                # switch to lens shader projection when a DOF lens exists
                cam.parm('projection').set(4)
                cam.parm('vm_lensshader').set('./shopnet1/miDOF_lens')
            # NOTE: 'iter' shadows the Python builtin; acts as frame index
            iter = 0
            for frame in fr:
                objSetKey(cam.parm("upx"), frame, up[iter][0])
                objSetKey(cam.parm("upy"), frame, up[iter][1])
                objSetKey(cam.parm("upz"), frame, up[iter][2])
                objSetKey(cam.parm("focal"), frame, focal[iter])
                # aperture is exported in inches; 25.4 converts to mm
                objSetKey(cam.parm("aperture"), frame, aperture[iter][0] * 25.4)
                objSetKey(cam.parm("near"), frame, clip[iter][0])
                objSetKey(cam.parm("far"), frame, clip[iter][1])
                objSetKey(cam.parm("focus"), frame, fdist[iter])
                objSetKey(cam.parm("fstop"), frame, fstop[iter])
                if lens:
                    objSetKey(lens.parm("focus"), frame, p_dist[iter])
                    objSetKey(lens.parm("radius"), frame, p_rad[iter])
                # NOTE(review): stereo keys are set on cams[1] ('persp'),
                # not on the stereocamrig at cams[2] -- confirm intended.
                if camType == "stereocamrig" and cam == cams[1]:
                    objSetKey(cam.parm("ZPS"), frame, paralax[iter])
                    objSetKey(cam.parm("interaxial"), frame, ditance[iter])
                iter += 1
        cams[2].parm("lookatpath").set("../lookat_stereo")
        # keyframe the shared origin/lookat helper nulls
        iter = 0
        for frame in fr:
            objSetKey(origin.parm("tx"), frame, pos[iter][0])
            objSetKey(origin.parm("ty"), frame, pos[iter][1])
            objSetKey(origin.parm("tz"), frame, pos[iter][2])
            objSetKey(lookat.parm("tx"), frame, look[iter][0])
            objSetKey(lookat.parm("ty"), frame, look[iter][1])
            objSetKey(lookat.parm("tz"), frame, look[iter][2])
            iter += 1
def _get_rendered_image_items(self):
    """Scan the file for tk mantra nodes with already rendered images.

    Returns a list of dicts (type/name/other_params) suitable for the
    publish scan-scene hook; empty when the mantranode app is missing.
    """

    app = self.parent

    # see if the mantranode app is installed
    mantra_app = app.engine.apps.get("tk-houdini-mantranode", None)
    if not mantra_app:
        # NOTE(review): the two string fragments concatenate without a
        # space ("...images.The...") in the logged runtime message.
        app.log_info(
            "Will not attempt to scan for rendered images."
            "The 'tk-houdini-mantranode' app is not installed."
        )
        return []

    # find all the tk mantra nodes
    tk_mantra_nodes = hou.nodeType(hou.ropNodeTypeCategory(),
                                   "sgtk_mantra").instances()

    # get the current version from the work file
    work_template = mantra_app.get_template("work_file_template")
    scene_name = str(hou.hipFile.name())
    scene_path = os.path.abspath(scene_name)
    fields = work_template.get_fields(scene_path)
    cur_version = fields["version"]

    # get the output_profiles for the app, keyed by profile name
    output_profiles = {}
    for output_profile in mantra_app.get_setting("output_profiles"):
        name = output_profile["name"]
        output_profiles[name] = output_profile

    render_items = []

    # for each mantra node, see which output profile is selected.
    # get the template for the selected profile and see if there are
    # any images on disk matching the pattern. if so, add them to the
    # list of rendered items to be returned.
    for tk_mantra_node in tk_mantra_nodes:
        output_profile_parm = tk_mantra_node.parm("sgtk_output_profile")
        output_profile_name = \
            output_profile_parm.menuLabels()[output_profile_parm.eval()]
        output_profile = output_profiles[output_profile_name]
        output_template = mantra_app.get_template_by_name(
            output_profile["output_render_template"])
        paths = mantra_app.engine.tank.abstract_paths_from_template(
            output_template, {"SEQ": "FORMAT: %d", "version": cur_version})
        if not paths:
            continue
        if len(paths) == 1:
            # NOTE(review): this inner check is redundant -- paths is
            # known non-empty after the 'continue' above.
            if paths:
                render_items.append({
                    "type": "rendered_image",
                    "name": tk_mantra_node.name(),
                    "other_params": {'path': paths[0]},
                })
        else:
            # ambiguous: more than one path matched; skip with a warning
            app.log_warning(
                "Found multiple potential rendered image paths for "
                "mantra node '%s'. Skipping these paths:\n '%s'" %
                (tk_mantra_node.name(), "\n ".join(paths))
            )

    return render_items
import hou

# Set the default shape & color for the Redshift light OBJ types so they
# read as lights (yellow, "light" shape) in the network editor.
# Data-driven loop replaces five copy-pasted setDefaultColor/Shape pairs
# and avoids repeated hou.nodeType lookups per type.
_RS_LIGHT_TYPE_NAMES = (
    "rslight",
    "rslighties",
    "rslightportal",
    "rslightsun",
    "rslightdome::2.0",
)

for _type_name in _RS_LIGHT_TYPE_NAMES:
    _light_type = hou.nodeType(hou.objNodeTypeCategory(), _type_name)
    _light_type.setDefaultColor(hou.Color(1, 1, 0))
    _light_type.setDefaultShape('light')
def isActive():
    """Return True when the Octane ROP node type is installed."""
    octane_rop = hou.nodeType(hou.ropNodeTypeCategory(), "Octane_ROP")
    return octane_rop is not None
def convert_back_to_tk_mantra_nodes(cls, app):
    """Convert Mantra nodes back to Toolkit Mantra nodes.

    :param app: The calling Toolkit Application

    Note: only converts nodes that had previously been Toolkit Mantra
    nodes -- identified by the output-profile name stashed in the node's
    user data dict during the forward conversion.
    """

    # get all instances of the built-in mantra nodes
    mantra_nodes = hou.nodeType(
        hou.ropNodeTypeCategory(), cls.HOU_MANTRA_NODE_TYPE).instances()
    if not mantra_nodes:
        app.log_debug("No Mantra Nodes found for conversion.")
        return

    # iterate over all the mantra nodes and attempt to convert them
    for mantra_node in mantra_nodes:

        # get the user data dictionary stored on the node
        user_dict = mantra_node.userDataDict()

        # get the output_profile from the dictionary. absent key means
        # this mantra node was never a tk node -- leave it alone.
        tk_output_profile_name = user_dict.get(
            cls.TK_OUTPUT_PROFILE_NAME_KEY)
        if not tk_output_profile_name:
            app.log_warning(
                "Mantra node '%s' does not have an output profile name. "
                "Can't convert to Tk Mantra node. Continuing." %
                (mantra_node.name(), ))
            continue

        # create new Shotgun Write node:
        tk_node_type = TkMantraNodeHandler.TK_MANTRA_NODE_TYPE
        tk_mantra_node = mantra_node.parent().createNode(tk_node_type)

        # find the index of the stored name on the new tk mantra node
        # and set that item in the menu. the parm stores an index, not
        # the label itself, hence the .index() lookup.
        try:
            output_profile_parm = tk_mantra_node.parm(
                TkMantraNodeHandler.TK_OUTPUT_PROFILE_PARM)
            output_profile_index = output_profile_parm.menuLabels().index(
                tk_output_profile_name)
            output_profile_parm.set(output_profile_index)
        except ValueError:
            # profile no longer configured; proceed with the default
            app.log_warning("No output profile found named: %s" %
                (tk_output_profile_name, ))

        # copy over all parameter values except the output path
        _copy_parm_values(mantra_node, tk_mantra_node, excludes=[])

        # explicitly copy AOV settings to the new tk mantra node
        # (the AOV names were stashed in user data during the forward
        # conversion, one entry per extra image plane)
        plane_numbers = _get_extra_plane_numbers(mantra_node)
        for plane_number in plane_numbers:
            plane_parm_name = cls.TK_EXTRA_PLANES_NAME % (plane_number, )
            aov_name = user_dict.get(plane_parm_name)
            tk_mantra_node.parm(plane_parm_name).set(aov_name)

        # copy the inputs and move the outputs
        _copy_inputs(mantra_node, tk_mantra_node)
        _move_outputs(mantra_node, tk_mantra_node)

        # make the new node the same color. the profile will set a color,
        # but do this just in case the user changed the color manually
        # prior to the conversion.
        tk_mantra_node.setColor(mantra_node.color())

        # remember the name and position of the original mantra node
        mantra_node_name = mantra_node.name()
        mantra_node_pos = mantra_node.position()

        # destroy the original mantra node (must happen before setName,
        # otherwise the name would still be taken)
        mantra_node.destroy()

        # name and reposition the new, regular mantra node to match the
        # original
        tk_mantra_node.setName(mantra_node_name)
        tk_mantra_node.setPosition(mantra_node_pos)

        app.log_debug("Converted: Mantra node '%s' to TK Mantra node." %
            (mantra_node_name, ))
def test_typeIsNotPython(self): nodeType = hou.nodeType(hou.sopNodeTypeCategory(), "file") self.assertFalse(nodeType.isPython())
def convert_to_regular_mantra_nodes(cls, app):
    """Convert Toolkit Mantra nodes to regular Mantra nodes.

    :param app: The calling Toolkit Application

    Toolkit-specific settings (output profile name, AOV plane names) are
    stashed in the new node's user data so the conversion can later be
    reversed.
    """

    # get all instances of tk mantra nodes
    tk_node_type = TkMantraNodeHandler.TK_MANTRA_NODE_TYPE
    tk_mantra_nodes = hou.nodeType(
        hou.ropNodeTypeCategory(), tk_node_type).instances()
    if not tk_mantra_nodes:
        app.log_debug("No Toolkit Mantra Nodes found for conversion.")
        return

    for tk_mantra_node in tk_mantra_nodes:

        # create a new, regular Mantra node
        mantra_node = tk_mantra_node.parent().createNode(
            cls.HOU_MANTRA_NODE_TYPE)

        # copy across knob values, skipping the toolkit-only parms
        # (all prefixed "sgtk_") which have no counterpart on a plain
        # mantra node
        exclude_parms = [parm for parm in tk_mantra_node.parms()
                         if parm.name().startswith("sgtk_")]
        _copy_parm_values(tk_mantra_node, mantra_node,
                          excludes=exclude_parms)

        # store the mantra output profile name in the user data so that we
        # can retrieve it later.
        output_profile_parm = tk_mantra_node.parm(
            cls.TK_OUTPUT_PROFILE_PARM)
        tk_output_profile_name = \
            output_profile_parm.menuLabels()[output_profile_parm.eval()]
        mantra_node.setUserData(cls.TK_OUTPUT_PROFILE_NAME_KEY,
            tk_output_profile_name)

        # store AOV info on the new node
        plane_numbers = _get_extra_plane_numbers(tk_mantra_node)
        for plane_number in plane_numbers:
            plane_parm_name = cls.TK_EXTRA_PLANES_NAME % (plane_number,)
            mantra_node.setUserData(plane_parm_name,
                tk_mantra_node.parm(plane_parm_name).eval())

        # copy the inputs and move the outputs
        _copy_inputs(tk_mantra_node, mantra_node)
        _move_outputs(tk_mantra_node, mantra_node)

        # make the new node the same color
        mantra_node.setColor(tk_mantra_node.color())

        # remember the name and position of the original tk mantra node
        tk_mantra_node_name = tk_mantra_node.name()
        tk_mantra_node_pos = tk_mantra_node.position()

        # destroy the original tk mantra node (must precede setName so
        # the original name is free to reuse)
        tk_mantra_node.destroy()

        # name and reposition the new, regular mantra node to match the
        # original
        mantra_node.setName(tk_mantra_node_name)
        mantra_node.setPosition(tk_mantra_node_pos)

        app.log_debug("Converted: Tk Mantra node '%s' to Mantra node." %
            (tk_mantra_node_name,))
def test_typeIsNotSubnet(self): nodeType = hou.nodeType(hou.objNodeTypeCategory(), "geo") self.assertFalse(nodeType.isSubnetType())
def _get_rendered_image_items(self):
    """Scan the file for tk mantra nodes to potentially publish.

    Returns a list of item dicts, one per tk mantra node found. Unlike
    a validation pass, every node yields an item; whether it starts out
    selected depends on whether exactly one output path resolves and
    the node is not bypassed. The validation hook checks for images on
    disk.
    """
    app = self.parent

    # see if the mantranode app is installed
    mantra_app = app.engine.apps.get("tk-houdini-mantranode", None)
    if not mantra_app:
        app.log_info(
            "Will not attempt to scan for rendered images."
            "The 'tk-houdini-mantranode' app is not installed."
        )
        return []

    # find all the tk mantra nodes
    tk_mantra_nodes = hou.nodeType(
        hou.ropNodeTypeCategory(), "sgtk_mantra").instances()

    render_items = []

    # get the current version from the work file
    work_template = mantra_app.get_template("work_file_template")
    scene_name = str(hou.hipFile.name())
    scene_path = os.path.abspath(scene_name)
    fields = work_template.get_fields(scene_path)
    cur_version = fields["version"]
    # abstract the frame number so a sequence resolves to a single path
    fields["SEQ"] = "FORMAT: %d"

    # get the output_profiles for the app. More efficient to do this here
    # than to repeat this logic per item in the secondary publish hook.
    output_profiles = {}
    for output_profile in mantra_app.get_setting("output_profiles"):
        name = output_profile["name"]
        output_profiles[name] = output_profile

    # for each mantra node, see which output profile is selected.
    # get the template for the selected profile. the validation hook will
    # check see if there are any images on disk matching the pattern
    for tk_mantra_node in tk_mantra_nodes:
        # the menu parm stores an index; map it back to the label
        output_profile_parm = tk_mantra_node.parm("sgtk_output_profile")
        output_profile_name = \
            output_profile_parm.menuLabels()[output_profile_parm.eval()]
        output_profile = output_profiles[output_profile_name]
        output_template = mantra_app.get_template_by_name(
            output_profile["output_render_template"])
        is_bypassed = tk_mantra_node.isBypassed()
        paths = mantra_app.engine.tank.abstract_paths_from_template(
            output_template, fields)

        # normalize the paths
        paths = [os.path.normpath(p) for p in paths]

        # only select the item if the output path exists and the node is
        # not bypassed.
        should_select = len(paths) == 1 and not is_bypassed
        render_items.append({
            "type": "rendered_image",
            "name": tk_mantra_node.name(),
            "description": "Full Path: %s" % (tk_mantra_node.path(),),
            "selected": should_select,
            "other_params": {
                "paths": paths,
                "node": tk_mantra_node,
            },
        })
    return render_items
def isActive(): return hou.nodeType(hou.ropNodeTypeCategory(), "arnold") is not None
def stringToNodes(s, hou_parent=None, ne=None, ignore_hdas_if_already_defined=None, force_prefer_hdas=None):
    '''
    Decode an hpaste snippet string and paste its nodes into the scene.

    The snippet is base64(urlsafe) + bz2-compressed JSON. After
    validating format version, algorithm type, houdini version, context
    and checksum, any embedded HDAs are installed and the node payload
    is loaded. If a network editor is available, the pasted items are
    moved to the cursor position.

    :param s: the snippet string produced by the copy side of hpaste
    :param hou_parent: network to paste into; defaults to the pwd of
        the current network editor
    :param ne: network editor pane tab; when given, pasted nodes are
        repositioned around its cursor
    :param ignore_hdas_if_already_defined: skip installing embedded
        HDAs whose type already exists (default True, overridable via
        the 'hpaste.ignore_hdas_if_already_defined' option)
    :param force_prefer_hdas: mark the embedded HDA definitions as
        preferred (default False, overridable via
        'hpaste.force_prefer_hdas')
    :return: None

    :raises RuntimeError: on corrupt input, unsupported format/algorithm,
        context mismatch, or checksum failure
    :raises RuntimeWarning: when the load completed with warnings
    '''
    if (ignore_hdas_if_already_defined is None):
        ignore_hdas_if_already_defined = True
        if (opt is not None):
            # option module overrides the built-in default
            ignore_hdas_if_already_defined = opt.getOption(
                'hpaste.ignore_hdas_if_already_defined',
                ignore_hdas_if_already_defined)

    if (force_prefer_hdas is None):
        force_prefer_hdas = False
        if (opt is not None):
            force_prefer_hdas = opt.getOption('hpaste.force_prefer_hdas',
                force_prefer_hdas)

    # an explicitly supplied network editor means "paste at cursor"
    paste_to_cursor = ne is not None
    if (hou_parent is None):
        if (ne is None):
            ne = hou.ui.paneTabOfType(hou.paneTabType.NetworkEditor)
            if (ne is None):
                raise RuntimeError("cannot find opened network editor")
        hou_parent = ne.pwd()

    s = str(s)  # ununicode. there should not be any unicode in it anyways
    try:
        data = json.loads(bz2.decompress(base64.urlsafe_b64decode(s)))
    except Exception as e:
        # NOTE(review): e.message is Python-2-only; under Python 3 this
        # would raise AttributeError instead of the intended error -- confirm
        # the supported interpreter version
        raise RuntimeError(
            "input data is either corrupted or just not a nodecode: " +
            str(e.message))

    # check version
    formatVersion = data['version']
    if (formatVersion > 2):
        raise RuntimeError(
            "unsupported version of data format. Try updating hpaste to the latest version"
        )

    # check accepted algtypes: which paste algorithms this houdini
    # build can handle
    houver1 = hou.applicationVersion()
    supportedAlgs = set()
    if (houver1[0] == 15):
        supportedAlgs.add(0)
        supportedAlgs.add(1)
        supportedAlgs.add(2)  # WITH BIG WARNING!!!
    if (houver1[0] >= 16):
        supportedAlgs.add(0)
        supportedAlgs.add(1)
        supportedAlgs.add(2)
    algtype = data['algtype']
    if (algtype not in supportedAlgs):
        raise RuntimeError(
            "algorithm type is not supported by this houdini version, :( ")

    # check hou version -- cross-version paste is allowed but warned about
    houver2 = data['houver']
    if (houver1[0] != houver2[0] or houver1[1] != houver2[1]):
        print(
            "HPaste: WARNING!! nodes were copied from a different houdini version: "
            + str(houver2))

    # check context (e.g. Sop vs Obj) -- must match the paste target
    context = getChildContext(hou_parent, houver1)
    if (context != data['context']):
        raise RuntimeError("this snippet has '%s' context" % data['context'])

    # check sum -- sha1 of the payload must match the recorded digest
    code = data['code']
    if (hashlib.sha1(code).hexdigest() != data['chsum']):
        raise RuntimeError("checksum failed!")

    if (paste_to_cursor):
        # snapshot existing items so new ones can be identified after load.
        # allItems() (items incl. network boxes etc.) only exists in h16+
        if (houver1[0] == 16):
            olditems = hou_parent.allItems()
        else:
            olditems = hou_parent.children()

    # do the work: first install any HDAs embedded in the snippet
    for hdaitem in data.get('hdaList', []):  # added in version 2.1
        hdacode = base64.b64decode(hdaitem['code'])
        ntype = hdaitem['type']
        ncategory = hdaitem['category']
        if (ignore_hdas_if_already_defined):
            nodeType = hou.nodeType(hou.nodeTypeCategories()[ncategory], ntype)
            if (nodeType is not None):
                # well, that's already a bad sign, means it is installed
                continue

        # write the HDA payload to a temp file and copy its definitions
        # into the scene's 'Embedded' library
        fd, temppath = tempfile.mkstemp()
        try:
            with open(temppath, 'wb') as f:
                f.write(hdacode)
            for hdadef in hou.hda.definitionsInFile(temppath):
                hdadef.copyToHDAFile('Embedded')
                # hdadef.save('Embedded')
        finally:
            os.close(fd)

        if (force_prefer_hdas):
            # prefer the just-embedded definition over any other match
            embhdas = [x for x in hou.hda.definitionsInFile("Embedded")
                       if (x.nodeType().name() == ntype
                           and x.nodeTypeCategory().name() == ncategory)]
            if (len(embhdas) == 1):
                embhdas[0].setIsPreferred(True)

    # now nodes themselves: decode the payload per format version
    if (formatVersion == 1):
        code = binascii.a2b_qp(code)
    elif (formatVersion >= 2):
        code = base64.b64decode(code)
    else:
        raise RuntimeError(
            "Very unexpected format version in a very inexpected place!")

    if (algtype == 0):
        # high security risk!! legacy algorithm executes the payload as
        # arbitrary python -- require explicit interactive confirmation
        if (hou.isUiAvailable()):
            ok = hou.ui.displayMessage(
                "WARNING! The algorithm type used by the pasted snipped is legacy and present HIGH SECURITY RISK!\n be sure you TRUST THE SOURCE of the snippet!",
                ("CANCEL", "ok"),
                severity=hou.severityType.Warning,
                close_choice=0,
                title="SECURITY WARNING")
        else:
            ok = 0
            print(
                "for now u cannot paste SECURITY RISK snippets in non-interactive mode"
            )
        if (ok != 1):
            return
        # SECURITY: executes untrusted snippet code; gated by the
        # confirmation dialog above
        exec(code, {}, {'hou': hou, 'hou_parent': hou_parent})
    elif (algtype == 1 or algtype == 2):
        # get temp file to hand the serialized nodes to hou's loaders
        fd, temppath = tempfile.mkstemp()
        try:
            with open(temppath, "wb") as f:
                f.write(code)
            try:
                if (algtype == 1):
                    hou_parent.loadChildrenFromFile(temppath)
                if (algtype == 2):
                    try:
                        hou_parent.loadItemsFromFile(temppath)
                    except AttributeError:
                        # loadItemsFromFile is h16+; fall back to the
                        # children-only loader on older builds
                        print(
                            "WARNING!!! your hou version does not support algorithm used for copying, TRYING possibly partly backward-INCOMPATIBLE method!"
                        )
                        print("CHECK SCENE INTEGRITY")
                        hou_parent.loadChildrenFromFile(temppath)
            except hou.LoadWarning as e:
                msg = e.instanceMessage()
                print(msg)
                # truncate just for display with random number 253
                msgtrunc = False
                if (len(msg) > 253):
                    msgtrunc = True
                    msg = msg[:253] + "..."
                raise RuntimeWarning("There were warnings during load" + (
                    "(see console for full message)" if msgtrunc else "") +
                    "\n" + msg)
        finally:
            os.close(fd)
    else:
        raise RuntimeError(
            "algorithm type is not supported. Try updating hpaste to the latest version"
        )

    if (paste_to_cursor):
        # now collect pasted nodes: everything not in the pre-load snapshot
        if (houver1[0] >= 16):
            newitems = [x for x in hou_parent.allItems() if x not in olditems]
        else:
            newitems = [x for x in hou_parent.children() if x not in olditems]

        if (len(newitems) == 0):
            return
        # calc center of the pasted items and their bounding box
        cpos = hou.Vector2()
        bbmin = hou.Vector2()
        bbmax = hou.Vector2()
        cnt = 0
        for item in newitems:
            cnt += 1
            pos = item.position()
            cpos += pos
            for i in [0, 1]:
                # cnt == 1 seeds the bbox from the first item
                if (pos[i] > bbmax[i] or cnt == 1):
                    bbmax[i] = pos[i]
                if (pos[i] < bbmin[i] or cnt == 1):
                    bbmin[i] = pos[i]
        cpos = cpos / cnt
        # anchor on the top edge so the group lands below the cursor
        cpos[1] = bbmax[1]
        offset = ne.cursorPosition() - cpos
        for item in newitems:
            # items inside a pasted network box move with the box itself
            if (houver1[0] >= 16 and item.parentNetworkBox() in newitems):
                continue
            item.move(offset)