def _checkRenderParameters(parms):
    """Validate the render-specific parameter values.

    Reports the first problem found via hqrop.displayError and returns
    False; returns True when every render parameter is acceptable.
    """
    min_hosts = parms["min_hosts_per_job"]
    max_hosts = parms["max_hosts_per_job"]
    if max_hosts < min_hosts:
        hqrop.displayError(
            "Max. Hosts Per Job must be greater than or equal to "
            "Min. Hosts Per Job.")
        return False

    # The IFD path only matters when we generate IFDs ourselves or when
    # the job renders pre-made IFDs instead of an output driver.
    needs_ifd_path = parms["make_ifds"] or not parms["use_output_driver"]
    if needs_ifd_path and not parms["ifd_path"]:
        # Pick the parameter whose label should appear in the error message.
        if parms["make_ifds"]:
            path_parm = hou.parm("hq_outputifd")
        elif not parms["use_output_driver"]:
            path_parm = hou.parm("hq_input_ifd")
        parm_label = path_parm.parmTemplate().label()
        message = " ".join(["The value of the", parm_label,
                            "parameter in\n", hou.pwd().path(),
                            "\n" "must not be blank."])
        hqrop.displayError(message)
        return False

    return True
def _checkRenderParameters(parms):
    """Check the values of the render-specific parameters.

    Return True if the values are valid and False otherwise.
    Invalid values are reported to the user through hqrop.displayError.
    """
    # Host-count sanity: a job cannot require fewer max hosts than min hosts.
    if parms["max_hosts_per_job"] < parms["min_hosts_per_job"]:
        hqrop.displayError(
            "Max. Hosts Per Job must be greater than or equal to "
            "Min. Hosts Per Job.")
        return False

    # Check IFD file path.  The path is required either when this node
    # generates IFDs ("make_ifds") or when it renders existing IFDs
    # (no output driver).
    if parms["make_ifds"] or not parms["use_output_driver"]:
        if not parms["ifd_path"]:
            # Choose the parameter whose UI label goes into the error text.
            if parms["make_ifds"]:
                ifd_path_parm = hou.parm("hq_outputifd")
            elif not parms["use_output_driver"]:
                ifd_path_parm = hou.parm("hq_input_ifd")
            ifd_parm_label = ifd_path_parm.parmTemplate().label()
            hqrop.displayError(" ".join([
                "The value of the", ifd_parm_label, "parameter in\n",
                hou.pwd().path(), "\n" "must not be blank."
            ]))
            return False

    return True
def render():
    """Evaluate and package the HDA parameters and submit a job to HQueue."""
    # Build a dictionary of base parameters and add the HQueue Render-specific
    # ones.
    parms = hqrop.getBaseParameters()

    # Cloud-related parameters may not exist on older versions of the asset,
    # so fall back to safe defaults when the parm is absent.
    use_cloud = (hou.ch("hq_use_cloud1")
                 if hou.parm("hq_use_cloud1") is not None else 0)
    num_cloud_machines = (hou.ch("hq_num_cloud_machines")
                          if hou.parm("hq_num_cloud_machines") is not None
                          else 0)
    machine_type = (hou.ch("hq_cloud_machine_type")
                    if hou.parm("hq_cloud_machine_type") is not None else "")
    # Cloud renders always drive an output driver; otherwise it depends on
    # whether the hip action says to use pre-made IFDs.
    use_output_driver = bool(use_cloud) or parms["hip_action"] != "use_ifd"

    # validate the machine type (fall back to the default EC2 instance type
    # when the parm holds an unrecognized value)
    if machine_type not in ['c1.medium', 'c1.xlarge', 'm1.small',
                            'm1.large', 'm1.xlarge']:
        machine_type = 'c1.xlarge'

    # Collect the render-specific parameter values from the node's parms.
    parms.update({
        "assign_ifdgen_to" : hou.parm("hq_assign_ifdgen_to").evalAsString(),
        "ifdgen_clients": hou.ch("hq_ifdgen_clients").strip(),
        "ifdgen_client_groups" : hou.ch("hq_ifdgen_client_groups").strip(),
        "batch_all_frames": hou.ch("hq_batch_all_frames"),
        "frames_per_job": hou.ch("hq_framesperjob"),
        "render_frame_order": hou.parm("hq_render_frame_order").evalAsString(),
        "make_ifds": hou.ch("hq_makeifds"),
        "max_hosts_per_job": hou.ch("hq_max_hosts"),
        "min_hosts_per_job": hou.ch("hq_min_hosts"),
        "is_CPU_number_set": bool(hou.ch("hq_is_CPU_number_set")),
        "CPUs_to_use": hou.ch("hq_CPUs_to_use"),
        "output_ifd": hou.parm("hq_outputifd").unexpandedString().strip(),
        "use_output_driver" : use_output_driver,
        "use_cloud": use_cloud,
        "num_cloud_machines": num_cloud_machines,
        "cloud_machine_type" : machine_type,
        "use_render_tracker" : hou.ch("hq_use_render_tracker"),
        "delete_ifds": hou.ch("hq_delete_ifds"),
        "render_single_tile": bool(hou.ch("hq_render_single_tile")),
    })

    if use_output_driver:
        # Convert output_driver path to an absolute path.
        parms["output_driver"] = hou.ch("hq_driver").strip()
        rop_node = hou.pwd().node(parms["output_driver"])
        if rop_node:
            parms["output_driver"] = rop_node.path()
        parms["ifd_path"] = hou.parm("hq_outputifd").unexpandedString().strip()

        output_driver = hqrop.getOutputDriver(hou.pwd())
        # Turn "off" Mantra-specific parameters if there is an output driver
        # and it is not a Mantra ROP.
        if output_driver and output_driver.type().name() != "ifd":
            parms["make_ifds"] = False
            parms["min_hosts_per_job"] = 1
            parms["max_hosts_per_job"] = 1
    else:
        # Rendering pre-made IFDs: take the input path and frame range
        # directly from this node's parms.
        parms.update({
            "ifd_path" : hou.parm("hq_input_ifd").unexpandedString().strip(),
            "start_frame" : hou.ch("hq_frame_range_1"),
            "end_frame" : hou.ch("hq_frame_range_2"),
            "frame_skip" : hou.ch("hq_frame_range_3"),
            # If we are not using an output driver we are using IFDs and so
            # we won't be making them
            "make_ifds" : False,
        })
        # Guard against a zero/negative frame increment.
        if parms["frame_skip"] <= 0:
            parms["frame_skip"] = 1

    # We stop if we cannot establish a connection with the server
    if (not parms["use_cloud"]
            and not hqrop.doesHQServerExists(parms["hq_server"])):
        return None

    # Rewrite the IFD path so it is rooted at $HQROOT on the farm clients.
    if "ifd_path" in parms and not parms["use_cloud"]:
        expand_frame_variables = False
        parms["ifd_path"] = hqrop.substituteWithHQROOT(
            parms["hq_server"], parms["ifd_path"], expand_frame_variables)

    # Validate parameter values.
    if (not hqrop.checkBaseParameters(parms)
            or not _checkRenderParameters(parms)):
        return

    if use_output_driver and parms["hip_action"] == "use_current_hip":
        if not hqrop.checkOutputDriver(parms["output_driver"]):
            return
        # Submitting a job whose driver chain includes this node would
        # recurse forever, so refuse it.
        if hqrop.checkForRecursiveChain(hou.pwd()):
            hqrop.displayError(("Cannot submit HQueue job because"
                                " %s is in the input chain of %s.")
                               % (hou.pwd().path(), parms["output_driver"]))
            return

    # If we're not supposed to run this job on the cloud, submit the job.
    # Otherwise, we'll display the file dependency dialog.
    if parms["use_cloud"]:
        # We don't want to keep the interrupt dialog open, so we exit this soho
        # script so the dialog closes and schedule an event to run the code to
        # display the dialog.
        import soho
        rop_node = hou.node(soho.getOutputDriver().getName())
        cloud.selectProjectParmsForCloudRender(
            rop_node, parms["num_cloud_machines"], parms["cloud_machine_type"])
        return

    # Automatically save changes to the .hip file,
    # or at least warn the user about unsaved changes.
    should_continue = hqrop.warnOrAutoSaveHipFile(parms)
    if not should_continue:
        return

    hqrop.submitJob(parms, _byu_troubleshoot_hq)
def render():
    """Evaluate and package the HDA parameters and submit a job to HQueue."""
    # Build a dictionary of base parameters and add the HQueue Render-specific
    # ones.
    parms = hqrop.getBaseParameters()

    # Cloud parms may be missing on older node versions; default safely.
    use_cloud = (hou.ch("hq_use_cloud1")
                 if hou.parm("hq_use_cloud1") is not None else 0)
    num_cloud_machines = (hou.ch("hq_num_cloud_machines")
                          if hou.parm("hq_num_cloud_machines") is not None
                          else 0)
    machine_type = (hou.ch("hq_cloud_machine_type")
                    if hou.parm("hq_cloud_machine_type") is not None else "")
    # Cloud jobs always go through an output driver; otherwise honor the
    # node's hip action setting.
    use_output_driver = bool(use_cloud) or parms["hip_action"] != "use_ifd"

    # validate the machine type
    if machine_type not in [
        'c1.medium', 'c1.xlarge', 'm1.small', 'm1.large', 'm1.xlarge'
    ]:
        machine_type = 'c1.xlarge'

    # Gather the render-specific parameter values.
    parms.update({
        "assign_ifdgen_to": hou.parm("hq_assign_ifdgen_to").evalAsString(),
        "ifdgen_clients": hou.ch("hq_ifdgen_clients").strip(),
        "ifdgen_client_groups": hou.ch("hq_ifdgen_client_groups").strip(),
        "batch_all_frames": hou.ch("hq_batch_all_frames"),
        "frames_per_job": hou.ch("hq_framesperjob"),
        "render_frame_order": hou.parm("hq_render_frame_order").evalAsString(),
        "make_ifds": hou.ch("hq_makeifds"),
        "max_hosts_per_job": hou.ch("hq_max_hosts"),
        "min_hosts_per_job": hou.ch("hq_min_hosts"),
        "is_CPU_number_set": bool(hou.ch("hq_is_CPU_number_set")),
        "CPUs_to_use": hou.ch("hq_CPUs_to_use"),
        "output_ifd": hou.parm("hq_outputifd").unexpandedString().strip(),
        "use_output_driver": use_output_driver,
        "use_cloud": use_cloud,
        "num_cloud_machines": num_cloud_machines,
        "cloud_machine_type": machine_type,
        "use_render_tracker": hou.ch("hq_use_render_tracker"),
        "delete_ifds": hou.ch("hq_delete_ifds"),
        "render_single_tile": bool(hou.ch("hq_render_single_tile")),
    })

    if use_output_driver:
        # Convert output_driver path to an absolute path.
        parms["output_driver"] = hou.ch("hq_driver").strip()
        rop_node = hou.pwd().node(parms["output_driver"])
        if rop_node:
            parms["output_driver"] = rop_node.path()
        parms["ifd_path"] = hou.parm("hq_outputifd").unexpandedString().strip()

        output_driver = hqrop.getOutputDriver(hou.pwd())
        # Turn "off" Mantra-specific parameters if there is an output driver
        # and it is not a Mantra ROP.
        if output_driver and output_driver.type().name() != "ifd":
            parms["make_ifds"] = False
            parms["min_hosts_per_job"] = 1
            parms["max_hosts_per_job"] = 1
    else:
        # Rendering pre-made IFD files: path and frame range come from
        # this node's own parameters.
        parms.update({
            "ifd_path": hou.parm("hq_input_ifd").unexpandedString().strip(),
            "start_frame": hou.ch("hq_frame_range_1"),
            "end_frame": hou.ch("hq_frame_range_2"),
            "frame_skip": hou.ch("hq_frame_range_3"),
            # If we are not using an output driver we are using IFDs and so
            # we won't be making them
            "make_ifds": False,
        })
        # A non-positive increment would loop forever / skip nothing.
        if parms["frame_skip"] <= 0:
            parms["frame_skip"] = 1

    # We stop if we cannot establish a connection with the server
    if (not parms["use_cloud"]
            and not hqrop.doesHQServerExists(parms["hq_server"])):
        return None

    # Re-root the IFD path under $HQROOT so farm clients can resolve it.
    if "ifd_path" in parms and not parms["use_cloud"]:
        expand_frame_variables = False
        parms["ifd_path"] = hqrop.substituteWithHQROOT(parms["hq_server"],
                                                       parms["ifd_path"],
                                                       expand_frame_variables)

    # Validate parameter values.
    if (not hqrop.checkBaseParameters(parms)
            or not _checkRenderParameters(parms)):
        return

    if use_output_driver and parms["hip_action"] == "use_current_hip":
        if not hqrop.checkOutputDriver(parms["output_driver"]):
            return
        # Refuse submissions where this node feeds its own output driver.
        if hqrop.checkForRecursiveChain(hou.pwd()):
            hqrop.displayError(("Cannot submit HQueue job because"
                                " %s is in the input chain of %s.")
                               % (hou.pwd().path(), parms["output_driver"]))
            return

    # If we're not supposed to run this job on the cloud, submit the job.
    # Otherwise, we'll display the file dependency dialog.
    if parms["use_cloud"]:
        # We don't want to keep the interrupt dialog open, so we exit this soho
        # script so the dialog closes and schedule an event to run the code to
        # display the dialog.
        import soho
        rop_node = hou.node(soho.getOutputDriver().getName())
        cloud.selectProjectParmsForCloudRender(rop_node,
                                               parms["num_cloud_machines"],
                                               parms["cloud_machine_type"])
        return

    # Automatically save changes to the .hip file,
    # or at least warn the user about unsaved changes.
    should_continue = hqrop.warnOrAutoSaveHipFile(parms)
    if not should_continue:
        return

    hqrop.submitJob(parms, _byu_troubleshoot_hq)