def test(self):
    """A variable deleted by DeleteContextVariables must no longer be
    visible to substitutions evaluated upstream of it."""

    # Plane with an attribute whose value substitutes the context
    # variable "a".
    plane = GafferScene.Plane()
    planeAttributes = GafferScene.Attributes()
    planeAttributes["in"].setInput(plane["out"])
    planeAttributes["attributes"].addMember("user:something", IECore.StringData("$a"))

    # Node under test, sitting between the attributes and the node that
    # will create the variable.
    deleter = Gaffer.DeleteContextVariables()
    deleter.setup(GafferScene.ScenePlug())
    deleter["in"].setInput(planeAttributes["out"])

    creator = Gaffer.ContextVariables()
    creator.setup(GafferScene.ScenePlug())
    creator["in"].setInput(deleter["out"])
    creator["variables"].addMember("a", IECore.StringData("aardvark"))

    # Without the variable in the context, substitution yields "".
    self.assertEqual(
        planeAttributes["out"].attributes("/plane")["user:something"],
        IECore.StringData(""),
    )
    # With the variable created downstream, it is substituted upstream.
    self.assertEqual(
        creator["out"].attributes("/plane")["user:something"],
        IECore.StringData("aardvark"),
    )

    # Deleting the variable restores the empty substitution.
    deleter["variables"].setValue("a")
    self.assertEqual(
        creator["out"].attributes("/plane")["user:something"],
        IECore.StringData(""),
    )
def testPatternMatching(self):
    """The variables plug accepts wildcard patterns, deleting every
    matching variable at once."""

    stringNode = GafferTest.StringInOutNode()
    self.assertHashesValid(stringNode)

    deleter = Gaffer.DeleteContextVariables()
    deleter.setup(Gaffer.StringPlug())
    deleter["in"].setInput(stringNode["out"])

    creator = Gaffer.ContextVariables()
    creator.setup(Gaffer.StringPlug())
    creator["in"].setInput(deleter["out"])

    # None of the six variables exist yet, so all substitutions are empty.
    stringNode["in"].setValue("$a1_$a2_$b1_$b2_$c1_$c2")
    self.assertEqual(creator["out"].getValue(), "_____")

    # Define all six variables; each substitutes to its upper-cased name.
    for variableName in ("a1", "a2", "b1", "b2", "c1", "c2"):
        creator["variables"].addMember(variableName, IECore.StringData(variableName.upper()))
    self.assertEqual(creator["out"].getValue(), "A1_A2_B1_B2_C1_C2")

    # "a* c*" removes a1, a2, c1 and c2, leaving only the b's intact.
    deleter["variables"].setValue("a* c*")
    self.assertEqual(creator["out"].getValue(), "__B1_B2__")
def testDirtyPropagation(self):
    """Changing the variables plug must dirty the output plug."""

    n = GafferTest.StringInOutNode()

    d = Gaffer.DeleteContextVariables()
    d.setup(Gaffer.StringPlug())
    d["in"].setInput(n["out"])

    # Deleting a variable should dirty the output.
    dirtied = GafferTest.CapturingSlot(d.plugDirtiedSignal())
    d["variables"].setValue("a")
    # assertIn replaces the long-deprecated unittest alias failUnless(),
    # and gives a clearer failure message.
    self.assertIn(d["out"], [p[0] for p in dirtied])
def test(self):
    """Basic round trip : a variable substituted into a string result
    disappears once DeleteContextVariables removes it."""

    stringNode = GafferTest.StringInOutNode()

    deleter = Gaffer.DeleteContextVariables()
    deleter.setup(Gaffer.StringPlug())
    deleter["in"].setInput(stringNode["out"])

    creator = Gaffer.ContextVariables()
    creator.setup(Gaffer.StringPlug())
    creator["in"].setInput(deleter["out"])

    stringNode["in"].setValue("$a")

    # Variable undefined : substitution is empty.
    self.assertEqual(creator["out"].getValue(), "")

    # Variable defined downstream : substitution succeeds.
    creator["variables"].addMember("a", IECore.StringData("A"))
    self.assertEqual(creator["out"].getValue(), "A")

    # Variable deleted again : back to empty.
    deleter["variables"].setValue("a")
    self.assertEqual(creator["out"].getValue(), "")
def test(self):
    """As the basic round-trip test, but creating the variable with an
    explicit dynamic NameValuePlug rather than addMember()."""

    stringNode = GafferTest.StringInOutNode()

    deleter = Gaffer.DeleteContextVariables()
    deleter.setup(Gaffer.StringPlug())
    deleter["in"].setInput(stringNode["out"])

    creator = Gaffer.ContextVariables()
    creator.setup(Gaffer.StringPlug())
    creator["in"].setInput(deleter["out"])

    stringNode["in"].setValue("$a")

    # Variable undefined : substitution is empty.
    self.assertEqual(creator["out"].getValue(), "")

    # Add the variable as a dynamic NameValuePlug.
    creator["variables"].addChild(
        Gaffer.NameValuePlug(
            "a",
            IECore.StringData("A"),
            flags=Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic,
        )
    )
    self.assertEqual(creator["out"].getValue(), "A")

    # Deleting the variable restores the empty substitution.
    deleter["variables"].setValue("a")
    self.assertEqual(creator["out"].getValue(), "")
inputScript=inputScript, node="CollectScenes", outputScript=outputScript) process = subprocess.Popen(command, shell=True, stderr=subprocess.PIPE) process.wait() script["fileName"].setValue(outputScript) script.load() with GafferUI.Window() as window: graphEditorWindow = GafferUI.GraphEditor(script) graphEditorWindow.parent().reveal() graphEditorWindow.parent()._qtWidget().resize(800, 520) __delay(0.01) GafferUI.WidgetAlgo.grab( widget=graphEditorWindow, imagePath="images/{imageName}.png".format(imageName=name + "Stats")) graphEditorWindow.parent().close() del graphEditorWindow script.addChild(Gaffer.DeleteContextVariables()) script["DeleteContextVariables"].setup(GafferScene.ScenePlug("in", )) script["DeleteContextVariables"].addChild( Gaffer.V2fPlug( "__uiPosition", defaultValue=imath.V2f(0, 0), flags=Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic, )) script["DeleteContextVariables"]["variables"].setValue('collect:rootName') script["DeleteContextVariables"]["in"].setInput(script["Group"]["out"]) script["Transform"]["in"].setInput(script["DeleteContextVariables"]["out"]) script["DeleteContextVariables"]["__uiPosition"].setValue(imath.V2f( -3.0, 4.75)) script["fileName"].setValue(improvedScript) script.save()
def testMerging(self):
    """Bake three variants of a scene ( complete, incomplete and
    mixed-resolution ) and check that ArnoldTextureBake merges them into
    correct textures."""

    allFilter = GafferScene.PathFilter()
    allFilter["paths"].setValue(IECore.StringVectorData(['/...']))

    plane = GafferScene.Plane()
    plane["divisions"].setValue(imath.V2i(20, 20))

    # Assign a basic gradient shader
    uvGradientCode = GafferOSL.OSLCode()
    uvGradientCode["out"].addChild(Gaffer.Color3fPlug("out", direction=Gaffer.Plug.Direction.Out))
    uvGradientCode["code"].setValue('out = color( u, v, 0.5 );')

    shaderAssignment = GafferScene.ShaderAssignment()
    shaderAssignment["in"].setInput(plane["out"])
    shaderAssignment["filter"].setInput(allFilter["out"])
    shaderAssignment["shader"].setInput(uvGradientCode["out"]["out"])

    # Set up a random id from 0 - 3 on each face
    randomCode = GafferOSL.OSLCode()
    randomCode["out"].addChild(Gaffer.IntPlug("randomId", direction=Gaffer.Plug.Direction.Out))
    randomCode["code"].setValue('randomId = int(cellnoise( P * 100 ) * 4);')

    outInt = GafferOSL.OSLShader()
    outInt.loadShader("ObjectProcessing/OutInt")
    outInt["parameters"]["name"].setValue('randomId')
    outInt["parameters"]["value"].setInput(randomCode["out"]["randomId"])

    outObject = GafferOSL.OSLShader()
    outObject.loadShader("ObjectProcessing/OutObject")
    outObject["parameters"]["in0"].setInput(outInt["out"]["primitiveVariable"])

    oSLObject = GafferOSL.OSLObject()
    oSLObject["in"].setInput(shaderAssignment["out"])
    oSLObject["filter"].setInput(allFilter["out"])
    oSLObject["shader"].setInput(outObject["out"])
    oSLObject["interpolation"].setValue(2)

    # Create 4 meshes by picking each of the 4 ids
    deleteContextVariables = Gaffer.DeleteContextVariables()
    deleteContextVariables.setup(GafferScene.ScenePlug())
    deleteContextVariables["variables"].setValue('collect:rootName')
    deleteContextVariables["in"].setInput(oSLObject["out"])

    pickCode = GafferOSL.OSLCode()
    pickCode["parameters"].addChild(Gaffer.IntPlug("targetId"))
    pickCode["out"].addChild(Gaffer.IntPlug("cull", direction=Gaffer.Plug.Direction.Out))
    pickCode["code"].setValue('int randomId; getattribute( "randomId", randomId ); cull = randomId != targetId;')

    expression = Gaffer.Expression()
    pickCode.addChild(expression)
    expression.setExpression(
        'parent.parameters.targetId = stoi( context( "collect:rootName", "0" ) );',
        "OSL",
    )

    outInt1 = GafferOSL.OSLShader()
    outInt1.loadShader("ObjectProcessing/OutInt")
    outInt1["parameters"]["name"].setValue('deleteFaces')
    outInt1["parameters"]["value"].setInput(pickCode["out"]["cull"])

    outObject1 = GafferOSL.OSLShader()
    outObject1.loadShader("ObjectProcessing/OutObject")
    outObject1["parameters"]["in0"].setInput(outInt1["out"]["primitiveVariable"])

    oSLObject1 = GafferOSL.OSLObject()
    oSLObject1["in"].setInput(deleteContextVariables["out"])
    oSLObject1["filter"].setInput(allFilter["out"])
    oSLObject1["shader"].setInput(outObject1["out"])
    oSLObject1["interpolation"].setValue(2)

    deleteFaces = GafferScene.DeleteFaces()
    deleteFaces["in"].setInput(oSLObject1["out"])
    deleteFaces["filter"].setInput(allFilter["out"])

    collectScenes = GafferScene.CollectScenes()
    collectScenes["in"].setInput(deleteFaces["out"])
    collectScenes["rootNames"].setValue(IECore.StringVectorData(['0', '1', '2', '3']))
    collectScenes["sourceRoot"].setValue('/plane')

    # First variant: bake everything, covering the whole 1001 UDIM
    customAttributes1 = GafferScene.CustomAttributes()
    customAttributes1["attributes"].addMember(
        'bake:fileName',
        IECore.StringData('${bakeDirectory}/complete/<AOV>/<AOV>.<UDIM>.exr'),
    )
    customAttributes1["in"].setInput(collectScenes["out"])

    # Second vaiant: bake just 2 of the 4 meshes, leaving lots of holes that will need filling
    pruneFilter = GafferScene.PathFilter()
    pruneFilter["paths"].setValue(IECore.StringVectorData(['/2', '/3']))

    prune = GafferScene.Prune()
    prune["in"].setInput(collectScenes["out"])
    prune["filter"].setInput(pruneFilter["out"])

    customAttributes2 = GafferScene.CustomAttributes()
    customAttributes2["attributes"].addMember(
        'bake:fileName',
        IECore.StringData('${bakeDirectory}/incomplete/<AOV>/<AOV>.<UDIM>.exr'),
    )
    customAttributes2["in"].setInput(prune["out"])

    # Third variant: bake everything, but with one mesh at a higher resolution
    customAttributes3 = GafferScene.CustomAttributes()
    customAttributes3["attributes"].addMember(
        'bake:fileName',
        IECore.StringData('${bakeDirectory}/mismatch/<AOV>/<AOV>.<UDIM>.exr'),
    )
    customAttributes3["in"].setInput(collectScenes["out"])

    pathFilter2 = GafferScene.PathFilter()
    pathFilter2["paths"].setValue(IECore.StringVectorData(['/2']))

    customAttributes = GafferScene.CustomAttributes()
    customAttributes["attributes"].addMember('bake:resolution', IECore.IntData(200))
    customAttributes["filter"].setInput(pathFilter2["out"])
    customAttributes["in"].setInput(customAttributes3["out"])

    # Merge the 3 variants
    mergeGroup = GafferScene.Group()
    mergeGroup["in"][-1].setInput(customAttributes["out"])
    mergeGroup["in"][-1].setInput(customAttributes1["out"])
    mergeGroup["in"][-1].setInput(customAttributes2["out"])

    arnoldTextureBake = GafferArnold.ArnoldTextureBake()
    arnoldTextureBake["in"].setInput(mergeGroup["out"])
    arnoldTextureBake["filter"].setInput(allFilter["out"])
    arnoldTextureBake["bakeDirectory"].setValue(self.temporaryDirectory() + '/bakeMerge/')
    arnoldTextureBake["defaultResolution"].setValue(128)
    # We want to check the intermediate results
    arnoldTextureBake["cleanupIntermediateFiles"].setValue(False)

    # Dispatch the bake
    script = Gaffer.ScriptNode()
    script.addChild(arnoldTextureBake)
    dispatcher = GafferDispatch.LocalDispatcher()
    dispatcher["jobsDirectory"].setValue(self.temporaryDirectory())
    dispatcher.dispatch([arnoldTextureBake])

    # Check results
    imageReader = GafferImage.ImageReader()

    outLayer = GafferOSL.OSLShader()
    outLayer.loadShader("ImageProcessing/OutLayer")
    outLayer["parameters"]["layerColor"].setInput(uvGradientCode["out"]["out"])

    outImage = GafferOSL.OSLShader()
    outImage.loadShader("ImageProcessing/OutImage")
    outImage["parameters"]["in0"].setInput(outLayer["out"]["layer"])

    oSLImage = GafferOSL.OSLImage()
    oSLImage["in"].setInput(imageReader["out"])
    oSLImage["shader"].setInput(outImage["out"])

    merge3 = GafferImage.Merge()
    merge3["in"]["in0"].setInput(oSLImage["out"])
    merge3["in"]["in1"].setInput(imageReader["out"])
    merge3["operation"].setValue(10)

    edgeDetect = self.SimpleEdgeDetect()
    edgeDetect["in"].setInput(imageReader["out"])

    edgeStats = GafferImage.ImageStats()
    edgeStats["in"].setInput(edgeDetect["out"])

    refDiffStats = GafferImage.ImageStats()
    refDiffStats["in"].setInput(merge3["out"])

    oneLayerReader = GafferImage.ImageReader()

    grade = GafferImage.Grade()
    grade["in"].setInput(oneLayerReader["out"])
    grade["channels"].setValue('[A]')
    grade["blackPoint"].setValue(imath.Color4f(0, 0, 0, 0.999899983))

    copyChannels = GafferImage.CopyChannels()
    copyChannels["in"]["in0"].setInput(merge3["out"])
    copyChannels["in"]["in1"].setInput(grade["out"])
    copyChannels["channels"].setValue('[A]')

    premultiply = GafferImage.Premultiply()
    premultiply["in"].setInput(copyChannels["out"])

    refDiffCoveredStats = GafferImage.ImageStats()
    refDiffCoveredStats["in"].setInput(premultiply["out"])

    # We are testing 3 different cases:
    # complete   : Should be an exact match.
    # incomplete : Expect some mild variance of slopes and some error, because we have to
    #              reconstruct a lot of missing data.
    # mismatch   : We should get a larger image, sized to the highest override on any mesh.
    #              Match won't be as perfect, because we're combining source images at
    #              different resolutions
    for name, expectedSize, maxEdge, maxRefDiff, maxMaskedDiff in [
        ("complete", 128, 0.01, 0.000001, 0.000001),
        ("incomplete", 128, 0.05, 0.15, 0.000001),
        ("mismatch", 200, 0.01, 0.01, 0.01),
    ]:
        imageReader["fileName"].setValue(
            self.temporaryDirectory() + "/bakeMerge/" + name + "/beauty/beauty.1001.tx"
        )
        oneLayerReader["fileName"].setValue(
            self.temporaryDirectory() + "/bakeMerge/" + name + "/beauty/beauty.1001.exr"
        )

        self.assertEqual(imageReader["out"]["format"].getValue().width(), expectedSize)
        self.assertEqual(imageReader["out"]["format"].getValue().height(), expectedSize)

        edgeStats["area"].setValue(imath.Box2i(imath.V2i(1), imath.V2i(expectedSize - 1)))
        refDiffStats["area"].setValue(imath.Box2i(imath.V2i(1), imath.V2i(expectedSize - 1)))
        refDiffCoveredStats["area"].setValue(imath.Box2i(imath.V2i(0), imath.V2i(expectedSize)))

        # Blue channel is constant, so everything should line up perfectly
        self.assertEqual(0, edgeStats["max"].getValue()[2])
        self.assertEqual(0, refDiffStats["max"].getValue()[2])
        self.assertEqual(0, refDiffCoveredStats["max"].getValue()[2])

        for i in range(2):
            # Make sure we've got actual data, by checking that we have some error ( we're not expecting
            # to perfectly reconstruct the gradient when the input is incomplete )
            self.assertGreater(edgeStats["max"].getValue()[i], 0.005)
            if name == "incomplete":
                self.assertGreater(edgeStats["max"].getValue()[i], 0.03)
                self.assertGreater(refDiffStats["max"].getValue()[i], 0.06)
            self.assertLess(edgeStats["max"].getValue()[i], maxEdge)
            self.assertLess(refDiffStats["max"].getValue()[i], maxRefDiff)
            self.assertLess(refDiffCoveredStats["max"].getValue()[i], maxMaskedDiff)
def __init__(self, name="ArnoldTextureBake"):
    """Build the internal node network for the texture-bake task.

    The network dispatches wedged render tasks, writes an index file
    listing the baked images, then merges / fills / converts those
    images to .tx textures, optionally cleaning up intermediates.
    """

    GafferDispatch.TaskNode.__init__(self, name)

    # --- Public plugs ---------------------------------------------------
    self["in"] = GafferScene.ScenePlug()
    self["filter"] = GafferScene.FilterPlug()
    self["bakeDirectory"] = Gaffer.StringPlug("bakeDirectory", defaultValue="")
    self["defaultFileName"] = Gaffer.StringPlug(
        "defaultFileName",
        defaultValue="${bakeDirectory}/<AOV>/<AOV>.<UDIM>.exr",
    )
    self["defaultResolution"] = Gaffer.IntPlug("defaultResolution", defaultValue=512)
    self["uvSet"] = Gaffer.StringPlug("uvSet", defaultValue='uv')
    self["normalOffset"] = Gaffer.FloatPlug("offset", defaultValue=0.1)
    self["aovs"] = Gaffer.StringPlug("aovs", defaultValue='beauty:RGBA')
    self["tasks"] = Gaffer.IntPlug("tasks", defaultValue=1)
    self["cleanupIntermediateFiles"] = Gaffer.BoolPlug("cleanupIntermediateFiles", defaultValue=True)
    self["applyMedianFilter"] = Gaffer.BoolPlug("applyMedianFilter", Gaffer.Plug.Direction.In, False)
    self["medianRadius"] = Gaffer.IntPlug("medianRadius", Gaffer.Plug.Direction.In, 1)

    # Set up connection to preTasks beforehand
    self["__PreTaskList"] = GafferDispatch.TaskList()
    self["__PreTaskList"]["preTasks"].setInput(self["preTasks"])

    self["__CleanPreTasks"] = Gaffer.DeleteContextVariables()
    self["__CleanPreTasks"].setup(GafferDispatch.TaskNode.TaskPlug())
    self["__CleanPreTasks"]["in"].setInput(self["__PreTaskList"]["task"])
    self["__CleanPreTasks"]["variables"].setValue("BAKE_WEDGE:index BAKE_WEDGE:value_unused")

    # First, setup python commands which will dispatch a chunk of a render or image tasks as
    # immediate execution once they reach the farm - this allows us to run multiple tasks in
    # one farm process.
    self["__RenderDispatcher"] = GafferDispatch.PythonCommand()
    self["__RenderDispatcher"]["preTasks"][0].setInput(self["__CleanPreTasks"]["out"])
    self["__RenderDispatcher"]["command"].setValue(inspect.cleandoc("""
        import GafferDispatch
        # We need to access frame and "BAKE_WEDGE:index" so that the hash of render varies with the wedge index,
        # so we might as well print what we're doing
        IECore.msg( IECore.MessageHandler.Level.Info, "Bake Process", "Dispatching render task index %i for frame %i" % ( context["BAKE_WEDGE:index"], context.getFrame() ) )
        d = GafferDispatch.LocalDispatcher()
        d.dispatch( [ self.parent()["__bakeDirectoryContext"] ] )
    """))

    self["__ImageDispatcher"] = GafferDispatch.PythonCommand()
    self["__ImageDispatcher"]["preTasks"][0].setInput(self["__RenderDispatcher"]["task"])
    self["__ImageDispatcher"]["command"].setValue(inspect.cleandoc("""
        import GafferDispatch
        # We need to access frame and "BAKE_WEDGE:index" so that the hash of render varies with the wedge index,
        # so we might as well print what we're doing
        IECore.msg( IECore.MessageHandler.Level.Info, "Bake Process", "Dispatching image task index %i for frame %i" % ( context["BAKE_WEDGE:index"], context.getFrame() ) )
        d = GafferDispatch.LocalDispatcher()
        d.dispatch( [ self.parent()["__CleanUpSwitch"] ] )
    """))

    # Connect through the dispatch settings to the render dispatcher
    # ( The image dispatcher runs much quicker, and should be OK using default settings )
    self["__RenderDispatcher"]["dispatcher"].setInput(self["dispatcher"])

    # Set up variables so the dispatcher knows that the render and image dispatches depend on
    # the file paths ( in case they are varying in a wedge )
    for redispatch in [self["__RenderDispatcher"], self["__ImageDispatcher"]]:
        redispatch["variables"].addChild(Gaffer.NameValuePlug("bakeDirectory", "", "bakeDirectoryVar"))
        redispatch["variables"].addChild(Gaffer.NameValuePlug("defaultFileName", "", "defaultFileNameVar"))

    # Connect the variables via an expression so that they get expanded ( this also means that
    # if you put #### in a filename you will get per frame tasks, because the hash will depend
    # on frame number )
    self["__DispatchVariableExpression"] = Gaffer.Expression()
    self["__DispatchVariableExpression"].setExpression(inspect.cleandoc("""
        parent["__RenderDispatcher"]["variables"]["bakeDirectoryVar"]["value"] = parent["bakeDirectory"]
        parent["__RenderDispatcher"]["variables"]["defaultFileNameVar"]["value"] = parent["defaultFileName"]
        parent["__ImageDispatcher"]["variables"]["bakeDirectoryVar"]["value"] = parent["bakeDirectory"]
        parent["__ImageDispatcher"]["variables"]["defaultFileNameVar"]["value"] = parent["defaultFileName"]
    """), "python")

    # Wedge based on tasks into the overall number of tasks to run. Note that we don't know how
    # much work each task will do until we actually run the render tasks ( this is when scene
    # expansion happens ). Because we must group all tasks that write to the same file into the
    # same task batch, if tasks is a large number, some tasks batches could end up empty
    self["__MainWedge"] = GafferDispatch.Wedge()
    self["__MainWedge"]["preTasks"][0].setInput(self["__ImageDispatcher"]["task"])
    self["__MainWedge"]["variable"].setValue("BAKE_WEDGE:value_unused")
    self["__MainWedge"]["indexVariable"].setValue("BAKE_WEDGE:index")
    self["__MainWedge"]["mode"].setValue(1)
    self["__MainWedge"]["intMin"].setValue(1)
    self["__MainWedge"]["intMax"].setInput(self["tasks"])

    self["task"].setInput(self["__MainWedge"]["task"])
    self["task"].setFlags(Gaffer.Plug.Flags.Serialisable, False)

    # Now set up the render tasks. This involves doing the actual rendering, and triggering the
    # output of the file list index file.

    # First get rid of options from the upstream scene that could mess up the bake
    self["__OptionOverrides"] = GafferScene.StandardOptions()
    self["__OptionOverrides"]["in"].setInput(self["in"])
    self["__OptionOverrides"]["options"]["pixelAspectRatio"]["enabled"].setValue(True)
    self["__OptionOverrides"]["options"]["resolutionMultiplier"]["enabled"].setValue(True)
    self["__OptionOverrides"]["options"]["overscan"]["enabled"].setValue(True)
    self["__OptionOverrides"]["options"]["renderCropWindow"]["enabled"].setValue(True)
    self["__OptionOverrides"]["options"]["cameraBlur"]["enabled"].setValue(True)
    self["__OptionOverrides"]["options"]["transformBlur"]["enabled"].setValue(True)
    self["__OptionOverrides"]["options"]["deformationBlur"]["enabled"].setValue(True)

    self["__CameraSetup"] = self.__CameraSetup()
    self["__CameraSetup"]["in"].setInput(self["__OptionOverrides"]["out"])
    self["__CameraSetup"]["filter"].setInput(self["filter"])
    self["__CameraSetup"]["defaultFileName"].setInput(self["defaultFileName"])
    self["__CameraSetup"]["defaultResolution"].setInput(self["defaultResolution"])
    self["__CameraSetup"]["uvSet"].setInput(self["uvSet"])
    self["__CameraSetup"]["aovs"].setInput(self["aovs"])
    self["__CameraSetup"]["normalOffset"].setInput(self["normalOffset"])
    self["__CameraSetup"]["tasks"].setInput(self["tasks"])

    self["__Expression"] = Gaffer.Expression()
    self["__Expression"].setExpression(
        'parent["__CameraSetup"]["taskIndex"] = context.get( "BAKE_WEDGE:index", 0 )',
        "python",
    )

    self["__indexFilePath"] = Gaffer.StringPlug()
    self["__indexFilePath"].setFlags(Gaffer.Plug.Flags.Serialisable, False)
    self["__IndexFileExpression"] = Gaffer.Expression()
    self["__IndexFileExpression"].setExpression(inspect.cleandoc("""
        import os
        parent["__indexFilePath"] = os.path.join( parent["bakeDirectory"], "BAKE_FILE_INDEX_" + str( context.get("BAKE_WEDGE:index", 0 ) ) + ".####.txt" )
    """), "python")

    # NOTE(review) : this was Gaffer.PythonCommand(), but PythonCommand lives in
    # GafferDispatch ( as used for __CleanUpCommand below ).
    self["__outputIndexCommand"] = GafferDispatch.PythonCommand()
    self["__outputIndexCommand"]["variables"].addChild(
        Gaffer.NameValuePlug("bakeDirectory", Gaffer.StringPlug()))
    self["__outputIndexCommand"]["variables"][0]["value"].setInput(self["bakeDirectory"])
    self["__outputIndexCommand"]["variables"].addChild(
        Gaffer.NameValuePlug("indexFilePath", Gaffer.StringPlug()))
    self["__outputIndexCommand"]["variables"][1]["value"].setInput(self["__indexFilePath"])
    self["__outputIndexCommand"]["variables"].addChild(
        Gaffer.NameValuePlug(
            "fileList",
            Gaffer.StringVectorDataPlug(defaultValue=IECore.StringVectorData())))
    self["__outputIndexCommand"]["variables"][2]["value"].setInput(
        self["__CameraSetup"]["renderFileList"])
    self["__outputIndexCommand"]["command"].setValue(inspect.cleandoc("""
        import os
        import distutils.dir_util

        # Ensure path exists
        distutils.dir_util.mkpath( variables["bakeDirectory"] )

        f = open( variables["indexFilePath"], "w" )

        f.writelines( [ i + "\\n" for i in sorted( variables["fileList"] ) ] )
        f.close()
        IECore.msg( IECore.MessageHandler.Level.Info, "Bake Process", "Wrote list of bake files for this chunk to " + variables["indexFilePath"] )
    """))

    self["__arnoldRender"] = GafferArnold.ArnoldRender()
    self["__arnoldRender"]["preTasks"][0].setInput(self["__outputIndexCommand"]["task"])
    self["__arnoldRender"]["dispatcher"]["immediate"].setValue(True)
    self["__arnoldRender"]["in"].setInput(self["__CameraSetup"]["out"])

    self["__bakeDirectoryContext"] = GafferDispatch.TaskContextVariables()
    self["__bakeDirectoryContext"]["variables"].addChild(
        Gaffer.NameValuePlug("bakeDirectory", Gaffer.StringPlug()))
    self["__bakeDirectoryContext"]["variables"][0]["value"].setInput(self["bakeDirectory"])
    self["__bakeDirectoryContext"]["preTasks"][0].setInput(self["__arnoldRender"]["task"])

    # Now set up the image tasks. This involves merging all layers for a UDIM, filling in the
    # background, writing out this image, converting it to tx, and optionally deleting all the exrs
    self["__imageList"] = Gaffer.CompoundObjectPlug(
        "__imageList", defaultValue=IECore.CompoundObject())
    self["__imageList"].setFlags(Gaffer.Plug.Flags.Serialisable, False)

    self["__ImageReader"] = GafferImage.ImageReader()
    self["__CurInputFileExpression"] = Gaffer.Expression()
    self["__CurInputFileExpression"].setExpression(inspect.cleandoc("""
        l = parent["__imageList"]
        outFile = context["wedge:outFile"]
        loopIndex = context[ "loop:index" ]
        parent["__ImageReader"]["fileName"] = l[outFile][ loopIndex ]
    """), "python")

    # Find the max size of any input file
    self["__SizeLoop"] = Gaffer.LoopComputeNode()
    self["__SizeLoop"].setup(Gaffer.IntPlug())

    self["__SizeMaxExpression"] = Gaffer.Expression()
    self["__SizeMaxExpression"].setExpression(inspect.cleandoc("""
        f = parent["__ImageReader"]["out"]["format"]
        parent["__SizeLoop"]["next"] = max( f.width(), parent["__SizeLoop"]["previous"] )
    """), "python")

    # Loop over all input files for this output file, and merge them all together
    self["__ImageLoop"] = Gaffer.LoopComputeNode()
    self["__ImageLoop"].setup(GafferImage.ImagePlug())

    self["__NumInputsForCurOutputExpression"] = Gaffer.Expression()
    self["__NumInputsForCurOutputExpression"].setExpression(inspect.cleandoc("""
        l = parent["__imageList"]
        outFile = context["wedge:outFile"]
        numInputs = len( l[outFile] )
        parent["__ImageLoop"]["iterations"] = numInputs
        parent["__SizeLoop"]["iterations"] = numInputs
    """), "python")

    self["__Resize"] = GafferImage.Resize()
    self["__Resize"]["format"]["displayWindow"]["min"].setValue(imath.V2i(0, 0))
    self["__Resize"]['format']["displayWindow"]["max"]["x"].setInput(self["__SizeLoop"]["out"])
    self["__Resize"]['format']["displayWindow"]["max"]["y"].setInput(self["__SizeLoop"]["out"])
    self["__Resize"]['in'].setInput(self["__ImageReader"]["out"])

    self["__Merge"] = GafferImage.Merge()
    self["__Merge"]["in"][0].setInput(self["__Resize"]["out"])
    self["__Merge"]["in"][1].setInput(self["__ImageLoop"]["previous"])
    self["__Merge"]["operation"].setValue(GafferImage.Merge.Operation.Add)

    self["__ImageLoop"]["next"].setInput(self["__Merge"]["out"])

    # Write out the combined image, so we can immediately read it back in
    # This is just because we're doing enough image processing that we
    # could saturate the cache, and Gaffer wouldn't know that this is
    # the important result to keep
    self["__ImageIntermediateWriter"] = GafferImage.ImageWriter()
    self["__ImageIntermediateWriter"]["in"].setInput(self["__ImageLoop"]["out"])

    self["__ImageIntermediateReader"] = GafferImage.ImageReader()

    # Now that we've merged everything together, we can use a BleedFill to fill in the background,
    # so that texture filtering across the edges will pull in colors that are at least reasonable.
    self["__BleedFill"] = GafferImage.BleedFill()
    self["__BleedFill"]["in"].setInput(self["__ImageIntermediateReader"]["out"])

    self["__Median"] = GafferImage.Median()
    self["__Median"]["in"].setInput(self["__BleedFill"]["out"])
    self["__Median"]["enabled"].setInput(self["applyMedianFilter"])
    self["__Median"]["radius"]["x"].setInput(self["medianRadius"])
    self["__Median"]["radius"]["y"].setInput(self["medianRadius"])

    # Write out the result
    self["__ImageWriter"] = GafferImage.ImageWriter()
    self["__ImageWriter"]["in"].setInput(self["__Median"]["out"])
    self["__ImageWriter"]["preTasks"][0].setInput(self["__ImageIntermediateWriter"]["task"])

    # Convert result to texture
    self["__ConvertCommand"] = GafferDispatch.SystemCommand()
    # We shouldn't need a sub-shell and this prevents S.I.P on the Mac from
    # blocking the dylibs loaded by maketx.
    self["__ConvertCommand"]["shell"].setValue(False)
    self["__ConvertCommand"]["substitutions"].addChild(
        Gaffer.NameValuePlug("inFile", IECore.StringData(), "member1"))
    # FIX : this plug was previously also named "member1", but
    # __CommandSetupExpression below writes to substitutions["member2"].
    self["__ConvertCommand"]["substitutions"].addChild(
        Gaffer.NameValuePlug("outFile", IECore.StringData(), "member2"))
    self["__ConvertCommand"]["preTasks"][0].setInput(self["__ImageWriter"]["task"])
    self["__ConvertCommand"]["command"].setValue('maketx --wrap clamp {inFile} -o {outFile}')

    self["__CommandSetupExpression"] = Gaffer.Expression()
    self["__CommandSetupExpression"].setExpression(inspect.cleandoc("""
        outFileBase = context["wedge:outFile"]
        intermediateExr = outFileBase + ".intermediate.exr"
        parent["__ImageIntermediateWriter"]["fileName"] = intermediateExr
        parent["__ImageIntermediateReader"]["fileName"] = intermediateExr
        tmpExr = outFileBase + ".tmp.exr"
        parent["__ImageWriter"]["fileName"] = tmpExr
        parent["__ConvertCommand"]["substitutions"]["member1"]["value"] = tmpExr
        parent["__ConvertCommand"]["substitutions"]["member2"]["value"] = outFileBase + ".tx"
    """), "python")

    self["__ImageWedge"] = GafferDispatch.Wedge()
    self["__ImageWedge"]["preTasks"][0].setInput(self["__ConvertCommand"]["task"])
    self["__ImageWedge"]["variable"].setValue('wedge:outFile')
    self["__ImageWedge"]["indexVariable"].setValue('wedge:outFileIndex')
    # FIX : Wedge lives in GafferDispatch ( as instantiated above ), not Gaffer.
    self["__ImageWedge"]["mode"].setValue(int(GafferDispatch.Wedge.Mode.StringList))

    self["__CleanUpCommand"] = GafferDispatch.PythonCommand()
    self["__CleanUpCommand"]["preTasks"][0].setInput(self["__ImageWedge"]["task"])
    self["__CleanUpCommand"]["variables"].addChild(
        Gaffer.NameValuePlug(
            "filesToDelete",
            Gaffer.StringVectorDataPlug(defaultValue=IECore.StringVectorData()),
            "member1"))
    self["__CleanUpCommand"]["command"].setValue(inspect.cleandoc("""
        import os
        for tmpFile in variables["filesToDelete"]:
            os.remove( tmpFile )
    """))

    self["__CleanUpExpression"] = Gaffer.Expression()
    self["__CleanUpExpression"].setExpression(inspect.cleandoc("""
        imageList = parent["__imageList"]

        toDelete = []
        for outFileBase, inputExrs in imageList.items():
            tmpExr = outFileBase + ".tmp.exr"
            intermediateExr = outFileBase + ".intermediate.exr"
            toDelete.extend( inputExrs )
            toDelete.append( tmpExr )
            toDelete.append( intermediateExr )
        toDelete.append( parent["__indexFilePath"] )

        parent["__CleanUpCommand"]["variables"]["member1"]["value"] = IECore.StringVectorData( toDelete )
    """), "python")

    self["__CleanUpSwitch"] = GafferDispatch.TaskSwitch()
    self["__CleanUpSwitch"]["preTasks"][0].setInput(self["__ImageWedge"]["task"])
    self["__CleanUpSwitch"]["preTasks"][1].setInput(self["__CleanUpCommand"]["task"])
    self["__CleanUpSwitch"]["index"].setInput(self["cleanupIntermediateFiles"])

    # Set up the list of input image files to process, and the corresponding list of
    # output files to wedge over
    self["__ImageSetupExpression"] = Gaffer.Expression()
    self["__ImageSetupExpression"].setExpression(inspect.cleandoc("""
        f = open( parent["__indexFilePath"], "r" )

        fileList = f.read().splitlines()

        fileDict = {}
        for i in fileList:
            rootName = i.rsplit( ".exr", 1 )[0]
            if rootName in fileDict:
                fileDict[ rootName ].append( i )
            else:
                fileDict[ rootName ] = IECore.StringVectorData( [i] )
        parent["__imageList"] = IECore.CompoundObject( fileDict )

        parent["__ImageWedge"]["strings"] = IECore.StringVectorData( fileDict.keys() )
    """), "python")