def __init__(self, name="SphereOrCube"): GafferScene.SceneNode.__init__(self, name) self["type"] = Gaffer.IntPlug( defaultValue=int(self.Type.Sphere), minValue=int(self.Type.Sphere), maxValue=int(self.Type.Cube), ) self["__sphere"] = GafferScene.Sphere() self["__sphere"]["enabled"].setInput(self["enabled"]) self["__cube"] = GafferScene.Cube() self["__cube"]["enabled"].setInput(self["enabled"]) self["__primitiveSwitch"] = GafferScene.SceneSwitch() self["__primitiveSwitch"]["index"].setInput(self["type"]) self["__primitiveSwitch"]["in"][0].setInput( self["__sphere"]["out"]) self["__primitiveSwitch"]["in"][1].setInput( self["__cube"]["out"]) self["out"].setInput(self["__primitiveSwitch"]["out"])
def __visitationGraph(self):
    """Builds a three-level script used to test graph visitation.

    # L1_1    L1_2
    # |       |\\
    # |       | \\
    # |       |  \\
    # L2_1    L2_2  L2_3
    # |\\      |     /
    # | \\     |    /
    # |  \\    |   /
    # |   \\   |  /
    # L3_1  L3_2
    """

    s = Gaffer.ScriptNode()

    for nodeName, nodeType in [
        ("L1_1", GafferTest.MultiplyNode),
        ("L1_2", GafferTest.AddNode),
        ("L2_1", GafferTest.AddNode),
        ("L2_2", GafferTest.MultiplyNode),
        ("L2_3", GafferTest.AddNode),
        ("L3_1", GafferTest.AddNode),
        ("L3_2", GafferTest.MultiplyNode),
    ]:
        s[nodeName] = nodeType()
    # L3_2 needs a third input for its three upstream connections.
    s["L3_2"]["op3"] = Gaffer.IntPlug()

    for dstNode, dstPlug, srcNode, srcPlug in [
        ("L2_1", "op1", "L1_1", "product"),
        ("L2_2", "op1", "L1_2", "sum"),
        ("L2_3", "op1", "L1_2", "sum"),
        ("L3_1", "op1", "L2_1", "sum"),
        ("L3_2", "op1", "L2_1", "sum"),
        ("L3_2", "op2", "L2_2", "product"),
        ("L3_2", "op3", "L2_3", "sum"),
    ]:
        s[dstNode][dstPlug].setInput(s[srcNode][srcPlug])

    return s
def testUndoAndRedoOrder(self):
    # Value changes made inside an UndoScope must be replayed in the same
    # order on undo/redo, as observed via plugSetSignal().
    script = Gaffer.ScriptNode()
    script["n"] = Gaffer.Node()
    script["n"]["p"] = Gaffer.IntPlug()

    observed = []

    def recordValue(plug):
        observed.append(plug.getValue())

    # Keep a reference so the connection stays alive for the test's duration.
    connection = script["n"].plugSetSignal().connect(recordValue)

    with Gaffer.UndoScope(script):
        script["n"]["p"].setValue(10)
        script["n"]["p"].setValue(20)
    self.assertEqual(observed, [10, 20])

    # Undo reverses the edits in reverse order : back to 10, then to 0.
    script.undo()
    self.assertEqual(observed, [10, 20, 10, 0])

    # Redo replays them in the original order.
    script.redo()
    self.assertEqual(observed, [10, 20, 10, 0, 10, 20])
def __init__( self, name = "_ObjectPreview" ) : Gaffer.Node.__init__( self, name ) self["fileName"] = Gaffer.StringPlug( defaultValue = "", substitutions = Gaffer.Context.Substitutions.NoSubstitutions ) self["frameRate"] = Gaffer.FloatPlug( defaultValue = 24.0 ) self["samplesPerFrame"] = Gaffer.IntPlug( defaultValue = 1, minValue = 1 ) # single object scenes using Reader ops behind the scenes? self["ObjectReader"] = Gaffer.ObjectReader() self["ObjectReaderExpression"] = Gaffer.Expression( "Expression" ) self["ObjectReaderExpression"].setExpression( ''' import IECore fileName = parent['fileName'] try : sequence = IECore.FileSequence( fileName ) calc = IECore.OversamplesCalculator( frameRate = parent["frameRate"], samplesPerFrame = parent["samplesPerFrame"] ) if isinstance( sequence.frameList, IECore.FrameRange ) and sequence.frameList.step == 1 : calc.setTicksPerSecond( 24 ) result = sequence.fileNameForFrame( calc.framesToTicks( context['frame'] ) ) except : result = fileName parent['ObjectReader']['fileName'] = result ''' ) self["ObjectToScene"] = GafferScene.ObjectToScene( "ObjectToScene" ) self["ObjectToScene"]["object"].setInput( self["ObjectReader"]["out"] ) self["out"] = GafferScene.ScenePlug( direction = Gaffer.Plug.Direction.Out ) self["out"].setInput( self["ObjectToScene"]["out"] )
def testReloadDoesntRemoveCustomPlugs(self):
    # Plugs unrelated to referencing shouldn't disappear when a reference is
    # reloaded. Various parts of the UI might be using them for other purposes.
    script = Gaffer.ScriptNode()
    script["n1"] = GafferTest.AddNode()
    script["n2"] = GafferTest.AddNode()
    script["n2"]["op1"].setInput(script["n1"]["sum"])

    box = Gaffer.Box.create(script, Gaffer.StandardSet([script["n1"]]))
    box.exportForReference("/tmp/test.grf")

    referencingScript = Gaffer.ScriptNode()
    referencingScript["r"] = Gaffer.Reference()
    referencingScript["r"].load("/tmp/test.grf")

    # Add a plug of our own, then reload - it must survive.
    referencingScript["r"]["mySpecialPlug"] = Gaffer.IntPlug(
        flags=Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic
    )
    referencingScript["r"].load("/tmp/test.grf")

    self.assertIn("mySpecialPlug", referencingScript["r"])
def testCanAnimate(self):
    s = Gaffer.ScriptNode()
    s["n"] = Gaffer.Node()

    dynamicFlags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic
    for plugName, plugType in (
        ("f", Gaffer.FloatPlug),
        ("i", Gaffer.IntPlug),
        ("b", Gaffer.BoolPlug),
        ("s", Gaffer.StringPlug),
    ):
        s["n"]["user"][plugName] = plugType(flags=dynamicFlags)

    # Numeric and boolean plugs can be animated; strings can not.
    self.assertTrue(Gaffer.Animation.canAnimate(s["n"]["user"]["f"]))
    self.assertTrue(Gaffer.Animation.canAnimate(s["n"]["user"]["i"]))
    self.assertTrue(Gaffer.Animation.canAnimate(s["n"]["user"]["b"]))
    self.assertFalse(Gaffer.Animation.canAnimate(s["n"]["user"]["s"]))

    # Can't key because it has an input.
    s["n"]["user"]["f"].setInput(s["n"]["user"]["i"])
    self.assertFalse(Gaffer.Animation.canAnimate(s["n"]["user"]["f"]))

    # Can't key because there's no parent where we can
    # put the Animation node.
    orphan = Gaffer.Node()
    orphan["user"]["f"] = Gaffer.FloatPlug(flags=dynamicFlags)
    self.assertFalse(Gaffer.Animation.canAnimate(orphan["user"]["f"]))
def testDefaultValue(self):
    plug = Gaffer.IntPlug("p", Gaffer.Plug.Direction.In, 10, 0, 20)

    # The default is reported, and used as the initial value.
    self.assertEqual(plug.defaultValue(), 10)
    self.assertEqual(plug.getValue(), 10)

    # Setting a value leaves the default untouched.
    plug.setValue(5)
    self.assertEqual(plug.getValue(), 5)
    self.assertEqual(plug.defaultValue(), 10)

    # setToDefault() restores the default value.
    plug.setToDefault()
    self.assertEqual(plug.defaultValue(), 10)
    self.assertEqual(plug.getValue(), 10)

    # And setting a value again still doesn't touch the default.
    plug.setValue(5)
    self.assertEqual(plug.getValue(), 5)
    self.assertEqual(plug.defaultValue(), 10)
def __init__(self, name="Wedge"): GafferDispatch.TaskContextProcessor.__init__(self, name) self["variable"] = Gaffer.StringPlug(defaultValue="wedge:value") self["indexVariable"] = Gaffer.StringPlug(defaultValue="wedge:index") self["mode"] = Gaffer.IntPlug( defaultValue=int(self.Mode.FloatRange), minValue=int(self.Mode.FloatRange), maxValue=int(self.Mode.StringList), ) # float range self["floatMin"] = Gaffer.FloatPlug(defaultValue=0) self["floatMax"] = Gaffer.FloatPlug(defaultValue=1) self["floatSteps"] = Gaffer.IntPlug(minValue=2, defaultValue=11) # int range self["intMin"] = Gaffer.IntPlug(defaultValue=0) self["intMax"] = Gaffer.IntPlug(defaultValue=5) self["intStep"] = Gaffer.IntPlug(minValue=1, defaultValue=1) # color range self["ramp"] = Gaffer.SplinefColor3fPlug( defaultValue=Gaffer.SplineDefinitionfColor3f(( (0, imath.Color3f(0)), (1, imath.Color3f(1)), ), Gaffer.SplineDefinitionInterpolation.CatmullRom)) self["colorSteps"] = Gaffer.IntPlug(defaultValue=5, minValue=2) # lists self["floats"] = Gaffer.FloatVectorDataPlug( defaultValue=IECore.FloatVectorData()) self["ints"] = Gaffer.IntVectorDataPlug( defaultValue=IECore.IntVectorData()) self["strings"] = Gaffer.StringVectorDataPlug( defaultValue=IECore.StringVectorData())
def __init__( self, name = "InternalConnectionsNode" ) : Gaffer.Node.__init__( self, name ) self["in1"] = Gaffer.IntPlug() self["in2"] = Gaffer.IntPlug() self["__in"] = Gaffer.IntPlug() self["out1"] = Gaffer.IntPlug( direction = Gaffer.Plug.Direction.Out ) self["out2"] = Gaffer.IntPlug( direction = Gaffer.Plug.Direction.Out ) self["__out"] = Gaffer.IntPlug( direction = Gaffer.Plug.Direction.Out ) self["out1"].setInput( self["in1"] ) self["__add"] = GafferTest.AddNode() self["__add"]["op1"].setInput( self["in2"] ) self["__add"]["op2"].setInput( self["__out"] ) self["__in"].setInput( self["__add"]["sum"] ) self["out2"].setInput( self["__add"]["sum"] )
def _addPlugs(self, despatcherPlug):
    # Attach a dynamic test plug, so tests can verify the setup hook ran.
    testPlug = Gaffer.IntPlug(
        direction=Gaffer.Plug.Direction.In,
        flags=Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic,
    )
    despatcherPlug["testDespatcherPlug"] = testPlug
def testAddColumnWithName(self):
    # An explicit `name` passed to addColumn() overrides the plug's own name,
    # both on the default row's cells and on the output plug.
    spreadsheet = Gaffer.Spreadsheet()
    spreadsheet["rows"].addColumn(Gaffer.IntPlug("x"), name="y")
    self.assertEqual(spreadsheet["rows"].defaultRow()["cells"][0].getName(), "y")
    self.assertEqual(spreadsheet["out"][0].getName(), "y")
def __init__(self, name="LayoutNode"): Gaffer.Node.__init__(self, name) self["top0"] = Gaffer.IntPlug() self["top1"] = Gaffer.IntPlug() self["top2"] = Gaffer.IntPlug() self["left0"] = Gaffer.IntPlug() self["left1"] = Gaffer.IntPlug() self["left2"] = Gaffer.IntPlug() self["bottom0"] = Gaffer.IntPlug(direction=Gaffer.Plug.Direction.Out) self["bottom1"] = Gaffer.IntPlug(direction=Gaffer.Plug.Direction.Out) self["bottom2"] = Gaffer.IntPlug(direction=Gaffer.Plug.Direction.Out) self["right0"] = Gaffer.IntPlug(direction=Gaffer.Plug.Direction.Out) self["right1"] = Gaffer.IntPlug(direction=Gaffer.Plug.Direction.Out) self["right2"] = Gaffer.IntPlug(direction=Gaffer.Plug.Direction.Out)
def testAcceptsNoneInput(self):
    # A None input (disconnection) must always be accepted.
    plug = Gaffer.IntPlug("hello")
    self.assertTrue(plug.acceptsInput(None))
def __init__(self, name="__CameraSetup"): GafferScene.FilteredSceneProcessor.__init__(self, name) # Public plugs self["cameraGroup"] = Gaffer.StringPlug("cameraGroup", Gaffer.Plug.Direction.In, "__TEXTUREBAKE_CAMERAS") self["bakeDirectory"] = Gaffer.StringPlug("bakeDirectory", Gaffer.Plug.Direction.In, "") self["defaultFileName"] = Gaffer.StringPlug( "defaultFileName", Gaffer.Plug.Direction.In, "${bakeDirectory}/<AOV>/<AOV>.<UDIM>.exr") self["defaultResolution"] = Gaffer.IntPlug( "defaultResolution", Gaffer.Plug.Direction.In, 512) self["uvSet"] = Gaffer.StringPlug("uvSet", Gaffer.Plug.Direction.In, "uv") self["normalOffset"] = Gaffer.FloatPlug("normalOffset", Gaffer.Plug.Direction.In, 0.1) self["aovs"] = Gaffer.StringPlug("aovs", Gaffer.Plug.Direction.In, "beauty:rgba") self["tasks"] = Gaffer.IntPlug("tasks", Gaffer.Plug.Direction.In, 1) self["taskIndex"] = Gaffer.IntPlug("taskIndex", Gaffer.Plug.Direction.In, 0) # Output self["renderFileList"] = Gaffer.StringVectorDataPlug( "renderFileList", Gaffer.Plug.Direction.Out, defaultValue=IECore.StringVectorData()) self["renderFileList"].setFlags(Gaffer.Plug.Flags.Serialisable, False) # Private internal network self["__udimQuery"] = GafferScene.UDIMQuery() self["__udimQuery"]["in"].setInput(self["in"]) self["__udimQuery"]["uvSet"].setInput(self["uvSet"]) self["__udimQuery"]["attributes"].setValue( "bake:resolution bake:fileName") self["__udimQuery"]["filter"].setInput(self["filter"]) self["__chunkedBakeInfo"] = Gaffer.CompoundObjectPlug( "__chunkedBakeInfo", Gaffer.Plug.Direction.In, IECore.CompoundObject()) self["__chunkedBakeInfo"].setFlags(Gaffer.Plug.Flags.Serialisable, False) self["__chunkExpression"] = Gaffer.Expression() self["__chunkExpression"].setExpression( inspect.cleandoc(""" # Locate the next point in the list of files to bake where we can split the list into chunks without # seperating two files that need to get combined into the same texture def nextChunkBreak( i, l ): while i > 0 and i < len( l ) and ( 
l[i - 1].get("udim") == l[i].get("udim") and l[i - 1].get("fileName") == l[i].get("fileName") ): i += 1 return i rawInfo = parent["__udimQuery"]["out"] defaultFileName = parent["defaultFileName"] defaultResolution = parent["defaultResolution"] listInfo = [] for udim, meshes in rawInfo.items(): for mesh, extraAttributes in meshes.items(): resolution = defaultResolution if "bake:resolution" in extraAttributes: resolution = extraAttributes["bake:resolution"].value fileName = defaultFileName if "bake:fileName" in extraAttributes: fileName = extraAttributes["bake:fileName"].value listInfo.append( { "udim" : int( udim ), "mesh" : mesh, "resolution" : resolution, "fileName" : fileName } ) listInfo.sort( key = lambda i: (i["fileName"], i["udim"] ) ) info = IECore.CompoundObject() numTasks = parent["tasks"] taskIndex = parent["taskIndex"] chunkStart = nextChunkBreak( ( taskIndex * len( listInfo ) ) / numTasks, listInfo ) chunkEnd = nextChunkBreak( ( ( taskIndex + 1 ) * len( listInfo ) ) / numTasks, listInfo ) dupeCount = 0 prevFileName = "" for i in listInfo[chunkStart:chunkEnd]: o = IECore.CompoundObject() o["mesh"] = IECore.StringData( i["mesh"] ) o["udim"] = IECore.IntData( i["udim"] ) o["resolution"] = IECore.IntData( i["resolution"] ) udimStr = str( i["udim"] ) fileName = i["fileName"].replace( "<UDIM>", udimStr ) if fileName == prevFileName: dupeCount += 1 fileName = fileName + ".layer" + str( dupeCount ) else: prevFileName = fileName dupeCount = 0 o["fileName"] = IECore.StringData( fileName ) name = o["mesh"].value.replace( "/", "_" ) + "." 
+ udimStr info[ name ] = o parent["__chunkedBakeInfo"] = info fileList = [] for name, i in info.items(): fileName = i["fileName"].value for nameAndAov in parent["aovs"].strip( " " ).split( " " ): fileList.append( i["fileName"].value.replace( "<AOV>", nameAndAov.split(":")[0] ) ) parent["renderFileList"] = IECore.StringVectorData( fileList ) """), "python") self["__parent"] = GafferScene.Parent() self["__parent"]["parent"].setValue("/") for c in [ 'bound', 'transform', 'attributes', 'object', 'childNames', 'setNames', 'set' ]: self["__parent"]["in"][c].setInput(self["in"][c]) self["__outputExpression"] = Gaffer.Expression() self["__outputExpression"].setExpression( inspect.cleandoc(""" import IECoreScene # Transfer all input globals except for outputs inGlobals = parent["in"]["globals"] outGlobals = IECore.CompoundObject() for key, value in inGlobals.items(): if not key.startswith( "output:" ): outGlobals[key] = value # Make our own outputs info = parent["__chunkedBakeInfo"] for cameraName, i in info.items(): params = IECore.CompoundData() fileName = i["fileName"].value params["camera"] = IECore.StringData( "/" + parent["cameraGroup"] + "/" + cameraName ) for nameAndAov in parent["aovs"].strip( " " ).split( " " ): tokens = nameAndAov.split( ":" ) if len( tokens ) != 2: raise RuntimeError( "Invalid bake aov specification: %s It should contain a : between name and data." ) ( aovName, aov ) = tokens aovFileName = fileName.replace( "<AOV>", aovName ) outGlobals["output:" + cameraName + "." 
+ aov] = IECoreScene.Output( aovFileName, "exr", aov + " RGBA", params ) parent["__parent"]["in"]["globals"] = outGlobals """), "python") self["__camera"] = GafferScene.Camera() self["__camera"]["projection"].setValue("orthographic") self["__cameraTweaks"] = GafferScene.CameraTweaks() self["__cameraTweaks"]["in"].setInput(self["__camera"]["out"]) self["__cameraTweaks"]["tweaks"][ "projection"] = GafferScene.TweakPlug("projection", "uv_camera") self["__cameraTweaks"]["tweaks"][ "resolution"] = GafferScene.TweakPlug("resolution", imath.V2i(0)) self["__cameraTweaks"]["tweaks"][ "u_offset"] = GafferScene.TweakPlug("u_offset", 0.0) self["__cameraTweaks"]["tweaks"][ "v_offset"] = GafferScene.TweakPlug("v_offset", 0.0) self["__cameraTweaks"]["tweaks"]["mesh"] = GafferScene.TweakPlug( "mesh", "") self["__cameraTweaks"]["tweaks"]["uv_set"] = GafferScene.TweakPlug( "uv_set", "") self["__cameraTweaks"]["tweaks"][ "extend_edges"] = GafferScene.TweakPlug("extend_edges", False) self["__cameraTweaks"]["tweaks"]["offset"] = GafferScene.TweakPlug( "offset", 0.1) self["__cameraTweaks"]["tweaks"]["offset"]["value"].setInput( self["normalOffset"]) self["__cameraTweaksFilter"] = GafferScene.PathFilter() self["__cameraTweaksFilter"]["paths"].setValue( IECore.StringVectorData(['/camera'])) self["__cameraTweaks"]["filter"].setInput( self["__cameraTweaksFilter"]["out"]) self["__collectScenes"] = GafferScene.CollectScenes() self["__collectScenes"]["sourceRoot"].setValue("/camera") self["__collectScenes"]["rootNameVariable"].setValue( "collect:cameraName") self["__collectScenes"]["in"].setInput( self["__cameraTweaks"]["out"]) self["__group"] = GafferScene.Group() self["__group"]["in"][0].setInput(self["__collectScenes"]["out"]) self["__group"]["name"].setInput(self["cameraGroup"]) self["__parent"]["children"][0].setInput(self["__group"]["out"]) self["__collectSceneRootsExpression"] = Gaffer.Expression() self["__collectSceneRootsExpression"].setExpression( inspect.cleandoc(""" info = 
parent["__chunkedBakeInfo"] parent["__collectScenes"]["rootNames"] = IECore.StringVectorData( info.keys() ) """), "python") self["__cameraSetupExpression"] = Gaffer.Expression() self["__cameraSetupExpression"].setExpression( inspect.cleandoc(""" cameraName = context["collect:cameraName"] info = parent["__chunkedBakeInfo"] i = info[cameraName] udimOffset = i["udim"].value - 1001 parent["__cameraTweaks"]["tweaks"]["resolution"]["value"] = imath.V2i( i["resolution"].value ) parent["__cameraTweaks"]["tweaks"]["u_offset"]["value"] = -( udimOffset % 10 ) parent["__cameraTweaks"]["tweaks"]["v_offset"]["value"] = -( udimOffset / 10 ) parent["__cameraTweaks"]["tweaks"]["mesh"]["value"] = i["mesh"].value parent["__cameraTweaks"]["tweaks"]["uv_set"]["value"] = parent["uvSet"] if parent["uvSet"] != "uv" else "" """), "python") self["out"].setFlags(Gaffer.Plug.Flags.Serialisable, False) self["out"].setInput(self["__parent"]["out"])
def __init__( self, name = "TaskSwitch" ) : GafferDispatch.ExecutableNode.__init__( self, name ) self["index"] = Gaffer.IntPlug( minValue = 0 )
def __init__( self, name = "MetadataTestNodeD" ) : Gaffer.Node.__init__( self, name ) self["a"] = Gaffer.IntPlug() self["b"] = Gaffer.IntPlug()
def testDerivingInPython(self):
    """Checks that Plug subclasses defined in Python can override
    acceptsInput(), setInput() and acceptsParent(), and can provide
    createCounterpart()."""

    class TestPlug(Gaffer.Plug):

        def __init__(self, name="TestPlug", direction=Gaffer.Plug.Direction.In, flags=Gaffer.Plug.Flags.Default):
            Gaffer.Plug.__init__(self, name, direction, flags)
            # Records whether our setInput() override was invoked.
            self.inputHasBeenSet = False

        def acceptsInput(self, plug):
            # Only accept connections from other TestPlugs (or disconnection).
            if not Gaffer.Plug.acceptsInput(self, plug):
                return False
            return isinstance(plug, (TestPlug, type(None)))

        def setInput(self, plug):
            Gaffer.Plug.setInput(self, plug)
            self.inputHasBeenSet = True

        def acceptsParent(self, potentialParent):
            # Refuse to be parented to CompoundPlugs.
            if not Gaffer.Plug.acceptsParent(self, potentialParent):
                return False
            if isinstance(potentialParent, Gaffer.CompoundPlug):
                return False
            return True

        def createCounterpart(self, name, direction):
            return TestPlug(name, direction, self.getFlags())

    IECore.registerRunTimeTyped(TestPlug)

    # check the constructor
    p1 = TestPlug("testIn")
    self.assertEqual(p1.getName(), "testIn")
    self.assertEqual(p1.direction(), Gaffer.Plug.Direction.In)
    self.assertEqual(p1.getFlags(), Gaffer.Plug.Flags.Default)

    n1 = Gaffer.Node()
    n1.addChild(p1)
    self.assertEqual(n1["testIn"], p1)

    n2 = Gaffer.Node()
    n2.addChild(
        TestPlug(name="testOut", direction=Gaffer.Plug.Direction.Out))
    n2.addChild(
        Gaffer.IntPlug(name="intOut", direction=Gaffer.Plug.Direction.Out))

    # check that acceptsInput and setInput can be overridden.
    # ( assertTrue/assertFalse replace the deprecated failUnless/failIf
    # aliases, which were removed from unittest in Python 3.12. )
    self.assertTrue(n1["testIn"].acceptsInput(n2["testOut"]))
    self.assertFalse(n1["testIn"].acceptsInput(n2["intOut"]))

    self.assertRaises(RuntimeError, n1["testIn"].setInput, n2["intOut"])
    self.assertEqual(n1["testIn"].inputHasBeenSet, False)

    n1["testIn"].setInput(n2["testOut"])
    self.assertEqual(n1["testIn"].inputHasBeenSet, True)
    self.assertEqual(n1["testIn"].getInput(), n2["testOut"])

    # check that acceptsParent can be overridden
    p2 = TestPlug()
    self.assertRaises(RuntimeError, Gaffer.CompoundPlug().addChild, p2)

    # try making a counterpart
    p3 = p2.createCounterpart("ll", Gaffer.Plug.Direction.Out)
    self.assertEqual(p3.getName(), "ll")
    self.assertEqual(p3.direction(), Gaffer.Plug.Direction.Out)
def createProcessor():
    # A TimeWarp set up to process a simple IntPlug.
    processor = Gaffer.TimeWarp()
    processor.setup(Gaffer.IntPlug())
    return processor
def __init__(self, name="ArnoldTextureBake"): GafferDispatch.TaskNode.__init__(self, name) self["in"] = GafferScene.ScenePlug() self["filter"] = GafferScene.FilterPlug() self["bakeDirectory"] = Gaffer.StringPlug("bakeDirectory", defaultValue="") self["defaultFileName"] = Gaffer.StringPlug( "defaultFileName", defaultValue="${bakeDirectory}/<AOV>/<AOV>.<UDIM>.exr") self["defaultResolution"] = Gaffer.IntPlug("defaultResolution", defaultValue=512) self["uvSet"] = Gaffer.StringPlug("uvSet", defaultValue='uv') self["normalOffset"] = Gaffer.FloatPlug("offset", defaultValue=0.1) self["aovs"] = Gaffer.StringPlug("aovs", defaultValue='beauty:RGBA') self["tasks"] = Gaffer.IntPlug("tasks", defaultValue=1) self["cleanupIntermediateFiles"] = Gaffer.BoolPlug( "cleanupIntermediateFiles", defaultValue=True) self["applyMedianFilter"] = Gaffer.BoolPlug("applyMedianFilter", Gaffer.Plug.Direction.In, False) self["medianRadius"] = Gaffer.IntPlug("medianRadius", Gaffer.Plug.Direction.In, 1) # Set up connection to preTasks beforehand self["__PreTaskList"] = GafferDispatch.TaskList() self["__PreTaskList"]["preTasks"].setInput(self["preTasks"]) self["__CleanPreTasks"] = Gaffer.DeleteContextVariables() self["__CleanPreTasks"].setup(GafferDispatch.TaskNode.TaskPlug()) self["__CleanPreTasks"]["in"].setInput(self["__PreTaskList"]["task"]) self["__CleanPreTasks"]["variables"].setValue( "BAKE_WEDGE:index BAKE_WEDGE:value_unused") # First, setup python commands which will dispatch a chunk of a render or image tasks as # immediate execution once they reach the farm - this allows us to run multiple tasks in # one farm process. 
self["__RenderDispatcher"] = GafferDispatch.PythonCommand() self["__RenderDispatcher"]["preTasks"][0].setInput( self["__CleanPreTasks"]["out"]) self["__RenderDispatcher"]["command"].setValue( inspect.cleandoc(""" import GafferDispatch # We need to access frame and "BAKE_WEDGE:index" so that the hash of render varies with the wedge index, # so we might as well print what we're doing IECore.msg( IECore.MessageHandler.Level.Info, "Bake Process", "Dispatching render task index %i for frame %i" % ( context["BAKE_WEDGE:index"], context.getFrame() ) ) d = GafferDispatch.LocalDispatcher() d.dispatch( [ self.parent()["__bakeDirectoryContext"] ] ) """)) self["__ImageDispatcher"] = GafferDispatch.PythonCommand() self["__ImageDispatcher"]["preTasks"][0].setInput( self["__RenderDispatcher"]["task"]) self["__ImageDispatcher"]["command"].setValue( inspect.cleandoc(""" import GafferDispatch # We need to access frame and "BAKE_WEDGE:index" so that the hash of render varies with the wedge index, # so we might as well print what we're doing IECore.msg( IECore.MessageHandler.Level.Info, "Bake Process", "Dispatching image task index %i for frame %i" % ( context["BAKE_WEDGE:index"], context.getFrame() ) ) d = GafferDispatch.LocalDispatcher() d.dispatch( [ self.parent()["__CleanUpSwitch"] ] ) """)) # Connect through the dispatch settings to the render dispatcher # ( The image dispatcher runs much quicker, and should be OK using default settings ) self["__RenderDispatcher"]["dispatcher"].setInput(self["dispatcher"]) # Set up variables so the dispatcher knows that the render and image dispatches depend on # the file paths ( in case they are varying in a wedge ) for redispatch in [ self["__RenderDispatcher"], self["__ImageDispatcher"] ]: redispatch["variables"].addChild( Gaffer.NameValuePlug("bakeDirectory", "", "bakeDirectoryVar")) redispatch["variables"].addChild( Gaffer.NameValuePlug("defaultFileName", "", "defaultFileNameVar")) # Connect the variables via an expression so that get 
expanded ( this also means that # if you put #### in a filename you will get per frame tasks, because the hash will depend # on frame number ) self["__DispatchVariableExpression"] = Gaffer.Expression() self["__DispatchVariableExpression"].setExpression( inspect.cleandoc(""" parent["__RenderDispatcher"]["variables"]["bakeDirectoryVar"]["value"] = parent["bakeDirectory"] parent["__RenderDispatcher"]["variables"]["defaultFileNameVar"]["value"] = parent["defaultFileName"] parent["__ImageDispatcher"]["variables"]["bakeDirectoryVar"]["value"] = parent["bakeDirectory"] parent["__ImageDispatcher"]["variables"]["defaultFileNameVar"]["value"] = parent["defaultFileName"] """), "python") # Wedge based on tasks into the overall number of tasks to run. Note that we don't know how # much work each task will do until we actually run the render tasks ( this is when scene # expansion happens ). Because we must group all tasks that write to the same file into the # same task batch, if tasks is a large number, some tasks batches could end up empty self["__MainWedge"] = GafferDispatch.Wedge() self["__MainWedge"]["preTasks"][0].setInput( self["__ImageDispatcher"]["task"]) self["__MainWedge"]["variable"].setValue("BAKE_WEDGE:value_unused") self["__MainWedge"]["indexVariable"].setValue("BAKE_WEDGE:index") self["__MainWedge"]["mode"].setValue(1) self["__MainWedge"]["intMin"].setValue(1) self["__MainWedge"]["intMax"].setInput(self["tasks"]) self["task"].setInput(self["__MainWedge"]["task"]) self["task"].setFlags(Gaffer.Plug.Flags.Serialisable, False) # Now set up the render tasks. This involves doing the actual rendering, and triggering the # output of the file list index file. 
# First get rid of options from the upstream scene that could mess up the bake self["__OptionOverrides"] = GafferScene.StandardOptions() self["__OptionOverrides"]["in"].setInput(self["in"]) self["__OptionOverrides"]["options"]["pixelAspectRatio"][ "enabled"].setValue(True) self["__OptionOverrides"]["options"]["resolutionMultiplier"][ "enabled"].setValue(True) self["__OptionOverrides"]["options"]["overscan"]["enabled"].setValue( True) self["__OptionOverrides"]["options"]["renderCropWindow"][ "enabled"].setValue(True) self["__OptionOverrides"]["options"]["cameraBlur"]["enabled"].setValue( True) self["__OptionOverrides"]["options"]["transformBlur"][ "enabled"].setValue(True) self["__OptionOverrides"]["options"]["deformationBlur"][ "enabled"].setValue(True) self["__CameraSetup"] = self.__CameraSetup() self["__CameraSetup"]["in"].setInput(self["__OptionOverrides"]["out"]) self["__CameraSetup"]["filter"].setInput(self["filter"]) self["__CameraSetup"]["defaultFileName"].setInput( self["defaultFileName"]) self["__CameraSetup"]["defaultResolution"].setInput( self["defaultResolution"]) self["__CameraSetup"]["uvSet"].setInput(self["uvSet"]) self["__CameraSetup"]["aovs"].setInput(self["aovs"]) self["__CameraSetup"]["normalOffset"].setInput(self["normalOffset"]) self["__CameraSetup"]["tasks"].setInput(self["tasks"]) self["__Expression"] = Gaffer.Expression() self["__Expression"].setExpression( 'parent["__CameraSetup"]["taskIndex"] = context.get( "BAKE_WEDGE:index", 0 )', "python") self["__indexFilePath"] = Gaffer.StringPlug() self["__indexFilePath"].setFlags(Gaffer.Plug.Flags.Serialisable, False) self["__IndexFileExpression"] = Gaffer.Expression() self["__IndexFileExpression"].setExpression( inspect.cleandoc(""" import os parent["__indexFilePath"] = os.path.join( parent["bakeDirectory"], "BAKE_FILE_INDEX_" + str( context.get("BAKE_WEDGE:index", 0 ) ) + ".####.txt" ) """), "python") self["__outputIndexCommand"] = Gaffer.PythonCommand() 
self["__outputIndexCommand"]["variables"].addChild( Gaffer.NameValuePlug("bakeDirectory", Gaffer.StringPlug())) self["__outputIndexCommand"]["variables"][0]["value"].setInput( self["bakeDirectory"]) self["__outputIndexCommand"]["variables"].addChild( Gaffer.NameValuePlug("indexFilePath", Gaffer.StringPlug())) self["__outputIndexCommand"]["variables"][1]["value"].setInput( self["__indexFilePath"]) self["__outputIndexCommand"]["variables"].addChild( Gaffer.NameValuePlug( "fileList", Gaffer.StringVectorDataPlug( defaultValue=IECore.StringVectorData()))) self["__outputIndexCommand"]["variables"][2]["value"].setInput( self["__CameraSetup"]["renderFileList"]) self["__outputIndexCommand"]["command"].setValue( inspect.cleandoc(""" import os import distutils.dir_util # Ensure path exists distutils.dir_util.mkpath( variables["bakeDirectory"] ) f = open( variables["indexFilePath"], "w" ) f.writelines( [ i + "\\n" for i in sorted( variables["fileList"] ) ] ) f.close() IECore.msg( IECore.MessageHandler.Level.Info, "Bake Process", "Wrote list of bake files for this chunk to " + variables["indexFilePath"] ) """)) self["__arnoldRender"] = GafferArnold.ArnoldRender() self["__arnoldRender"]["preTasks"][0].setInput( self["__outputIndexCommand"]["task"]) self["__arnoldRender"]["dispatcher"]["immediate"].setValue(True) self["__arnoldRender"]["in"].setInput(self["__CameraSetup"]["out"]) self["__bakeDirectoryContext"] = GafferDispatch.TaskContextVariables() self["__bakeDirectoryContext"]["variables"].addChild( Gaffer.NameValuePlug("bakeDirectory", Gaffer.StringPlug())) self["__bakeDirectoryContext"]["variables"][0]["value"].setInput( self["bakeDirectory"]) self["__bakeDirectoryContext"]["preTasks"][0].setInput( self["__arnoldRender"]["task"]) # Now set up the image tasks. 
This involves merging all layers for a UDIM, filling in the # background, writing out this image, converting it to tx, and optionally deleting all the exrs self["__imageList"] = Gaffer.CompoundObjectPlug( "__imageList", defaultValue=IECore.CompoundObject()) self["__imageList"].setFlags(Gaffer.Plug.Flags.Serialisable, False) self["__ImageReader"] = GafferImage.ImageReader() self["__CurInputFileExpression"] = Gaffer.Expression() self["__CurInputFileExpression"].setExpression( inspect.cleandoc(""" l = parent["__imageList"] outFile = context["wedge:outFile"] loopIndex = context[ "loop:index" ] parent["__ImageReader"]["fileName"] = l[outFile][ loopIndex ] """), "python") # Find the max size of any input file self["__SizeLoop"] = Gaffer.LoopComputeNode() self["__SizeLoop"].setup(Gaffer.IntPlug()) self["__SizeMaxExpression"] = Gaffer.Expression() self["__SizeMaxExpression"].setExpression( inspect.cleandoc(""" f = parent["__ImageReader"]["out"]["format"] parent["__SizeLoop"]["next"] = max( f.width(), parent["__SizeLoop"]["previous"] ) """), "python") # Loop over all input files for this output file, and merge them all together self["__ImageLoop"] = Gaffer.LoopComputeNode() self["__ImageLoop"].setup(GafferImage.ImagePlug()) self["__NumInputsForCurOutputExpression"] = Gaffer.Expression() self["__NumInputsForCurOutputExpression"].setExpression( inspect.cleandoc(""" l = parent["__imageList"] outFile = context["wedge:outFile"] numInputs = len( l[outFile] ) parent["__ImageLoop"]["iterations"] = numInputs parent["__SizeLoop"]["iterations"] = numInputs """), "python") self["__Resize"] = GafferImage.Resize() self["__Resize"]["format"]["displayWindow"]["min"].setValue( imath.V2i(0, 0)) self["__Resize"]['format']["displayWindow"]["max"]["x"].setInput( self["__SizeLoop"]["out"]) self["__Resize"]['format']["displayWindow"]["max"]["y"].setInput( self["__SizeLoop"]["out"]) self["__Resize"]['in'].setInput(self["__ImageReader"]["out"]) self["__Merge"] = GafferImage.Merge() 
self["__Merge"]["in"][0].setInput(self["__Resize"]["out"]) self["__Merge"]["in"][1].setInput(self["__ImageLoop"]["previous"]) self["__Merge"]["operation"].setValue(GafferImage.Merge.Operation.Add) self["__ImageLoop"]["next"].setInput(self["__Merge"]["out"]) # Write out the combined image, so we can immediately read it back in # This is just because we're doing enough image processing that we # could saturate the cache, and Gaffer wouldn't know that this is # the important result to keep self["__ImageIntermediateWriter"] = GafferImage.ImageWriter() self["__ImageIntermediateWriter"]["in"].setInput( self["__ImageLoop"]["out"]) self["__ImageIntermediateReader"] = GafferImage.ImageReader() # Now that we've merged everything together, we can use a BleedFill to fill in the background, # so that texture filtering across the edges will pull in colors that are at least reasonable. self["__BleedFill"] = GafferImage.BleedFill() self["__BleedFill"]["in"].setInput( self["__ImageIntermediateReader"]["out"]) self["__Median"] = GafferImage.Median() self["__Median"]["in"].setInput(self["__BleedFill"]["out"]) self["__Median"]["enabled"].setInput(self["applyMedianFilter"]) self["__Median"]["radius"]["x"].setInput(self["medianRadius"]) self["__Median"]["radius"]["y"].setInput(self["medianRadius"]) # Write out the result self["__ImageWriter"] = GafferImage.ImageWriter() self["__ImageWriter"]["in"].setInput(self["__Median"]["out"]) self["__ImageWriter"]["preTasks"][0].setInput( self["__ImageIntermediateWriter"]["task"]) # Convert result to texture self["__ConvertCommand"] = GafferDispatch.SystemCommand() # We shouldn't need a sub-shell and this prevents S.I.P on the Mac from # blocking the dylibs loaded by maketx. 
self["__ConvertCommand"]["shell"].setValue(False) self["__ConvertCommand"]["substitutions"].addChild( Gaffer.NameValuePlug("inFile", IECore.StringData(), "member1")) self["__ConvertCommand"]["substitutions"].addChild( Gaffer.NameValuePlug("outFile", IECore.StringData(), "member1")) self["__ConvertCommand"]["preTasks"][0].setInput( self["__ImageWriter"]["task"]) self["__ConvertCommand"]["command"].setValue( 'maketx --wrap clamp {inFile} -o {outFile}') self["__CommandSetupExpression"] = Gaffer.Expression() self["__CommandSetupExpression"].setExpression( inspect.cleandoc(""" outFileBase = context["wedge:outFile"] intermediateExr = outFileBase + ".intermediate.exr" parent["__ImageIntermediateWriter"]["fileName"] = intermediateExr parent["__ImageIntermediateReader"]["fileName"] = intermediateExr tmpExr = outFileBase + ".tmp.exr" parent["__ImageWriter"]["fileName"] = tmpExr parent["__ConvertCommand"]["substitutions"]["member1"]["value"] = tmpExr parent["__ConvertCommand"]["substitutions"]["member2"]["value"] = outFileBase + ".tx" """), "python") self["__ImageWedge"] = GafferDispatch.Wedge() self["__ImageWedge"]["preTasks"][0].setInput( self["__ConvertCommand"]["task"]) self["__ImageWedge"]["variable"].setValue('wedge:outFile') self["__ImageWedge"]["indexVariable"].setValue('wedge:outFileIndex') self["__ImageWedge"]["mode"].setValue(int( Gaffer.Wedge.Mode.StringList)) self["__CleanUpCommand"] = GafferDispatch.PythonCommand() self["__CleanUpCommand"]["preTasks"][0].setInput( self["__ImageWedge"]["task"]) self["__CleanUpCommand"]["variables"].addChild( Gaffer.NameValuePlug( "filesToDelete", Gaffer.StringVectorDataPlug( defaultValue=IECore.StringVectorData()), "member1")) self["__CleanUpCommand"]["command"].setValue( inspect.cleandoc(""" import os for tmpFile in variables["filesToDelete"]: os.remove( tmpFile ) """)) self["__CleanUpExpression"] = Gaffer.Expression() self["__CleanUpExpression"].setExpression( inspect.cleandoc(""" imageList = parent["__imageList"] toDelete = [] 
for outFileBase, inputExrs in imageList.items(): tmpExr = outFileBase + ".tmp.exr" intermediateExr = outFileBase + ".intermediate.exr" toDelete.extend( inputExrs ) toDelete.append( tmpExr ) toDelete.append( intermediateExr ) toDelete.append( parent["__indexFilePath"] ) parent["__CleanUpCommand"]["variables"]["member1"]["value"] = IECore.StringVectorData( toDelete ) """), "python") self["__CleanUpSwitch"] = GafferDispatch.TaskSwitch() self["__CleanUpSwitch"]["preTasks"][0].setInput( self["__ImageWedge"]["task"]) self["__CleanUpSwitch"]["preTasks"][1].setInput( self["__CleanUpCommand"]["task"]) self["__CleanUpSwitch"]["index"].setInput( self["cleanupIntermediateFiles"]) # Set up the list of input image files to process, and the corresponding list of # output files to wedge over self["__ImageSetupExpression"] = Gaffer.Expression() self["__ImageSetupExpression"].setExpression( inspect.cleandoc(""" f = open( parent["__indexFilePath"], "r" ) fileList = f.read().splitlines() fileDict = {} for i in fileList: rootName = i.rsplit( ".exr", 1 )[0] if rootName in fileDict: fileDict[ rootName ].append( i ) else: fileDict[ rootName ] = IECore.StringVectorData( [i] ) parent["__imageList"] = IECore.CompoundObject( fileDict ) parent["__ImageWedge"]["strings"] = IECore.StringVectorData( fileDict.keys() ) """), "python")
def __init__( self, name = "__CameraSetup" ) :
	"""Builds the internal node network that creates one orthographic UV
	"bake camera" per ( fileName, udim ) chunk, parents the camera group
	into the input scene, and registers the matching render outputs in
	the scene globals. The chunking is driven by the `tasks`/`taskIndex`
	plugs so the work can be split across dispatch tasks."""

	GafferScene.FilteredSceneProcessor.__init__( self, name )

	# Public plugs
	self["cameraGroup"] = Gaffer.StringPlug( "cameraGroup", Gaffer.Plug.Direction.In, "__TEXTUREBAKE_CAMERAS" )
	self["bakeDirectory"] = Gaffer.StringPlug( "bakeDirectory", Gaffer.Plug.Direction.In, "" )
	self["defaultFileName"] = Gaffer.StringPlug( "defaultFileName", Gaffer.Plug.Direction.In, "${bakeDirectory}/<AOV>/<AOV>.<UDIM>.exr" )
	self["defaultResolution"] = Gaffer.IntPlug( "defaultResolution", Gaffer.Plug.Direction.In, 512 )
	self["uvSet"] = Gaffer.StringPlug( "uvSet", Gaffer.Plug.Direction.In, "uv" )
	self["udims"] = Gaffer.StringPlug( "udims", Gaffer.Plug.Direction.In, "" )
	self["normalOffset"] = Gaffer.FloatPlug( "normalOffset", Gaffer.Plug.Direction.In, 0.1 )
	self["aovs"] = Gaffer.StringPlug( "aovs", Gaffer.Plug.Direction.In, "beauty:rgba" )
	self["tasks"] = Gaffer.IntPlug( "tasks", Gaffer.Plug.Direction.In, 1 )
	self["taskIndex"] = Gaffer.IntPlug( "taskIndex", Gaffer.Plug.Direction.In, 0 )

	# Output : the list of image files this task's chunk will render.
	# Not serialisable because it is wholly computed by the expression below.
	self["renderFileList"] = Gaffer.StringVectorDataPlug( "renderFileList", Gaffer.Plug.Direction.Out, defaultValue = IECore.StringVectorData() )
	self["renderFileList"].setFlags( Gaffer.Plug.Flags.Serialisable, False )

	# Private internal network

	# Query which meshes/udims carry bake overrides on the filtered locations.
	self["__udimQuery"] = GafferScene.UDIMQuery()
	self["__udimQuery"]["in"].setInput( self["in"] )
	self["__udimQuery"]["uvSet"].setInput( self["uvSet"] )
	self["__udimQuery"]["attributes"].setValue( "bake:resolution bake:fileName" )
	self["__udimQuery"]["filter"].setInput( self["filter"] )

	# Holds the per-camera bake metadata computed by __chunkExpression;
	# purely intermediate, so not serialised.
	self["__chunkedBakeInfo"] = Gaffer.CompoundObjectPlug( "__chunkedBakeInfo", Gaffer.Plug.Direction.In, IECore.CompoundObject() )
	self["__chunkedBakeInfo"].setFlags( Gaffer.Plug.Flags.Serialisable, False )

	# Groups the UDIM query results into ( fileName, udim ) buckets, picks this
	# task's slice of them, and publishes the per-camera info plus the file list.
	self["__chunkExpression"] = Gaffer.Expression()
	self["__chunkExpression"].setExpression( inspect.cleandoc(
		"""
		import collections
		import re

		rawInfo = parent["__udimQuery"]["out"]

		defaultFileName = parent["defaultFileName"]
		defaultResolution = parent["defaultResolution"]
		selectUdimsStr = parent["udims"]

		# FrameList really ought to take care of this check, instead of just doing
		# something obviously wrong
		if re.match( ".*[0-9] +[0-9].*", selectUdimsStr ):
			raise RuntimeError( "ArnoldTextureBake : Udim list must be comma separated." )
		selectUdims = set( IECore.FrameList.parse( selectUdimsStr ).asList() )

		allMeshes = collections.defaultdict( lambda : [] )
		for udim, meshes in rawInfo.items():
			if selectUdims and not int( udim ) in selectUdims:
				continue

			for mesh, extraAttributes in meshes.items():
				resolution = defaultResolution
				if "bake:resolution" in extraAttributes:
					resolution = extraAttributes["bake:resolution"].value

				fileName = defaultFileName
				if "bake:fileName" in extraAttributes:
					fileName = extraAttributes["bake:fileName"].value

				allMeshes[ (fileName, udim) ].append( { "mesh" : mesh, "resolution" : resolution } )

		fileList = sorted( allMeshes.keys() )

		info = IECore.CompoundObject()

		numTasks = min( parent["tasks"], len( fileList ) )
		taskIndex = parent["taskIndex"]

		if taskIndex < numTasks:

			chunkStart = ( taskIndex * len( fileList ) ) // numTasks
			chunkEnd = ( ( taskIndex + 1 ) * len( fileList ) ) // numTasks

			dupeCount = 0
			prevFileName = ""
			for fileNameTemplate, udim in fileList[chunkStart:chunkEnd]:
				for meshData in allMeshes[(fileNameTemplate, udim)]:
					o = IECore.CompoundObject()
					o["mesh"] = IECore.StringData( meshData["mesh"] )
					o["udim"] = IECore.IntData( int( udim ) )
					o["resolution"] = IECore.IntData( meshData["resolution"] )
					udimStr = str( udim )
					fileName = fileNameTemplate.replace( "<UDIM>", udimStr )
					if fileName == prevFileName:
						dupeCount += 1
						fileName = fileName + ".layer" + str( dupeCount )
					else:
						prevFileName = fileName
						dupeCount = 0

					o["fileName"] = IECore.StringData( fileName )
					name = o["mesh"].value.replace( "/", "_" ) + "." + udimStr
					info[ name ] = o

		parent["__chunkedBakeInfo"] = info

		fileList = []
		for name, i in info.items():
			fileName = i["fileName"].value
			for nameAndAov in parent["aovs"].strip( " " ).split( " " ):
				fileList.append( i["fileName"].value.replace( "<AOV>", nameAndAov.split(":")[0] ) )
		parent["renderFileList"] = IECore.StringVectorData( fileList )
		"""
	), "python" )

	# Parents the generated camera group into the input scene; all non-globals
	# scene properties pass straight through from "in".
	self["__parent"] = GafferScene.Parent()
	self["__parent"]["parent"].setValue( "/" )
	for c in ['bound', 'transform', 'attributes', 'object', 'childNames', 'setNames', 'set']:
		self["__parent"]["in"][c].setInput( self["in"][c] )

	# Rewrites the scene globals : keeps everything except existing outputs,
	# then adds one output per camera/aov combination.
	# NOTE(review): the "%s" in the RuntimeError message inside the expression
	# below is never substituted ( no "% nameAndAov" ) — looks like a missing
	# format argument; confirm against upstream before changing the string.
	self["__outputExpression"] = Gaffer.Expression()
	self["__outputExpression"].setExpression( inspect.cleandoc(
		"""
		import IECoreScene

		# Transfer all input globals except for outputs
		inGlobals = parent["in"]["globals"]
		outGlobals = IECore.CompoundObject()
		for key, value in inGlobals.items():
			if not key.startswith( "output:" ):
				outGlobals[key] = value

		# Make our own outputs
		info = parent["__chunkedBakeInfo"]
		for cameraName, i in info.items():
			params = IECore.CompoundData()
			fileName = i["fileName"].value
			params["camera"] = IECore.StringData( "/" + parent["cameraGroup"] + "/" + cameraName )
			for nameAndAov in parent["aovs"].strip( " " ).split( " " ):
				tokens = nameAndAov.split( ":" )
				if len( tokens ) != 2:
					raise RuntimeError( "Invalid bake aov specification: %s It should contain a : between name and data." )
				( aovName, aov ) = tokens
				aovFileName = fileName.replace( "<AOV>", aovName )
				outGlobals["output:" + cameraName + "." + aov] = IECoreScene.Output( aovFileName, "exr", aov + " RGBA", params )
		parent["__parent"]["in"]["globals"] = outGlobals
		"""
	), "python" )

	# The template bake camera. Tweaked per collected camera below.
	self["__camera"] = GafferScene.Camera()
	self["__camera"]["projection"].setValue( "orthographic" )

	self["__cameraTweaks"] = GafferScene.CameraTweaks()
	self["__cameraTweaks"]["in"].setInput( self["__camera"]["out"] )
	self["__cameraTweaks"]["tweaks"]["projection"] = GafferScene.TweakPlug( "projection", "uv_camera" )
	self["__cameraTweaks"]["tweaks"]["resolution"] = GafferScene.TweakPlug( "resolution", imath.V2i( 0 ) )
	self["__cameraTweaks"]["tweaks"]["u_offset"] = GafferScene.TweakPlug( "u_offset", 0.0 )
	self["__cameraTweaks"]["tweaks"]["v_offset"] = GafferScene.TweakPlug( "v_offset", 0.0 )
	self["__cameraTweaks"]["tweaks"]["mesh"] = GafferScene.TweakPlug( "mesh", "" )
	self["__cameraTweaks"]["tweaks"]["uv_set"] = GafferScene.TweakPlug( "uv_set", "" )
	self["__cameraTweaks"]["tweaks"]["extend_edges"] = GafferScene.TweakPlug( "extend_edges", False )
	self["__cameraTweaks"]["tweaks"]["offset"] = GafferScene.TweakPlug( "offset", 0.1 )

	self["__cameraTweaks"]["tweaks"]["offset"]["value"].setInput( self["normalOffset"] )

	self["__cameraTweaksFilter"] = GafferScene.PathFilter()
	self["__cameraTweaksFilter"]["paths"].setValue( IECore.StringVectorData( [ '/camera' ] ) )
	self["__cameraTweaks"]["filter"].setInput( self["__cameraTweaksFilter"]["out"] )

	# One tweaked camera per entry in __chunkedBakeInfo, named after the entry.
	self["__collectScenes"] = GafferScene.CollectScenes()
	self["__collectScenes"]["sourceRoot"].setValue( "/camera" )
	self["__collectScenes"]["rootNameVariable"].setValue( "collect:cameraName" )
	self["__collectScenes"]["in"].setInput( self["__cameraTweaks"]["out"] )

	self["__group"] = GafferScene.Group()
	self["__group"]["in"][0].setInput( self["__collectScenes"]["out"] )
	self["__group"]["name"].setInput( self["cameraGroup"] )

	self["__parent"]["children"][0].setInput( self["__group"]["out"] )

	# Drive the collected camera names from the chunk info.
	self["__collectSceneRootsExpression"] = Gaffer.Expression()
	self["__collectSceneRootsExpression"].setExpression( inspect.cleandoc(
		"""
		info = parent["__chunkedBakeInfo"]
		parent["__collectScenes"]["rootNames"] = IECore.StringVectorData( info.keys() )
		"""
	), "python" )

	# Per-camera tweak values : resolution, udim offset within the UV tile
	# grid, source mesh and uv set.
	self["__cameraSetupExpression"] = Gaffer.Expression()
	self["__cameraSetupExpression"].setExpression( inspect.cleandoc(
		"""
		cameraName = context["collect:cameraName"]
		info = parent["__chunkedBakeInfo"]
		i = info[cameraName]
		udimOffset = i["udim"].value - 1001
		parent["__cameraTweaks"]["tweaks"]["resolution"]["value"] = imath.V2i( i["resolution"].value )
		parent["__cameraTweaks"]["tweaks"]["u_offset"]["value"] = -( udimOffset % 10 )
		parent["__cameraTweaks"]["tweaks"]["v_offset"]["value"] = -( udimOffset // 10 )
		parent["__cameraTweaks"]["tweaks"]["mesh"]["value"] = i["mesh"].value
		parent["__cameraTweaks"]["tweaks"]["uv_set"]["value"] = parent["uvSet"] if parent["uvSet"] != "uv" else ""
		"""
	), "python" )

	self["out"].setFlags( Gaffer.Plug.Flags.Serialisable, False )
	self["out"].setInput( self["__parent"]["out"] )
def testMerging(self):
    """End-to-end test of ArnoldTextureBake merging multiple source meshes
    into shared UDIM textures.

    Builds a plane carrying a UV gradient shader, splits it into 4 meshes by
    a procedural face id, then bakes three variants ( complete / incomplete /
    resolution-mismatch ) and checks the baked .tx and .exr results against
    the analytic gradient via image statistics.

    NOTE(review): requires an Arnold licence and writes into
    self.temporaryDirectory(); dispatches a real local render.
    """

    allFilter = GafferScene.PathFilter()
    allFilter["paths"].setValue(IECore.StringVectorData(['/...']))

    plane = GafferScene.Plane()
    plane["divisions"].setValue(imath.V2i(20, 20))

    # Assign a basic gradient shader
    uvGradientCode = GafferOSL.OSLCode()
    uvGradientCode["out"].addChild(
        Gaffer.Color3fPlug("out", direction=Gaffer.Plug.Direction.Out))
    uvGradientCode["code"].setValue('out = color( u, v, 0.5 );')

    shaderAssignment = GafferScene.ShaderAssignment()
    shaderAssignment["in"].setInput(plane["out"])
    shaderAssignment["filter"].setInput(allFilter["out"])
    shaderAssignment["shader"].setInput(uvGradientCode["out"]["out"])

    # Set up a random id from 0 - 3 on each face
    randomCode = GafferOSL.OSLCode()
    randomCode["out"].addChild(
        Gaffer.IntPlug("randomId", direction=Gaffer.Plug.Direction.Out))
    randomCode["code"].setValue(
        'randomId = int(cellnoise( P * 100 ) * 4);')

    outInt = GafferOSL.OSLShader()
    outInt.loadShader("ObjectProcessing/OutInt")
    outInt["parameters"]["name"].setValue('randomId')
    outInt["parameters"]["value"].setInput(randomCode["out"]["randomId"])

    outObject = GafferOSL.OSLShader()
    outObject.loadShader("ObjectProcessing/OutObject")
    outObject["parameters"]["in0"].setInput(
        outInt["out"]["primitiveVariable"])

    # Bake the randomId onto the mesh as a uniform primitive variable
    # ( interpolation 2 ).
    oSLObject = GafferOSL.OSLObject()
    oSLObject["in"].setInput(shaderAssignment["out"])
    oSLObject["filter"].setInput(allFilter["out"])
    oSLObject["shader"].setInput(outObject["out"])
    oSLObject["interpolation"].setValue(2)

    # Create 4 meshes by picking each of the 4 ids
    deleteContextVariables = Gaffer.DeleteContextVariables()
    deleteContextVariables.setup(GafferScene.ScenePlug())
    deleteContextVariables["variables"].setValue('collect:rootName')
    deleteContextVariables["in"].setInput(oSLObject["out"])

    pickCode = GafferOSL.OSLCode()
    pickCode["parameters"].addChild(Gaffer.IntPlug("targetId"))
    pickCode["out"].addChild(
        Gaffer.IntPlug("cull", direction=Gaffer.Plug.Direction.Out))
    pickCode["code"].setValue(
        'int randomId; getattribute( "randomId", randomId ); cull = randomId != targetId;')

    # targetId is driven from the CollectScenes root name ( "0" .. "3" ).
    expression = Gaffer.Expression()
    pickCode.addChild(expression)
    expression.setExpression(
        'parent.parameters.targetId = stoi( context( "collect:rootName", "0" ) );',
        "OSL")

    outInt1 = GafferOSL.OSLShader()
    outInt1.loadShader("ObjectProcessing/OutInt")
    outInt1["parameters"]["name"].setValue('deleteFaces')
    outInt1["parameters"]["value"].setInput(pickCode["out"]["cull"])

    outObject1 = GafferOSL.OSLShader()
    outObject1.loadShader("ObjectProcessing/OutObject")
    outObject1["parameters"]["in0"].setInput(
        outInt1["out"]["primitiveVariable"])

    oSLObject1 = GafferOSL.OSLObject()
    oSLObject1["in"].setInput(deleteContextVariables["out"])
    oSLObject1["filter"].setInput(allFilter["out"])
    oSLObject1["shader"].setInput(outObject1["out"])
    oSLObject1["interpolation"].setValue(2)

    deleteFaces = GafferScene.DeleteFaces()
    deleteFaces["in"].setInput(oSLObject1["out"])
    deleteFaces["filter"].setInput(allFilter["out"])

    collectScenes = GafferScene.CollectScenes()
    collectScenes["in"].setInput(deleteFaces["out"])
    collectScenes["rootNames"].setValue(
        IECore.StringVectorData(['0', '1', '2', '3']))
    collectScenes["sourceRoot"].setValue('/plane')

    # First variant: bake everything, covering the whole 1001 UDIM
    customAttributes1 = GafferScene.CustomAttributes()
    customAttributes1["attributes"].addMember(
        'bake:fileName', IECore.StringData(
            '${bakeDirectory}/complete/<AOV>/<AOV>.<UDIM>.exr'))
    customAttributes1["in"].setInput(collectScenes["out"])

    # Second variant: bake just 2 of the 4 meshes, leaving lots of holes that will need filling
    pruneFilter = GafferScene.PathFilter()
    pruneFilter["paths"].setValue(IECore.StringVectorData(['/2', '/3']))

    prune = GafferScene.Prune()
    prune["in"].setInput(collectScenes["out"])
    prune["filter"].setInput(pruneFilter["out"])

    customAttributes2 = GafferScene.CustomAttributes()
    customAttributes2["attributes"].addMember(
        'bake:fileName', IECore.StringData(
            '${bakeDirectory}/incomplete/<AOV>/<AOV>.<UDIM>.exr'))
    customAttributes2["in"].setInput(prune["out"])

    # Third variant: bake everything, but with one mesh at a higher resolution
    customAttributes3 = GafferScene.CustomAttributes()
    customAttributes3["attributes"].addMember(
        'bake:fileName', IECore.StringData(
            '${bakeDirectory}/mismatch/<AOV>/<AOV>.<UDIM>.exr'))
    customAttributes3["in"].setInput(collectScenes["out"])

    pathFilter2 = GafferScene.PathFilter()
    pathFilter2["paths"].setValue(IECore.StringVectorData(['/2']))

    customAttributes = GafferScene.CustomAttributes()
    customAttributes["attributes"].addMember('bake:resolution',
                                             IECore.IntData(200))
    customAttributes["filter"].setInput(pathFilter2["out"])
    customAttributes["in"].setInput(customAttributes3["out"])

    # Merge the 3 variants
    mergeGroup = GafferScene.Group()
    mergeGroup["in"][-1].setInput(customAttributes["out"])
    mergeGroup["in"][-1].setInput(customAttributes1["out"])
    mergeGroup["in"][-1].setInput(customAttributes2["out"])

    arnoldTextureBake = GafferArnold.ArnoldTextureBake()
    arnoldTextureBake["in"].setInput(mergeGroup["out"])
    arnoldTextureBake["filter"].setInput(allFilter["out"])
    arnoldTextureBake["bakeDirectory"].setValue(self.temporaryDirectory() +
                                                '/bakeMerge/')
    arnoldTextureBake["defaultResolution"].setValue(128)

    # We want to check the intermediate results
    arnoldTextureBake["cleanupIntermediateFiles"].setValue(False)

    # Dispatch the bake
    script = Gaffer.ScriptNode()
    script.addChild(arnoldTextureBake)
    dispatcher = GafferDispatch.LocalDispatcher()
    dispatcher["jobsDirectory"].setValue(self.temporaryDirectory())
    dispatcher.dispatch([arnoldTextureBake])

    # Check results : rebuild the analytic gradient over the baked image and
    # compare with a difference Merge ( operation 10 ).
    imageReader = GafferImage.ImageReader()

    outLayer = GafferOSL.OSLShader()
    outLayer.loadShader("ImageProcessing/OutLayer")
    outLayer["parameters"]["layerColor"].setInput(
        uvGradientCode["out"]["out"])

    outImage = GafferOSL.OSLShader()
    outImage.loadShader("ImageProcessing/OutImage")
    outImage["parameters"]["in0"].setInput(outLayer["out"]["layer"])

    oSLImage = GafferOSL.OSLImage()
    oSLImage["in"].setInput(imageReader["out"])
    oSLImage["shader"].setInput(outImage["out"])

    merge3 = GafferImage.Merge()
    merge3["in"]["in0"].setInput(oSLImage["out"])
    merge3["in"]["in1"].setInput(imageReader["out"])
    merge3["operation"].setValue(10)

    edgeDetect = self.SimpleEdgeDetect()
    edgeDetect["in"].setInput(imageReader["out"])

    edgeStats = GafferImage.ImageStats()
    edgeStats["in"].setInput(edgeDetect["out"])

    refDiffStats = GafferImage.ImageStats()
    refDiffStats["in"].setInput(merge3["out"])

    oneLayerReader = GafferImage.ImageReader()

    # Isolate the near-opaque alpha to mask the difference to covered pixels.
    grade = GafferImage.Grade()
    grade["in"].setInput(oneLayerReader["out"])
    grade["channels"].setValue('[A]')
    grade["blackPoint"].setValue(imath.Color4f(0, 0, 0, 0.999899983))

    copyChannels = GafferImage.CopyChannels()
    copyChannels["in"]["in0"].setInput(merge3["out"])
    copyChannels["in"]["in1"].setInput(grade["out"])
    copyChannels["channels"].setValue('[A]')

    premultiply = GafferImage.Premultiply()
    premultiply["in"].setInput(copyChannels["out"])

    refDiffCoveredStats = GafferImage.ImageStats()
    refDiffCoveredStats["in"].setInput(premultiply["out"])

    # We are testing 3 different cases:
    # complete : Should be an exact match.
    # incomplete : Expect some mild variance of slopes and some error, because we have to
    #              reconstruct a lot of missing data.
    # mismatch : We should get a larger image, sized to the highest override on any mesh.
    #            Match won't be as perfect, because we're combining source images at
    #            different resolutions
    for name, expectedSize, maxEdge, maxRefDiff, maxMaskedDiff in [
            ("complete", 128, 0.01, 0.000001, 0.000001),
            ("incomplete", 128, 0.05, 0.15, 0.000001),
            ("mismatch", 200, 0.01, 0.01, 0.01)]:
        imageReader["fileName"].setValue(
            self.temporaryDirectory() + "/bakeMerge/" + name +
            "/beauty/beauty.1001.tx")
        oneLayerReader["fileName"].setValue(
            self.temporaryDirectory() + "/bakeMerge/" + name +
            "/beauty/beauty.1001.exr")

        self.assertEqual(imageReader["out"]["format"].getValue().width(),
                         expectedSize)
        self.assertEqual(imageReader["out"]["format"].getValue().height(),
                         expectedSize)

        edgeStats["area"].setValue(
            imath.Box2i(imath.V2i(1), imath.V2i(expectedSize - 1)))
        refDiffStats["area"].setValue(
            imath.Box2i(imath.V2i(1), imath.V2i(expectedSize - 1)))
        refDiffCoveredStats["area"].setValue(
            imath.Box2i(imath.V2i(0), imath.V2i(expectedSize)))

        # Blue channel is constant, so everything should line up perfectly
        self.assertEqual(0, edgeStats["max"].getValue()[2])
        self.assertEqual(0, refDiffStats["max"].getValue()[2])
        self.assertEqual(0, refDiffCoveredStats["max"].getValue()[2])

        for i in range(2):
            # Make sure we've got actual data, by checking that we have some error ( we're not expecting
            # to perfectly reconstruct the gradient when the input is incomplete )
            self.assertGreater(edgeStats["max"].getValue()[i], 0.005)
            if name == "incomplete":
                self.assertGreater(edgeStats["max"].getValue()[i], 0.03)
                self.assertGreater(refDiffStats["max"].getValue()[i], 0.06)
            self.assertLess(edgeStats["max"].getValue()[i], maxEdge)
            self.assertLess(refDiffStats["max"].getValue()[i], maxRefDiff)
            self.assertLess(refDiffCoveredStats["max"].getValue()[i],
                            maxMaskedDiff)
def testDefaultExpressionForSupportedPlugs(self):
    """For every supported plug type, `defaultExpression()` should produce an
    expression that drives the plug while reproducing its current value."""

    s = Gaffer.ScriptNode()
    s["n"] = Gaffer.Node()

    # Every plug type for which a default expression is expected to work.
    # Created in a table so each type is listed exactly once.
    dynamic = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic
    supportedPlugs = [
        Gaffer.IntPlug(flags=dynamic),
        Gaffer.FloatPlug(flags=dynamic),
        Gaffer.StringPlug(flags=dynamic),
        Gaffer.BoolPlug(flags=dynamic),
        Gaffer.V2fPlug(flags=dynamic),
        Gaffer.V2iPlug(flags=dynamic),
        Gaffer.V3fPlug(flags=dynamic),
        Gaffer.V3iPlug(flags=dynamic),
        Gaffer.Color3fPlug(flags=dynamic),
        Gaffer.Color4fPlug(flags=dynamic),
        Gaffer.Box2fPlug(flags=dynamic),
        Gaffer.Box2iPlug(flags=dynamic),
        Gaffer.Box3fPlug(flags=dynamic),
        Gaffer.Box3iPlug(flags=dynamic),
        Gaffer.IntVectorDataPlug(
            defaultValue=IECore.IntVectorData([0, 1]), flags=dynamic),
        Gaffer.FloatVectorDataPlug(
            defaultValue=IECore.FloatVectorData([0, 1]), flags=dynamic),
        Gaffer.StringVectorDataPlug(
            defaultValue=IECore.StringVectorData(["a", "b"]), flags=dynamic),
        Gaffer.V3fVectorDataPlug(
            defaultValue=IECore.V3fVectorData([IECore.V3f(1)]), flags=dynamic),
    ]
    for plug in supportedPlugs:
        s["n"]["user"].addChild(plug)

    s["e"] = Gaffer.Expression()
    for plug in s["n"]["user"]:
        # The generated expression must take over the plug without
        # changing its value.
        value = plug.getValue()
        s["e"].setExpression(s["e"].defaultExpression(plug, "python"))
        self.assertTrue(plug.getInput().node().isSame(s["e"]))
        self.assertEqual(plug.getValue(), value)
def testUndoMerging(self): s = Gaffer.ScriptNode() s["n"] = Gaffer.Node() s["n"]["p"] = Gaffer.IntPlug() self.assertEqual(s["n"]["p"].getValue(), 0) self.assertFalse(s.undoAvailable()) cs = GafferTest.CapturingSlot(s["n"].plugSetSignal()) with Gaffer.UndoScope(s, mergeGroup="test"): s["n"]["p"].setValue(1) self.assertEqual(len(cs), 1) self.assertEqual(s["n"]["p"].getValue(), 1) self.assertTrue(s.undoAvailable()) with Gaffer.UndoScope(s, mergeGroup="test"): s["n"]["p"].setValue(2) self.assertEqual(len(cs), 2) self.assertEqual(s["n"]["p"].getValue(), 2) self.assertTrue(s.undoAvailable()) with Gaffer.UndoScope(s, mergeGroup="test2"): s["n"]["p"].setValue(3) self.assertEqual(len(cs), 3) self.assertEqual(s["n"]["p"].getValue(), 3) self.assertTrue(s.undoAvailable()) s.undo() self.assertEqual(len(cs), 4) self.assertEqual(s["n"]["p"].getValue(), 2) self.assertTrue(s.undoAvailable()) s.undo() self.assertEqual(len(cs), 5) self.assertEqual(s["n"]["p"].getValue(), 0) self.assertFalse(s.undoAvailable()) s.redo() self.assertEqual(len(cs), 6) self.assertEqual(s["n"]["p"].getValue(), 2) self.assertTrue(s.undoAvailable()) s.undo() self.assertEqual(len(cs), 7) self.assertEqual(s["n"]["p"].getValue(), 0) self.assertFalse(s.undoAvailable()) s.redo() s.redo() self.assertEqual(len(cs), 9) self.assertEqual(s["n"]["p"].getValue(), 3) self.assertTrue(s.undoAvailable()) s.undo() s.undo() self.assertEqual(len(cs), 11) self.assertEqual(s["n"]["p"].getValue(), 0) self.assertFalse(s.undoAvailable())
def testResetDefault(self): script = Gaffer.ScriptNode() script["node"] = Gaffer.Node() script["node"]["user"]["i"] = Gaffer.IntPlug( defaultValue=1, flags=Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic) script["node"]["user"]["v"] = Gaffer.V3iPlug( defaultValue=imath.V3i(1, 2, 3), flags=Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic) def assertPreconditions(script): self.assertTrue(script["node"]["user"]["i"].isSetToDefault()) self.assertEqual(script["node"]["user"]["i"].defaultValue(), 1) self.assertEqual(script["node"]["user"]["i"].getValue(), 1) self.assertTrue(script["node"]["user"]["v"].isSetToDefault()) self.assertEqual(script["node"]["user"]["v"].defaultValue(), imath.V3i(1, 2, 3)) self.assertEqual(script["node"]["user"]["v"].getValue(), imath.V3i(1, 2, 3)) assertPreconditions(script) with Gaffer.UndoScope(script): script["node"]["user"]["i"].setValue(2) script["node"]["user"]["i"].resetDefault() script["node"]["user"]["v"].setValue(imath.V3i(10, 11, 12)) script["node"]["user"]["v"].resetDefault() def assertPostconditions(script): self.assertTrue(script["node"]["user"]["i"].isSetToDefault()) self.assertEqual(script["node"]["user"]["i"].defaultValue(), 2) self.assertEqual(script["node"]["user"]["i"].getValue(), 2) self.assertTrue(script["node"]["user"]["v"].isSetToDefault()) self.assertEqual(script["node"]["user"]["v"].defaultValue(), imath.V3i(10, 11, 12)) self.assertEqual(script["node"]["user"]["v"].getValue(), imath.V3i(10, 11, 12)) script.undo() assertPreconditions(script) script.redo() assertPostconditions(script) script.undo() assertPreconditions(script) script.redo() assertPostconditions(script) script2 = Gaffer.ScriptNode() script2.execute(script.serialise()) assertPostconditions(script2)
def testPromotion(self):
    """Promoting a Spreadsheet "rows" plug to a Box must keep the promoted
    plug and the internal plug mirrored through column/row edits, and must
    survive serialisation."""

    def assertCellEqual(cellPlug1, cellPlug2):
        # Two CellPlugs agree in name, enabled state and value.
        self.assertEqual(cellPlug1.getName(), cellPlug2.getName())
        self.assertIsInstance(cellPlug1, Gaffer.Spreadsheet.CellPlug)
        self.assertIsInstance(cellPlug2, Gaffer.Spreadsheet.CellPlug)
        self.assertEqual(cellPlug1["enabled"].getValue(),
                         cellPlug2["enabled"].getValue())
        self.assertEqual(cellPlug1["value"].getValue(),
                         cellPlug2["value"].getValue())

    def assertRowEqual(rowPlug1, rowPlug2):
        # Two RowPlugs agree in name, enabled state, and every cell.
        self.assertEqual(rowPlug1.getName(), rowPlug2.getName())
        self.assertIsInstance(rowPlug1, Gaffer.Spreadsheet.RowPlug)
        self.assertIsInstance(rowPlug2, Gaffer.Spreadsheet.RowPlug)
        self.assertEqual(rowPlug1["name"].getValue(),
                         rowPlug2["name"].getValue())
        self.assertEqual(rowPlug1["enabled"].getValue(),
                         rowPlug2["enabled"].getValue())
        self.assertEqual(rowPlug1["cells"].keys(), rowPlug2["cells"].keys())
        for k in rowPlug1["cells"].keys():
            assertCellEqual(rowPlug1["cells"][k], rowPlug2["cells"][k])

    def assertRowsEqual(rowsPlug1, rowsPlug2):
        # Two RowsPlugs agree row-for-row.
        self.assertIsInstance(rowsPlug1, Gaffer.Spreadsheet.RowsPlug)
        self.assertIsInstance(rowsPlug2, Gaffer.Spreadsheet.RowsPlug)
        self.assertEqual(rowsPlug1.keys(), rowsPlug2.keys())
        for k in rowsPlug1.keys():
            assertRowEqual(rowsPlug1[k], rowsPlug2[k])

    def assertOutputsValid(spreadsheet):
        # One output per column, each mapping back to the default row's cell.
        self.assertEqual(spreadsheet["rows"].defaultRow()["cells"].keys(),
                         spreadsheet["out"].keys())
        for o in spreadsheet["out"]:
            self.assertEqual(
                spreadsheet.correspondingInput(o),
                spreadsheet["rows"].defaultRow()["cells"][
                    o.getName()]["value"])

    s = Gaffer.ScriptNode()
    s["b"] = Gaffer.Box()

    # Make a Spreadsheet with some existing cells
    # and promote the "rows" plug.

    s["b"]["s1"] = Gaffer.Spreadsheet()
    s["b"]["s1"]["rows"].addColumn(Gaffer.IntPlug("i"))
    s["b"]["s1"]["rows"].addRow()["cells"][0]["value"].setValue(10)
    s["b"]["s1"]["rows"].addRow()["cells"][0]["value"].setValue(20)

    p1 = Gaffer.PlugAlgo.promote(s["b"]["s1"]["rows"])
    assertRowsEqual(p1, s["b"]["s1"]["rows"])
    assertOutputsValid(s["b"]["s1"])
    self.assertTrue(Gaffer.PlugAlgo.isPromoted(s["b"]["s1"]["rows"]))

    # Promote the "rows" plug on an empty spreadsheet,
    # and add some cells.

    s["b"]["s2"] = Gaffer.Spreadsheet()
    p2 = Gaffer.PlugAlgo.promote(s["b"]["s2"]["rows"])
    assertRowsEqual(p2, s["b"]["s2"]["rows"])
    assertOutputsValid(s["b"]["s2"])
    self.assertTrue(Gaffer.PlugAlgo.isPromoted(s["b"]["s2"]["rows"]))

    p2.addColumn(Gaffer.IntPlug("i"))
    p2.addRow()["cells"][0]["value"].setValue(10)
    p2.addRow()["cells"][0]["value"].setValue(20)
    assertRowsEqual(p2, s["b"]["s2"]["rows"])
    assertOutputsValid(s["b"]["s2"])
    self.assertTrue(Gaffer.PlugAlgo.isPromoted(s["b"]["s2"]["rows"]))

    p2.addColumn(Gaffer.IntPlug("j"))
    assertRowsEqual(p2, s["b"]["s2"]["rows"])
    assertOutputsValid(s["b"]["s2"])
    self.assertTrue(Gaffer.PlugAlgo.isPromoted(s["b"]["s2"]["rows"]))

    # Remove a column

    p2.removeColumn(0)
    assertRowsEqual(p2, s["b"]["s2"]["rows"])
    assertOutputsValid(s["b"]["s2"])
    self.assertTrue(Gaffer.PlugAlgo.isPromoted(s["b"]["s2"]["rows"]))

    # Serialise and reload, and check all is well

    s2 = Gaffer.ScriptNode()
    s2.execute(s.serialise())

    assertRowsEqual(s2["b"]["s1"]["rows"], s["b"]["s1"]["rows"])
    assertRowsEqual(s2["b"]["s2"]["rows"], s["b"]["s2"]["rows"])
    # NOTE(review): these validate the *original* script's spreadsheets —
    # presumably the reloaded s2["b"]["s1"]/["s2"] were intended; confirm.
    assertOutputsValid(s["b"]["s1"])
    assertOutputsValid(s["b"]["s2"])
def intSwitch(self): result = Gaffer.Switch() result.setup(Gaffer.IntPlug()) return result
def testDefaultValues(self):
    """Exporting a Box for referencing bakes the *current* plug values in as
    the defaults of the Reference's plugs, leaving the Box's own defaults
    untouched, and user edits on the Reference survive save/load and
    re-export."""

    s = Gaffer.ScriptNode()

    s["b"] = Gaffer.Box()
    s["b"]["p"] = Gaffer.IntPlug(defaultValue=1,
                                 flags=Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic)
    s["b"]["p"].setValue(2)
    s["b"].exportForReference(self.temporaryDirectory() + "/test.grf")

    s["r"] = Gaffer.Reference()
    s["r"].load(self.temporaryDirectory() + "/test.grf")

    # The value at the time of box export should become
    # the default value on the reference node. But the
    # default value on the box itself should remain the
    # same.
    self.assertEqual(s["r"]["p"].getValue(), 2)
    self.assertEqual(s["r"]["p"].defaultValue(), 2)
    self.assertEqual(s["b"]["p"].defaultValue(), 1)

    # And we should be able to save and reload the script
    # and have that still be the case.
    s["fileName"].setValue(self.temporaryDirectory() + "/test.gfr")
    s.save()
    s.load()
    self.assertEqual(s["r"]["p"].getValue(), 2)
    self.assertEqual(s["r"]["p"].defaultValue(), 2)
    self.assertEqual(s["b"]["p"].getValue(), 2)
    self.assertEqual(s["b"]["p"].defaultValue(), 1)

    # If we change the value on the box and reexport,
    # then the reference should pick up both the new value
    # and the new default.
    s["b"]["p"].setValue(3)
    s["b"].exportForReference(self.temporaryDirectory() + "/test.grf")
    s["r"].load(self.temporaryDirectory() + "/test.grf")
    self.assertEqual(s["r"]["p"].getValue(), 3)
    self.assertEqual(s["r"]["p"].defaultValue(), 3)
    self.assertEqual(s["b"]["p"].getValue(), 3)
    self.assertEqual(s["b"]["p"].defaultValue(), 1)

    # And that should still hold after saving and reloading the script.
    s.save()
    s.load()
    self.assertEqual(s["r"]["p"].getValue(), 3)
    self.assertEqual(s["r"]["p"].defaultValue(), 3)
    self.assertEqual(s["b"]["p"].getValue(), 3)
    self.assertEqual(s["b"]["p"].defaultValue(), 1)

    # But if the user changes the value on the reference node,
    # it should be kept.
    s["r"]["p"].setValue(100)
    self.assertEqual(s["r"]["p"].getValue(), 100)
    self.assertEqual(s["r"]["p"].defaultValue(), 3)
    self.assertEqual(s["b"]["p"].getValue(), 3)
    self.assertEqual(s["b"]["p"].defaultValue(), 1)

    # And a save and load shouldn't change that.
    s.save()
    s.load()
    self.assertEqual(s["r"]["p"].getValue(), 100)
    self.assertEqual(s["r"]["p"].defaultValue(), 3)
    self.assertEqual(s["b"]["p"].getValue(), 3)
    self.assertEqual(s["b"]["p"].defaultValue(), 1)

    # And now the user has changed a value, only the
    # default value should be updated if we load a new
    # reference.
    s["b"]["p"].setValue(4)
    s["b"].exportForReference(self.temporaryDirectory() + "/test.grf")
    s["r"].load(self.temporaryDirectory() + "/test.grf")
    self.assertEqual(s["r"]["p"].getValue(), 100)
    self.assertEqual(s["r"]["p"].defaultValue(), 4)
    self.assertEqual(s["b"]["p"].getValue(), 4)
    self.assertEqual(s["b"]["p"].defaultValue(), 1)

    # And a save and load shouldn't change anything.
    s.save()
    s.load()
    self.assertEqual(s["r"]["p"].getValue(), 100)
    self.assertEqual(s["r"]["p"].defaultValue(), 4)
    self.assertEqual(s["b"]["p"].getValue(), 4)
    self.assertEqual(s["b"]["p"].defaultValue(), 1)

    # And since we know that all plugs in box exports
    # have had their default values set to the current
    # value, there shouldn't be any need for a single
    # setValue() call in the exported file.
    # Bug fix : the Python-2-only builtin `file()` was removed in Python 3
    # ( NameError there ); use `open()` in a context manager instead, which
    # also guarantees the handle is closed.
    with open(self.temporaryDirectory() + "/test.grf") as f:
        e = f.read()
    self.assertNotIn("setValue", e)
def testAcceptsNoneInput(self): p = Gaffer.IntPlug("hello") self.failUnless(p.acceptsInput(None))
def _doSetupPlugs( self, parentPlug ) : parentPlug["testDispatcherPlug"] = Gaffer.IntPlug( direction = Gaffer.Plug.Direction.In, flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
def testReadOnlySetValueRaises(self): p = Gaffer.IntPlug(flags=Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.ReadOnly) self.assertRaises(RuntimeError, p.setValue, 10)