def testContains( self ) :
	"""GafferImage.contains() : the min corner is inclusive, the max corner is exclusive, and empty boxes contain nothing."""

	unitBox = IECore.Box2i( IECore.V2i( 0 ), IECore.V2i( 1 ) )

	# A default-constructed box is empty, so it contains no point.
	self.assertFalse( GafferImage.contains( IECore.Box2i(), IECore.V2i( 0 ) ) )
	# A box whose min equals its max has zero area - also contains nothing.
	self.assertFalse( GafferImage.contains( IECore.Box2i( IECore.V2i( 0 ), IECore.V2i( 0 ) ), IECore.V2i( 0 ) ) )
	# The max corner is excluded...
	self.assertFalse( GafferImage.contains( unitBox, IECore.V2i( 1 ) ) )
	# ...while the min corner is included.
	self.assertTrue( GafferImage.contains( unitBox, IECore.V2i( 0 ) ) )
def testEmpty( self ) :
	"""GafferImage.empty() : default and inverted boxes are empty, including extreme coordinates that could overflow naive width/height arithmetic."""

	# All of these must report as empty - several use values near the int32
	# limits, where a naive ( max - min ) computation would overflow.
	for box in (
		IECore.Box2i(),
		IECore.Box2i( IECore.V2i( 0 ), IECore.V2i( 0 ) ),
		IECore.Box2i( IECore.V2i( 2147483646 ), IECore.V2i( -2147483646 ) ),
		IECore.Box2i( IECore.V2i( 0 ), IECore.V2i( -2147483647 ) ),
		IECore.Box2i( IECore.V2i( 2147483647 ), IECore.V2i( 0 ) ),
		IECore.Box2i( IECore.V2i( -1 ), IECore.V2i( -2147483647 ) ),
		IECore.Box2i( IECore.V2i( 2147483647 ), IECore.V2i( 1 ) ),
		IECore.Box2i( IECore.V2i( 1 ), IECore.V2i( -2147483647 ) ),
		IECore.Box2i( IECore.V2i( 2147483647 ), IECore.V2i( -1 ) ),
	) :
		self.assertTrue( GafferImage.empty( box ) )

	# A box with genuine area is not empty.
	self.assertFalse( GafferImage.empty( IECore.Box2i( IECore.V2i( 0 ), IECore.V2i( 1 ) ) ) )
def testChannelExistsBindings( self ) :
	"""Both bindings of channelExists() - taking an ImagePlug or a channel name list - must agree."""

	constant = GafferImage.Constant()
	deleteChannels = GafferImage.DeleteChannels()
	deleteChannels["in"].setInput( constant["out"] )
	deleteChannels["mode"].setValue( GafferImage.DeleteChannels.Mode.Delete )
	deleteChannels["channels"].setValue( IECore.StringVectorData( [ "R", "A" ] ) )

	for channel in ( "R", "G", "B", "A" ) :
		self.assertEqual(
			GafferImage.channelExists( deleteChannels["out"], channel ),
			GafferImage.channelExists( deleteChannels["out"]["channelNames"].getValue(), channel )
		)
def testDefaultFormatFromScript( self ) :
	"""The default format plug is created on demand by image nodes and drives a Constant's output format."""

	script = Gaffer.ScriptNode()
	# No image node yet, so no default format plug.
	self.assertFalse( "defaultFormat" in script )

	# Adding an image node should create it.
	script["c"] = GafferImage.Constant()
	self.assertTrue( "defaultFormat" in script )

	defaultFormatPlug = GafferImage.FormatPlug.acquireDefaultFormatPlug( script )
	self.assertTrue( defaultFormatPlug.isSame( script["defaultFormat"] ) )

	with script.context() :

		# Even before we set anything there should be a usable non-empty format.
		self.assertFalse( GafferImage.empty( script["c"]["out"]["format"].getValue().getDisplayWindow() ) )

		# And whatever we set should be reflected in the Constant's output.
		for f in ( GafferImage.Format( 100, 200, 2 ), GafferImage.Format( 200, 400, 1 ) ) :
			defaultFormatPlug.setValue( f )
			self.assertEqual( script["c"]["out"]["format"].getValue(), f )
def testChannelExists( self ) :
	"""channelExists() reflects DeleteChannels : all channels present initially, each absent once deleted."""

	constant = GafferImage.Constant()
	deleteChannels = GafferImage.DeleteChannels()
	deleteChannels["in"].setInput( constant["out"] )
	deleteChannels["mode"].setValue( GafferImage.DeleteChannels.Mode.Delete )
	deleteChannels["channels"].setValue( IECore.StringVectorData( [] ) )

	# Nothing deleted yet - every standard channel exists.
	for channel in ( "R", "G", "B", "A" ) :
		self.assertTrue( GafferImage.channelExists( deleteChannels["out"], channel ) )

	# Deleting a channel must make it report as missing.
	for channel in ( "R", "G", "B", "A" ) :
		deleteChannels["channels"].setValue( IECore.StringVectorData( [ channel ] ) )
		self.assertFalse( GafferImage.channelExists( deleteChannels["out"], channel ) )
def testClamp( self ) :
	"""GafferImage.clamp() clamps points into the half-open box [ min, max )."""

	box = IECore.Box2i( IECore.V2i( 0 ), IECore.V2i( 10 ) )

	for point, expected in (
		( IECore.V2i( 5, 6 ), IECore.V2i( 5, 6 ) ),		# already inside : unchanged
		( IECore.V2i( 10, 6 ), IECore.V2i( 9, 6 ) ),	# max is exclusive, so 10 clamps to 9
		( IECore.V2i( 0, 6 ), IECore.V2i( 0, 6 ) ),		# min is inclusive
		( IECore.V2i( 5, -1 ), IECore.V2i( 5, 0 ) ),
		( IECore.V2i( 5, 10 ), IECore.V2i( 5, 9 ) ),
	) :
		self.assertEqual( GafferImage.clamp( point, box ), expected )
def testLayerName( self ) :
	"""layerName() is everything before the final '.', or the empty string for plain channel names."""

	expectedLayers = {
		"R" : "",
		"A" : "",
		"Z" : "",
		"myFunkyChannel" : "",
		"left.R" : "left",
		"right.myFunkyChannel" : "right",
		"diffuse.left.R" : "diffuse.left",
	}
	for channelName, layerName in expectedLayers.items() :
		self.assertEqual( GafferImage.layerName( channelName ), layerName )
def testBaseName( self ) :
	"""baseName() is everything after the final '.', or the whole name when there is no layer prefix."""

	expectedBaseNames = {
		"R" : "R",
		"A" : "A",
		"Z" : "Z",
		"myFunkyChannel" : "myFunkyChannel",
		"left.R" : "R",
		"right.myFunkyChannel" : "myFunkyChannel",
		"diffuse.left.R" : "R",
	}
	for channelName, baseName in expectedBaseNames.items() :
		self.assertEqual( GafferImage.baseName( channelName ), baseName )
def testIntersects( self ) :
	"""intersects() treats the max bound as exclusive, so boxes that merely abut do not intersect."""

	# Empty boxes never intersect.
	self.assertFalse( GafferImage.intersects( IECore.Box2i(), IECore.Box2i() ) )

	a = IECore.Box2i( IECore.V2i( 0 ), IECore.V2i( 10 ) )
	# Sharing only the exclusive max edge is not an intersection...
	self.assertFalse( GafferImage.intersects( a, IECore.Box2i( IECore.V2i( 10 ), IECore.V2i( 20 ) ) ) )
	# ...but a one-pixel overlap is.
	self.assertTrue( GafferImage.intersects( a, IECore.Box2i( IECore.V2i( 9 ), IECore.V2i( 20 ) ) ) )
def testIntersection( self ) :
	"""intersection() is the componentwise max of the mins and min of the maxes."""

	boxA = IECore.Box2i( IECore.V2i( 1, 2 ), IECore.V2i( 9, 10 ) )
	boxB = IECore.Box2i( IECore.V2i( 2, 0 ), IECore.V2i( 8, 29 ) )
	expected = IECore.Box2i( IECore.V2i( 2, 2 ), IECore.V2i( 8, 10 ) )

	self.assertEqual( GafferImage.intersection( boxA, boxB ), expected )
def testCoordinateSystemTransforms( self ) :
	"""Round-trips points and boxes between Gaffer's Y-up and EXR's Y-down coordinate conventions."""

	f = GafferImage.Format( IECore.Box2i( IECore.V2i( -100, -200 ), IECore.V2i( 501, 301 ) ), 1 )

	# Fixed-value point checks, both directions.
	self.assertEqual( f.fromEXRSpace( IECore.V2i( -100, -200 ) ), IECore.V2i( -100, 300 ) )
	self.assertEqual( f.fromEXRSpace( IECore.V2i( -100, 300 ) ), IECore.V2i( -100, -200 ) )
	self.assertEqual( f.toEXRSpace( IECore.V2i( -100, -200 ) ), IECore.V2i( -100, 300 ) )
	self.assertEqual( f.toEXRSpace( IECore.V2i( -100, 300 ) ), IECore.V2i( -100, -200 ) )

	# Fixed-value box checks. Note the EXR boxes are inclusive, hence the -1 on maxes.
	self.assertEqual(
		f.toEXRSpace( IECore.Box2i( IECore.V2i( -100, -200 ), IECore.V2i( 501, 301 ) ) ),
		IECore.Box2i( IECore.V2i( -100, -200 ), IECore.V2i( 500, 300 ) )
	)
	self.assertEqual(
		f.toEXRSpace( IECore.Box2i( IECore.V2i( -100, -100 ), IECore.V2i( 501, 301 ) ) ),
		IECore.Box2i( IECore.V2i( -100, -200 ), IECore.V2i( 500, 200 ) )
	)
	self.assertEqual(
		f.toEXRSpace( IECore.Box2i( IECore.V2i( -100, -200 ), IECore.V2i( 501, -100 ) ) ),
		IECore.Box2i( IECore.V2i( -100, 201 ), IECore.V2i( 500, 300 ) )
	)

	def randomPoint() :
		return IECore.V2i(
			int( random.uniform( -500, 500 ) ),
			int( random.uniform( -500, 500 ) )
		)

	# Fuzz : every point and every non-empty box must round-trip exactly.
	for i in range( 0, 1000 ) :

		p = randomPoint()
		pDown = f.toEXRSpace( p )
		self.assertEqual( f.fromEXRSpace( pDown ), p )

		b = IECore.Box2i()
		b.extendBy( randomPoint() )
		b.extendBy( randomPoint() )

		bDown = f.toEXRSpace( b )
		if not GafferImage.empty( b ) :
			self.assertEqual( f.fromEXRSpace( bDown ), b )
		else :
			# Empty boxes collapse to the canonical empty box.
			self.assertEqual( f.fromEXRSpace( bDown ), IECore.Box2i() )
def __menuDefinition( self ) :
	"""Builds the channel-selection menu for the widget.

	RGBA channels are grouped into a single "<layer>.RGBA" item that selects all
	four at once; other channels get an individual item (repeated four times in
	the value, once per colour slot). Layers containing only the standard
	channels collapse to a single item rather than a submenu.
	"""

	image = next( iter( self.getPlug().node().children( GafferImage.ImagePlug ) ) )

	channelNames = []
	with self.getContext() :
		# A broken upstream graph shouldn't break the menu - just show nothing.
		with IECore.IgnoredExceptions( Exception ) :
			channelNames = image["channelNames"].getValue()

	result = IECore.MenuDefinition()
	if not channelNames :
		result.append( "/No channels available", { "active" : False } )
		return result

	added = set()
	# Layers that contain at least one non-RGBA channel; these keep a submenu.
	nonStandardLayers = set(
		[ GafferImage.ImageAlgo.layerName( x ) for x in channelNames if GafferImage.ImageAlgo.baseName( x ) not in [ "R", "G", "B", "A" ] ]
	)

	for channelName in sorted( channelNames, key = GafferImage.ImageAlgo.layerName ) :
		if GafferImage.ImageAlgo.baseName( channelName ) in [ "R", "G", "B", "A" ] :
			# Consistency fix : use `ImageAlgo.layerName` as everywhere else in
			# this method, rather than the legacy `GafferImage.layerName` alias.
			layerName = GafferImage.ImageAlgo.layerName( channelName )
			prefix = layerName + "." if layerName else ""
			text = prefix + "RGBA"
			value = [ prefix + x for x in [ "R", "G", "B", "A" ] ]
		else :
			text = channelName
			value = [ channelName ] * 4

		if text in added :
			continue
		added.add( text )

		if not GafferImage.ImageAlgo.layerName( text ) in nonStandardLayers:
			# If there are only the standard channels, we don't need a submenu
			text = text.replace( ".RGBA", "" )

		result.append(
			"/" + text.replace( ".", "/" ),
			{
				"command" : functools.partial( Gaffer.WeakMethod( self.__setValue ), value = value ),
				"checkBox" : text == self.__menuButton.getText(),
			}
		)

	return result
def testDefaultFormatFromContext( self ) :
	"""FormatPlug.setDefaultFormat() on a context drives a Constant's output format."""

	constant = GafferImage.Constant()
	with Gaffer.Context() as context :

		# Even if we haven't specified a default, we should still
		# be given something usable.
		self.assertFalse( GafferImage.empty( constant["out"]["format"].getValue().getDisplayWindow() ) )

		# And if we specify something specific, we should get exactly that.
		for f in ( GafferImage.Format( 100, 200, 2 ), GafferImage.Format( 200, 400, 1 ) ) :
			GafferImage.FormatPlug.setDefaultFormat( context, f )
			self.assertEqual( GafferImage.FormatPlug.getDefaultFormat( context ), f )
			self.assertEqual( constant["out"]["format"].getValue(), f )
def testColorIndex( self ) :
	"""colorIndex() maps R/G/B/A base names to 0-3 and everything else to -1, ignoring any layer prefix."""

	# The layer prefix must make no difference to the index.
	for prefix in ( "", "left.", "diffuse.left." ) :
		for baseName, index in ( ( "R", 0 ), ( "G", 1 ), ( "B", 2 ), ( "A", 3 ), ( "Z", -1 ) ) :
			self.assertEqual( GafferImage.colorIndex( prefix + baseName ), index )

	# Arbitrary channel names are never colour channels.
	for channelName in ( "myFunkyChannel", "right.myFunkyChannel" ) :
		self.assertEqual( GafferImage.colorIndex( channelName ), -1 )
def testLargeDataWindowAddedToSmall( self ) :
	"""Mixing a cropped background with a full-frame foreground only blends where the background exists."""

	background = GafferImage.Constant()
	background["format"].setValue( GafferImage.Format( 500, 500, 1.0 ) )
	background["color"].setValue( imath.Color4f( 1, 0, 0, 1 ) )

	foreground = GafferImage.Constant()
	foreground["format"].setValue( GafferImage.Format( 500, 500, 1.0 ) )
	foreground["color"].setValue( imath.Color4f( 0, 1, 0, 1 ) )

	mask = GafferImage.Constant()
	mask["format"].setValue( GafferImage.Format( 500, 500, 1.0 ) )
	mask["color"].setValue( imath.Color4f( 0.5 ) )

	# Restrict the background's data window without changing its display window.
	backgroundCrop = GafferImage.Crop()
	backgroundCrop["in"].setInput( background["out"] )
	backgroundCrop["areaSource"].setValue( backgroundCrop.AreaSource.Area )
	backgroundCrop["area"].setValue( imath.Box2i( imath.V2i( 50 ), imath.V2i( 162 ) ) )
	backgroundCrop["affectDisplayWindow"].setValue( False )

	mix = GafferImage.Mix()
	mix["in"][0].setInput( backgroundCrop["out"] )
	mix["in"][1].setInput( foreground["out"] )
	mix["mask"].setInput( mask["out"] )

	displayWindow = mix["out"]["format"].getValue().getDisplayWindow()
	redSampler = GafferImage.Sampler( mix["out"], "R", displayWindow )
	greenSampler = GafferImage.Sampler( mix["out"], "G", displayWindow )
	blueSampler = GafferImage.Sampler( mix["out"], "B", displayWindow )

	def sample( x, y ) :
		return imath.Color3f(
			redSampler.sample( x, y ),
			greenSampler.sample( x, y ),
			blueSampler.sample( x, y ),
		)

	# We should only have yellow in areas where the background exists,
	# and should have just green everywhere else.
	self.assertEqual( sample( 49, 49 ), imath.Color3f( 0, 0.5, 0 ) )
	self.assertEqual( sample( 50, 50 ), imath.Color3f( 0.5, 0.5, 0 ) )
	self.assertEqual( sample( 161, 161 ), imath.Color3f( 0.5, 0.5, 0 ) )
	self.assertEqual( sample( 162, 162 ), imath.Color3f( 0, 0.5, 0 ) )
def testEmpty( self ) :
	"""empty() is true for default and zero-area boxes, false once there is real area."""

	for box, isEmpty in (
		( IECore.Box2i(), True ),
		( IECore.Box2i( IECore.V2i( 0 ), IECore.V2i( 0 ) ), True ),
		( IECore.Box2i( IECore.V2i( 0 ), IECore.V2i( 1 ) ), False ),
	) :
		self.assertEqual( GafferImage.empty( box ), isEmpty )
def testNonFlatThrows( self ) :
	"""Blur only supports flat images, so deep input must raise."""

	blur = GafferImage.Blur()
	blur["radius"].setValue( imath.V2f( 1 ) )

	self.assertRaisesDeepNotSupported( blur )
def testImageViewStatus( self ) :
	"""Exercises CropWindowTool status reporting when viewing an image : load error, missing metadata, then a valid source scene."""

	script = Gaffer.ScriptNode()
	script["image"] = GafferImage.ImageReader()

	view = GafferUI.View.create( script["image"]["out"] )
	tool = GafferSceneUI.CropWindowTool( view )
	tool["active"].setValue( True )

	# Presently, crop window tool updates are coupled to `preRender`, so we
	# need to actually show the View before we can verify our behaviour.
	with GafferUI.Window() as window :
		GafferUI.GadgetWidget( view.viewportGadget() )
	window.setVisible( True )

	# A bad file name should surface the reader's process exception.
	script["image"]["fileName"].setValue( "/i/do/not/exist.exr" )
	self.waitForIdle( 1000 )
	self.assertEqual(
		tool.status(),
		"Error: image.__oiioReader.out.format : OpenImageIOReader : Could not create ImageInput : Could not open file \"/i/do/not/exist.exr\""
	)

	# A readable image without source-scene metadata should report that instead.
	script["image"]["fileName"].setValue( "${GAFFER_ROOT}/resources/images/macaw.exr" )
	with IECore.CapturingMessageHandler() as mh :
		self.waitForIdle( 1000 )

	# Don't fail just because we're running on a computer that can't do GPU
	# color transforms well.
	if len( mh.messages ) :
		self.assertEqual( len( mh.messages ), 1 )
		self.assertEqual( mh.messages[0].context, "ImageGadget" )
		self.assertTrue( mh.messages[0].message.startswith( "Could not find supported floating point texture format in OpenGL" ) )

	self.assertEqual( tool.status(), "Error: No <b>gaffer:sourceScene</b> metadata in image" )

	# With valid metadata pointing at an options plug, the tool becomes editable.
	script["meta"] = GafferImage.ImageMetadata()
	script["meta"]["metadata"].addChild( Gaffer.NameValuePlug( "gaffer:sourceScene", "options.out", True, "member1" ) )
	script["meta"]["in"].setInput( script["image"]["out"] )

	script["options"] = GafferScene.StandardOptions()

	view["in"].setInput( script["meta"]["out"] )

	self.waitForIdle( 1000 )
	self.assertEqual( tool.status(), "Info: Editing <b>options.options.renderCropWindow.value</b>" )
def testFormatAffectsOutput( self ) :
	"""Changing the format plug must dirty the output data window."""

	crop = GafferImage.Crop()
	dirtied = GafferTest.CapturingSlot( crop.plugDirtiedSignal() )

	crop["format"].setValue( GafferImage.Format( 100, 200 ) )

	dirtiedPlugs = { c[0] for c in dirtied }
	self.assertIn( crop["out"]["dataWindow"], dirtiedPlugs )
def sample( image, channelName, pos ) :
	"""Return the value of `channelName` at pixel `pos`, sampling over `image`'s data window."""

	s = GafferImage.Sampler( image, channelName, image["dataWindow"].getValue() )
	return s.sample( pos.x, pos.y )
def testDefaultName( self ) :
	"""Nodes are named after their type by default."""

	node = GafferImage.ImageLoop()
	self.assertEqual( node.getName(), "ImageLoop" )
def testEditSubdivisionAttributes( self ) :
	"""Raising subdivIterations during an interactive render increases alpha coverage towards the limit surface."""

	script = Gaffer.ScriptNode()

	script["cube"] = GafferScene.Cube()
	script["cube"]["dimensions"].setValue( imath.V3f( 2 ) )

	script["meshType"] = GafferScene.MeshType()
	script["meshType"]["in"].setInput( script["cube"]["out"] )
	script["meshType"]["meshType"].setValue( "catmullClark" )

	script["attributes"] = GafferArnold.ArnoldAttributes()
	script["attributes"]["in"].setInput( script["meshType"]["out"] )
	script["attributes"]["attributes"]["subdivIterations"]["enabled"].setValue( True )

	script["outputs"] = GafferScene.Outputs()
	script["outputs"].addOutput(
		"beauty",
		IECoreScene.Output(
			"test", "ieDisplay", "rgba",
			{
				"driverType": "ImageDisplayDriver",
				"handle": "subdivisionTest",
			}
		)
	)
	script["outputs"]["in"].setInput( script["attributes"]["out"] )

	script["objectToImage"] = GafferImage.ObjectToImage()

	script["imageStats"] = GafferImage.ImageStats()
	script["imageStats"]["in"].setInput( script["objectToImage"]["out"] )
	script["imageStats"]["channels"].setValue( IECore.StringVectorData( [ "R", "G", "B", "A" ] ) )
	script["imageStats"]["area"].setValue( imath.Box2i( imath.V2i( 0 ), imath.V2i( 640, 480 ) ) )

	script["options"] = GafferScene.StandardOptions()
	script["options"]["in"].setInput( script["outputs"]["out"] )
	script["options"]["options"]["filmFit"]["enabled"].setValue( True )
	script["options"]["options"]["filmFit"]["value"].setValue( IECoreScene.Camera.FilmFit.Fit )

	script["render"] = self._createInteractiveRender()
	script["render"]["in"].setInput( script["options"]["out"] )

	def alphaCoverage() :
		# Give the renderer a moment to update, then pull the image out of the
		# display driver and measure its average alpha.
		time.sleep( 1 )
		script["objectToImage"]["object"].setValue( IECoreImage.ImageDisplayDriver.storedImage( "subdivisionTest" ) )
		return script["imageStats"]["average"][3].getValue()

	# Render the cube with one level of subdivision. Check we get roughly the
	# alpha coverage we expect.
	script["render"]["state"].setValue( script["render"].State.Running )
	self.assertAlmostEqual( alphaCoverage(), 0.381, delta = 0.001 )

	# Now up the number of subdivision levels. The alpha coverage should
	# increase as the shape tends towards the limit surface.
	script["attributes"]["attributes"]["subdivIterations"]["value"].setValue( 4 )
	self.assertAlmostEqual( alphaCoverage(), 0.424, delta = 0.001 )
def testWedge( self ) :
	"""Wedging over set membership renders one mostly-black (hidden) and one visible image."""

	script = Gaffer.ScriptNode()

	# The sphere's set membership varies with the wedge value...
	script["sphere"] = GafferScene.Sphere()
	script["sphere"]["sets"].setValue( "${wedge:value}" )

	# ...and anything in the "hidden" set is made invisible.
	script["filter"] = GafferScene.SetFilter()
	script["filter"]["set"].setValue( "hidden" )

	script["attributes"] = GafferScene.StandardAttributes()
	script["attributes"]["attributes"]["visibility"]["enabled"].setValue( True )
	script["attributes"]["attributes"]["visibility"]["value"].setValue( False )
	script["attributes"]["filter"].setInput( script["filter"]["out"] )
	script["attributes"]["in"].setInput( script["sphere"]["out"] )

	script["outputs"] = GafferScene.Outputs()
	script["outputs"].addOutput(
		"beauty",
		IECore.Display(
			self.temporaryDirectory() + "/${wedge:value}.tif",
			"tiff",
			"rgba",
			{}
		)
	)
	script["outputs"]["in"].setInput( script["attributes"]["out"] )

	script["render"] = GafferRenderMan.RenderManRender()
	script["render"]["ribFileName"].setValue( self.temporaryDirectory() + "/test.rib" )
	script["render"]["in"].setInput( script["outputs"]["out"] )

	script["wedge"] = GafferDispatch.Wedge()
	script["wedge"]["mode"].setValue( int( script["wedge"].Mode.StringList ) )
	script["wedge"]["strings"].setValue( IECore.StringVectorData( [ "visible", "hidden" ] ) )
	script["wedge"]["preTasks"][0].setInput( script["render"]["task"] )

	script["fileName"].setValue( self.temporaryDirectory() + "/test.gfr" )
	script.save()

	dispatcher = GafferDispatch.LocalDispatcher()
	dispatcher["jobsDirectory"].setValue( self.temporaryDirectory() + "/testJobDirectory" )
	dispatcher["framesMode"].setValue( GafferDispatch.Dispatcher.FramesMode.CurrentFrame )
	dispatcher["executeInBackground"].setValue( False )
	dispatcher.dispatch( [ script["wedge"] ] )

	def averageRed( imageFileName ) :
		# Read a wedged image back in and return the mean of its red channel.
		reader = GafferImage.ImageReader()
		reader["fileName"].setValue( self.temporaryDirectory() + "/" + imageFileName )
		stats = GafferImage.ImageStats()
		stats["in"].setInput( reader["out"] )
		stats["regionOfInterest"].setValue( reader["out"]["format"].getValue().getDisplayWindow() )
		return stats["average"].getValue()[0]

	self.assertLess( averageRed( "hidden.tif" ), 0.05 )
	self.assertGreater( averageRed( "visible.tif" ), 0.35 )
def __testMergedDifferentDataWindows( self, ensureOverlap = False ) :
	"""DeepMerge of two deep constants with random, differing data windows : every tile must match the expected sample offsets and channel data."""

	ts = GafferImage.ImagePlug.tileSize()
	tileCount = 2

	values1 = { "R": 0.25, "G": 0.5, "B": 1.0, "A": 0.5, "Z": 10.0, "ZBack": 12.0 }
	values2 = { "R": 2.0, "G": 3.0, "B": 4.0, "A": 1.0, "Z": 20.0, "ZBack": 20.0 }

	sourceFormat = GafferImage.Format( imath.Box2i( imath.V2i( 0 ), imath.V2i( ts * tileCount ) ), 1 )

	def deepConstant( values ) :
		# A constant colour converted to a single deep sample with the
		# requested Z / ZBack. Returns both nodes so they stay alive.
		constant = GafferImage.Constant()
		constant["format"].setValue( sourceFormat )
		constant["color"].setValue( imath.Color4f( values["R"], values["G"], values["B"], values["A"] ) )
		addDepth = GafferImage.FlatToDeep()
		addDepth["in"].setInput( constant["out"] )
		addDepth["depth"].setValue( values["Z"] )
		addDepth["zBackMode"].setValue( GafferImage.FlatToDeep.ZBackMode.Thickness )
		addDepth["thickness"].setValue( values["ZBack"] - values["Z"] )
		return constant, addDepth

	constant1, addDepth1 = deepConstant( values1 )
	constant2, addDepth2 = deepConstant( values2 )

	crop1 = GafferImage.Crop()
	crop1["in"].setInput( addDepth1["out"] )
	crop1["affectDisplayWindow"].setValue( False )

	crop2 = GafferImage.Crop()
	crop2["in"].setInput( addDepth2["out"] )
	crop2["affectDisplayWindow"].setValue( False )

	merge = GafferImage.DeepMerge()
	merge["in"][0].setInput( crop1["out"] )
	merge["in"][1].setInput( crop2["out"] )

	def randomArea() :
		# A box spanned by two random points within the source format.
		area = imath.Box2i()
		for _ in range( 2 ) :
			area.extendBy( imath.V2i(
				int( random.uniform( 0, sourceFormat.width() ) ),
				int( random.uniform( 0, sourceFormat.height() ) )
			) )
		return area

	for _ in range( 10 ) :

		crop1Area = randomArea()
		crop2Area = randomArea()

		# If we want to ensure that the two crop areas overlap, extend the
		# second one to a random point within the first one's area.
		if ensureOverlap and not GafferImage.BufferAlgo.intersects( crop1Area, crop2Area ) :
			crop2Area.extendBy( imath.V2i(
				int( random.uniform( crop1Area.min().x, crop1Area.max().x ) ),
				int( random.uniform( crop1Area.min().y, crop1Area.max().y ) )
			) )

		crop1["area"].setValue( crop1Area )
		crop2["area"].setValue( crop2Area )

		for tileX in range( tileCount ) :
			for tileY in range( tileCount ) :

				tileOrigin = imath.V2i( tileX * ts, tileY * ts )

				self.assertEqual(
					merge["out"].sampleOffsets( tileOrigin ),
					self.__getExpectedSampleOffsets( tileOrigin, crop1Area, crop2Area )
				)

				for channelName in values1.keys() :
					self.assertEqual(
						merge["out"].channelData( channelName, tileOrigin ),
						self.__getExpectedChannelData( tileOrigin, crop1Area, values1[channelName], crop2Area, values2[channelName] )
					)
def testChannelRequest( self ) :
	"""DeepMerge interleaves channels from both inputs, filling channels missing from one input with zeroes."""

	ts = GafferImage.ImagePlug.tileSize()

	def deepSource( color, depth, channelsToDelete ) :
		# A constant given a single deep sample of thickness 1 at `depth`,
		# followed by a DeleteChannels ( initially disabled ) for
		# `channelsToDelete`. All nodes returned to keep them alive.
		constant = GafferImage.Constant()
		constant["color"].setValue( color )
		addDepth = GafferImage.FlatToDeep()
		addDepth["in"].setInput( constant["out"] )
		addDepth["depth"].setValue( depth )
		addDepth["zBackMode"].setValue( GafferImage.FlatToDeep.ZBackMode.Thickness )
		addDepth["thickness"].setValue( 1.0 )
		delete = GafferImage.DeleteChannels()
		delete["in"].setInput( addDepth["out"] )
		delete["mode"].setValue( GafferImage.DeleteChannels.Mode.Delete )
		delete["channels"].setValue( IECore.StringVectorData( channelsToDelete ) )
		delete["enabled"].setValue( False )
		return constant, addDepth, delete

	sourceA = deepSource( imath.Color4f( 0.1, 0.2, 0.3, 0.4 ), 2.0, [ "G", "Z", "ZBack" ] )
	sourceB = deepSource( imath.Color4f( 0.5, 0.6, 0.7, 0.8 ), 4.0, [ "R", "A" ] )

	merge = GafferImage.DeepMerge()
	merge["in"][0].setInput( sourceA[2]["out"] )
	merge["in"][1].setInput( sourceB[2]["out"] )

	def assertChannels( expected ) :
		for channelName, data in expected.items() :
			self.assertEqual( merge["out"].channelData( channelName, imath.V2i( 0 ) ), data )

	# With the deletes disabled, every pixel interleaves both inputs' samples.
	assertChannels( {
		"R" : IECore.FloatVectorData( [ 0.1, 0.5 ] * ts * ts ),
		"G" : IECore.FloatVectorData( [ 0.2, 0.6 ] * ts * ts ),
		"B" : IECore.FloatVectorData( [ 0.3, 0.7 ] * ts * ts ),
		"A" : IECore.FloatVectorData( [ 0.4, 0.8 ] * ts * ts ),
		"Z" : IECore.FloatVectorData( [ 2.0, 4.0 ] * ts * ts ),
		"ZBack" : IECore.FloatVectorData( [ 3.0, 5.0 ] * ts * ts ),
	} )

	# With channels deleted from each input, the missing samples read as 0.0.
	sourceA[2]["enabled"].setValue( True )
	sourceB[2]["enabled"].setValue( True )
	assertChannels( {
		"R" : IECore.FloatVectorData( [ 0.1, 0.0 ] * ts * ts ),
		"G" : IECore.FloatVectorData( [ 0.0, 0.6 ] * ts * ts ),
		"B" : IECore.FloatVectorData( [ 0.3, 0.7 ] * ts * ts ),
		"A" : IECore.FloatVectorData( [ 0.4, 0.0 ] * ts * ts ),
		"Z" : IECore.FloatVectorData( [ 0.0, 4.0 ] * ts * ts ),
		"ZBack" : IECore.FloatVectorData( [ 0.0, 5.0 ] * ts * ts ),
	} )
def testChannelData( self ) :
	"""DeepMerge interleaves per-input channel values, including Z / ZBack behaviour as thickness is enabled."""

	ts = GafferImage.ImagePlug.tileSize()

	def deepConstant( color, depth ) :
		# A 512x512 constant converted to deep at the given depth.
		constant = GafferImage.Constant()
		constant["format"].setValue( GafferImage.Format( imath.Box2i( imath.V2i( 0 ), imath.V2i( 512 ) ), 1 ) )
		constant["color"].setValue( color )
		addDepth = GafferImage.FlatToDeep()
		addDepth["in"].setInput( constant["out"] )
		addDepth["depth"].setValue( depth )
		return constant, addDepth

	constant1, addDepth1 = deepConstant( imath.Color4f( 0.25, 0.5, 1.0, 0.5 ), 10.0 )
	constant2, addDepth2 = deepConstant( imath.Color4f( 2.0, 3.0, 4.0, 1.0 ), 20.0 )

	merge = GafferImage.DeepMerge()
	merge["in"][0].setInput( addDepth1["out"] )
	merge["in"][1].setInput( addDepth2["out"] )

	expectedChannelData = {
		"R" : IECore.FloatVectorData( [ 0.25, 2.0 ] * ts * ts ),
		"G" : IECore.FloatVectorData( [ 0.5, 3.0 ] * ts * ts ),
		"B" : IECore.FloatVectorData( [ 1.0, 4.0 ] * ts * ts ),
		"A" : IECore.FloatVectorData( [ 0.5, 1.0 ] * ts * ts ),
		"Z" : IECore.FloatVectorData( [ 10.0, 20.0 ] * ts * ts ),
	}

	def assertExpected() :
		for channelName, data in expectedChannelData.items() :
			self.assertEqual( merge["out"].channelData( channelName, imath.V2i( 0 ) ), data )

	assertExpected()

	# Giving the first input a thickness introduces a distinct ZBack.
	addDepth1["zBackMode"].setValue( GafferImage.FlatToDeep.ZBackMode.Thickness )
	addDepth1["thickness"].setValue( 2.0 )
	expectedChannelData["ZBack"] = IECore.FloatVectorData( [ 12.0, 20.0 ] * ts * ts )
	assertExpected()

	# And likewise for the second input.
	addDepth2["zBackMode"].setValue( GafferImage.FlatToDeep.ZBackMode.Thickness )
	addDepth2["thickness"].setValue( 0.1 )
	expectedChannelData["ZBack"] = IECore.FloatVectorData( [ 12.0, 20.1 ] * ts * ts )
	assertExpected()
def testConstructor( self ) :
	"""VectorWarp nodes construct successfully and are of the expected type."""

	w = GafferImage.VectorWarp()
	# assertIsInstance gives a clearer failure message than
	# assertTrue( isinstance( ... ) ), with identical semantics.
	self.assertIsInstance( w, GafferImage.VectorWarp )
def testEmptyInputDataWindow( self ) :
	"""Mirror passes an empty data window straight through unchanged."""

	mirror = GafferImage.Mirror()
	self.assertTrue( GafferImage.empty( mirror["in"]["dataWindow"].getValue() ) )
	self.assertTrue( GafferImage.empty( mirror["out"]["dataWindow"].getValue() ) )
def testWarpImage( self ) :
	"""VectorWarp reproduces its input under an identity warp, and matches a reference image for a distorted warp in both vector modes."""

	def __warpImage( size, distortion, idistortStyle ) :
		# Builds an R/G vector image. With `idistortStyle` the vectors are
		# relative pixel offsets; otherwise absolute normalized positions.
		window = imath.Box2i( imath.V2i( 0 ), size - imath.V2i( 1 ) )
		image = IECoreImage.ImagePrimitive( window, window )

		R = IECore.FloatVectorData( size.x * size.y )
		G = IECore.FloatVectorData( size.x * size.y )
		for iy in range( size.y ) :
			y = 1 - ( iy + 0.5 ) / size.y
			for ix in range( size.x ) :
				x = ( ix + 0.5 ) / size.x
				i = iy * size.x + ix
				if idistortStyle :
					R[i] = distortion * math.sin( y * 8 ) * size.x
					G[i] = distortion * math.sin( x * 8 ) * size.y
				else :
					R[i] = x + distortion * math.sin( y * 8 )
					G[i] = y + distortion * math.sin( x * 8 )

		image["R"] = R
		image["G"] = G
		return image

	def __dotGrid( size ) :
		# A grid of nested dots, used as an easily-recognisable warp source.
		window = imath.Box2i( imath.V2i( 0 ), size - imath.V2i( 1 ) )
		image = IECoreImage.ImagePrimitive( window, window )

		R = IECore.FloatVectorData( size.x * size.y )
		G = IECore.FloatVectorData( size.x * size.y )
		B = IECore.FloatVectorData( size.x * size.y )
		for iy in range( 0, size.y ) :
			for ix in range( 0, size.x ) :
				i = iy * size.x + ix
				q = max( ix % 16, iy % 16 )
				R[i] = q < 1
				G[i] = q < 4
				B[i] = q < 8

		image["R"] = R
		image["G"] = G
		image["B"] = B
		return image

	objectToImageSource = GafferImage.ObjectToImage()
	objectToImageSource["object"].setValue( __dotGrid( imath.V2i( 300 ) ) )

	# TODO - reorder channels of our source image because ObjectToImage
	# outputs in the opposite order to the rest of Gaffer. This probably
	# should be fixed in ObjectToImage, or we shouldn't depend on channel
	# order to check if images are equal?
	sourceReorderConstant = GafferImage.Constant()
	sourceReorderConstant["format"].setValue( GafferImage.Format( 300, 300, 1.000 ) )
	sourceReorderDelete = GafferImage.DeleteChannels()
	sourceReorderDelete["channels"].setValue( IECore.StringVectorData( [ "A" ] ) )
	sourceReorderDelete["in"].setInput( sourceReorderConstant["out"] )
	sourceReorder = GafferImage.CopyChannels()
	sourceReorder["channels"].setValue( "R G B" )
	sourceReorder["in"]["in0"].setInput( sourceReorderDelete["out"] )
	sourceReorder["in"]["in1"].setInput( objectToImageSource["out"] )

	objectToImageVector = GafferImage.ObjectToImage()

	vectorWarp = GafferImage.VectorWarp()
	vectorWarp["in"].setInput( sourceReorder["out"] )
	vectorWarp["vector"].setInput( objectToImageVector["out"] )

	# A warp with no distortion and a box filter must reproduce the input.
	objectToImageVector["object"].setValue( __warpImage( imath.V2i( 300 ), 0, False ) )
	vectorWarp["filter"].setValue( "box" )
	self.assertImagesEqual( vectorWarp["out"], sourceReorder["out"], maxDifference = 0.00001 )

	# A warp with distortion must produce the expected reference output.
	objectToImageVector["object"].setValue( __warpImage( imath.V2i( 300 ), 0.2, False ) )
	vectorWarp["filter"].setValue( "blackman-harris" )

	if False :
		# Enable to write out images for visual comparison.
		testWriter = GafferImage.ImageWriter()
		testWriter["in"].setInput( vectorWarp["out"] )
		testWriter["fileName"].setValue( "/tmp/dotGrid.warped.exr" )
		testWriter["task"].execute()

	expectedReader = GafferImage.ImageReader()
	expectedReader["fileName"].setValue( os.path.dirname( __file__ ) + "/images/dotGrid.warped.exr" )

	# Test that we can get the same result using pixel offsets instead of
	# normalized coordinates.
	objectToImageVector["object"].setValue( __warpImage( imath.V2i( 300 ), 0.2, True ) )
	vectorWarp["vectorMode"].setValue( GafferImage.VectorWarp.VectorMode.Relative )
	vectorWarp["vectorUnits"].setValue( GafferImage.VectorWarp.VectorUnits.Pixels )

	self.assertImagesEqual( vectorWarp["out"], expectedReader["out"], maxDifference = 0.0005, ignoreMetadata = True )
def test( self ) :
	"""OSLImage : acts as a pass-through without a shader, shuffles channels with one, and signals dirtiness on network edits."""

	def channelShader( channelName ) :
		shader = GafferOSL.OSLShader()
		shader.loadShader( "ImageProcessing/InChannel" )
		shader["parameters"]["channelName"].setValue( channelName )
		return shader

	getRed = channelShader( "R" )
	getGreen = channelShader( "G" )
	getBlue = channelShader( "B" )

	# Build a colour with red and blue deliberately swapped.
	buildColor = GafferOSL.OSLShader()
	buildColor.loadShader( "Utility/BuildColor" )
	buildColor["parameters"]["r"].setInput( getBlue["out"]["channelValue"] )
	buildColor["parameters"]["g"].setInput( getGreen["out"]["channelValue"] )
	buildColor["parameters"]["b"].setInput( getRed["out"]["channelValue"] )

	outRGB = GafferOSL.OSLShader()
	outRGB.loadShader( "ImageProcessing/OutLayer" )
	outRGB["parameters"]["layerColor"].setInput( buildColor["out"]["c"] )

	imageShader = GafferOSL.OSLShader()
	imageShader.loadShader( "ImageProcessing/OutImage" )
	imageShader["parameters"]["in0"].setInput( outRGB["out"]["layer"] )

	reader = GafferImage.ImageReader()
	reader["fileName"].setValue( os.path.expandvars( "$GAFFER_ROOT/python/GafferImageTest/images/rgb.100x100.exr" ) )

	image = GafferOSL.OSLImage()
	image["in"].setInput( reader["out"] )

	# We haven't connected the shader yet, so the node should act as a pass through.
	self.assertEqual( image["out"].image(), reader["out"].image() )
	self.assertEqual( image["out"].imageHash(), reader["out"].imageHash() )

	# That should all change when we hook up a shader.
	cs = GafferTest.CapturingSlot( image.plugDirtiedSignal() )

	def assertShadingDirtied() :
		# The expected dirty-propagation sequence when the shading network changes.
		self.assertEqual( len( cs ), 5 )
		expected = ( image["shader"], image["__shading"], image["out"]["channelNames"], image["out"]["channelData"], image["out"] )
		for signalled, plug in zip( cs, expected ) :
			self.assertTrue( signalled[0].isSame( plug ) )

	image["shader"].setInput( imageShader["out"] )
	assertShadingDirtied()

	inputImage = reader["out"].image()
	outputImage = image["out"].image()
	self.assertNotEqual( inputImage, outputImage )
	self.assertEqual( outputImage["R"].data, inputImage["B"].data )
	self.assertEqual( outputImage["G"].data, inputImage["G"].data )
	self.assertEqual( outputImage["B"].data, inputImage["R"].data )

	# Changes in the shader network should signal more dirtiness.
	del cs[:]
	getGreen["parameters"]["channelName"].setValue( "R" )
	assertShadingDirtied()

	del cs[:]
	buildColor["parameters"]["r"].setInput( getRed["out"]["channelValue"] )
	assertShadingDirtied()

	inputImage = reader["out"].image()
	outputImage = image["out"].image()
	self.assertEqual( outputImage["R"].data, inputImage["R"].data )
	self.assertEqual( outputImage["G"].data, inputImage["R"].data )
	self.assertEqual( outputImage["B"].data, inputImage["R"].data )
def __init__( self, name = "ArnoldTextureBake" ) :
	# Builds the internal node network for baking textures with Arnold :
	# render tasks bake AOVs to per-UDIM EXRs, image tasks merge / fill /
	# convert them to textures, and an optional cleanup pass removes the
	# intermediate files.

	GafferDispatch.TaskNode.__init__( self, name )

	# Public plugs forming the node's user-facing interface.
	self["in"] = GafferScene.ScenePlug()
	self["filter"] = GafferScene.FilterPlug()
	self["bakeDirectory"] = Gaffer.StringPlug( "bakeDirectory", defaultValue = "" )
	self["defaultFileName"] = Gaffer.StringPlug( "defaultFileName", defaultValue = "${bakeDirectory}/<AOV>/<AOV>.<UDIM>.exr" )
	self["defaultResolution"] = Gaffer.IntPlug( "defaultResolution", defaultValue = 512 )
	self["uvSet"] = Gaffer.StringPlug( "uvSet", defaultValue = 'uv' )
	# Fixed : constructor name now matches the child name ( was "offset" ).
	self["normalOffset"] = Gaffer.FloatPlug( "normalOffset", defaultValue = 0.1 )
	self["aovs"] = Gaffer.StringPlug( "aovs", defaultValue = 'beauty:RGBA' )
	self["tasks"] = Gaffer.IntPlug( "tasks", defaultValue = 1 )
	self["cleanupIntermediateFiles"] = Gaffer.BoolPlug( "cleanupIntermediateFiles", defaultValue = True )

	# First, setup python commands which will dispatch a chunk of a render or image tasks as
	# immediate execution once they reach the farm - this allows us to run multiple tasks in
	# one farm process.
	self["__RenderDispatcher"] = GafferDispatch.PythonCommand()
	self["__RenderDispatcher"]["command"].setValue( inspect.cleandoc(
		"""
		import GafferDispatch
		# We need to access frame and "wedge:index" so that the hash of render varies with the wedge index,
		# so we might as well print what we're doing
		IECore.msg( IECore.MessageHandler.Level.Info, "Bake Process", "Dispatching render task index %i for frame %i" % ( context["wedge:index"], context.getFrame() ) )
		d = GafferDispatch.LocalDispatcher()
		d.dispatch( [ self.parent()["__bakeDirectoryContext"] ] )
		"""
	) )
	self["__ImageDispatcher"] = GafferDispatch.PythonCommand()
	self["__ImageDispatcher"]["preTasks"][0].setInput( self["__RenderDispatcher"]["task"] )
	self["__ImageDispatcher"]["command"].setValue( inspect.cleandoc(
		"""
		import GafferDispatch
		# We need to access frame and "wedge:index" so that the hash of render varies with the wedge index,
		# so we might as well print what we're doing
		IECore.msg( IECore.MessageHandler.Level.Info, "Bake Process", "Dispatching image task index %i for frame %i" % ( context["wedge:index"], context.getFrame() ) )
		d = GafferDispatch.LocalDispatcher()
		d.dispatch( [ self.parent()["__CleanUpSwitch"] ] )
		"""
	) )

	# Wedge based on tasks into the overall number of tasks to run.  Note that we don't know how
	# much work each task will do until we actually run the render tasks ( this is when scene
	# expansion happens ).  Because we must group all tasks that write to the same file into the
	# same task batch, if tasks is a large number, some tasks batches could end up empty
	self["__MainWedge"] = GafferDispatch.Wedge()
	self["__MainWedge"]["preTasks"][0].setInput( self["__ImageDispatcher"]["task"] )
	self["__MainWedge"]["mode"].setValue( 1 )
	self["__MainWedge"]["intMin"].setValue( 1 )
	self["__MainWedge"]["intMax"].setInput( self["tasks"] )

	self["task"].setInput( self["__MainWedge"]["task"] )
	self["task"].setFlags( Gaffer.Plug.Flags.Serialisable, False )

	# Now set up the render tasks.  This involves doing the actual rendering, and triggering the
	# output of the file list index file.

	# First get rid of options from the upstream scene that could mess up the bake
	self["__OptionOverrides"] = GafferScene.StandardOptions()
	self["__OptionOverrides"]["in"].setInput( self["in"] )
	self["__OptionOverrides"]["options"]["pixelAspectRatio"]["enabled"].setValue( True )
	self["__OptionOverrides"]["options"]["resolutionMultiplier"]["enabled"].setValue( True )
	self["__OptionOverrides"]["options"]["overscan"]["enabled"].setValue( True )
	self["__OptionOverrides"]["options"]["renderCropWindow"]["enabled"].setValue( True )
	self["__OptionOverrides"]["options"]["cameraBlur"]["enabled"].setValue( True )
	self["__OptionOverrides"]["options"]["transformBlur"]["enabled"].setValue( True )
	self["__OptionOverrides"]["options"]["deformationBlur"]["enabled"].setValue( True )

	self["__CameraSetup"] = self.__CameraSetup()
	self["__CameraSetup"]["in"].setInput( self["__OptionOverrides"]["out"] )
	self["__CameraSetup"]["filter"].setInput( self["filter"] )
	self["__CameraSetup"]["defaultFileName"].setInput( self["defaultFileName"] )
	self["__CameraSetup"]["defaultResolution"].setInput( self["defaultResolution"] )
	self["__CameraSetup"]["uvSet"].setInput( self["uvSet"] )
	self["__CameraSetup"]["aovs"].setInput( self["aovs"] )
	self["__CameraSetup"]["normalOffset"].setInput( self["normalOffset"] )
	self["__CameraSetup"]["tasks"].setInput( self["tasks"] )

	# Drive the camera setup's task index from the wedge context.
	self["__Expression"] = Gaffer.Expression()
	self["__Expression"].setExpression( 'parent["__CameraSetup"]["taskIndex"] = context.get( "wedge:index", 0 )', "python" )

	self["__indexFilePath"] = Gaffer.StringPlug()
	self["__indexFilePath"].setFlags( Gaffer.Plug.Flags.Serialisable, False )
	self["__IndexFileExpression"] = Gaffer.Expression()
	self["__IndexFileExpression"].setExpression( inspect.cleandoc(
		"""
		import os
		parent["__indexFilePath"] = os.path.join( parent["bakeDirectory"],
			"BAKE_FILE_INDEX_" + str( context.get("wedge:index", 0 ) ) + ".####.txt"
		)
		"""
	), "python" )

	# Fixed : PythonCommand lives in GafferDispatch, not Gaffer ( was
	# Gaffer.PythonCommand(), which would raise AttributeError ).
	self["__outputIndexCommand"] = GafferDispatch.PythonCommand()
	self["__outputIndexCommand"]["variables"].addMember( "bakeDirectory", Gaffer.StringPlug() )
	self["__outputIndexCommand"]["variables"][0]["value"].setInput( self["bakeDirectory"] )
	self["__outputIndexCommand"]["variables"].addMember( "indexFilePath", Gaffer.StringPlug() )
	self["__outputIndexCommand"]["variables"][1]["value"].setInput( self["__indexFilePath"] )
	self["__outputIndexCommand"]["variables"].addMember( "fileList", Gaffer.StringVectorDataPlug( defaultValue = IECore.StringVectorData() ) )
	self["__outputIndexCommand"]["variables"][2]["value"].setInput( self["__CameraSetup"]["renderFileList"] )
	self["__outputIndexCommand"]["command"].setValue( inspect.cleandoc(
		"""
		import os
		import distutils.dir_util

		# Ensure path exists
		distutils.dir_util.mkpath( variables["bakeDirectory"] )

		f = open( variables["indexFilePath"], "w" )

		f.writelines( [ i + "\\n" for i in sorted( variables["fileList"] ) ] )
		f.close()
		IECore.msg( IECore.MessageHandler.Level.Info, "Bake Process", "Wrote list of bake files for this chunk to " + variables["indexFilePath"] )
		"""
	) )

	self["__arnoldRender"] = GafferArnold.ArnoldRender()
	self["__arnoldRender"]["preTasks"][0].setInput( self["__outputIndexCommand"]["task"] )
	self["__arnoldRender"]["dispatcher"]["immediate"].setValue( True )
	self["__arnoldRender"]["in"].setInput( self["__CameraSetup"]["out"] )

	self["__bakeDirectoryContext"] = GafferDispatch.TaskContextVariables()
	self["__bakeDirectoryContext"]["variables"].addMember( "bakeDirectory", Gaffer.StringPlug() )
	self["__bakeDirectoryContext"]["variables"][0]["value"].setInput( self["bakeDirectory"] )
	self["__bakeDirectoryContext"]["preTasks"][0].setInput( self["__arnoldRender"]["task"] )

	# Now set up the image tasks.  This involves merging all layers for a UDIM, filling in the
	# background, writing out this image, converting it to tx, and optionally deleting all the exrs

	self["__imageList"] = Gaffer.CompoundObjectPlug( "__imageList", defaultValue = IECore.CompoundObject() )
	self["__imageList"].setFlags( Gaffer.Plug.Flags.Serialisable, False )

	self["__ImageReader"] = GafferImage.ImageReader()
	self["__CurInputFileExpression"] = Gaffer.Expression()
	self["__CurInputFileExpression"].setExpression( inspect.cleandoc(
		"""
		l = parent["__imageList"]
		outFile = context["wedge:outFile"]
		loopIndex = context[ "loop:index" ]
		parent["__ImageReader"]["fileName"] = l[outFile][ loopIndex ]
		"""
	), "python" )

	# Find the max size of any input file
	self["__SizeLoop"] = Gaffer.LoopComputeNode()
	self["__SizeLoop"].setup( Gaffer.IntPlug() )

	self["__SizeMaxExpression"] = Gaffer.Expression()
	self["__SizeMaxExpression"].setExpression( inspect.cleandoc(
		"""
		f = parent["__ImageReader"]["out"]["format"]
		parent["__SizeLoop"]["next"] = max( f.width(), parent["__SizeLoop"]["previous"] )
		"""
	), "python" )

	# Loop over all input files for this output file, and merge them all together
	self["__ImageLoop"] = Gaffer.LoopComputeNode()
	self["__ImageLoop"].setup( GafferImage.ImagePlug() )

	self["__NumInputsForCurOutputExpression"] = Gaffer.Expression()
	self["__NumInputsForCurOutputExpression"].setExpression( inspect.cleandoc(
		"""
		l = parent["__imageList"]
		outFile = context["wedge:outFile"]
		numInputs = len( l[outFile] )
		parent["__ImageLoop"]["iterations"] = numInputs
		parent["__SizeLoop"]["iterations"] = numInputs
		"""
	), "python" )

	# Resize every input to the maximum size found above before merging.
	self["__Resize"] = GafferImage.Resize()
	self["__Resize"]["format"]["displayWindow"]["min"].setValue( imath.V2i( 0, 0 ) )
	self["__Resize"]["format"]["displayWindow"]["max"]["x"].setInput( self["__SizeLoop"]["out"] )
	self["__Resize"]["format"]["displayWindow"]["max"]["y"].setInput( self["__SizeLoop"]["out"] )
	self["__Resize"]["in"].setInput( self["__ImageReader"]["out"] )

	self["__Merge"] = GafferImage.Merge()
	self["__Merge"]["in"][0].setInput( self["__Resize"]["out"] )
	self["__Merge"]["in"][1].setInput( self["__ImageLoop"]["previous"] )
	self["__Merge"]["operation"].setValue( GafferImage.Merge.Operation.Add )

	self["__ImageLoop"]["next"].setInput( self["__Merge"]["out"] )

	# Now that we've merged everything together, we can use a BleedFill to fill in the background,
	# so that texture filtering across the edges will pull in colors that are at least reasonable.
	self["__BleedFill"] = GafferImage.BleedFill()
	self["__BleedFill"]["in"].setInput( self["__ImageLoop"]["out"] )

	# Write out the result
	self["__ImageWriter"] = GafferImage.ImageWriter()
	self["__ImageWriter"]["in"].setInput( self["__BleedFill"]["out"] )

	# Convert result to texture
	self["__ConvertCommand"] = GafferDispatch.SystemCommand()
	self["__ConvertCommand"]["substitutions"].addMember( "inFile", IECore.StringData() )
	self["__ConvertCommand"]["substitutions"].addMember( "outFile", IECore.StringData() )
	self["__ConvertCommand"]["preTasks"][0].setInput( self["__ImageWriter"]["task"] )
	self["__ConvertCommand"]["command"].setValue( 'maketx --wrap clamp {inFile} -o {outFile}' )

	self["__CommandSetupExpression"] = Gaffer.Expression()
	self["__CommandSetupExpression"].setExpression( inspect.cleandoc(
		"""
		outFileBase = context["wedge:outFile"]
		tmpExr = outFileBase + ".tmp.exr"
		parent["__ImageWriter"]["fileName"] = tmpExr
		parent["__ConvertCommand"]["substitutions"]["member1"]["value"] = tmpExr
		parent["__ConvertCommand"]["substitutions"]["member2"]["value"] = outFileBase + ".tx"
		"""
	), "python" )

	self["__ImageWedge"] = GafferDispatch.Wedge()
	self["__ImageWedge"]["preTasks"][0].setInput( self["__ConvertCommand"]["task"] )
	self["__ImageWedge"]["variable"].setValue( 'wedge:outFile' )
	self["__ImageWedge"]["indexVariable"].setValue( 'wedge:outFileIndex' )
	# Fixed : Wedge ( and its Mode enum ) live in GafferDispatch, not Gaffer.
	self["__ImageWedge"]["mode"].setValue( int( GafferDispatch.Wedge.Mode.StringList ) )

	self["__CleanUpCommand"] = GafferDispatch.PythonCommand()
	self["__CleanUpCommand"]["preTasks"][0].setInput( self["__ImageWedge"]["task"] )
	self["__CleanUpCommand"]["variables"].addMember( "filesToDelete", Gaffer.StringVectorDataPlug( defaultValue = IECore.StringVectorData() ) )
	self["__CleanUpCommand"]["command"].setValue( inspect.cleandoc(
		"""
		import os

		for tmpFile in variables["filesToDelete"]:
			os.remove( tmpFile )
		"""
	) )

	self["__CleanUpExpression"] = Gaffer.Expression()
	self["__CleanUpExpression"].setExpression( inspect.cleandoc(
		"""
		imageList = parent["__imageList"]

		toDelete = []
		for outFileBase, inputExrs in imageList.items():
			tmpExr = outFileBase + ".tmp.exr"
			toDelete.extend( inputExrs )
			toDelete.append( tmpExr )
		toDelete.append( parent["__indexFilePath"] )

		parent["__CleanUpCommand"]["variables"]["member1"]["value"] = IECore.StringVectorData( toDelete )
		"""
	), "python" )

	# Index 0 skips cleanup, index 1 ( cleanupIntermediateFiles ) runs it.
	self["__CleanUpSwitch"] = GafferDispatch.TaskSwitch()
	self["__CleanUpSwitch"]["preTasks"][0].setInput( self["__ImageWedge"]["task"] )
	self["__CleanUpSwitch"]["preTasks"][1].setInput( self["__CleanUpCommand"]["task"] )
	self["__CleanUpSwitch"]["index"].setInput( self["cleanupIntermediateFiles"] )

	# Set up the list of input image files to process, and the corresponding list of
	# output files to wedge over
	# NOTE(review) : under Python 3, fileDict.keys() is a view — confirm
	# IECore.StringVectorData accepts it here.
	self["__ImageSetupExpression"] = Gaffer.Expression()
	self["__ImageSetupExpression"].setExpression( inspect.cleandoc(
		"""
		f = open( parent["__indexFilePath"], "r" )

		fileList = f.read().splitlines()

		fileDict = {}
		for i in fileList:
			rootName = i.rsplit( ".exr", 1 )[0]
			if rootName in fileDict:
				fileDict[ rootName ].append( i )
			else:
				fileDict[ rootName ] = IECore.StringVectorData( [i] )

		parent["__imageList"] = IECore.CompoundObject( fileDict )

		parent["__ImageWedge"]["strings"] = IECore.StringVectorData( fileDict.keys() )
		"""
	), "python" )
def testBlurRange(self):
    # Blur a single white pixel by a range of radii ( driven by the loop
    # index ), lay the results out side by side in a strip, and compare
    # against a stored reference image.

    constant = GafferImage.Constant()
    constant["format"].setValue(GafferImage.Format(5, 5, 1.000))
    constant["color"].setValue(imath.Color4f(1, 1, 1, 1))

    # Crop the constant down to a single centre pixel.
    cropDot = GafferImage.Crop()
    cropDot["area"].setValue(imath.Box2i(imath.V2i(2, 2), imath.V2i(3, 3)))
    cropDot["affectDisplayWindow"].setValue(False)
    cropDot["in"].setInput(constant["out"])

    blur = GafferImage.Blur()
    blur["expandDataWindow"].setValue(True)
    blur["in"].setInput(cropDot["out"])
    blur["radius"]["y"].setInput(blur["radius"]["x"])

    # Radius increases with each loop iteration.
    expression = Gaffer.Expression()
    blur.addChild(expression)
    expression.setExpression(
        'parent["radius"]["x"] = context[ "loop:index" ] * 0.2', "python")

    loopInit = GafferImage.Constant()
    loopInit["format"].setValue(GafferImage.Format(5, 5, 1.000))

    imageLoop = Gaffer.Loop()
    imageLoop.setup(GafferImage.ImagePlug())
    imageLoop["in"].setInput(loopInit["out"])

    # Each iteration merges the current blur over the accumulated strip,
    # then shifts everything left to make room for the next result.
    merge = GafferImage.Merge()
    merge["in"].addChild(
        GafferImage.ImagePlug(
            "in2",
            flags=Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic,
        ))
    merge["in"]["in0"].setInput(blur["out"])
    merge["in"]["in1"].setInput(imageLoop["previous"])

    offset = GafferImage.Offset()
    offset["offset"].setValue(imath.V2i(-5, 0))
    offset["in"].setInput(merge["out"])

    imageLoop["next"].setInput(offset["out"])

    deleteChannels = GafferImage.DeleteChannels()
    deleteChannels["mode"].setValue(GafferImage.DeleteChannels.Mode.Keep)
    deleteChannels["channels"].setValue(IECore.StringVectorData(['R']))
    deleteChannels["in"].setInput(imageLoop["out"])

    finalCrop = GafferImage.Crop()
    finalCrop["areaSource"].setValue(1)
    finalCrop["in"].setInput(deleteChannels["out"])

    # Enable to write out images for visual comparison
    if False:
        testWriter = GafferImage.ImageWriter()
        testWriter["in"].setInput(finalCrop["out"])
        testWriter["fileName"].setValue("/tmp/blurRange.exr")
        testWriter["openexr"]["dataType"].setValue('float')
        testWriter["task"].execute()

    expectedReader = GafferImage.ImageReader()
    expectedReader["fileName"].setValue(
        os.path.dirname(__file__) + "/images/blurRange.exr")

    self.assertImagesEqual(finalCrop["out"],
                           expectedReader["out"],
                           maxDifference=0.00001,
                           ignoreMetadata=True)
def testUnspecifiedFilename( self ) :

	# A reader with no fileName set must still evaluate without raising.
	reader = GafferImage.OpenImageIOReader()
	reader["out"]["channelNames"].getValue()
	reader["out"].channelData( "R", imath.V2i( 0 ) )
def __testFormatValue( self ) :

	# An arbitrary non-default format shared by the format tests.
	width, height, pixelAspect = 1234, 5678, 1.4
	return GafferImage.Format( width, height, pixelAspect )
def testEmptyInput( self ) :

	# Cropping with only a minimum set ( and no input image ) must yield
	# an empty data window.
	node = GafferImage.Crop()
	node["area"]["min"].setValue( imath.V2i( 20 ) )

	dataWindow = node["out"]["dataWindow"].getValue()
	self.assertTrue( GafferImage.BufferAlgo.empty( dataWindow ) )
def testPassthroughs(self):
    # For each Merge operation, verify that the hash passthroughs and the
    # compute passthroughs agree, for tiles inside / outside / on the
    # edges of the two inputs' data windows.

    ts = GafferImage.ImagePlug.tileSize()

    checkerboardB = GafferImage.Checkerboard()
    checkerboardB["format"]["displayWindow"].setValue(
        imath.Box2i(imath.V2i(0), imath.V2i(4096)))

    checkerboardA = GafferImage.Checkerboard()
    checkerboardA["format"]["displayWindow"].setValue(
        imath.Box2i(imath.V2i(0), imath.V2i(4096)))
    checkerboardA["size"].setValue(imath.V2f(5))

    # Two partially-overlapping data windows, offset from tile boundaries.
    cropB = GafferImage.Crop()
    cropB["in"].setInput(checkerboardB["out"])
    cropB["area"].setValue(
        imath.Box2i(imath.V2i(ts * 0.5), imath.V2i(ts * 4.5)))
    cropB["affectDisplayWindow"].setValue(False)

    cropA = GafferImage.Crop()
    cropA["in"].setInput(checkerboardA["out"])
    cropA["area"].setValue(
        imath.Box2i(imath.V2i(ts * 2.5), imath.V2i(ts * 6.5)))
    cropA["affectDisplayWindow"].setValue(False)

    merge = GafferImage.Merge()
    merge["in"][0].setInput(cropB["out"])
    merge["in"][1].setInput(cropA["out"])
    merge["operation"].setValue(8)

    # Tile origins chosen to hit every interesting region of the overlap.
    sampleTileOrigins = {
        "insideBoth": imath.V2i(ts * 3, ts * 3),
        "outsideBoth": imath.V2i(ts * 5, ts),
        "outsideEdgeB": imath.V2i(ts, 0),
        "insideB": imath.V2i(ts, ts),
        "internalEdgeB": imath.V2i(ts * 4, ts),
        "internalEdgeA": imath.V2i(ts * 5, ts * 2),
        "insideA": imath.V2i(ts * 5, ts * 5),
        "outsideEdgeA": imath.V2i(ts * 6, ts * 5)
    }

    # ( operation, expected result where only A has data,
    #   expected result where only B has data )
    for opName, onlyA, onlyB in [("Atop", "black", "passB"),
                                 ("Divide", "operate", "black"),
                                 ("Out", "passA", "black"),
                                 ("Multiply", "black", "black"),
                                 ("Over", "passA", "passB"),
                                 ("Subtract", "passA", "operate"),
                                 ("Difference", "operate", "operate")]:
        op = getattr(GafferImage.Merge.Operation, opName)
        merge["operation"].setValue(op)

        results = {}
        for name, tileOrigin in sampleTileOrigins.items():
            # We want to check the value pass through code independently
            # of the hash passthrough code, which we can do by dropping
            # the value cached and evaluating values first
            Gaffer.ValuePlug.clearCache()

            with Gaffer.Context() as c:
                c["image:tileOrigin"] = tileOrigin
                c["image:channelName"] = "R"

                # Classify the computed tile by object identity.
                data = merge["out"]["channelData"].getValue(_copy=False)
                if data.isSame(
                        GafferImage.ImagePlug.blackTile(_copy=False)):
                    computeMode = "black"
                elif data.isSame(
                        cropB["out"]["channelData"].getValue(_copy=False)):
                    computeMode = "passB"
                elif data.isSame(
                        cropA["out"]["channelData"].getValue(_copy=False)):
                    computeMode = "passA"
                else:
                    computeMode = "operate"

                # Classify the hash the same way.
                h = merge["out"]["channelData"].hash()
                if h == GafferImage.ImagePlug.blackTile().hash():
                    hashMode = "black"
                elif h == cropB["out"]["channelData"].hash():
                    hashMode = "passB"
                elif h == cropA["out"]["channelData"].hash():
                    hashMode = "passA"
                else:
                    hashMode = "operate"

                # Hash and compute passthroughs must agree.
                self.assertEqual(hashMode, computeMode)

                results[name] = hashMode

        self.assertEqual(results["insideBoth"], "operate")
        self.assertEqual(results["outsideBoth"], "black")
        self.assertEqual(results["outsideEdgeB"], onlyB)
        self.assertEqual(results["insideB"], onlyB)
        self.assertEqual(results["outsideEdgeA"], onlyA)
        self.assertEqual(results["insideA"], onlyA)

        # Tiles straddling an internal data-window edge can only pass
        # through when the other input contributes nothing at all.
        if onlyA == "black" or onlyB == "black":
            self.assertEqual(results["internalEdgeB"], onlyB)
            self.assertEqual(results["internalEdgeA"], onlyA)
        else:
            self.assertEqual(results["internalEdgeB"], "operate")
            self.assertEqual(results["internalEdgeA"], "operate")
def testDefaultChannelNamesMethod( self ) :

	# The default channelNames value must include the standard RGB channels.
	defaultNames = GafferImage.ImagePlug()["channelNames"].defaultValue()
	for channel in ( "R", "G", "B" ) :
		self.assertTrue( channel in defaultNames )
def runBoundaryCorrectness(self, scale):
    # Merge five overlapping checkerboard crops at the given resolution
    # multiplier, scale back down to a common size, lay out one result per
    # merge operation in a horizontal strip, and compare against a
    # reference image.

    testMerge = GafferImage.Merge()

    # Keep the upstream nodes alive for the lifetime of the test.
    subImageNodes = []
    # ( checker size, colour, crop bound ) per input, all scaled below.
    for checkSize, col, bound in [
        (2, (0.672299981, 0.672299981, 0), ((11, 7), (61, 57))),
        (4, (0.972599983, 0.493499994, 1), ((9, 5), (59, 55))),
        (6, (0.310799986, 0.843800008, 1), ((0, 21), (1024, 41))),
        (8, (0.958999991, 0.672299981, 0.0296), ((22, 0), (42, 1024))),
        (10, (0.950900018, 0.0899000019, 0.235499993), ((7, 10), (47,
                                                                  50))),
    ]:
        checkerboard = GafferImage.Checkerboard()
        checkerboard["format"].setValue(
            GafferImage.Format(1024 * scale, 1024 * scale, 1.000))
        checkerboard["size"].setValue(imath.V2f(checkSize * scale))
        checkerboard["colorA"].setValue(
            imath.Color4f(0.1 * col[0], 0.1 * col[1], 0.1 * col[2], 0.3))
        checkerboard["colorB"].setValue(
            imath.Color4f(0.5 * col[0], 0.5 * col[1], 0.5 * col[2], 0.7))

        crop = GafferImage.Crop("Crop")
        crop["in"].setInput(checkerboard["out"])
        crop["area"].setValue(
            imath.Box2i(
                imath.V2i(scale * bound[0][0], scale * bound[0][1]),
                imath.V2i(scale * bound[1][0], scale * bound[1][1])))
        crop["affectDisplayWindow"].setValue(False)

        subImageNodes.append(checkerboard)
        subImageNodes.append(crop)

        testMerge["in"][-1].setInput(crop["out"])

    # The merge operation is driven by the loop index below.
    testMerge["expression"] = Gaffer.Expression()
    testMerge["expression"].setExpression(
        'parent["operation"] = context[ "loop:index" ]')

    # Scale back down so every run is compared at the same resolution.
    inverseScale = GafferImage.ImageTransform()
    inverseScale["in"].setInput(testMerge["out"])
    inverseScale["filter"].setValue("box")
    inverseScale["transform"]["scale"].setValue(imath.V2f(1.0 / scale))

    crop1 = GafferImage.Crop()
    crop1["in"].setInput(inverseScale["out"])
    crop1["area"].setValue(imath.Box2i(imath.V2i(0, 0), imath.V2i(64, 64)))

    loopInit = GafferImage.Constant()
    loopInit["format"].setValue(GafferImage.Format(896, 64, 1.000))
    loopInit["color"].setValue(imath.Color4f(0))

    # Offset each iteration's result into its own 64 pixel wide slot.
    loopOffset = GafferImage.Offset()
    loopOffset["in"].setInput(crop1["out"])
    loopOffset["expression"] = Gaffer.Expression()
    loopOffset["expression"].setExpression(
        'parent["offset"]["x"] = 64 * context[ "loop:index" ]')

    loopMerge = GafferImage.Merge()
    loopMerge["in"][1].setInput(loopOffset["out"])

    # One loop iteration per merge operation.
    loop = Gaffer.Loop()
    loop.setup(GafferImage.ImagePlug("in", ))
    loop["iterations"].setValue(14)
    loop["in"].setInput(loopInit["out"])
    loop["next"].setInput(loopMerge["out"])
    loopMerge["in"][0].setInput(loop["previous"])

    # Uncomment for debug
    #imageWriter = GafferImage.ImageWriter( "ImageWriter" )
    #imageWriter["in"].setInput( loop["out"] )
    #imageWriter['openexr']['dataType'].setValue( "float" )
    #imageWriter["fileName"].setValue( "/tmp/mergeBoundaries.exr" )
    #imageWriter.execute()

    reader = GafferImage.ImageReader()
    reader["fileName"].setValue(self.mergeBoundariesRefPath)

    # Scaled runs accumulate tiny filtering differences; exact at scale 1.
    self.assertImagesEqual(loop["out"],
                           reader["out"],
                           ignoreMetadata=True,
                           maxDifference=1e-5 if scale > 1 else 0)
def __init__( self, name = "ImageLoop" ) :
	# Convenience node : a LoopComputeNode pre-configured to loop over
	# ImagePlugs.
	Gaffer.LoopComputeNode.__init__( self, name )

	self.setup( GafferImage.ImagePlug() )
def testLayerMapping(self):
    # CollectImages evaluates its input once per "collect:layerName" :
    # check format / metadata selection ( first layer wins ), channel
    # naming, duplicate layers and overlapping layer names.

    constant1 = GafferImage.Constant()
    constant1['color'].setValue(imath.Color4f(0.1, 0.2, 0.3, 0.4))
    constant1["format"].setValue(GafferImage.Format(10, 10, 1.000))
    metadata1 = GafferImage.ImageMetadata()
    metadata1["in"].setInput(constant1["out"])
    metadata1["metadata"].addChild(Gaffer.NameValuePlug("test", 1))

    constant2 = GafferImage.Constant()
    constant2['color'].setValue(imath.Color4f(0.2, 0.4, 0.6, 0.8))
    constant2["format"].setValue(GafferImage.Format(20, 20, 1.000))
    metadata2 = GafferImage.ImageMetadata()
    metadata2["in"].setInput(constant2["out"])
    metadata2["metadata"].addChild(Gaffer.NameValuePlug("test", 2))

    # Layer "A" reads the first input, anything else reads the second.
    switch = Gaffer.Switch()
    switch.setup(GafferImage.ImagePlug())
    switch["in"][0].setInput(metadata1["out"])
    switch["in"][1].setInput(metadata2["out"])

    e = Gaffer.Expression()
    switch.addChild(e)
    e.setExpression(
        'parent["index"] = context["collect:layerName"] != "A"', "python")

    collect = GafferImage.CollectImages()
    collect["in"].setInput(switch["out"])

    # Metadata and format are driven by the first layer
    collect["rootLayers"].setValue(IECore.StringVectorData(['A', 'B']))
    self.assertEqual(collect["out"]["format"].getValue(),
                     GafferImage.Format(10, 10, 1))
    self.assertEqual(collect["out"]["metadata"].getValue(),
                     IECore.CompoundData({"test": 1}))

    collect["rootLayers"].setValue(IECore.StringVectorData(['B', 'A']))
    self.assertEqual(collect["out"]["format"].getValue(),
                     GafferImage.Format(20, 20, 1))
    self.assertEqual(collect["out"]["metadata"].getValue(),
                     IECore.CompoundData({"test": 2}))

    # With no layers at all, fall back to defaults.
    collect["rootLayers"].setValue(IECore.StringVectorData([]))
    self.assertEqual(
        collect["out"]["format"].getValue(),
        constant1["format"].getDefaultFormat(Gaffer.Context.current()))
    self.assertEqual(collect["out"]["metadata"].getValue(),
                     IECore.CompoundData())

    sampler = GafferImage.ImageSampler("ImageSampler")
    sampler["pixel"].setValue(imath.V2f(1, 1))
    sampler["channels"].setValue(
        IECore.StringVectorData(["A.R", "A.G", "A.B", "A.A"]))
    sampler["image"].setInput(collect["out"])

    collect["rootLayers"].setValue(IECore.StringVectorData(['A']))
    self.assertEqual(list(collect["out"]["channelNames"].getValue()),
                     ["A.R", "A.G", "A.B", "A.A"])
    self.assertEqual(sampler["color"].getValue(),
                     imath.Color4f(0.1, 0.2, 0.3, 0.4))

    # Test simple duplicate
    collect["rootLayers"].setValue(IECore.StringVectorData(['A', 'A']))
    self.assertEqual(list(collect["out"]["channelNames"].getValue()),
                     ["A.R", "A.G", "A.B", "A.A"])
    self.assertEqual(sampler["color"].getValue(),
                     imath.Color4f(0.1, 0.2, 0.3, 0.4))

    collect["rootLayers"].setValue(IECore.StringVectorData(['A', 'B']))
    self.assertEqual(
        list(collect["out"]["channelNames"].getValue()),
        ["A.R", "A.G", "A.B", "A.A", "B.R", "B.G", "B.B", "B.A"])
    self.assertEqual(sampler["color"].getValue(),
                     imath.Color4f(0.1, 0.2, 0.3, 0.4))
    sampler["channels"].setValue(
        IECore.StringVectorData(["B.R", "B.G", "B.B", "B.A"]))
    self.assertEqual(sampler["color"].getValue(),
                     imath.Color4f(0.2, 0.4, 0.6, 0.8))

    # Test overlapping names take the first layer
    constant1["layer"].setValue("B")
    collect["rootLayers"].setValue(IECore.StringVectorData(['A', 'A.B']))
    sampler["channels"].setValue(
        IECore.StringVectorData(["A.B.R", "A.B.G", "A.B.B", "A.B.A"]))
    self.assertEqual(list(collect["out"]["channelNames"].getValue()),
                     ["A.B.R", "A.B.G", "A.B.B", "A.B.A"])
    self.assertEqual(sampler["color"].getValue(),
                     imath.Color4f(0.1, 0.2, 0.3, 0.4))

    collect["rootLayers"].setValue(IECore.StringVectorData(['A.B', 'A']))
    self.assertEqual(list(collect["out"]["channelNames"].getValue()),
                     ["A.B.R", "A.B.G", "A.B.B", "A.B.A"])
    self.assertEqual(sampler["color"].getValue(),
                     imath.Color4f(0.2, 0.4, 0.6, 0.8))
def testPassThroughs(self):
    # Mix should pass input hashes straight through when disabled or when
    # mix is exactly 0 or 1, and merge data windows otherwise.

    r = GafferImage.ImageReader()
    r["fileName"].setValue(self.rPath)

    g = GafferImage.ImageReader()
    g["fileName"].setValue(self.gPath)

    mix = GafferImage.Mix()

    input1Hash = r["out"].imageHash()
    mix["in"][0].setInput(r["out"])
    mix["in"][1].setInput(g["out"])
    mix["mix"].setValue(0.5)

    ##########################################
    # With a mix applied, the hashes don't match either input,
    # and the data window is merged
    ##########################################

    self.assertNotEqual(mix["out"].imageHash(), input1Hash)
    self.assertEqual(mix["out"]["dataWindow"].getValue(),
                     imath.Box2i(imath.V2i(20), imath.V2i(75)))

    ##########################################
    # Test that if we disable the node the hash gets passed through.
    ##########################################

    mix["enabled"].setValue(False)
    self.assertEqual(mix["out"].imageHash(), input1Hash)
    self.assertEqual(mix["out"]["dataWindow"].getValue(),
                     imath.Box2i(imath.V2i(20), imath.V2i(70)))
    self.assertImagesEqual(mix["out"], r["out"])

    ##########################################
    # Or if we enable but set mix to 0
    ##########################################

    mix["enabled"].setValue(True)
    mix["mix"].setValue(0)
    self.assertEqual(mix["out"].imageHash(), input1Hash)
    self.assertEqual(mix["out"]["dataWindow"].getValue(),
                     imath.Box2i(imath.V2i(20), imath.V2i(70)))
    self.assertImagesEqual(mix["out"], r["out"])

    ##########################################
    # Set mix to 1 to get pass through of other input
    # In this case, the overall image hash won't match because it still takes metadata from the first input
    # But we can check the other components
    ##########################################

    mix["mix"].setValue(1)

    self.assertEqual(mix["out"]["dataWindow"].hash(),
                     g["out"]["dataWindow"].hash())
    self.assertEqual(mix["out"]["channelNames"].hash(),
                     g["out"]["channelNames"].hash())

    # Just check the first tile of the data to make sure hashes are passing through
    with Gaffer.Context() as c:
        c["image:channelName"] = IECore.StringData("G")
        c["image:tileOrigin"] = IECore.V2iData(imath.V2i(0, 0))
        self.assertEqual(mix["out"]["channelData"].hash(),
                         g["out"]["channelData"].hash())

    self.assertEqual(mix["out"]["dataWindow"].getValue(),
                     imath.Box2i(imath.V2i(25), imath.V2i(75)))
    self.assertImagesEqual(mix["out"], g["out"], ignoreMetadata=True)
def testDeep(self):
    # CollectImages with deep inputs : per-layer channel data, sample
    # offsets and data windows must be collected correctly, and
    # inconsistent inputs must raise descriptive errors.

    constantA = GafferImage.Constant()
    constantA["color"].setValue(imath.Color4f(0.1, 0.2, 0.3, 0.4))
    constantB = GafferImage.Constant()
    constantB["color"].setValue(imath.Color4f(0.01, 0.02, 0.03, 0.04))
    constantC = GafferImage.Constant()
    constantC["color"].setValue(imath.Color4f(0.001, 0.002, 0.003, 0.004))
    constantD = GafferImage.Constant()
    constantD["color"].setValue(
        imath.Color4f(0.0001, 0.0002, 0.0003, 0.0004))

    # Two deep images, each with two samples per pixel.
    deepMergeAB = GafferImage.DeepMerge()
    deepMergeAB["in"][0].setInput(constantA["out"])
    deepMergeAB["in"][1].setInput(constantB["out"])

    deepMergeCD = GafferImage.DeepMerge()
    deepMergeCD["in"][0].setInput(constantC["out"])
    deepMergeCD["in"][1].setInput(constantD["out"])

    # Layer "CD" reads the second input, "AB" the first.
    switch = Gaffer.Switch()
    switch.setup(GafferImage.ImagePlug("in", ))
    switch["in"][0].setInput(deepMergeAB["out"])
    switch["in"][1].setInput(deepMergeCD["out"])

    switchExpr = Gaffer.Expression()
    switch.addChild(switchExpr)
    switchExpr.setExpression(
        'parent["index"] = context["collect:layerName"] == "CD"')

    collect = GafferImage.CollectImages()
    collect["in"].setInput(switch["out"])
    collect["rootLayers"].setValue(IECore.StringVectorData(['AB', 'CD']))

    o = imath.V2i(0)
    # Each collected channel must match the corresponding merge output.
    self.assertEqual(collect["out"].channelData("AB.R", o),
                     deepMergeAB["out"].channelData("R", o))
    self.assertEqual(collect["out"].channelData("AB.G", o),
                     deepMergeAB["out"].channelData("G", o))
    self.assertEqual(collect["out"].channelData("AB.B", o),
                     deepMergeAB["out"].channelData("B", o))
    self.assertEqual(collect["out"].channelData("AB.A", o),
                     deepMergeAB["out"].channelData("A", o))
    self.assertEqual(collect["out"].channelData("CD.R", o),
                     deepMergeCD["out"].channelData("R", o))
    self.assertEqual(collect["out"].channelData("CD.G", o),
                     deepMergeCD["out"].channelData("G", o))
    self.assertEqual(collect["out"].channelData("CD.B", o),
                     deepMergeCD["out"].channelData("B", o))
    self.assertEqual(collect["out"].channelData("CD.A", o),
                     deepMergeCD["out"].channelData("A", o))
    self.assertEqual(collect["out"].sampleOffsets(o),
                     deepMergeAB["out"].sampleOffsets(o))
    self.assertEqual(collect["out"].dataWindow(),
                     deepMergeAB["out"].dataWindow())
    self.assertEqual(collect["out"].deep(), True)
    self.assertEqual(
        collect["out"].channelNames(),
        IECore.StringVectorData([
            'AB.R', 'AB.G', 'AB.B', 'AB.A', 'CD.R', 'CD.G', 'CD.B', 'CD.A'
        ]))

    # Disabling one DeepMerge makes that layer flat : mixing flat and
    # deep layers must raise.
    deepMergeAB["enabled"].setValue(False)
    with six.assertRaisesRegex(
            self, Gaffer.ProcessException,
            r'Input to CollectImages must be consistent, but it is sometimes deep.*'
    ) as raised:
        collect["out"].deep()

    deepMergeAB["enabled"].setValue(True)

    # A third input gives layer AB three samples where CD has two :
    # mismatched sample counts must raise.
    deepMergeAB["in"][2].setInput(constantB["out"])

    with six.assertRaisesRegex(
            self, Gaffer.ProcessException,
            r'SampleOffsets on input to CollectImages must match. Pixel 0,0 received both 3 and 2 samples'
    ) as raised:
        collect["out"].sampleOffsets(o)

    # An offset input gives layer AB a different data window : mismatched
    # deep data windows must raise.
    offset = GafferImage.Offset()
    offset["in"].setInput(constantB["out"])
    offset["offset"].setValue(imath.V2i(-5, -13))

    deepMergeAB["in"][2].setInput(offset["out"])
    with six.assertRaisesRegex(
            self, Gaffer.ProcessException,
            r'DataWindows on deep input to CollectImages must match. Received both -5,-13 -> 1920,1080 and 0,0 -> 1920,1080'
    ) as raised:
        collect["out"].dataWindow()
def testSmallDataWindowOverLarge(self):
    # Mixing a small data window over a large one : the second input only
    # contributes inside its data window, while 25% of the first input is
    # retained everywhere by the 0.75 mask.

    background = GafferImage.Constant()
    background["format"].setValue(GafferImage.Format(500, 500, 1.0))
    background["color"].setValue(imath.Color4f(1, 0, 0, 1))

    foreground = GafferImage.Constant()
    foreground["format"].setValue(GafferImage.Format(500, 500, 1.0))
    foreground["color"].setValue(imath.Color4f(0, 1, 0, 1))

    mask = GafferImage.Constant()
    mask["format"].setValue(GafferImage.Format(500, 500, 1.0))
    mask["color"].setValue(imath.Color4f(0.75))

    # Restrict the green input to a small data window.
    foregroundCrop = GafferImage.Crop()
    foregroundCrop["in"].setInput(foreground["out"])
    foregroundCrop["areaSource"].setValue(foregroundCrop.AreaSource.Area)
    foregroundCrop["area"].setValue(
        imath.Box2i(imath.V2i(50), imath.V2i(162)))
    foregroundCrop["affectDisplayWindow"].setValue(False)

    mixNode = GafferImage.Mix()
    mixNode["in"][0].setInput(background["out"])
    mixNode["in"][1].setInput(foregroundCrop["out"])
    mixNode["mask"].setInput(mask["out"])

    displayWindow = mixNode["out"]["format"].getValue().getDisplayWindow()
    samplers = {
        channel: GafferImage.Sampler(mixNode["out"], channel,
                                     displayWindow)
        for channel in ("R", "G", "B")
    }

    def sample(x, y):
        # Read one RGB pixel from the mixed result.
        return imath.Color3f(
            samplers["R"].sample(x, y),
            samplers["G"].sample(x, y),
            samplers["B"].sample(x, y),
        )

    # We should only have green in areas which are inside the data window
    # of foregroundCrop, but we still only take 25% of the red everywhere.
    self.assertEqual(sample(49, 49), imath.Color3f(0.25, 0, 0))
    self.assertEqual(sample(50, 50), imath.Color3f(0.25, 0.75, 0))
    self.assertEqual(sample(161, 161), imath.Color3f(0.25, 0.75, 0))
    self.assertEqual(sample(162, 162), imath.Color3f(0.25, 0, 0))
def nodeMenuCreateCommand(menu):
    # Create a Blur for the node menu, with the radius components ganged
    # so that x and y are adjusted together by default.
    node = GafferImage.Blur()
    node["radius"].gang()
    return node
def testEnabledAffects(self):
    # Toggling "enabled" must dirty the output channel data.
    merge = GafferImage.Merge()
    dependents = merge.affects(merge["enabled"])
    self.assertTrue(merge["out"]["channelData"] in dependents)
def testBoxAspectConstructor( self ) :

	# Construct a Format from a display window box plus a pixel aspect.
	box = IECore.Box2i( IECore.V2i( 0 ), IECore.V2i( 49, 149 ) )
	fmt = GafferImage.Format( box, 1.3 )

	# The box max is inclusive here, hence width/height are max + 1.
	self.assertEqual( fmt.width(), 50 )
	self.assertEqual( fmt.height(), 150 )
	self.assertEqual( fmt.getPixelAspect(), 1.3 )