def renderQuadInto(self, mul=1, div=1, align=1,
                   depthtex=None, colortex=None, auxtex0=None, auxtex1=None):
    """ Creates an offscreen buffer for an intermediate computation.
    Installs a quad into the buffer.  Returns the fullscreen quad.
    The size of the buffer is initially equal to the size of the main
    window.  The parameters 'mul', 'div', and 'align' can be used to
    adjust that size. """

    texgroup = (depthtex, colortex, auxtex0, auxtex1)

    winx, winy = self.getScaledSize(mul, div, align)

    depthbits = bool(depthtex != None)

    buffer = self.createBuffer("filter-stage", winx, winy, texgroup, depthbits)
    if (buffer == None):
        return None

    cm = CardMaker("filter-stage-quad")
    cm.setFrameFullscreenQuad()
    quad = NodePath(cm.generate())
    quad.setDepthTest(0)
    quad.setDepthWrite(0)
    quad.setColor(Vec4(1, 0.5, 0.5, 1))

    quadcamnode = Camera("filter-quad-cam")
    lens = OrthographicLens()
    lens.setFilmSize(2, 2)
    lens.setFilmOffset(0, 0)
    lens.setNearFar(-1000, 1000)
    quadcamnode.setLens(lens)
    quadcam = quad.attachNewNode(quadcamnode)

    buffer.getDisplayRegion(0).setCamera(quadcam)
    buffer.getDisplayRegion(0).setActive(1)

    self.buffers.append(buffer)
    self.sizes.append((mul, div, align))

    return quad
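
# Hedged usage sketch (not part of the original code): one way a caller
# might use renderQuadInto() to add a half-resolution intermediate filter
# stage.  The "manager" argument, the shader file "blur_x.sha", and the
# shader input name "src" are illustrative assumptions; Texture and Shader
# come from the module's existing Panda3D imports.
def exampleBlurStage(manager, scenetex):
    # scenetex is assumed to be the color texture captured by an earlier
    # renderSceneInto() call.
    blurtex = Texture("blur-intermediate")
    # Request a buffer at half the window resolution (mul=1, div=2).
    quad = manager.renderQuadInto(div=2, colortex=blurtex)
    if quad is None:
        return None  # buffer creation can fail on weak hardware
    # The caller decides what the stage computes by assigning a shader.
    quad.setShader(Shader.load("blur_x.sha"))   # hypothetical shader file
    quad.setShaderInput("src", scenetex)        # hypothetical input name
    return blurtex
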
def __init__(self):

    # Preliminary capabilities check.

    if (base.win.getGsg().getSupportsBasicShaders() == 0):
        self.t = addTitle(
            "Firefly Demo: Video driver reports that shaders are not supported.")
        return
    if (base.win.getGsg().getSupportsDepthTexture() == 0):
        self.t = addTitle(
            "Firefly Demo: Video driver reports that depth textures are not supported.")
        return

    # This algorithm uses two offscreen buffers, one of which has
    # an auxiliary bitplane, and the offscreen buffers share a single
    # depth buffer.  This is a heck of a complicated buffer setup.
    # (makeFBO is a helper defined elsewhere in the sample; a hedged
    # sketch of it appears after this constructor.)

    self.modelbuffer = self.makeFBO("model buffer", 1)
    self.lightbuffer = self.makeFBO("light buffer", 0)

    # Creation of a high-powered buffer can fail if the graphics card
    # doesn't support the necessary OpenGL extensions.

    if (self.modelbuffer == None) or (self.lightbuffer == None):
        self.t = addTitle(
            "Firefly Demo: Video driver does not support multiple render targets")
        return

    # Create four render textures: depth, normal, albedo, and final.
    # Attach them to the various bitplanes of the offscreen buffers.

    self.texDepth = Texture()
    self.texDepth.setFormat(Texture.FDepthStencil)
    self.texAlbedo = Texture()
    self.texNormal = Texture()
    self.texFinal = Texture()

    self.modelbuffer.addRenderTexture(self.texDepth,
        GraphicsOutput.RTMBindOrCopy, GraphicsOutput.RTPDepthStencil)
    self.modelbuffer.addRenderTexture(self.texAlbedo,
        GraphicsOutput.RTMBindOrCopy, GraphicsOutput.RTPColor)
    self.modelbuffer.addRenderTexture(self.texNormal,
        GraphicsOutput.RTMBindOrCopy, GraphicsOutput.RTPAuxRgba0)
    self.lightbuffer.addRenderTexture(self.texFinal,
        GraphicsOutput.RTMBindOrCopy, GraphicsOutput.RTPColor)

    # Set the near and far clipping planes.

    base.cam.node().getLens().setNear(50.0)
    base.cam.node().getLens().setFar(500.0)
    lens = base.cam.node().getLens()

    # This algorithm uses three cameras: one to render the models into the
    # model buffer, one to render the lights into the light buffer, and
    # one to render "plain" (non-deferred shaded) stuff into the light
    # buffer.  Each camera has a bitmask to identify it.

    self.modelMask = 1
    self.lightMask = 2
    self.plainMask = 4

    self.modelcam = base.makeCamera(self.modelbuffer,
        lens=lens, scene=render, mask=self.modelMask)
    self.lightcam = base.makeCamera(self.lightbuffer,
        lens=lens, scene=render, mask=self.lightMask)
    self.plaincam = base.makeCamera(self.lightbuffer,
        lens=lens, scene=render, mask=self.plainMask)

    # Panda's main camera is not used.

    base.cam.node().setActive(0)

    # Take explicit control over the order in which the three
    # buffers are rendered.

    self.modelbuffer.setSort(1)
    self.lightbuffer.setSort(2)
    base.win.setSort(3)

    # Within the light buffer, control the order of the two cams.

    self.lightcam.node().getDisplayRegion(0).setSort(1)
    self.plaincam.node().getDisplayRegion(0).setSort(2)

    # By default, panda usually clears the screen before every
    # camera and before every window.  Tell it not to do that.
    # Then, tell it specifically when to clear and what to clear.

    self.modelcam.node().getDisplayRegion(0).disableClears()
    self.lightcam.node().getDisplayRegion(0).disableClears()
    self.plaincam.node().getDisplayRegion(0).disableClears()
    base.cam.node().getDisplayRegion(0).disableClears()
    base.cam2d.node().getDisplayRegion(0).disableClears()
    self.modelbuffer.disableClears()
    base.win.disableClears()

    self.modelbuffer.setClearColorActive(1)
    self.modelbuffer.setClearDepthActive(1)
    self.lightbuffer.setClearColorActive(1)
    self.lightbuffer.setClearColor(Vec4(0, 0, 0, 1))

    # Miscellaneous stuff.
    base.disableMouse()
    base.camera.setPos(-9.112, -211.077, 46.951)
    base.camera.setHpr(0, -7.5, 2.4)
    base.setBackgroundColor(Vec4(0, 0, 0, 0))
    random.seed()

    # Calculate the projection parameters for the final shader.
    # The math here is too complex to explain in an inline comment;
    # a full explanation is given in the HTML intro.

    proj = base.cam.node().getLens().getProjectionMat()
    proj_x = 0.5 * proj.getCell(3, 2) / proj.getCell(0, 0)
    proj_y = 0.5 * proj.getCell(3, 2)
    proj_z = 0.5 * proj.getCell(3, 2) / proj.getCell(2, 1)
    proj_w = -0.5 - 0.5 * proj.getCell(1, 2)

    # Configure the render state of the model camera.

    tempnode = NodePath(PandaNode("temp node"))
    tempnode.setAttrib(
        AlphaTestAttrib.make(RenderAttrib.MGreaterEqual, 0.5))
    tempnode.setShader(Shader.load(os.path.join(
        PANDA_SHADER_PATH, "samples/fireflies/fireflies_model.sha")))
    tempnode.setAttrib(DepthTestAttrib.make(RenderAttrib.MLessEqual))
    self.modelcam.node().setInitialState(tempnode.getState())

    # Configure the render state of the light camera.

    tempnode = NodePath(PandaNode("temp node"))
    tempnode.setShader(Shader.load(os.path.join(
        PANDA_SHADER_PATH, "samples/fireflies/fireflies_lighting.sha")))
    tempnode.setShaderInput("texnormal", self.texNormal)
    tempnode.setShaderInput("texalbedo", self.texAlbedo)
    tempnode.setShaderInput("texdepth", self.texDepth)
    tempnode.setShaderInput("proj", Vec4(proj_x, proj_y, proj_z, proj_w))
    tempnode.setAttrib(
        ColorBlendAttrib.make(ColorBlendAttrib.MAdd,
                              ColorBlendAttrib.OOne, ColorBlendAttrib.OOne))
    tempnode.setAttrib(
        CullFaceAttrib.make(CullFaceAttrib.MCullCounterClockwise))
    # The next line causes problems on Linux.
    #tempnode.setAttrib(DepthTestAttrib.make(RenderAttrib.MGreaterEqual))
    tempnode.setAttrib(DepthWriteAttrib.make(DepthWriteAttrib.MOff))
    self.lightcam.node().setInitialState(tempnode.getState())

    # Configure the render state of the plain camera.

    rs = RenderState.makeEmpty()
    self.plaincam.node().setInitialState(rs)

    # Clear any render attribs on the root node.  This is necessary
    # because, by default, panda assigns some attribs to the root
    # node.  These default attribs will override the
    # carefully-configured render attribs that we just attached
    # to the cameras.  The simplest solution is to just clear
    # them all out.

    render.setState(RenderState.makeEmpty())

    # My artist created a model in which some of the polygons
    # don't have textures.  This confuses the shader I wrote.
    # This little hack guarantees that everything has a texture.

    white = loader.loadTexture("models/samples/fireflies/white.jpg")
    render.setTexture(white, 0)

    # Create two subroots, to help speed up the cull traversal.

    self.lightroot = NodePath(PandaNode("lightroot"))
    self.lightroot.reparentTo(render)
    self.modelroot = NodePath(PandaNode("modelroot"))
    self.modelroot.reparentTo(render)
    self.lightroot.hide(BitMask32(self.modelMask))
    self.modelroot.hide(BitMask32(self.lightMask))
    self.modelroot.hide(BitMask32(self.plainMask))

    # Load the model of a forest.  Make it visible to the model camera.
    self.forest = NodePath(PandaNode("Forest Root"))
    self.forest.reparentTo(render)
    loader.loadModel(
        "models/samples/fireflies/background").reparentTo(self.forest)
    loader.loadModel(
        "models/samples/fireflies/foliage01").reparentTo(self.forest)
    loader.loadModel(
        "models/samples/fireflies/foliage02").reparentTo(self.forest)
    loader.loadModel(
        "models/samples/fireflies/foliage03").reparentTo(self.forest)
    loader.loadModel(
        "models/samples/fireflies/foliage04").reparentTo(self.forest)
    loader.loadModel(
        "models/samples/fireflies/foliage05").reparentTo(self.forest)
    loader.loadModel(
        "models/samples/fireflies/foliage06").reparentTo(self.forest)
    loader.loadModel(
        "models/samples/fireflies/foliage07").reparentTo(self.forest)
    loader.loadModel(
        "models/samples/fireflies/foliage08").reparentTo(self.forest)
    loader.loadModel(
        "models/samples/fireflies/foliage09").reparentTo(self.forest)
    self.forest.hide(BitMask32(self.lightMask | self.plainMask))

    # Cause the final results to be rendered into the main window on a card.

    cm = CardMaker("card")
    cm.setFrameFullscreenQuad()
    self.card = render2d.attachNewNode(cm.generate())
    self.card.setTexture(self.texFinal)

    # Post the instructions.

    self.title = addTitle(
        "Panda3D: Tutorial - Fireflies using Deferred Shading")
    self.inst1 = addInstructions(0.95, "ESC: Quit")
    self.inst2 = addInstructions(
        0.90, "Up/Down: More / Fewer Fireflies (Count: unknown)")
    self.inst3 = addInstructions(
        0.85, "Right/Left: Bigger / Smaller Fireflies (Radius: unknown)")
    self.inst4 = addInstructions(0.80, "V: View the render-to-texture results")

    # Panda contains a built-in viewer that lets you view the results of
    # your render-to-texture operations.  This code configures the viewer.

    base.bufferViewer.setPosition("llcorner")
    base.bufferViewer.setCardSize(0, 0.40)
    base.bufferViewer.setLayout("vline")
    self.toggleCards()
    self.toggleCards()

    # Firefly parameters.

    self.fireflies = []
    self.sequences = []
    self.scaleseqs = []
    self.glowspheres = []
    self.fireflysize = 1.0
    self.spheremodel = loader.loadModel("models/misc/sphere.flt")
    self.setFireflySize(25.0)
    while (len(self.fireflies) < 5):
        self.addFirefly()
    self.updateReadout()

    # These allow you to change parameters in realtime.

    self.accept("escape", sys.exit, [0])
    self.accept("arrow_up", self.incFireflyCount, [1.1111111])
    self.accept("arrow_down", self.decFireflyCount, [0.9000000])
    self.accept("arrow_right", self.setFireflySize, [1.1111111])
    self.accept("arrow_left", self.setFireflySize, [0.9000000])
    self.accept("v", self.toggleCards)
    self.accept("V", self.toggleCards)

    self.nextadd = 0
    taskMgr.add(self.spawnTask, "spawner")
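
# Hedged sketch (the helper itself is not shown in this excerpt): one
# plausible shape for the makeFBO() method called at the top of __init__
# above.  The exact framebuffer properties and creation flags are
# assumptions based on the usual Panda3D pattern for offscreen buffers with
# auxiliary bitplanes, not a copy of the original helper.
def makeFBO(self, name, auxrgba):
    # Demand a buffer that tracks the host window's size, can bind every
    # bitplane to a texture, keeps a depth buffer, and refuses to open a
    # separate window of its own.
    winprops = WindowProperties()
    props = FrameBufferProperties()
    props.setRgbColor(1)
    props.setAlphaBits(1)
    props.setDepthBits(1)
    props.setAuxRgba(auxrgba)
    return base.graphicsEngine.makeOutput(
        base.pipe, name, -2, props, winprops,
        GraphicsPipe.BFSizeTrackHost | GraphicsPipe.BFCanBindEvery |
        GraphicsPipe.BFRttCumulative | GraphicsPipe.BFRefuseWindow,
        base.win.getGsg(), base.win)
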
def renderSceneInto(self, depthtex=None, colortex=None, auxtex=None,
                    auxbits=0, textures=None):
    """ Causes the scene to be rendered into the supplied textures
    instead of into the original window.  Puts a fullscreen quad
    into the original window to show the render-to-texture results.
    Returns the quad.  Normally, the caller would then apply a shader
    to the quad.

    To elaborate on how this all works:

    * An offscreen buffer is created.  It is set up to mimic
      the original display region - it is the same size,
      uses the same clear colors, and contains a DisplayRegion
      that uses the original camera.

    * A fullscreen quad and an orthographic camera to render
      that quad are both created.  The original camera is
      removed from the original window, and in its place, the
      orthographic quad-camera is installed.

    * The fullscreen quad is textured with the data from the
      offscreen buffer.  A shader is applied that tints the
      results pink.

    * Automatic shader generation is NOT enabled.  If you have a
      filter that depends on a render target from the auto-shader,
      you either need to set an auto-shader attrib on the main
      camera or scene, or you need to provide those outputs in
      your own shader.

    * All clears are disabled on the original display region.
      If the display region fills the whole window, then clears
      are disabled on the original window as well.  It is
      assumed that rendering the full-screen quad eliminates
      the need to do clears.

    Hence, the original window, which used to contain the actual
    scene, now contains a pink-tinted quad with a texture of the
    scene.  It is assumed that the user will replace the shader
    on the quad with a more interesting filter. """

    if (textures):
        colortex = textures.get("color", None)
        depthtex = textures.get("depth", None)
        auxtex = textures.get("aux", None)

    if (colortex == None):
        colortex = Texture("filter-base-color")
        colortex.setWrapU(Texture.WMClamp)
        colortex.setWrapV(Texture.WMClamp)

    texgroup = (depthtex, colortex, auxtex, None)

    # Choose the size of the offscreen buffer.
    (winx, winy) = self.getScaledSize(1, 1, 1)
    buffer = self.createBuffer("filter-base", winx, winy, texgroup)
    if (buffer == None):
        return None

    cm = CardMaker("filter-base-quad")
    cm.setFrameFullscreenQuad()
    quad = NodePath(cm.generate())
    quad.setDepthTest(0)
    quad.setDepthWrite(0)
    quad.setTexture(colortex)
    quad.setColor(Vec4(1, 0.5, 0.5, 1))

    cs = NodePath("dummy")
    cs.setState(self.camstate)
    # Do we really need to turn on the Shader Generator?
    #cs.setShaderAuto()
    if (auxbits):
        cs.setAttrib(AuxBitplaneAttrib.make(auxbits))
    self.camera.node().setInitialState(cs.getState())

    quadcamnode = Camera("filter-quad-cam")
    lens = OrthographicLens()
    lens.setFilmSize(2, 2)
    lens.setFilmOffset(0, 0)
    lens.setNearFar(-1000, 1000)
    quadcamnode.setLens(lens)
    quadcam = quad.attachNewNode(quadcamnode)
    self.region.setCamera(quadcam)

    dr = buffer.getDisplayRegion(0)
    self.setStackedClears(dr, self.rclears, self.wclears)
    if (auxtex):
        dr.setClearActive(GraphicsOutput.RTPAuxRgba0, 1)
        dr.setClearValue(GraphicsOutput.RTPAuxRgba0, Vec4(0.5, 0.5, 1.0, 0.0))
    self.region.disableClears()
    if (self.isFullscreen()):
        self.win.disableClears()

    dr.setCamera(self.camera)
    dr.setActive(1)

    self.buffers.append(buffer)
    self.sizes.append((1, 1, 1))

    return quad
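
# Hedged usage sketch (not part of the original class): the typical way a
# caller wires renderSceneInto() to a post-processing shader.  The shader
# file "invert.sha" and the input name "tex" are illustrative assumptions;
# "manager" is an instance of this filter-manager class.
def exampleInvertFilter(manager):
    scenetex = Texture("filter-scene-color")
    quad = manager.renderSceneInto(colortex=scenetex)
    if quad is None:
        return None  # buffer creation can fail on weak hardware
    quad.setShader(Shader.load("invert.sha"))   # hypothetical filter shader
    quad.setShaderInput("tex", scenetex)        # feed the captured scene back in
    return quad
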
def __init__(self):

    # Create a texture into which we can copy the main window.

    self.tex = Texture()
    self.tex.setMinfilter(Texture.FTLinear)
    base.win.addRenderTexture(self.tex,
        GraphicsOutput.RTMTriggeredCopyTexture)

    # Create another 2D camera.  Tell it to render before the main camera.

    self.backcam = base.makeCamera2d(base.win, sort=-10)
    self.background = NodePath("background")
    self.backcam.reparentTo(self.background)
    self.background.setDepthTest(0)
    self.background.setDepthWrite(0)
    self.backcam.node().getDisplayRegion(0).setClearDepthActive(0)

    # Obtain two texture cards.  One renders before the dragon, the other after.

    self.bcard = base.win.getTextureCard()
    self.bcard.reparentTo(self.background)
    self.bcard.setTransparency(1)
    self.fcard = base.win.getTextureCard()
    self.fcard.reparentTo(render2d)
    self.fcard.setTransparency(1)

    # Initialize one of the nice effects.

    self.chooseEffectGhost()

    # Add the task that initiates the screenshots.

    taskMgr.add(self.takeSnapShot, "takeSnapShot")

    # Create some black squares on top of which we will
    # place the instructions.

    blackmaker = CardMaker("blackmaker")
    blackmaker.setColor(0, 0, 0, 1)
    blackmaker.setFrame(-1.00, -0.50, 0.65, 1.00)
    instcard = NodePath(blackmaker.generate())
    instcard.reparentTo(render2d)
    blackmaker.setFrame(-0.5, 0.5, -1.00, -0.85)
    titlecard = NodePath(blackmaker.generate())
    titlecard.reparentTo(render2d)

    # Panda does its best to hide the differences between DirectX and
    # OpenGL.  But there are a few differences that it cannot hide.
    # One such difference is that when OpenGL copies from a
    # visible window to a texture, it gets it right-side-up.  When
    # DirectX does it, it gets it upside-down.  There is nothing panda
    # can do to compensate except to expose a flag and let the
    # application programmer deal with it.  You should only do this
    # in the rare event that you're copying from a visible window
    # to a texture.

    if (base.win.getGsg().getCopyTextureInverted()):
        print("Copy texture is inverted.")
        self.bcard.setScale(1, 1, -1)
        self.fcard.setScale(1, 1, -1)

    # Put up the instructions.

    title = OnscreenText(text="Panda3D: Tutorial - Motion Trails",
                         style=1, fg=(1, 1, 1, 1), font=font,
                         pos=(0, -0.95), scale=.07)

    instr0 = addInstructions(0.95, "Press ESC to exit")
    instr1 = addInstructions(0.90, "Press 1: Ghost effect")
    instr2 = addInstructions(0.85, "Press 2: PaintBrush effect")
    instr3 = addInstructions(0.80, "Press 3: Double Vision effect")
    instr4 = addInstructions(0.75, "Press 4: Wings of Blue effect")
    instr5 = addInstructions(0.70, "Press 5: Whirlpool effect")

    # Enable the key events.

    self.accept("escape", sys.exit, [0])
    self.accept("1", self.chooseEffectGhost)
    self.accept("2", self.chooseEffectPaintBrush)
    self.accept("3", self.chooseEffectDoubleVision)
    self.accept("4", self.chooseEffectWingsOfBlue)
    self.accept("5", self.chooseEffectWhirlpool)
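
# Hedged sketch (the task itself is not shown above): how takeSnapShot()
# might drive the RTMTriggeredCopyTexture mode set up in __init__.  The
# rate-limiting attributes self.clickrate and self.nextclick are assumed to
# be set by the chooseEffect* methods; the essential call is
# base.win.triggerCopy(), which asks Panda to copy the window into self.tex
# at the end of the frame.  Assumes "from direct.task import Task".
def takeSnapShot(self, task):
    if task.time > self.nextclick:
        self.nextclick += 1.0 / self.clickrate  # assumed copies-per-second rate
        if self.nextclick < task.time:
            self.nextclick = task.time
        base.win.triggerCopy()                  # schedule the copy into self.tex
    return Task.cont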