def setupLights(self):
    # This function sets up some default lighting
    lAttrib = LightAttrib.makeAllOff()
    ambientLight = AmbientLight("ambientLight")
    ambientLight.setColor(Vec4(.8, .8, .8, 1))
    lAttrib = lAttrib.addLight(ambientLight)
    directionalLight = DirectionalLight("directionalLight")
    directionalLight.setDirection(Vec3(0, 45, -45))
    directionalLight.setColor(Vec4(0.2, 0.2, 0.2, 1))
    lAttrib = lAttrib.addLight(directionalLight)
    render.attachNewNode(directionalLight)
    render.attachNewNode(ambientLight)
    render.node().setAttrib(lAttrib)
def setupLights(self):
    # Sets up some default lighting
    lAttrib = LightAttrib.makeAllOff()
    ambientLight = AmbientLight("ambientLight")
    ambientLight.setColor(Vec4(.4, .4, .35, 1))
    lAttrib = lAttrib.addLight(ambientLight)
    directionalLight = DirectionalLight("directionalLight")
    directionalLight.setDirection(Vec3(0, 8, -2.5))
    directionalLight.setColor(Vec4(0.9, 0.8, 0.9, 1))
    lAttrib = lAttrib.addLight(directionalLight)
    render.attachNewNode(directionalLight)
    render.attachNewNode(ambientLight)
    render.node().setAttrib(lAttrib)
def setupLights(self):
    lAttrib = LightAttrib.makeAllOff()
    ambientLight = AmbientLight("ambientLight")
    ambientLight.setColor(Vec4(.8, .8, .75, 1))
    lAttrib = lAttrib.addLight(ambientLight)
    directionalLight = DirectionalLight("directionalLight")
    directionalLight.setDirection(Vec3(0, 0, -2.5))
    directionalLight.setColor(Vec4(0.9, 0.8, 0.9, 1))
    lAttrib = lAttrib.addLight(directionalLight)
    render.attachNewNode(directionalLight)
    render.attachNewNode(ambientLight)
    render.node().setAttrib(lAttrib)
def setupLights(self):
    lAttrib = LightAttrib.makeAllOff()
    ambientLight = AmbientLight("ambientLight")
    ambientLight.setColor(Vec4(.4, .4, .35, 1))
    lAttrib = lAttrib.addLight(ambientLight)
    directionalLight = DirectionalLight("directionalLight")
    directionalLight.setDirection(Vec3(0, 8, -2.5))
    directionalLight.setColor(Vec4(0.9, 0.8, 0.9, 1))
    lAttrib = lAttrib.addLight(directionalLight)
    # Set lighting on the teapot so the steam doesn't get affected
    self.t.attachNewNode(directionalLight)
    self.t.attachNewNode(ambientLight)
    self.t.node().setAttrib(lAttrib)
def addBrightness(self, light, amount):
    color = light.node().getColor()
    h, s, b = colorsys.rgb_to_hsv(color[0], color[1], color[2])
    brightness = restrain(b + amount)
    r, g, b = colorsys.hsv_to_rgb(h, s, brightness)
    light.node().setColor(Vec4(r, g, b, 1))
    self.updateStatusLabel()
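# addBrightness relies on a restrain() helper that is defined elsewhere in the
# disco-lights sample and is not shown here. A minimal sketch of what it is
# assumed to do (clamp the adjusted brightness into the valid [0, 1] range):
def restrain(value, minimum=0.0, maximum=1.0):
    # Assumed helper: clamp 'value' so the HSV brightness stays in range.
    return min(max(value, minimum), maximum)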
def setupLights(self):
    # Create some lights and add them to the scene. By setting the lights on
    # render they affect the entire scene.
    # Check out the lighting tutorial for more information on lights.
    lAttrib = LightAttrib.makeAllOff()
    ambientLight = AmbientLight("ambientLight")
    ambientLight.setColor(Vec4(.4, .4, .35, 1))
    lAttrib = lAttrib.addLight(ambientLight)
    directionalLight = DirectionalLight("directionalLight")
    directionalLight.setDirection(Vec3(0, 8, -2.5))
    directionalLight.setColor(Vec4(0.9, 0.8, 0.9, 1))
    lAttrib = lAttrib.addLight(directionalLight)
    render.attachNewNode(directionalLight)
    render.attachNewNode(ambientLight)
    render.node().setAttrib(lAttrib)

    # Explicitly set the environment to not be lit
    lAttrib = LightAttrib.makeAllOff()
    self.env.node().setAttrib(lAttrib)
def makeFilterBuffer(srcbuffer, name, sort, prog):
    blurBuffer = base.win.makeTextureBuffer(name, 512, 512)
    blurBuffer.setSort(sort)
    blurBuffer.setClearColor(Vec4(1, 0, 0, 1))
    blurCamera = base.makeCamera2d(blurBuffer)
    blurScene = NodePath("new Scene")
    blurCamera.node().setScene(blurScene)
    shader = Shader.load(prog)
    card = srcbuffer.getTextureCard()
    card.reparentTo(blurScene)
    card.setShader(shader)
    return blurBuffer
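# For context, the glow sample further down chains two of these buffers to build
# a separable blur (the X pass feeding the Y pass) and composites the result
# additively over the main scene. A minimal usage sketch, with shader paths
# taken from that sample; glowBuffer is assumed to be the glow-scene buffer.
blurXBuffer = makeFilterBuffer(glowBuffer, "Blur X", -2, "samples/glow/glow_xblur.sha")
blurYBuffer = makeFilterBuffer(blurXBuffer, "Blur Y", -1, "samples/glow/glow_yblur.sha")
finalcard = blurYBuffer.getTextureCard()
finalcard.reparentTo(render2d)
finalcard.setAttrib(ColorBlendAttrib.make(ColorBlendAttrib.MAdd))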
def __init__(self):
    self.testTexture = loader.loadTexture("maps/envir_reeds.png")
    self.accept("1", self.toggleTex)
    self.accept("2", self.toggleLightsSide)
    self.accept("3", self.toggleLightsUp)
    self.LightsOn = False
    self.LightsOn1 = False
    slight = Spotlight('slight')
    slight.setColor(Vec4(1, 1, 1, 1))
    lens = PerspectiveLens()
    slight.setLens(lens)
    self.slnp = render.attachNewNode(slight.upcastToLensNode())
    self.slnp1 = render.attachNewNode(slight.upcastToLensNode())
def update(self, task=None):
    """Updates the shader inputs that need to be updated every frame.
    Normally, you shouldn't call this, it's being called in a task."""
    if self.configuration.has_key("VolumetricLighting"):
        caster = self.configuration["VolumetricLighting"].caster
        casterpos = Point2()
        self.manager.camera.node().getLens().project(
            caster.getPos(self.manager.camera), casterpos)
        self.finalQuad.setShaderInput(
            "casterpos",
            Vec4(casterpos.getX() * 0.5 + 0.5,
                 (casterpos.getY() * 0.5 + 0.5), 0, 0))
    if task != None:
        return task.cont
def renderQuadInto(self, mul=1, div=1, align=1, depthtex=None,
                   colortex=None, auxtex0=None, auxtex1=None):
    """ Creates an offscreen buffer for an intermediate computation.
    Installs a quad into the buffer. Returns the fullscreen quad.
    The size of the buffer is initially equal to the size of the main
    window. The parameters 'mul', 'div', and 'align' can be used to
    adjust that size. """
    texgroup = (depthtex, colortex, auxtex0, auxtex1)
    winx, winy = self.getScaledSize(mul, div, align)
    depthbits = bool(depthtex != None)
    buffer = self.createBuffer("filter-stage", winx, winy, texgroup, depthbits)
    if (buffer == None):
        return None
    cm = CardMaker("filter-stage-quad")
    cm.setFrameFullscreenQuad()
    quad = NodePath(cm.generate())
    quad.setDepthTest(0)
    quad.setDepthWrite(0)
    quad.setColor(Vec4(1, 0.5, 0.5, 1))
    quadcamnode = Camera("filter-quad-cam")
    lens = OrthographicLens()
    lens.setFilmSize(2, 2)
    lens.setFilmOffset(0, 0)
    lens.setNearFar(-1000, 1000)
    quadcamnode.setLens(lens)
    quadcam = quad.attachNewNode(quadcamnode)
    buffer.getDisplayRegion(0).setCamera(quadcam)
    buffer.getDisplayRegion(0).setActive(1)
    self.buffers.append(buffer)
    self.sizes.append((mul, div, align))
    return quad
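# The bloom setup in CommonFilters.reconfigure (below) is the main caller of
# this method. A minimal sketch of the pattern, assuming a FilterManager
# instance named 'manager', a loadShader helper as used in CommonFilters, and
# a scene color texture previously produced by renderSceneInto:
bloom0 = Texture("bloom0")
quad = manager.renderQuadInto(colortex=bloom0, div=2, align=2)
quad.setShaderInput("src", sceneColorTexture)    # assumed: texture from renderSceneInto
quad.setShader(loadShader("filter_bloomi.sha"))  # shader name from CommonFilters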
def drawLeaf(nodePath, vdata, pos=Vec3(0, 0, 0),
             vecList=[Vec3(0, 0, 1), Vec3(1, 0, 0), Vec3(0, -1, 0)],
             scale=0.125):
    # Use the vectors that describe the direction the branch grows to make the
    # right rotation matrix
    newCs = Mat4(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
    newCs.setRow(0, vecList[2])  # right
    newCs.setRow(1, vecList[1])  # up
    newCs.setRow(2, vecList[0])  # forward
    newCs.setRow(3, Vec3(0, 0, 0))
    newCs.setCol(3, Vec4(0, 0, 0, 1))
    axisAdj = Mat4.scaleMat(scale) * newCs * Mat4.translateMat(pos)
    # Originally made the leaf out of geometry, but that didn't look good.
    # There should also be a better way to handle the leaf texture than
    # hardcoding the filename.
    leafModel = loader.loadModel("models/samples/fractal_plants/shrubbery")
    leafTexture = loader.loadTexture("models/samples/fractal_plants/material-10-cl.png")
    leafModel.reparentTo(nodePath)
    leafModel.setTexture(leafTexture, 1)
    leafModel.setTransform(TransformState.makeMat(axisAdj))
            # decrease dimensions when we branch
            length = Vec3(length.getX() / 2, length.getY() / 2, length.getZ() / 1.1)
            for i in range(numCopies):
                makeFractalTree(bodydata, nodePath, length, newPos,
                                numIterations - 1, numCopies, randomAxis(vecList))
        else:
            # just make another branch connected to this one with a small
            # variation in direction
            makeFractalTree(bodydata, nodePath, length, newPos,
                            numIterations - 1, numCopies, smallRandomAxis(vecList))
    else:
        drawBody(nodePath, bodydata, pos, vecList, length.getX(), False)
        drawLeaf(nodePath, bodydata, pos, vecList)


alight = AmbientLight('alight')
alight.setColor(Vec4(0.5, 0.5, 0.5, 1))
alnp = render.attachNewNode(alight)
render.setLight(alnp)

slight = Spotlight('slight')
slight.setColor(Vec4(1, 1, 1, 1))
lens = PerspectiveLens()
slight.setLens(lens)
slnp = render.attachNewNode(slight.upcastToLensNode())
render.setLight(slnp)
slnp.setPos(0, 0, 40)

# rotating light to show that normals are calculated correctly
def updateLight(task):
    global slnp
def __init__(self):
    base.disableMouse()
    base.setBackgroundColor(0, 0, 0)
    camera.setPos(0, -50, 0)

    # Check video card capabilities.
    if (base.win.getGsg().getSupportsBasicShaders() == 0):
        addTitle("Glow Filter: Video driver reports that shaders are not supported.")
        return

    # Use class 'CommonFilters' to enable a bloom filter.
    # The brightness of a pixel is measured using a weighted average
    # of R,G,B,A. We put all the weight on Alpha, meaning that for
    # us, the framebuffer's alpha channel controls bloom.
    self.filters = CommonFilters(base.win, base.cam)
    filterok = self.filters.setBloom(blend=(0, 0, 0, 1), desat=-0.5,
                                     intensity=3.0, size="small")
    if (filterok == False):
        addTitle("Toon Shader: Video card not powerful enough to do image postprocessing")
        return
    self.glowSize = 1

    # Post the instructions
    self.title = addTitle("Panda3D: Tutorial - Glow Filter")
    self.inst1 = addInstructions(0.95, "ESC: Quit")
    self.inst2 = addInstructions(0.90, "Space: Toggle Glow Filter Small/Med/Large/Off")
    self.inst3 = addInstructions(0.85, "Enter: Toggle Running/Spinning")
    self.inst4 = addInstructions(0.80, "V: View the render-to-texture results")

    # Load our model
    self.tron = Actor()
    self.tron.loadModel("samples/glow/tron")
    self.tron.loadAnims({"running": "samples/glow/models/tron_anim"})
    self.tron.reparentTo(render)
    self.interval = self.tron.hprInterval(60, Point3(360, 0, 0))
    self.interval.loop()
    self.isRunning = False

    # Put some lighting on the model
    dlight = DirectionalLight('dlight')
    alight = AmbientLight('alight')
    dlnp = render.attachNewNode(dlight)
    alnp = render.attachNewNode(alight)
    dlight.setColor(Vec4(1.0, 0.7, 0.2, 1))
    alight.setColor(Vec4(0.2, 0.2, 0.2, 1))
    dlnp.setHpr(0, -60, 0)
    render.setLight(dlnp)
    render.setLight(alnp)

    # Panda contains a built-in viewer that lets you view the results of
    # your render-to-texture operations. This code configures the viewer.
    self.accept("v", base.bufferViewer.toggleEnable)
    self.accept("V", base.bufferViewer.toggleEnable)
    base.bufferViewer.setPosition("llcorner")
    base.bufferViewer.setLayout("hline")
    #base.camLens.setFov(100)

    # Event handling
    self.accept("space", self.toggleGlow)
    self.accept("enter", self.toggleDisplay)
    self.accept("escape", sys.exit, [0])
def decreaseCutoff(self):
    self.cutoff = self.cutoff * 0.90000000
    print self.cutoff
    self.drawnScene.setShaderInput(
        "cutoff", Vec4(self.cutoff, self.cutoff, self.cutoff, self.cutoff))
def __init__(self):
    base.disableMouse()
    camera.setPos(0, -50, 0)

    # Check video card capabilities.
    if (base.win.getGsg().getSupportsBasicShaders() == 0):
        addTitle("Toon Shader: Video driver reports that shaders are not supported.")
        return

    # Post the instructions.
    self.title = addTitle("Panda3D: Tutorial - Toon Shading with Normals-Based Inking")
    self.inst1 = addInstructions(0.95, "ESC: Quit")
    self.inst2 = addInstructions(0.90, "Up/Down: Increase/Decrease Line Thickness")
    self.inst3 = addInstructions(0.85, "Left/Right: Decrease/Increase Line Darkness")
    self.inst4 = addInstructions(0.80, "V: View the render-to-texture results")

    # This shader's job is to render the model with discrete lighting
    # levels. The lighting calculations built into the shader assume
    # a single nonattenuating point light.
    tempnode = NodePath(PandaNode("temp node"))
    tempnode.setShader(Shader.load(os.path.join(PANDA_SHADER_PATH,
        "samples/cartoon/cartoon_lighting.sha")))
    base.cam.node().setInitialState(tempnode.getState())

    # This is the object that represents the single "light", as far as
    # the shader is concerned. It's not a real Panda3D LightNode, but
    # the shader doesn't care about that.
    light = render.attachNewNode("light")
    light.setPos(30, -50, 0)

    # This call puts the light's nodepath into the render state.
    # This enables the shader to access this light by name.
    render.setShaderInput("light", light)

    # The "normals buffer" will contain a picture of the model colorized
    # so that the color of the model is a representation of the model's
    # normal at that point.
    normalsBuffer = base.win.makeTextureBuffer("normalsBuffer", 0, 0)
    normalsBuffer.setClearColor(Vec4(0.5, 0.5, 0.5, 1))
    self.normalsBuffer = normalsBuffer
    normalsCamera = base.makeCamera(normalsBuffer, lens=base.cam.node().getLens())
    normalsCamera.node().setScene(render)
    tempnode = NodePath(PandaNode("temp node"))
    tempnode.setShader(Shader.load(os.path.join(PANDA_SHADER_PATH,
        "samples/cartoon/cartoon_normal.sha")))
    normalsCamera.node().setInitialState(tempnode.getState())

    # What we actually do to put edges on screen is apply them as a texture
    # to a transparent screen-fitted card.
    drawnScene = normalsBuffer.getTextureCard()
    drawnScene.setTransparency(1)
    drawnScene.setColor(1, 1, 1, 0)
    drawnScene.reparentTo(render2d)
    self.drawnScene = drawnScene

    # This shader accepts, as input, the picture from the normals buffer.
    # It compares each adjacent pixel, looking for discontinuities.
    # Wherever a discontinuity exists, it emits black ink.
    self.separation = 0.001
    self.cutoff = 0.3
    inkGen = Shader.load(os.path.join(PANDA_SHADER_PATH,
        "samples/cartoon/cartoon_ink.sha"))
    drawnScene.setShader(inkGen)
    drawnScene.setShaderInput("separation",
                              Vec4(self.separation, 0, self.separation, 0))
    drawnScene.setShaderInput("cutoff",
                              Vec4(self.cutoff, self.cutoff, self.cutoff, self.cutoff))

    # Panda contains a built-in viewer that lets you view the results of
    # your render-to-texture operations. This code configures the viewer.
    self.accept("v", base.bufferViewer.toggleEnable)
    self.accept("V", base.bufferViewer.toggleEnable)
    base.bufferViewer.setPosition("llcorner")

    # Load a dragon model and animate it.
    self.character = Actor()
    self.character.loadModel('models/samples/cartoon/nik_dragon')
    self.character.reparentTo(render)
    self.character.loadAnims({'win': 'models/samples/cartoon/nik_dragon'})
    self.character.loop('win')
    self.character.hprInterval(15, Point3(360, 0, 0)).loop()

    # These allow you to change cartooning parameters in realtime
    self.accept("escape", sys.exit, [0])
    self.accept("arrow_up", self.increaseSeparation)
    self.accept("arrow_down", self.decreaseSeparation)
    self.accept("arrow_left", self.increaseCutoff)
    self.accept("arrow_right", self.decreaseCutoff)
def decreaseSeparation(self):
    self.separation = self.separation * 0.90000000
    print self.separation
    self.drawnScene.setShaderInput(
        "separation", Vec4(self.separation, 0, self.separation, 0))
def increaseCutoff(self):
    self.cutoff = self.cutoff * 1.11111111
    print self.cutoff
    self.drawnScene.setShaderInput(
        "cutoff", Vec4(self.cutoff, self.cutoff, self.cutoff, self.cutoff))
def __init__(self):
    # Post the instructions.
    self.title = addTitle("Panda3D: Tutorial - Using Render-to-Texture")
    self.inst1 = addInstructions(0.95, "ESC: Quit")
    self.inst2 = addInstructions(0.90, "Up/Down: Zoom in/out on the Teapot")
    self.inst3 = addInstructions(0.85, "Left/Right: Move teapot left/right")
    self.inst4 = addInstructions(0.80, "V: View the render-to-texture results")

    # We get a handle to the default window
    mainWindow = base.win

    # We now get the buffer that's going to hold the texture of our new scene
    altBuffer = mainWindow.makeTextureBuffer("hello", 256, 256)

    # Now we have to set up a new scene graph to make this scene
    altRender = NodePath("new render")

    # This takes care of setting up the camera properly
    self.altCam = base.makeCamera(altBuffer)
    self.altCam.reparentTo(altRender)
    self.altCam.setPos(0, -10, 0)

    # Get the teapot and rotate it for a simple animation
    self.teapot = loader.loadModel('models/teapot')
    self.teapot.reparentTo(altRender)
    self.teapot.setPos(0, 0, -1)
    self.teapot.hprInterval(1.5, Point3(360, 360, 360)).loop()

    # Put some lighting on the teapot
    dlight = DirectionalLight('dlight')
    alight = AmbientLight('alight')
    dlnp = altRender.attachNewNode(dlight)
    alnp = altRender.attachNewNode(alight)
    dlight.setColor(Vec4(0.8, 0.8, 0.5, 1))
    alight.setColor(Vec4(0.2, 0.2, 0.2, 1))
    dlnp.setHpr(0, -60, 0)
    altRender.setLight(dlnp)
    altRender.setLight(alnp)

    # Panda contains a built-in viewer that lets you view the results of
    # your render-to-texture operations. This code configures the viewer.
    self.accept("v", base.bufferViewer.toggleEnable)
    self.accept("V", base.bufferViewer.toggleEnable)
    base.bufferViewer.setPosition("llcorner")
    base.bufferViewer.setCardSize(1.0, 0.0)

    # Create the tv-men. Each TV-man will display the
    # offscreen-texture on his TV screen.
    self.tvMen = []
    self.makeTvMan(-5, 30, 1, altBuffer.getTexture(), 0.9)
    self.makeTvMan(5, 30, 1, altBuffer.getTexture(), 1.4)
    self.makeTvMan(0, 23, -3, altBuffer.getTexture(), 2.0)
    self.makeTvMan(-5, 20, -6, altBuffer.getTexture(), 1.1)
    self.makeTvMan(5, 18, -5, altBuffer.getTexture(), 1.7)

    self.accept("escape", sys.exit, [0])
    self.accept("arrow_up", self.zoomIn)
    self.accept("arrow_down", self.zoomOut)
    self.accept("arrow_left", self.moveLeft)
    self.accept("arrow_right", self.moveRight)
def increaseSeparation(self):
    self.separation = self.separation * 1.11111111
    print self.separation
    self.drawnScene.setShaderInput(
        "separation", Vec4(self.separation, 0, self.separation, 0))
def __init__(self):
    base.disableMouse()
    base.setBackgroundColor(0, 0, 0)
    camera.setPos(0, -50, 0)

    # Check video card capabilities.
    if (base.win.getGsg().getSupportsBasicShaders() == 0):
        addTitle("Glow Filter: Video driver reports that shaders are not supported.")
        return

    # Post the instructions
    self.title = addTitle("Panda3D: Tutorial - Glow Filter")
    self.inst1 = addInstructions(0.95, "ESC: Quit")
    self.inst2 = addInstructions(0.90, "Space: Toggle Glow Filter On/Off")
    self.inst3 = addInstructions(0.85, "Enter: Toggle Running/Spinning")
    self.inst4 = addInstructions(0.80, "V: View the render-to-texture results")

    # Create the shader that will determine what parts of the scene will glow
    glowShader = Shader.load(os.path.join(PANDA_SHADER_PATH,
        "samples/glow/glow_shader.sha"))

    # Load our model
    self.tron = Actor()
    self.tron.loadModel("models/samples/glow/tron")
    self.tron.loadAnims({"running": "models/samples/glow/tron_anim"})
    self.tron.reparentTo(render)
    self.interval = self.tron.hprInterval(60, Point3(360, 0, 0))
    self.interval.loop()
    self.isRunning = False

    # Put some lighting on the tron model
    dlight = DirectionalLight('dlight')
    alight = AmbientLight('alight')
    dlnp = render.attachNewNode(dlight)
    alnp = render.attachNewNode(alight)
    dlight.setColor(Vec4(1.0, 0.7, 0.2, 1))
    alight.setColor(Vec4(0.2, 0.2, 0.2, 1))
    dlnp.setHpr(0, -60, 0)
    render.setLight(dlnp)
    render.setLight(alnp)

    # Create the glow buffer. This buffer renders like a normal scene,
    # except that only the glowing materials should show up nonblack.
    glowBuffer = base.win.makeTextureBuffer("Glow scene", 512, 512)
    glowBuffer.setSort(-3)
    glowBuffer.setClearColor(Vec4(0, 0, 0, 1))

    # We have to attach a camera to the glow buffer. The glow camera
    # must have the same frustum as the main camera. As long as the aspect
    # ratios match, the rest will take care of itself.
    glowCamera = base.makeCamera(glowBuffer, lens=base.cam.node().getLens())

    # Tell the glow camera to use the glow shader
    tempnode = NodePath(PandaNode("temp node"))
    tempnode.setShader(glowShader)
    glowCamera.node().setInitialState(tempnode.getState())

    # Set up the pipeline: from glow scene to blur x to blur y to main window.
    blurXBuffer = makeFilterBuffer(glowBuffer, "Blur X", -2,
        os.path.join(PANDA_SHADER_PATH, "samples/glow/glow_xblur.sha"))
    blurYBuffer = makeFilterBuffer(blurXBuffer, "Blur Y", -1,
        os.path.join(PANDA_SHADER_PATH, "samples/glow/glow_yblur.sha"))
    self.finalcard = blurYBuffer.getTextureCard()
    self.finalcard.reparentTo(render2d)
    self.finalcard.setAttrib(ColorBlendAttrib.make(ColorBlendAttrib.MAdd))

    # Panda contains a built-in viewer that lets you view the results of
    # your render-to-texture operations. This code configures the viewer.
    self.accept("v", base.bufferViewer.toggleEnable)
    self.accept("V", base.bufferViewer.toggleEnable)
    base.bufferViewer.setPosition("llcorner")
    base.bufferViewer.setLayout("hline")
    base.bufferViewer.setCardSize(0.652, 0)

    # Event handling
    self.accept("space", self.toggleGlow)
    self.accept("enter", self.toggleDisplay)
    self.accept("escape", sys.exit, [0])

    self.glowOn = True
def reconfigure(self, fullrebuild, changed):
    """ Reconfigure is called whenever any configuration change is made. """
    configuration = self.configuration

    if (fullrebuild):
        self.cleanup()
        if (len(configuration) == 0):
            return
        auxbits = 0
        needtex = {}
        needtex["color"] = True
        if (configuration.has_key("CartoonInk")):
            needtex["aux"] = True
            auxbits |= AuxBitplaneAttrib.ABOAuxNormal
        if (configuration.has_key("Bloom")):
            needtex["bloom0"] = True
            needtex["bloom1"] = True
            needtex["bloom2"] = True
            needtex["bloom3"] = True
            auxbits |= AuxBitplaneAttrib.ABOGlow
        if (configuration.has_key("ViewGlow")):
            auxbits |= AuxBitplaneAttrib.ABOGlow
        for tex in needtex:
            self.textures[tex] = Texture("scene-" + tex)
            self.textures[tex].setWrapU(Texture.WMClamp)
            self.textures[tex].setWrapV(Texture.WMClamp)
            needtexpix = True
        self.finalQuad = self.manager.renderSceneInto(textures=self.textures,
                                                      auxbits=auxbits)
        if (self.finalQuad == None):
            self.cleanup()
            return False

        if (configuration.has_key("Bloom")):
            bloomconf = configuration["Bloom"]
            bloom0 = self.textures["bloom0"]
            bloom1 = self.textures["bloom1"]
            bloom2 = self.textures["bloom2"]
            bloom3 = self.textures["bloom3"]
            if (bloomconf.size == "large"):
                scale = 8
                downsampler = "filter_down4.sha"
            elif (bloomconf.size == "medium"):
                scale = 4
                downsampler = "filter_copy.sha"
            else:
                scale = 2
                downsampler = "filter_copy.sha"
            self.bloom.append(self.manager.renderQuadInto(colortex=bloom0, div=2, align=scale))
            self.bloom.append(self.manager.renderQuadInto(colortex=bloom1, div=scale, align=scale))
            self.bloom.append(self.manager.renderQuadInto(colortex=bloom2, div=scale, align=scale))
            self.bloom.append(self.manager.renderQuadInto(colortex=bloom3, div=scale, align=scale))
            self.bloom[0].setShaderInput("src", self.textures["color"])
            self.bloom[0].setShader(self.loadShader("filter_bloomi.sha"))
            self.bloom[1].setShaderInput("src", bloom0)
            self.bloom[1].setShader(self.loadShader(downsampler))
            self.bloom[2].setShaderInput("src", bloom1)
            self.bloom[2].setShader(self.loadShader("filter_bloomx.sha"))
            self.bloom[3].setShaderInput("src", bloom2)
            self.bloom[3].setShader(self.loadShader("filter_bloomy.sha"))

        text = "//Cg\n"
        text += "void vshader(float4 vtx_position : POSITION,\n"
        text += " out float4 l_position : POSITION,\n"
        text += " uniform float4 texpad_txcolor,\n"
        text += " uniform float4 texpix_txcolor,\n"
        text += " out float4 l_texcoordC : TEXCOORD0,\n"
        if (configuration.has_key("CartoonInk")):
            text += " uniform float4 texpad_txaux,\n"
            text += " uniform float4 texpix_txaux,\n"
            text += " out float4 l_texcoordN : TEXCOORD1,\n"
        if (configuration.has_key("Bloom")):
            text += " uniform float4 texpad_txbloom3,\n"
            text += " out float4 l_texcoordB : TEXCOORD2,\n"
        text += " uniform float4x4 mat_modelproj)\n"
        text += "{\n"
        text += " l_position=mul(mat_modelproj, vtx_position);\n"
        text += " l_texcoordC=(vtx_position.xzxz * texpad_txcolor) + texpad_txcolor;\n"
        if (configuration.has_key("CartoonInk")):
            text += " l_texcoordN=(vtx_position.xzxz * texpad_txaux) + texpad_txaux;\n"
        if (configuration.has_key("Bloom")):
            text += " l_texcoordB=(vtx_position.xzxz * texpad_txbloom3) + texpad_txbloom3;\n"
        if (configuration.has_key("HalfPixelShift")):
            text += " l_texcoordC+=texpix_txcolor*0.5;\n"
            if (configuration.has_key("CartoonInk")):
                text += " l_texcoordN+=texpix_txaux*0.5;\n"
        text += "}\n"
        text += "void fshader(\n"
        text += "float4 l_texcoordC : TEXCOORD0,\n"
        text += "uniform float4 texpix_txcolor,\n"
        if (configuration.has_key("CartoonInk")):
            text += "float4 l_texcoordN : TEXCOORD1,\n"
            text += "uniform float4 texpix_txaux,\n"
        if (configuration.has_key("Bloom")):
            text += "float4 l_texcoordB : TEXCOORD2,\n"
        for key in self.textures:
            text += "uniform sampler2D k_tx" + key + ",\n"
        if (configuration.has_key("CartoonInk")):
            text += "uniform float4 k_cartoonseparation,\n"
        if (configuration.has_key("VolumetricLighting")):
            text += "uniform float4 k_casterpos,\n"
            text += "uniform float4 k_vlparams,\n"
        text += "out float4 o_color : COLOR)\n"
        text += "{\n"
        text += " o_color = tex2D(k_txcolor, l_texcoordC.xy);\n"
        if (configuration.has_key("CartoonInk")):
            text += CARTOON_BODY
        if (configuration.has_key("Bloom")):
            text += "o_color = saturate(o_color);\n"
            text += "float4 bloom = 0.5*tex2D(k_txbloom3, l_texcoordB.xy);\n"
            text += "o_color = 1-((1-bloom)*(1-o_color));\n"
        if (configuration.has_key("ViewGlow")):
            text += "o_color.r = o_color.a;\n"
        if (configuration.has_key("VolumetricLighting")):
            text += "float decay = 1.0f;\n"
            text += "float2 curcoord = l_texcoordC.xy;\n"
            text += "float2 lightdir = curcoord - k_casterpos.xy;\n"
            text += "lightdir *= k_vlparams.y;\n"
            text += "half4 sample = tex2D(k_txcolor, curcoord);\n"
            text += "float3 vlcolor = sample.rgb * sample.a;\n"
            text += "for (int i = 0; i < k_vlparams.x; i++) {\n"
            text += " curcoord -= lightdir;\n"
            text += " sample = tex2D(k_txcolor, curcoord);\n"
            text += " sample *= sample.a * decay;//*weight\n"
            text += " vlcolor += sample.rgb;\n"
            text += " decay *= k_vlparams.z;\n"
            text += "}\n"
            text += "o_color += float4(vlcolor * k_vlparams.w, 1);\n"
        if (configuration.has_key("Inverted")):
            text += "o_color = float4(1, 1, 1, 1) - o_color;\n"
        text += "}\n"

        self.finalQuad.setShader(Shader.make(text))
        for tex in self.textures:
            self.finalQuad.setShaderInput("tx" + tex, self.textures[tex])

    if (changed == "CartoonInk") or fullrebuild:
        if (configuration.has_key("CartoonInk")):
            separation = configuration["CartoonInk"]
            self.finalQuad.setShaderInput("cartoonseparation",
                                          Vec4(separation, 0, separation, 0))

    if (changed == "Bloom") or fullrebuild:
        if (configuration.has_key("Bloom")):
            bloomconf = configuration["Bloom"]
            intensity = bloomconf.intensity * 3.0
            self.bloom[0].setShaderInput("blend",
                                         bloomconf.blendx, bloomconf.blendy,
                                         bloomconf.blendz, bloomconf.blendw * 2.0)
            self.bloom[0].setShaderInput("trigger", bloomconf.mintrigger,
                                         1.0 / (bloomconf.maxtrigger - bloomconf.mintrigger),
                                         0.0, 0.0)
            self.bloom[0].setShaderInput("desat", bloomconf.desat)
            self.bloom[3].setShaderInput("intensity", intensity, intensity,
                                         intensity, intensity)

    if (changed == "VolumetricLighting") or fullrebuild:
        if (configuration.has_key("VolumetricLighting")):
            config = configuration["VolumetricLighting"]
            tcparam = config.density / float(config.numsamples)
            self.finalQuad.setShaderInput("vlparams", config.numsamples,
                                          tcparam, config.decay, config.exposure)

    self.update()
    return True
def __init__(self):
    # Preliminary capabilities check.
    if (base.win.getGsg().getSupportsBasicShaders() == 0):
        self.t = addTitle("Firefly Demo: Video driver reports that shaders are not supported.")
        return
    if (base.win.getGsg().getSupportsDepthTexture() == 0):
        self.t = addTitle("Firefly Demo: Video driver reports that depth textures are not supported.")
        return

    # This algorithm uses two offscreen buffers, one of which has
    # an auxiliary bitplane, and the offscreen buffers share a single
    # depth buffer. This is a heck of a complicated buffer setup.
    self.modelbuffer = self.makeFBO("model buffer", 1)
    self.lightbuffer = self.makeFBO("light buffer", 0)

    # Creation of a high-powered buffer can fail, if the graphics card
    # doesn't support the necessary OpenGL extensions.
    if (self.modelbuffer == None) or (self.lightbuffer == None):
        self.t = addTitle("Toon Shader: Video driver does not support multiple render targets")
        return

    # Create four render textures: depth, normal, albedo, and final.
    # Attach them to the various bitplanes of the offscreen buffers.
    self.texDepth = Texture()
    self.texDepth.setFormat(Texture.FDepthStencil)
    self.texAlbedo = Texture()
    self.texNormal = Texture()
    self.texFinal = Texture()
    self.modelbuffer.addRenderTexture(self.texDepth,
        GraphicsOutput.RTMBindOrCopy, GraphicsOutput.RTPDepthStencil)
    self.modelbuffer.addRenderTexture(self.texAlbedo,
        GraphicsOutput.RTMBindOrCopy, GraphicsOutput.RTPColor)
    self.modelbuffer.addRenderTexture(self.texNormal,
        GraphicsOutput.RTMBindOrCopy, GraphicsOutput.RTPAuxRgba0)
    self.lightbuffer.addRenderTexture(self.texFinal,
        GraphicsOutput.RTMBindOrCopy, GraphicsOutput.RTPColor)

    # Set the near and far clipping planes.
    base.cam.node().getLens().setNear(50.0)
    base.cam.node().getLens().setFar(500.0)
    lens = base.cam.node().getLens()

    # This algorithm uses three cameras: one to render the models into the
    # model buffer, one to render the lights into the light buffer, and
    # one to render "plain" (non-deferred shaded) stuff into the light
    # buffer. Each camera has a bitmask to identify it.
    self.modelMask = 1
    self.lightMask = 2
    self.plainMask = 4
    self.modelcam = base.makeCamera(self.modelbuffer, lens=lens,
                                    scene=render, mask=self.modelMask)
    self.lightcam = base.makeCamera(self.lightbuffer, lens=lens,
                                    scene=render, mask=self.lightMask)
    self.plaincam = base.makeCamera(self.lightbuffer, lens=lens,
                                    scene=render, mask=self.plainMask)

    # Panda's main camera is not used.
    base.cam.node().setActive(0)

    # Take explicit control over the order in which the three
    # buffers are rendered.
    self.modelbuffer.setSort(1)
    self.lightbuffer.setSort(2)
    base.win.setSort(3)

    # Within the light buffer, control the order of the two cams.
    self.lightcam.node().getDisplayRegion(0).setSort(1)
    self.plaincam.node().getDisplayRegion(0).setSort(2)

    # By default, panda usually clears the screen before every
    # camera and before every window. Tell it not to do that.
    # Then, tell it specifically when to clear and what to clear.
    self.modelcam.node().getDisplayRegion(0).disableClears()
    self.lightcam.node().getDisplayRegion(0).disableClears()
    self.plaincam.node().getDisplayRegion(0).disableClears()
    base.cam.node().getDisplayRegion(0).disableClears()
    base.cam2d.node().getDisplayRegion(0).disableClears()
    self.modelbuffer.disableClears()
    base.win.disableClears()
    self.modelbuffer.setClearColorActive(1)
    self.modelbuffer.setClearDepthActive(1)
    self.lightbuffer.setClearColorActive(1)
    self.lightbuffer.setClearColor(Vec4(0, 0, 0, 1))

    # Miscellaneous stuff.
    base.disableMouse()
    base.camera.setPos(-9.112, -211.077, 46.951)
    base.camera.setHpr(0, -7.5, 2.4)
    base.setBackgroundColor(Vec4(0, 0, 0, 0))
    random.seed()

    # Calculate the projection parameters for the final shader.
    # The math here is too complex to explain in an inline comment;
    # I've put a full explanation into the HTML intro.
    proj = base.cam.node().getLens().getProjectionMat()
    proj_x = 0.5 * proj.getCell(3, 2) / proj.getCell(0, 0)
    proj_y = 0.5 * proj.getCell(3, 2)
    proj_z = 0.5 * proj.getCell(3, 2) / proj.getCell(2, 1)
    proj_w = -0.5 - 0.5 * proj.getCell(1, 2)

    # Configure the render state of the model camera.
    tempnode = NodePath(PandaNode("temp node"))
    tempnode.setAttrib(AlphaTestAttrib.make(RenderAttrib.MGreaterEqual, 0.5))
    tempnode.setShader(Shader.load(os.path.join(PANDA_SHADER_PATH,
        "samples/fireflies/fireflies_model.sha")))
    tempnode.setAttrib(DepthTestAttrib.make(RenderAttrib.MLessEqual))
    self.modelcam.node().setInitialState(tempnode.getState())

    # Configure the render state of the light camera.
    tempnode = NodePath(PandaNode("temp node"))
    tempnode.setShader(Shader.load(os.path.join(PANDA_SHADER_PATH,
        "samples/fireflies/fireflies_lighting.sha")))
    tempnode.setShaderInput("texnormal", self.texNormal)
    tempnode.setShaderInput("texalbedo", self.texAlbedo)
    tempnode.setShaderInput("texdepth", self.texDepth)
    tempnode.setShaderInput("proj", Vec4(proj_x, proj_y, proj_z, proj_w))
    tempnode.setAttrib(ColorBlendAttrib.make(ColorBlendAttrib.MAdd,
        ColorBlendAttrib.OOne, ColorBlendAttrib.OOne))
    tempnode.setAttrib(CullFaceAttrib.make(CullFaceAttrib.MCullCounterClockwise))
    # The next line causes problems on Linux.
    #tempnode.setAttrib(DepthTestAttrib.make(RenderAttrib.MGreaterEqual))
    tempnode.setAttrib(DepthWriteAttrib.make(DepthWriteAttrib.MOff))
    self.lightcam.node().setInitialState(tempnode.getState())

    # Configure the render state of the plain camera.
    rs = RenderState.makeEmpty()
    self.plaincam.node().setInitialState(rs)

    # Clear any render attribs on the root node. This is necessary
    # because by default, panda assigns some attribs to the root
    # node. These default attribs will override the
    # carefully-configured render attribs that we just attached
    # to the cameras. The simplest solution is to just clear
    # them all out.
    render.setState(RenderState.makeEmpty())

    # My artist created a model in which some of the polygons
    # don't have textures. This confuses the shader I wrote.
    # This little hack guarantees that everything has a texture.
    white = loader.loadTexture("models/samples/fireflies/white.jpg")
    render.setTexture(white, 0)

    # Create two subroots, to help speed cull traversal.
    self.lightroot = NodePath(PandaNode("lightroot"))
    self.lightroot.reparentTo(render)
    self.modelroot = NodePath(PandaNode("modelroot"))
    self.modelroot.reparentTo(render)
    self.lightroot.hide(BitMask32(self.modelMask))
    self.modelroot.hide(BitMask32(self.lightMask))
    self.modelroot.hide(BitMask32(self.plainMask))

    # Load the model of a forest. Make it visible to the model camera.
    self.forest = NodePath(PandaNode("Forest Root"))
    self.forest.reparentTo(render)
    loader.loadModel("models/samples/fireflies/background").reparentTo(self.forest)
    loader.loadModel("models/samples/fireflies/foliage01").reparentTo(self.forest)
    loader.loadModel("models/samples/fireflies/foliage02").reparentTo(self.forest)
    loader.loadModel("models/samples/fireflies/foliage03").reparentTo(self.forest)
    loader.loadModel("models/samples/fireflies/foliage04").reparentTo(self.forest)
    loader.loadModel("models/samples/fireflies/foliage05").reparentTo(self.forest)
    loader.loadModel("models/samples/fireflies/foliage06").reparentTo(self.forest)
    loader.loadModel("models/samples/fireflies/foliage07").reparentTo(self.forest)
    loader.loadModel("models/samples/fireflies/foliage08").reparentTo(self.forest)
    loader.loadModel("models/samples/fireflies/foliage09").reparentTo(self.forest)
    self.forest.hide(BitMask32(self.lightMask | self.plainMask))

    # Cause the final results to be rendered into the main window on a card.
    cm = CardMaker("card")
    cm.setFrameFullscreenQuad()
    self.card = render2d.attachNewNode(cm.generate())
    self.card.setTexture(self.texFinal)

    # Post the instructions.
    self.title = addTitle("Panda3D: Tutorial - Fireflies using Deferred Shading")
    self.inst1 = addInstructions(0.95, "ESC: Quit")
    self.inst2 = addInstructions(0.90, "Up/Down: More / Fewer Fireflies (Count: unknown)")
    self.inst3 = addInstructions(0.85, "Right/Left: Bigger / Smaller Fireflies (Radius: unknown)")
    self.inst4 = addInstructions(0.80, "V: View the render-to-texture results")

    # Panda contains a built-in viewer that lets you view the results of
    # your render-to-texture operations. This code configures the viewer.
    base.bufferViewer.setPosition("llcorner")
    base.bufferViewer.setCardSize(0, 0.40)
    base.bufferViewer.setLayout("vline")
    self.toggleCards()
    self.toggleCards()

    # Firefly parameters
    self.fireflies = []
    self.sequences = []
    self.scaleseqs = []
    self.glowspheres = []
    self.fireflysize = 1.0
    self.spheremodel = loader.loadModel("models/misc/sphere.flt")
    self.setFireflySize(25.0)
    while (len(self.fireflies) < 5):
        self.addFirefly()
    self.updateReadout()

    # These allow you to change parameters in realtime
    self.accept("escape", sys.exit, [0])
    self.accept("arrow_up", self.incFireflyCount, [1.1111111])
    self.accept("arrow_down", self.decFireflyCount, [0.9000000])
    self.accept("arrow_right", self.setFireflySize, [1.1111111])
    self.accept("arrow_left", self.setFireflySize, [0.9000000])
    self.accept("v", self.toggleCards)
    self.accept("V", self.toggleCards)

    self.nextadd = 0
    taskMgr.add(self.spawnTask, "spawner")
def __init__(self):
    base.disableMouse()
    base.setBackgroundColor(0, 0, 0)
    taskMgr.add(self.updateScene, "updateScene")

    # Show the instructions
    self.title = addTitle("Panda3D: Tutorial - Distortion Effect")
    self.inst1 = addInstructions(0.95, "ESC: Quit")
    self.inst2 = addInstructions(0.90, "Space: Toggle distortion filter On/Off")
    self.inst4 = addInstructions(0.85, "V: View the render-to-texture results")

    # Load background
    self.seascape = loader.loadModel("models/samples/distortion/plane")
    self.seascape.reparentTo(render)
    self.seascape.setPosHpr(0, 145, 0, 0, 0, 0)
    self.seascape.setScale(100)
    self.seascape.setTexture(loader.loadTexture(
        "models/samples/distortion/ocean.jpg"))

    # Create the distortion buffer. This buffer renders like a normal scene,
    self.distortionBuffer = self.makeFBO("model buffer")
    self.distortionBuffer.setSort(-3)
    self.distortionBuffer.setClearColor(Vec4(0, 0, 0, 0))

    # We have to attach a camera to the distortion buffer. The distortion
    # camera must have the same frustum as the main camera. As long
    # as the aspect ratios match, the rest will take care of itself.
    distortionCamera = base.makeCamera(self.distortionBuffer, scene=render,
        lens=base.cam.node().getLens(), mask=BitMask32.bit(4))

    # Load the object with the distortion
    self.distortionObject = loader.loadModel("models/samples/distortion/boat")
    self.distortionObject.setScale(1)
    self.distortionObject.setPos(0, 20, -3)
    self.distortionObject.hprInterval(10, Point3(360, 0, 0)).loop()
    self.distortionObject.reparentTo(render)

    # Create the shader that will determine what parts of the scene will
    # be distorted
    distortionShader = loader.loadShader(os.path.join(PANDA_SHADER_PATH,
        "samples/distortion/distortion.sha"))
    self.distortionObject.setShader(distortionShader)
    self.distortionObject.hide(BitMask32.bit(4))

    # Textures
    tex1 = loader.loadTexture("models/samples/distortion/water.png")
    self.distortionObject.setShaderInput("waves", tex1)
    self.texDistortion = Texture()
    self.distortionBuffer.addRenderTexture(self.texDistortion,
        GraphicsOutput.RTMBindOrCopy, GraphicsOutput.RTPColor)
    self.distortionObject.setShaderInput("screen", self.texDistortion)

    # Panda contains a built-in viewer that lets you view the results of
    # your render-to-texture operations. This code configures the viewer.
    self.accept("v", base.bufferViewer.toggleEnable)
    self.accept("V", base.bufferViewer.toggleEnable)
    base.bufferViewer.setPosition("llcorner")
    base.bufferViewer.setLayout("hline")
    base.bufferViewer.setCardSize(0.652, 0)

    # Event handling
    self.accept("space", self.toggleDistortion)
    self.accept("escape", exit, [0])

    self.distortionOn = True
def __init__(self):
    # The main initialization of our class
    # This creates the on screen title that is in every tutorial
    self.title = OnscreenText(text="Panda3D: Tutorial - Lighting", style=1,
                              fg=(1, 1, 0, 1), font=font,
                              pos=(0.87, -0.95), scale=.07)

    # Creates labels used for onscreen instructions
    self.ambientText = self.makeStatusLabel(0)
    self.directionalText = self.makeStatusLabel(1)
    self.spotlightText = self.makeStatusLabel(2)
    self.pointLightText = self.makeStatusLabel(3)
    self.spinningText = self.makeStatusLabel(4)
    self.ambientBrightnessText = self.makeStatusLabel(5)
    self.directionalBrightnessText = self.makeStatusLabel(6)
    self.spotlightBrightnessText = self.makeStatusLabel(7)
    self.spotlightExponentText = self.makeStatusLabel(8)
    self.lightingPerPixelText = self.makeStatusLabel(9)

    self.disco = loader.loadModel("models/samples/disco_lights/disco_hall")
    self.disco.reparentTo(render)
    self.disco.setPosHpr(0, 50, -4, 90, 0, 0)

    # First we create an ambient light. All objects are affected by ambient
    # light equally
    # Create and name the ambient light
    self.ambientLight = render.attachNewNode(AmbientLight("ambientLight"))
    # Set the color of the ambient light
    self.ambientLight.node().setColor(Vec4(.1, .1, .1, 1))
    # The light is enabled later with render.setLight

    # Now we create a directional light. Directional lights add shading from a
    # given angle. This is good for far away sources like the sun
    self.directionalLight = render.attachNewNode(DirectionalLight("directionalLight"))
    self.directionalLight.node().setColor(Vec4(.35, .35, .35, 1))
    # The direction of a directional light is set as a 3D vector
    self.directionalLight.node().setDirection(Vec3(1, 1, -2))

    # Now we create a spotlight. Spotlights light objects in a given cone.
    # They are good for simulating things like flashlights
    self.spotlight = camera.attachNewNode(Spotlight("spotlight"))
    self.spotlight.node().setColor(Vec4(.45, .45, .45, 1))
    # The cone of a spotlight is controlled by its lens. This creates the lens
    self.spotlight.node().setLens(PerspectiveLens())
    # This sets the Field of View (fov) of the lens, in degrees for width and
    # height. The lower the numbers, the tighter the spotlight.
    self.spotlight.node().getLens().setFov(16, 16)
    # Attenuation controls how the light fades with distance. The three values
    # represent the three constants (constant, linear, and quadratic) in the
    # internal lighting equation. The higher the numbers, the shorter the
    # light goes.
    self.spotlight.node().setAttenuation(Vec3(1, 0.0, 0.0))
    # This exponent value sets how soft the edge of the spotlight is. 0 means a
    # hard edge. 128 means a very soft edge.
    self.spotlight.node().setExponent(60.0)

    # Now we create three colored point lights. Point lights are lights that
    # radiate from a single point, like a light bulb. Like spotlights, they
    # are given position by attaching them to NodePaths in the world
    self.redHelper = loader.loadModel('models/samples/disco_lights/sphere')
    self.redHelper.setColor(Vec4(1, 0, 0, 1))
    self.redHelper.setPos(-6.5, -3.75, 0)
    self.redHelper.setScale(.25)
    self.redPointLight = self.redHelper.attachNewNode(PointLight("redPointLight"))
    self.redPointLight.node().setColor(Vec4(.35, 0, 0, 1))
    self.redPointLight.node().setAttenuation(Vec3(.1, 0.04, 0.0))

    # The green point light and helper
    self.greenHelper = loader.loadModel('models/samples/disco_lights/sphere')
    self.greenHelper.setColor(Vec4(0, 1, 0, 1))
    self.greenHelper.setPos(0, 7.5, 0)
    self.greenHelper.setScale(.25)
    self.greenPointLight = self.greenHelper.attachNewNode(PointLight("greenPointLight"))
    self.greenPointLight.node().setAttenuation(Vec3(.1, .04, .0))
    self.greenPointLight.node().setColor(Vec4(0, .35, 0, 1))

    # The blue point light and helper
    self.blueHelper = loader.loadModel('models/samples/disco_lights/sphere')
    self.blueHelper.setColor(Vec4(0, 0, 1, 1))
    self.blueHelper.setPos(6.5, -3.75, 0)
    self.blueHelper.setScale(.25)
    self.bluePointLight = self.blueHelper.attachNewNode(PointLight("bluePointLight"))
    self.bluePointLight.node().setAttenuation(Vec3(.1, 0.04, 0.0))
    self.bluePointLight.node().setColor(Vec4(0, 0, .35, 1))
    self.bluePointLight.node().setSpecularColor(Vec4(1))

    # Create a dummy node so the lights can be spun with one command
    self.pointLightHelper = render.attachNewNode("pointLightHelper")
    self.pointLightHelper.setPos(0, 50, 11)
    self.redHelper.reparentTo(self.pointLightHelper)
    self.greenHelper.reparentTo(self.pointLightHelper)
    self.blueHelper.reparentTo(self.pointLightHelper)

    # Finally we store the lights on the root of the scene graph.
    # This will cause them to affect everything in the scene.
    render.setLight(self.ambientLight)
    render.setLight(self.directionalLight)
    render.setLight(self.spotlight)
    render.setLight(self.redPointLight)
    render.setLight(self.greenPointLight)
    render.setLight(self.bluePointLight)

    # Create and start interval to spin the lights, and a variable to
    # manage them.
    self.pointLightsSpin = self.pointLightHelper.hprInterval(6, Vec3(360, 0, 0))
    self.pointLightsSpin.loop()
    self.arePointLightsSpinning = True

    # Per-pixel lighting is initially off
    self.perPixelEnabled = False

    # Listen to keys for controlling the lights
    self.accept("escape", sys.exit)
    self.accept("a", self.toggleLights, [[self.ambientLight]])
    self.accept("d", self.toggleLights, [[self.directionalLight]])
    self.accept("s", self.toggleLights, [[self.spotlight]])
    self.accept("p", self.toggleLights, [[self.redPointLight,
                                          self.greenPointLight,
                                          self.bluePointLight]])
    self.accept("r", self.toggleSpinningPointLights)
    self.accept("l", self.togglePerPixelLighting)
    self.accept("z", self.addBrightness, [self.ambientLight, -.05])
    self.accept("x", self.addBrightness, [self.ambientLight, .05])
    self.accept("c", self.addBrightness, [self.directionalLight, -.05])
    self.accept("v", self.addBrightness, [self.directionalLight, .05])
    self.accept("b", self.addBrightness, [self.spotlight, -.05])
    self.accept("n", self.addBrightness, [self.spotlight, .05])
    self.accept("q", self.adjustSpotlightExponent, [self.spotlight, -1])
    self.accept("w", self.adjustSpotlightExponent, [self.spotlight, 1])

    # Finally call the function that builds the instruction texts
    self.updateStatusLabel()
def renderSceneInto(self, depthtex=None, colortex=None, auxtex=None,
                    auxbits=0, textures=None):
    """ Causes the scene to be rendered into the supplied textures
    instead of into the original window. Puts a fullscreen quad
    into the original window to show the render-to-texture results.
    Returns the quad. Normally, the caller would then apply a shader
    to the quad.

    To elaborate on how this all works:

    * An offscreen buffer is created. It is set up to mimic
      the original display region - it is the same size,
      uses the same clear colors, and contains a DisplayRegion
      that uses the original camera.

    * A fullscreen quad and an orthographic camera to render
      that quad are both created. The original camera is
      removed from the original window, and in its place, the
      orthographic quad-camera is installed.

    * The fullscreen quad is textured with the data from the
      offscreen buffer. A shader is applied that tints the
      results pink.

    * Automatic shader generation NOT enabled.
      If you have a filter that depends on a render target from
      the auto-shader, you either need to set an auto-shader
      attrib on the main camera or scene, or, you need to
      provide these outputs in your own shader.

    * All clears are disabled on the original display region.
      If the display region fills the whole window, then clears
      are disabled on the original window as well. It is
      assumed that rendering the full-screen quad eliminates
      the need to do clears.

    Hence, the original window which used to contain the actual
    scene, now contains a pink-tinted quad with a texture of the
    scene. It is assumed that the user will replace the shader
    on the quad with a more interesting filter. """
    if (textures):
        colortex = textures.get("color", None)
        depthtex = textures.get("depth", None)
        auxtex = textures.get("aux", None)
    if (colortex == None):
        colortex = Texture("filter-base-color")
        colortex.setWrapU(Texture.WMClamp)
        colortex.setWrapV(Texture.WMClamp)
    texgroup = (depthtex, colortex, auxtex, None)

    # Choose the size of the offscreen buffer.
    (winx, winy) = self.getScaledSize(1, 1, 1)
    buffer = self.createBuffer("filter-base", winx, winy, texgroup)
    if (buffer == None):
        return None

    cm = CardMaker("filter-base-quad")
    cm.setFrameFullscreenQuad()
    quad = NodePath(cm.generate())
    quad.setDepthTest(0)
    quad.setDepthWrite(0)
    quad.setTexture(colortex)
    quad.setColor(Vec4(1, 0.5, 0.5, 1))

    cs = NodePath("dummy")
    cs.setState(self.camstate)
    # Do we really need to turn on the Shader Generator?
    #cs.setShaderAuto()
    if (auxbits):
        cs.setAttrib(AuxBitplaneAttrib.make(auxbits))
    self.camera.node().setInitialState(cs.getState())

    quadcamnode = Camera("filter-quad-cam")
    lens = OrthographicLens()
    lens.setFilmSize(2, 2)
    lens.setFilmOffset(0, 0)
    lens.setNearFar(-1000, 1000)
    quadcamnode.setLens(lens)
    quadcam = quad.attachNewNode(quadcamnode)
    self.region.setCamera(quadcam)

    dr = buffer.getDisplayRegion(0)
    self.setStackedClears(dr, self.rclears, self.wclears)
    if (auxtex):
        dr.setClearActive(GraphicsOutput.RTPAuxRgba0, 1)
        dr.setClearValue(GraphicsOutput.RTPAuxRgba0, Vec4(0.5, 0.5, 1.0, 0.0))
    self.region.disableClears()
    if (self.isFullscreen()):
        self.win.disableClears()

    dr.setCamera(self.camera)
    dr.setActive(1)

    self.buffers.append(buffer)
    self.sizes.append((1, 1, 1))
    return quad
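# CommonFilters.reconfigure (elsewhere in this section) is the typical caller
# of this entry point. A minimal standalone usage sketch, assuming a
# FilterManager instance named 'manager' and a hypothetical post-process
# shader file; it captures the scene into a color texture and filters it on
# the returned quad.
colortex = Texture("filter-base-color")
quad = manager.renderSceneInto(colortex=colortex)
if quad != None:
    quad.setShader(Shader.load("my_filter.sha"))  # hypothetical shader path
    quad.setShaderInput("txcolor", colortex)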
def __init__(self):
    # This code puts the standard title and instruction text on screen
    self.title = OnscreenText(text="Panda3D: Tutorial - Collision Detection",
                              style=1, fg=(1, 1, 1, 1), pos=(0.7, -0.95),
                              scale=.07, font=font)
    self.instructions = OnscreenText(text="Mouse pointer tilts the board",
                                     pos=(-1.3, .95), fg=(1, 1, 1, 1),
                                     font=font, align=TextNode.ALeft, scale=.05)

    self.accept("escape", sys.exit)         # Escape quits
    base.disableMouse()                     # Disable mouse-based camera control
    camera.setPosHpr(0, 0, 25, 0, -90, 0)   # Place the camera

    # Load the maze and place it in the scene
    self.maze = loader.loadModel("models/samples/ball_in_maze/maze")
    self.maze.reparentTo(render)

    # Most times, you want collisions to be tested against invisible geometry
    # rather than every polygon. This is because testing against every polygon
    # in the scene is usually too slow. You can have simplified or approximate
    # geometry for the solids and still get good results.
    #
    # Sometimes you'll want to create and position your own collision solids in
    # code, but it's often easier to have them built automatically. This can be
    # done by adding special tags into an egg file. Check maze.egg and ball.egg
    # and look for lines starting with <Collide>. The part in brackets tells
    # Panda exactly what to do. Polyset means to use the polygons in that group
    # as solids, while Sphere tells Panda to make a collision sphere around them.
    # Keep means to keep the polygons in the group as visible geometry (good
    # for the ball, not for the triggers), and descend means to make sure that
    # the settings are applied to any subgroups.
    #
    # Once we have the collision tags in the models, we can get to them using
    # NodePath's find command

    # Find the collision node named wall_collide
    self.walls = self.maze.find("**/wall_collide")

    # Collision objects are sorted using BitMasks. BitMasks are ordinary numbers
    # with extra methods for working with them as binary bits. Every collision
    # solid has both a from mask and an into mask. Before Panda tests two
    # objects, it checks to make sure that the from and into collision masks
    # have at least one bit in common. That way things that shouldn't interact
    # won't. Normal model nodes have collision masks as well. By default they
    # are set to bit 20. If you want to collide against actual visible polygons,
    # set a from collide mask to include bit 20.
    #
    # For this example, we will make everything we want the ball to collide with
    # include bit 0
    self.walls.node().setIntoCollideMask(BitMask32.bit(0))
    # CollisionNodes are usually invisible but can be shown. Uncomment the next
    # line to see the collision walls
    #self.walls.show()

    # We will now find the triggers for the holes and set their masks to 0 as
    # well. We also set their names to make them easier to identify during
    # collisions
    self.loseTriggers = []
    for i in range(6):
        trigger = self.maze.find("**/hole_collide" + str(i))
        trigger.node().setIntoCollideMask(BitMask32.bit(0))
        trigger.node().setName("loseTrigger")
        self.loseTriggers.append(trigger)
        # Uncomment this line to see the triggers
        #trigger.show()

    # Ground_collide is a single polygon on the same plane as the ground in the
    # maze. We will use a ray to collide with it so that we will know exactly
    # what height to put the ball at every frame. Since this is not something
    # that we want the ball itself to collide with, it has a different bitmask.
    self.mazeGround = self.maze.find("**/ground_collide")
    self.mazeGround.node().setIntoCollideMask(BitMask32.bit(1))

    # Load the ball and attach it to the scene.
    # It is on a root dummy node so that we can rotate the ball itself without
    # rotating the ray that will be attached to it
    self.ballRoot = render.attachNewNode("ballRoot")
    self.ball = loader.loadModel("models/samples/ball_in_maze/ball")
    self.ball.reparentTo(self.ballRoot)

    # Find the collision sphere for the ball which was created in the egg file.
    # Notice that it has a from collision mask of bit 0, and an into collision
    # mask of no bits. This means that the ball can only cause collisions, not
    # be collided into
    self.ballSphere = self.ball.find("**/ball")
    self.ballSphere.node().setFromCollideMask(BitMask32.bit(0))
    self.ballSphere.node().setIntoCollideMask(BitMask32.allOff())

    # Now we create a ray to start above the ball and cast down. This is to
    # determine the height the ball should be at and the angle the floor is
    # tilting. We could have used the sphere around the ball itself, but it
    # would not be as reliable
    self.ballGroundRay = CollisionRay()           # Create the ray
    self.ballGroundRay.setOrigin(0, 0, 10)        # Set its origin
    self.ballGroundRay.setDirection(0, 0, -1)     # And its direction
    # Collision solids go in a CollisionNode
    self.ballGroundCol = CollisionNode('groundRay')           # Create and name the node
    self.ballGroundCol.addSolid(self.ballGroundRay)           # Add the ray
    self.ballGroundCol.setFromCollideMask(BitMask32.bit(1))   # Set its bitmasks
    self.ballGroundCol.setIntoCollideMask(BitMask32.allOff())
    # Attach the node to the ballRoot so that the ray is relative to the ball
    # (it will always be 10 feet over the ball and point down)
    self.ballGroundColNp = self.ballRoot.attachNewNode(self.ballGroundCol)
    # Uncomment this line to see the ray
    #self.ballGroundColNp.show()

    # Finally, we create a CollisionTraverser. CollisionTraversers are what
    # do the job of calculating collisions
    self.cTrav = CollisionTraverser()
    # Collision traversers tell collision handlers about collisions, and then
    # the handler decides what to do with the information. We are using a
    # CollisionHandlerQueue, which simply creates a list of all of the
    # collisions in a given pass. There are more sophisticated handlers like
    # one that sends events and another that tries to keep collided objects
    # apart, but the results are often better with a simple queue
    self.cHandler = CollisionHandlerQueue()
    # Now we add the collision nodes that can create a collision to the
    # traverser. The traverser will compare these to all other nodes in the
    # scene. There is a limit of 32 CollisionNodes per traverser.
    # We add the collider, and the handler to use as a pair
    self.cTrav.addCollider(self.ballSphere, self.cHandler)
    self.cTrav.addCollider(self.ballGroundColNp, self.cHandler)

    # Collision traversers have a built-in tool to help visualize collisions.
    # Uncomment the next line to see it.
    #self.cTrav.showCollisions(render)

    # This section deals with lighting for the ball. Only the ball was lit
    # because the maze has static lighting pregenerated by the modeler
    lAttrib = LightAttrib.makeAllOff()
    ambientLight = AmbientLight("ambientLight")
    ambientLight.setColor(Vec4(.55, .55, .55, 1))
    lAttrib = lAttrib.addLight(ambientLight)
    directionalLight = DirectionalLight("directionalLight")
    directionalLight.setDirection(Vec3(0, 0, -1))
    directionalLight.setColor(Vec4(0.375, 0.375, 0.375, 1))
    directionalLight.setSpecularColor(Vec4(1, 1, 1, 1))
    lAttrib = lAttrib.addLight(directionalLight)
    self.ballRoot.node().setAttrib(lAttrib)

    # This section deals with adding a specular highlight to the ball to make
    # it look shiny
    m = Material()
    m.setSpecular(Vec4(1, 1, 1, 1))
    m.setShininess(96)
    self.ball.setMaterial(m, 1)

    # Finally, we call start for more initialization
    self.start()
def __init__(self):
    #This code puts the standard title and instruction text on screen
    self.title = OnscreenText(text="Panda3D: Tutorial - Actors", style=1,
                              fg=(0, 0, 0, 1), font=font,
                              pos=(0.8, -0.95), scale=.07)
    self.escapeEventText = self.genLabelText("ESC: Quit", 0)
    self.akeyEventText = self.genLabelText("[A]: Robot 1 Left Punch", 1)
    self.skeyEventText = self.genLabelText("[S]: Robot 1 Right Punch", 2)
    self.kkeyEventText = self.genLabelText("[K]: Robot 2 Left Punch", 3)
    self.lkeyEventText = self.genLabelText("[L]: Robot 2 Right Punch", 4)

    #Set the camera in a fixed position
    base.disableMouse()
    camera.setPosHpr(14.5, -15.4, 14, 45, -14, 0)
    base.setBackgroundColor(0, 0, 0)

    #Add lighting so that the objects are not drawn flat
    self.setupLights()

    #Load the ring
    self.ring = loader.loadModel('models/samples/boxing_robots/ring')
    self.ring.reparentTo(render)

    #Models that use skeletal animation are known as Actors instead of models.
    #Instead of just one file, they have one file for the main model and an
    #additional file for each playable animation.
    #They are loaded using Actor.Actor instead of loader.loadModel.
    #The constructor takes the location of the main model, as with a normal
    #model, and a dictionary (a Python structure that works like a lookup
    #table) that maps animation names to the appropriate files
    self.robot1 = Actor.Actor(
        'models/samples/boxing_robots/robot', {
            'leftPunch': 'models/samples/boxing_robots/robot_left_punch',
            'rightPunch': 'models/samples/boxing_robots/robot_right_punch',
            'headUp': 'models/samples/boxing_robots/robot_head_up',
            'headDown': 'models/samples/boxing_robots/robot_head_down'
        })
    #Actors need to be positioned and parented like normal objects
    self.robot1.setPosHprScale(-1, -2.5, 4, 45, 0, 0, 1.25, 1.25, 1.25)
    self.robot1.reparentTo(render)

    #We'll repeat the process for the second robot. The only things that
    #change here are the robot's color and position
    self.robot2 = Actor.Actor(
        'models/samples/boxing_robots/robot', {
            'leftPunch': 'models/samples/boxing_robots/robot_left_punch',
            'rightPunch': 'models/samples/boxing_robots/robot_right_punch',
            'headUp': 'models/samples/boxing_robots/robot_head_up',
            'headDown': 'models/samples/boxing_robots/robot_head_down'
        })
    #Set the properties of this robot
    self.robot2.setPosHprScale(1, 1.5, 4, 225, 0, 0, 1.25, 1.25, 1.25)
    self.robot2.setColor(Vec4(.7, 0, 0, 1))
    self.robot2.reparentTo(render)

    #Now we define how the animated models will move. Animations are played
    #through special intervals. In this case we use actor intervals in a
    #sequence to play the part of the punch animation where the arm extends,
    #call a function to check if the punch landed, and then play the part of
    #the animation where the arm retracts

    #Punch sequence for robot 1's left arm
    self.robot1.punchLeft = Sequence(
        #Interval for the outstretched animation
        self.robot1.actorInterval('leftPunch', startFrame=1, endFrame=10),
        Func(self.checkPunch, 2),  #Function to check if the punch was successful
        #Interval for the retract animation
        self.robot1.actorInterval('leftPunch', startFrame=11, endFrame=32))
    #Punch sequence for robot 1's right arm
    self.robot1.punchRight = Sequence(
        self.robot1.actorInterval('rightPunch', startFrame=1, endFrame=10),
        Func(self.checkPunch, 2),
        self.robot1.actorInterval('rightPunch', startFrame=11, endFrame=32))
    #Punch sequence for robot 2's left arm
    self.robot2.punchLeft = Sequence(
        self.robot2.actorInterval('leftPunch', startFrame=1, endFrame=10),
        Func(self.checkPunch, 1),
        self.robot2.actorInterval('leftPunch', startFrame=11, endFrame=32))
    #Punch sequence for robot 2's right arm
    self.robot2.punchRight = Sequence(
        self.robot2.actorInterval('rightPunch', startFrame=1, endFrame=10),
        Func(self.checkPunch, 1),
        self.robot2.actorInterval('rightPunch', startFrame=11, endFrame=32))

    #We use the same technique to create a sequence for when a robot is
    #knocked out, where the head pops up, waits a while, and then resets

    #Head animation for robot 1
    self.robot1.resetHead = Sequence(
        #Interval for the head going up. Since no start or end frames were
        #given, the entire animation is played.
        self.robot1.actorInterval('headUp'),
        Wait(3),
        #The head down animation was animated a little too quickly, so this
        #will play it at 75% of its normal speed
        self.robot1.actorInterval('headDown', playRate=.75))
    #Head animation for robot 2
    self.robot2.resetHead = Sequence(
        self.robot2.actorInterval('headUp'),
        Wait(3),
        self.robot2.actorInterval('headDown', playRate=.75))

    #Now that we have defined the motion, we can define our key input.
    #Each fist is bound to a key. When a key is pressed, self.tryPunch checks
    #to make sure that both robots have their heads down, and if they do it
    #plays the given interval
    self.accept('escape', sys.exit)
    self.accept('a', self.tryPunch, [self.robot1.punchLeft])
    self.accept('s', self.tryPunch, [self.robot1.punchRight])
    self.accept('k', self.tryPunch, [self.robot2.punchLeft])
    self.accept('l', self.tryPunch, [self.robot2.punchRight])
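The body of tryPunch is not included in this excerpt. A minimal sketch consistent with the comment above, and not necessarily the tutorial's actual implementation, would check that neither reset sequence is running and that the chosen punch is not already mid-swing before starting it; isPlaying and start are standard interval methods.

def tryPunch(self, interval):
    #Sketch only: throw the punch if neither head is popped up and this
    #arm is not already punching
    if (not self.robot1.resetHead.isPlaying() and
            not self.robot2.resetHead.isPlaying() and
            not interval.isPlaying()):
        interval.start()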
MYDIR = Filename.fromOsSpecific(MYDIR).getFullpath()
font = loader.loadFont("cmss12")

# Function to put instructions on the screen.
def addInstructions(pos, msg):
    return OnscreenText(text=msg, style=1, fg=(1, 1, 1, 1), font=font,
                        pos=(-1.3, pos), align=TextNode.ALeft, scale=.05)

# Function to put title on the screen.
def addTitle(text):
    return OnscreenText(text=text, style=1, fg=(1, 1, 1, 1), font=font,
                        pos=(1.3, -0.95), align=TextNode.ARight, scale=.07)

random.seed()
base.setBackgroundColor(Vec4(0.0, 0.0, 0.0, 1.0))

class World(DirectObject):
    def __init__(self):
        # Post the instructions.
        self.title = addTitle("Panda3D: Tutorial - Using Render-to-Texture")
        self.inst1 = addInstructions(0.95, "ESC: Quit")
        self.inst2 = addInstructions(0.90, "Up/Down: Zoom in/out on the Teapot")
        self.inst3 = addInstructions(0.85, "Left/Right: Move teapot left/right")
        self.inst4 = addInstructions(0.80, "V: View the render-to-texture results")

        # We get a handle to the default window
        mainWindow = base.win

        # We now get the buffer that's going to hold the texture of our new scene
def __init__(self):
    base.disableMouse()
    base.cam.node().getLens().setNear(10.0)
    base.cam.node().getLens().setFar(200.0)
    camera.setPos(0, -50, 0)

    # Check video card capabilities.
    if base.win.getGsg().getSupportsBasicShaders() == 0:
        addTitle("Toon Shader: Video driver reports that shaders are not supported.")
        return

    # Enable a 'light ramp' - this discretizes the lighting,
    # which is half of what makes a model look like a cartoon.
    # Light ramps only work if shader generation is enabled,
    # so we call 'setShaderAuto'.
    tempnode = NodePath(PandaNode("temp node"))
    tempnode.setAttrib(LightRampAttrib.makeSingleThreshold(0.5, 0.4))
    tempnode.setShaderAuto()
    base.cam.node().setInitialState(tempnode.getState())

    # Use class 'CommonFilters' to enable a cartoon inking filter.
    # This can fail if the video card is not powerful enough; if so,
    # display an error and exit.
    self.separation = 1  # Pixels
    self.filters = CommonFilters(base.win, base.cam)
    filterok = self.filters.setCartoonInk(separation=self.separation)
    if filterok == False:
        addTitle("Toon Shader: Video card not powerful enough to do image postprocessing")
        return

    # Post the instructions.
    self.title = addTitle("Panda3D: Tutorial - Toon Shading with Normals-Based Inking")
    self.inst1 = addInstructions(0.95, "ESC: Quit")
    self.inst2 = addInstructions(0.90, "Up/Down: Increase/Decrease Line Thickness")
    self.inst3 = addInstructions(0.85, "V: View the render-to-texture results")

    # Load a dragon model and animate it.
    self.character = Actor()
    self.character.loadModel('models/samples/cartoon/nik_dragon')
    self.character.reparentTo(render)
    self.character.loadAnims({'win': 'models/samples/cartoon/nik_dragon'})
    self.character.loop('win')
    self.character.hprInterval(15, Point3(360, 0, 0)).loop()

    # Create a non-attenuating point light and an ambient light.
    plightnode = PointLight("point light")
    plightnode.setAttenuation(Vec3(1, 0, 0))
    plight = render.attachNewNode(plightnode)
    plight.setPos(30, -50, 0)
    alightnode = AmbientLight("ambient light")
    alightnode.setColor(Vec4(0.8, 0.8, 0.8, 1))
    alight = render.attachNewNode(alightnode)
    render.setLight(alight)
    render.setLight(plight)

    # Panda contains a built-in viewer that lets you view the
    # results of all render-to-texture operations. This lets you
    # see what class CommonFilters is doing behind the scenes.
    self.accept("v", base.bufferViewer.toggleEnable)
    self.accept("V", base.bufferViewer.toggleEnable)
    base.bufferViewer.setPosition("llcorner")
    self.accept("s", self.filters.manager.resizeBuffers)

    # These allow you to change cartooning parameters in realtime
    self.accept("escape", sys.exit, [0])
    self.accept("arrow_up", self.increaseSeparation)
    self.accept("arrow_down", self.decreaseSeparation)
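The handlers bound to the arrow keys are not shown in this excerpt. A plausible sketch, assuming they simply scale self.separation and reapply the filter (the step factors are illustrative, not taken from the tutorial), is:

def increaseSeparation(self):
    self.separation = self.separation * 1.1  #Thicker ink lines (assumed factor)
    self.filters.setCartoonInk(separation=self.separation)

def decreaseSeparation(self):
    self.separation = self.separation * 0.9  #Thinner ink lines (assumed factor)
    self.filters.setCartoonInk(separation=self.separation)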
# and points straight into the scene, and see what it collides with. We pick
# the object with the closest collision

import panda3d.direct.directbase.DirectStart
from panda3d.pandac import CollisionTraverser, CollisionNode
from panda3d.pandac import CollisionHandlerQueue, CollisionRay
from panda3d.pandac import AmbientLight, DirectionalLight, LightAttrib
from panda3d.pandac import TextNode
from panda3d.pandac import Point3, Vec3, Vec4, BitMask32
from panda3d.direct.gui.OnscreenText import OnscreenText
from panda3d.direct.showbase.DirectObject import DirectObject
from panda3d.direct.task.Task import Task
import sys

#First we define some constants for the colors
BLACK = Vec4(0, 0, 0, 1)
WHITE = Vec4(1, 1, 1, 1)
HIGHLIGHT = Vec4(0, 1, 1, 1)
PIECEBLACK = Vec4(.15, .15, .15, 1)

#Now we define some helper functions that we will need later

#This function, given a line (vector plus origin point) and a desired z value,
#will give us the point on the line where the desired z value is what we want.
#This is how we know where to position an object in 3D space based on a 2D
#mouse position. It also assumes that we are dragging in the XY plane.
#
#This is derived from the equation of a line, solved for the given z value
def PointAtZ(z, point, vec):
    return point + vec * ((z - point.getZ()) / vec.getZ())
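For intuition, a quick worked example with arbitrarily chosen values: a line starting at (0, 0, 10) with direction (0, 1, -1) reaches z = 0 after travelling 10 steps along the vector, i.e. at (0, 10, 0).

origin = Point3(0, 0, 10)
direction = Vec3(0, 1, -1)
#(0 - 10) / -1 = 10, so the result is origin + direction * 10 = (0, 10, 0)
print(PointAtZ(0, origin, direction))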