Example 1
    def __init__(self, air, name=None):
        # Be careful not to create multiple NodePath objects
        try:
            self.DistributedNodeAI_initialized
        except AttributeError:
            self.DistributedNodeAI_initialized = 1
            DistributedObjectAI.DistributedObjectAI.__init__(self, air)
            if name is None:
                name = self.__class__.__name__
            NodePath.__init__(self, name)
            self.gridParent = None
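
The try/except guard above keeps the node from being initialized twice when __init__ can be reached through more than one base class (here both DistributedObjectAI and NodePath contribute to the object). A minimal, framework-free sketch of the same guard pattern; the class and flag names are illustrative:

class Base1:
    def __init__(self):
        print("Base1 init")

class Base2:
    def __init__(self):
        print("Base2 init")

class Combined(Base1, Base2):
    def __init__(self):
        # Run the setup only once, even if __init__ is entered a second time.
        if getattr(self, "_combined_initialized", False):
            return
        self._combined_initialized = True
        Base1.__init__(self)
        Base2.__init__(self)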
Example 2
    def renderQuadInto(self,
                       mul=1,
                       div=1,
                       align=1,
                       depthtex=None,
                       colortex=None,
                       auxtex0=None,
                       auxtex1=None):
        """ Creates an offscreen buffer for an intermediate
        computation. Installs a quad into the buffer.  Returns
        the fullscreen quad.  The size of the buffer is initially
        equal to the size of the main window.  The parameters 'mul',
        'div', and 'align' can be used to adjust that size. """

        texgroup = (depthtex, colortex, auxtex0, auxtex1)

        winx, winy = self.getScaledSize(mul, div, align)

        depthbits = depthtex is not None

        buffer = self.createBuffer("filter-stage", winx, winy, texgroup,
                                   depthbits)

        if buffer is None:
            return None

        cm = CardMaker("filter-stage-quad")
        cm.setFrameFullscreenQuad()
        quad = NodePath(cm.generate())
        quad.setDepthTest(0)
        quad.setDepthWrite(0)
        quad.setColor(Vec4(1, 0.5, 0.5, 1))

        quadcamnode = Camera("filter-quad-cam")
        lens = OrthographicLens()
        lens.setFilmSize(2, 2)
        lens.setFilmOffset(0, 0)
        lens.setNearFar(-1000, 1000)
        quadcamnode.setLens(lens)
        quadcam = quad.attachNewNode(quadcamnode)

        buffer.getDisplayRegion(0).setCamera(quadcam)
        buffer.getDisplayRegion(0).setActive(1)

        self.buffers.append(buffer)
        self.sizes.append((mul, div, align))

        return quad
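
A typical way to use renderQuadInto is to build an intermediate filter stage and drive it with a texture captured from the main scene. The sketch below assumes the enclosing class behaves like Panda3D's direct.filter.FilterManager; the shader path and shader input name are illustrative:

from direct.filter.FilterManager import FilterManager
from panda3d.core import Texture, Shader

manager = FilterManager(base.win, base.cam)                # assumes a running ShowBase
scene_tex = Texture()
final_quad = manager.renderSceneInto(colortex=scene_tex)   # main scene -> scene_tex
stage_tex = Texture()
stage_quad = manager.renderQuadInto(colortex=stage_tex)    # offscreen filter stage
stage_quad.setShader(Shader.load("filter.sha"))            # shader path is illustrative
stage_quad.setShaderInput("src", scene_tex)                # stage reads the captured scene
final_quad.setTexture(stage_tex, 1)                        # display the filtered result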
Example 3
def makeFilterBuffer(srcbuffer, name, sort, prog):
    blurBuffer = base.win.makeTextureBuffer(name, 512, 512)
    blurBuffer.setSort(sort)
    blurBuffer.setClearColor(Vec4(1, 0, 0, 1))
    blurCamera = base.makeCamera2d(blurBuffer)
    blurScene = NodePath("new Scene")
    blurCamera.node().setScene(blurScene)
    shader = Shader.load(prog)
    card = srcbuffer.getTextureCard()
    card.reparentTo(blurScene)
    card.setShader(shader)
    return blurBuffer
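
A hedged usage sketch: in a glow pipeline this helper is typically called once per blur pass, each call reading the previous buffer's texture card. The glowBuffer variable, sort values, and shader file names below are illustrative:

blurXBuffer = makeFilterBuffer(glowBuffer, "Blur X", -2, "x_blur_shader.sha")
blurYBuffer = makeFilterBuffer(blurXBuffer, "Blur Y", -1, "y_blur_shader.sha")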
Example 4
  def regenTree(self):
    forest = render.findAllMatches("Tree Holder")
    forest.detach()


    bodydata=GeomVertexData("body vertices", self.format, Geom.UHStatic)

    treeNodePath=NodePath("Tree Holder")
    makeFractalTree(bodydata, treeNodePath,Vec3(4,4,7), Vec3(0,0,0),self.numIterations, self.numCopies)

    treeNodePath.setTexture(self.barkTexture,1)
    treeNodePath.reparentTo(render)
Example 5
  def addTree(self):

    bodydata=GeomVertexData("body vertices", self.format, Geom.UHStatic)

    randomPlace=Vec3(200*random.random()-100, 200*random.random()-100, 0)
    #randomPlace.normalize()


    treeNodePath=NodePath("Tree Holder")
    makeFractalTree(bodydata, treeNodePath,Vec3(4,4,7), randomPlace, self.numIterations, self.numCopies)

    treeNodePath.setTexture(self.barkTexture,1)
    treeNodePath.reparentTo(render)
Example 6
    def renderQuadInto(self, mul=1, div=1, align=1, depthtex=None, colortex=None, auxtex0=None, auxtex1=None):

        """ Creates an offscreen buffer for an intermediate
        computation. Installs a quad into the buffer.  Returns
        the fullscreen quad.  The size of the buffer is initially
        equal to the size of the main window.  The parameters 'mul',
        'div', and 'align' can be used to adjust that size. """

        texgroup = (depthtex, colortex, auxtex0, auxtex1)

        winx, winy = self.getScaledSize(mul, div, align)

        depthbits = depthtex is not None

        buffer = self.createBuffer("filter-stage", winx, winy, texgroup, depthbits)

        if buffer is None:
            return None

        cm = CardMaker("filter-stage-quad")
        cm.setFrameFullscreenQuad()
        quad = NodePath(cm.generate())
        quad.setDepthTest(0)
        quad.setDepthWrite(0)
        quad.setColor(Vec4(1, 0.5, 0.5, 1))

        quadcamnode = Camera("filter-quad-cam")
        lens = OrthographicLens()
        lens.setFilmSize(2, 2)
        lens.setFilmOffset(0, 0)
        lens.setNearFar(-1000, 1000)
        quadcamnode.setLens(lens)
        quadcam = quad.attachNewNode(quadcamnode)

        buffer.getDisplayRegion(0).setCamera(quadcam)
        buffer.getDisplayRegion(0).setActive(1)

        self.buffers.append(buffer)
        self.sizes.append((mul, div, align))

        return quad
Example 7
  def __init__(self):
    formatArray=GeomVertexArrayFormat()
    formatArray.addColumn(InternalName.make("drawFlag"), 1, Geom.NTUint8, Geom.COther)

    format=GeomVertexFormat(GeomVertexFormat.getV3n3cpt2())
    format.addArray(formatArray)
    self.format=GeomVertexFormat.registerFormat(format)

    bodydata=GeomVertexData("body vertices", format, Geom.UHStatic)

    self.barkTexture=loader.loadTexture( \
      "models/samples/fractal_plants/bark.jpg")
    treeNodePath=NodePath("Tree Holder")
    makeFractalTree(bodydata,treeNodePath,Vec3(4,4,7))

    treeNodePath.setTexture(self.barkTexture,1)
    treeNodePath.reparentTo(render)

    self.accept("q", self.regenTree)
    self.accept("w", self.addTree)
    self.accept("arrow_up", self.upIterations)
    self.accept("arrow_down", self.downIterations)
    self.accept("arrow_right", self.upCopies)
    self.accept("arrow_left", self.downCopies)

    self.numIterations=11
    self.numCopies=4


    self.upDownEvent = OnscreenText(
      text="Up/Down: Increase/Decrease the number of iterations (" + str(self.numIterations) + ")",
      style=1, fg=(1, 1, 1, 1), pos=(-1.3, 0.85), font=font,
      align=TextNode.ALeft, scale=.05, mayChange=True)

    self.leftRightEvent = OnscreenText(
      text="Left/Right: Increase/Decrease branching (" + str(self.numCopies) + ")",
      style=1, fg=(1, 1, 1, 1), pos=(-1.3, 0.80), font=font,
      align=TextNode.ALeft, scale=.05, mayChange=True)
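
The arrow-key handlers bound above are not part of this excerpt. A plausible sketch of the iteration handlers, assuming they only adjust the counter and refresh the OnscreenText readout (the bodies are illustrative, not the sample's exact code):

  def upIterations(self):
    self.numIterations += 1
    self.upDownEvent.setText(
      "Up/Down: Increase/Decrease the number of iterations (" + str(self.numIterations) + ")")

  def downIterations(self):
    if self.numIterations > 1:
      self.numIterations -= 1
    self.upDownEvent.setText(
      "Up/Down: Increase/Decrease the number of iterations (" + str(self.numIterations) + ")")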
Example 8
    def __init__(self):
        base.disableMouse()
        camera.setPos(0, -50, 0)

        # Check video card capabilities.

        if (base.win.getGsg().getSupportsBasicShaders() == 0):
            addTitle(
                "Toon Shader: Video driver reports that shaders are not supported."
            )
            return

        # Post the instructions.
        self.title = addTitle(
            "Panda3D: Tutorial - Toon Shading with Normals-Based Inking")
        self.inst1 = addInstructions(0.95, "ESC: Quit")
        self.inst2 = addInstructions(
            0.90, "Up/Down: Increase/Decrease Line Thickness")
        self.inst3 = addInstructions(
            0.85, "Left/Right: Decrease/Increase Line Darkness")
        self.inst4 = addInstructions(0.80,
                                     "V: View the render-to-texture results")

        # This shader's job is to render the model with discrete lighting
        # levels.  The lighting calculations built into the shader assume
        # a single nonattenuating point light.

        tempnode = NodePath(PandaNode("temp node"))
        tempnode.setShader(Shader.load(os.path.join(PANDA_SHADER_PATH, \
          "samples/cartoon/cartoon_lighting.sha")))
        base.cam.node().setInitialState(tempnode.getState())

        # This is the object that represents the single "light", as far as
        # the shader is concerned.  It's not a real Panda3D LightNode, but
        # the shader doesn't care about that.

        light = render.attachNewNode("light")
        light.setPos(30, -50, 0)

        # this call puts the light's nodepath into the render state.
        # this enables the shader to access this light by name.

        render.setShaderInput("light", light)

        # The "normals buffer" will contain a picture of the model colorized
        # so that the color of the model is a representation of the model's
        # normal at that point.

        normalsBuffer = base.win.makeTextureBuffer("normalsBuffer", 0, 0)
        normalsBuffer.setClearColor(Vec4(0.5, 0.5, 0.5, 1))
        self.normalsBuffer = normalsBuffer
        normalsCamera = base.makeCamera(normalsBuffer,
                                        lens=base.cam.node().getLens())
        normalsCamera.node().setScene(render)
        tempnode = NodePath(PandaNode("temp node"))
        tempnode.setShader(Shader.load(os.path.join(PANDA_SHADER_PATH, \
          "samples/cartoon/cartoon_normal.sha")))
        normalsCamera.node().setInitialState(tempnode.getState())

        #what we actually do to put edges on screen is apply them as a texture to
        #a transparent screen-fitted card

        drawnScene = normalsBuffer.getTextureCard()
        drawnScene.setTransparency(1)
        drawnScene.setColor(1, 1, 1, 0)
        drawnScene.reparentTo(render2d)
        self.drawnScene = drawnScene

        # this shader accepts, as input, the picture from the normals buffer.
        # it compares each adjacent pixel, looking for discontinuities.
        # wherever a discontinuity exists, it emits black ink.

        self.separation = 0.001
        self.cutoff = 0.3
        inkGen=Shader.load(os.path.join(PANDA_SHADER_PATH, \
          "samples/cartoon/cartoon_ink.sha"))
        drawnScene.setShader(inkGen)
        drawnScene.setShaderInput("separation",
                                  Vec4(self.separation, 0, self.separation, 0))
        drawnScene.setShaderInput(
            "cutoff", Vec4(self.cutoff, self.cutoff, self.cutoff, self.cutoff))

        # Panda contains a built-in viewer that lets you view the results of
        # your render-to-texture operations.  This code configures the viewer.

        self.accept("v", base.bufferViewer.toggleEnable)
        self.accept("V", base.bufferViewer.toggleEnable)
        base.bufferViewer.setPosition("llcorner")

        # Load a dragon model and animate it.

        self.character = Actor()
        self.character.loadModel('models/samples/cartoon/nik_dragon')
        self.character.reparentTo(render)
        self.character.loadAnims({'win': 'models/samples/cartoon/nik_dragon'})
        self.character.loop('win')
        self.character.hprInterval(15, Point3(360, 0, 0)).loop()

        # these allow you to change cartooning parameters in realtime

        self.accept("escape", sys.exit, [0])
        self.accept("arrow_up", self.increaseSeparation)
        self.accept("arrow_down", self.decreaseSeparation)
        self.accept("arrow_left", self.increaseCutoff)
        self.accept("arrow_right", self.decreaseCutoff)
Example 9
    def __init__(self):
        base.disableMouse()
        base.cam.node().getLens().setNear(10.0)
        base.cam.node().getLens().setFar(200.0)
        camera.setPos(0, -50, 0)
        
        # Check video card capabilities.
        
        if (base.win.getGsg().getSupportsBasicShaders() == 0):
            addTitle("Toon Shader: Video driver reports that shaders are not supported.")
            return
        
        # Enable a 'light ramp' - this discretizes the lighting,
        # which is half of what makes a model look like a cartoon.
        # Light ramps only work if shader generation is enabled,
        # so we call 'setShaderAuto'.

        tempnode = NodePath(PandaNode("temp node"))
        tempnode.setAttrib(LightRampAttrib.makeSingleThreshold(0.5, 0.4))
        tempnode.setShaderAuto()
        base.cam.node().setInitialState(tempnode.getState())
        
        # Use class 'CommonFilters' to enable a cartoon inking filter.
        # This can fail if the video card is not powerful enough, if so,
        # display an error and exit.
        
        self.separation = 1 # Pixels
        self.filters = CommonFilters(base.win, base.cam)
        filterok = self.filters.setCartoonInk(separation=self.separation)
        if not filterok:
            addTitle("Toon Shader: Video card not powerful enough to do image postprocessing")
            return
        
        # Post the instructions.
        
        self.title = addTitle("Panda3D: Tutorial - Toon Shading with Normals-Based Inking")
        self.inst1 = addInstructions(0.95,"ESC: Quit")
        self.inst2 = addInstructions(0.90,"Up/Down: Increase/Decrease Line Thickness")
        self.inst3 = addInstructions(0.85,"V: View the render-to-texture results")
        
        # Load a dragon model and animate it.
        
        self.character = Actor()
        self.character.loadModel('models/samples/cartoon/nik_dragon')
        self.character.reparentTo(render)
        self.character.loadAnims({'win': 'models/samples/cartoon/nik_dragon'})
        self.character.loop('win')
        self.character.hprInterval(15, Point3(360, 0,0)).loop()
        
        # Create a non-attenuating point light and an ambient light.
        
        plightnode = PointLight("point light")
        plightnode.setAttenuation(Vec3(1,0,0))
        plight = render.attachNewNode(plightnode)
        plight.setPos(30,-50,0)
        alightnode = AmbientLight("ambient light")
        alightnode.setColor(Vec4(0.8,0.8,0.8,1))
        alight = render.attachNewNode(alightnode)
        render.setLight(alight)
        render.setLight(plight)
        
        # Panda contains a built-in viewer that lets you view the 
        # results of all render-to-texture operations.  This lets you
        # see what class CommonFilters is doing behind the scenes.
        
        self.accept("v", base.bufferViewer.toggleEnable)
        self.accept("V", base.bufferViewer.toggleEnable)
        base.bufferViewer.setPosition("llcorner")
        self.accept("s", self.filters.manager.resizeBuffers)
        
        # These allow you to change cartooning parameters in realtime
        
        self.accept("escape", sys.exit, [0])
        self.accept("arrow_up", self.increaseSeparation)
        self.accept("arrow_down", self.decreaseSeparation)
Example 10
class World(DirectObject):

    def __init__(self):
        
        self.keyMap = {"left":0, "right":0, "forward":0, "cam-left":0, "cam-right":0}
        base.win.setClearColor(Vec4(0,0,0,1))

        # Post the instructions

        self.title = addTitle("Panda3D Tutorial: Roaming Ralph (Walking on Uneven Terrain)")
        self.inst1 = addInstructions(0.95, "[ESC]: Quit")
        self.inst2 = addInstructions(0.90, "[Left Arrow]: Rotate Ralph Left")
        self.inst3 = addInstructions(0.85, "[Right Arrow]: Rotate Ralph Right")
        self.inst4 = addInstructions(0.80, "[Up Arrow]: Run Ralph Forward")
        self.inst6 = addInstructions(0.70, "[A]: Rotate Camera Left")
        self.inst7 = addInstructions(0.65, "[S]: Rotate Camera Right")
        
        # Set up the environment
        #
        # This environment model contains collision meshes.  If you look
        # in the egg file, you will see the following:
        #
        #    <Collide> { Polyset keep descend }
        #
        # This tag causes the following mesh to be converted to a collision
        # mesh -- a mesh which is optimized for collision, not rendering.
        # It also keeps the original mesh, so there are now two copies ---
        # one optimized for rendering, one for collisions.  

        self.environ = loader.loadModel("models/samples/roaming_ralph/world")
        self.environ.reparentTo(render)
        self.environ.setPos(0,0,0)
        
        # Create the main character, Ralph

        ralphStartPos = self.environ.find("**/start_point").getPos()
        self.ralph = Actor("models/samples/roaming_ralph/ralph",
            {"run":"models/samples/roaming_ralph/ralph_run",
             "walk":"models/samples/roaming_ralph/ralph_walk"})
        self.ralph.reparentTo(render)
        self.ralph.setScale(.2)
        self.ralph.setPos(ralphStartPos)

        # Create a floater object.  We use the "floater" as a temporary
        # variable in a variety of calculations.
        
        self.floater = NodePath(PandaNode("floater"))
        self.floater.reparentTo(render)

        # Accept the control keys for movement and rotation

        self.accept("escape", sys.exit)
        self.accept("arrow_left", self.setKey, ["left",1])
        self.accept("arrow_right", self.setKey, ["right",1])
        self.accept("arrow_up", self.setKey, ["forward",1])
        self.accept("a", self.setKey, ["cam-left",1])
        self.accept("s", self.setKey, ["cam-right",1])
        self.accept("arrow_left-up", self.setKey, ["left",0])
        self.accept("arrow_right-up", self.setKey, ["right",0])
        self.accept("arrow_up-up", self.setKey, ["forward",0])
        self.accept("a-up", self.setKey, ["cam-left",0])
        self.accept("s-up", self.setKey, ["cam-right",0])

        taskMgr.add(self.move,"moveTask")

        # Game state variables
        self.isMoving = False

        # Set up the camera
        
        base.disableMouse()
        base.camera.setPos(self.ralph.getX(),self.ralph.getY()+10,2)
        
        # We will detect the height of the terrain by creating a collision
        # ray and casting it downward toward the terrain.  One ray will
        # start above ralph's head, and the other will start above the camera.
        # A ray may hit the terrain, or it may hit a rock or a tree.  If it
        # hits the terrain, we can detect the height.  If it hits anything
        # else, we rule that the move is illegal.

        self.cTrav = CollisionTraverser()

        self.ralphGroundRay = CollisionRay()
        self.ralphGroundRay.setOrigin(0,0,1000)
        self.ralphGroundRay.setDirection(0,0,-1)
        self.ralphGroundCol = CollisionNode('ralphRay')
        self.ralphGroundCol.addSolid(self.ralphGroundRay)
        self.ralphGroundCol.setFromCollideMask(BitMask32.bit(0))
        self.ralphGroundCol.setIntoCollideMask(BitMask32.allOff())
        self.ralphGroundColNp = self.ralph.attachNewNode(self.ralphGroundCol)
        self.ralphGroundHandler = CollisionHandlerQueue()
        self.cTrav.addCollider(self.ralphGroundColNp, self.ralphGroundHandler)

        self.camGroundRay = CollisionRay()
        self.camGroundRay.setOrigin(0,0,1000)
        self.camGroundRay.setDirection(0,0,-1)
        self.camGroundCol = CollisionNode('camRay')
        self.camGroundCol.addSolid(self.camGroundRay)
        self.camGroundCol.setFromCollideMask(BitMask32.bit(0))
        self.camGroundCol.setIntoCollideMask(BitMask32.allOff())
        self.camGroundColNp = base.camera.attachNewNode(self.camGroundCol)
        self.camGroundHandler = CollisionHandlerQueue()
        self.cTrav.addCollider(self.camGroundColNp, self.camGroundHandler)

        # Uncomment these lines to see the collision rays
        #self.ralphGroundColNp.show()
        #self.camGroundColNp.show()

        # Uncomment this line to show a visual representation of the
        # collisions occurring
        #self.cTrav.showCollisions(render)
       

    
    #Records the state of the arrow keys
    def setKey(self, key, value):
        self.keyMap[key] = value
    

    # Accepts arrow keys to move either the player or the menu cursor.
    # Also deals with grid checking and collision detection.
    def move(self, task):

        # Get the time elapsed since last frame. We need this
        # for framerate-independent movement.
        elapsed = globalClock.getDt()

        # If the camera-left key is pressed, move camera left.
        # If the camera-right key is pressed, move camera right.

        base.camera.lookAt(self.ralph)
        if (self.keyMap["cam-left"]!=0):
            base.camera.setX(base.camera, -(elapsed*20))
        if (self.keyMap["cam-right"]!=0):
            base.camera.setX(base.camera, +(elapsed*20))

        # save ralph's initial position so that we can restore it,
        # in case he falls off the map or runs into something.

        startpos = self.ralph.getPos()

        # If a move-key is pressed, move ralph in the specified direction.

        if (self.keyMap["left"]!=0):
            self.ralph.setH(self.ralph.getH() + elapsed*300)
        if (self.keyMap["right"]!=0):
            self.ralph.setH(self.ralph.getH() - elapsed*300)
        if (self.keyMap["forward"]!=0):
            self.ralph.setY(self.ralph, -(elapsed*25))

        # If ralph is moving, loop the run animation.
        # If he is standing still, stop the animation.

        if (self.keyMap["forward"]!=0) or (self.keyMap["left"]!=0) or (self.keyMap["right"]!=0):
            if self.isMoving is False:
                self.ralph.loop("run")
                self.isMoving = True
        else:
            if self.isMoving:
                self.ralph.stop()
                self.ralph.pose("walk",5)
                self.isMoving = False

        # If the camera is too far from ralph, move it closer.
        # If the camera is too close to ralph, move it farther.

        camvec = self.ralph.getPos() - base.camera.getPos()
        camvec.setZ(0)
        camdist = camvec.length()
        camvec.normalize()
        if (camdist > 10.0):
            base.camera.setPos(base.camera.getPos() + camvec*(camdist-10))
            camdist = 10.0
        if (camdist < 5.0):
            base.camera.setPos(base.camera.getPos() - camvec*(5-camdist))
            camdist = 5.0

        # Now check for collisions.

        self.cTrav.traverse(render)

        # Adjust ralph's Z coordinate.  If ralph's ray hit terrain,
        # update his Z. If it hit anything else, or didn't hit anything, put
        # him back where he was last frame.

        entries = []
        for i in range(self.ralphGroundHandler.getNumEntries()):
            entry = self.ralphGroundHandler.getEntry(i)
            entries.append(entry)
        entries.sort(key=lambda e: e.getSurfacePoint(render).getZ(),
                     reverse=True)
        if (len(entries)>0) and (entries[0].getIntoNode().getName() == "terrain"):
            self.ralph.setZ(entries[0].getSurfacePoint(render).getZ())
        else:
            self.ralph.setPos(startpos)

        # Keep the camera at one foot above the terrain,
        # or two feet above ralph, whichever is greater.
        
        entries = []
        for i in range(self.camGroundHandler.getNumEntries()):
            entry = self.camGroundHandler.getEntry(i)
            entries.append(entry)
        entries.sort(key=lambda e: e.getSurfacePoint(render).getZ(),
                     reverse=True)
        if (len(entries)>0) and (entries[0].getIntoNode().getName() == "terrain"):
            base.camera.setZ(entries[0].getSurfacePoint(render).getZ()+1.0)
        if (base.camera.getZ() < self.ralph.getZ() + 2.0):
            base.camera.setZ(self.ralph.getZ() + 2.0)
            
        # The camera should look in ralph's direction,
        # but it should also try to stay horizontal, so look at
        # a floater which hovers above ralph's head.
        
        self.floater.setPos(self.ralph.getPos())
        self.floater.setZ(self.ralph.getZ() + 2.0)
        base.camera.lookAt(self.floater)

        return Task.cont
Example 11
    def __init__(self):
        
        # Post the instructions.
        self.title = addTitle("Panda3D: Tutorial - Using Render-to-Texture")
        self.inst1 = addInstructions(0.95,"ESC: Quit")
        self.inst2 = addInstructions(0.90,"Up/Down: Zoom in/out on the Teapot")
        self.inst3 = addInstructions(0.85,"Left/Right: Move teapot left/right")
        self.inst4 = addInstructions(0.80,"V: View the render-to-texture results")

        # We get a handle to the default window.
        mainWindow = base.win

        # We now get the buffer that's going to hold the texture of our new scene.
        altBuffer = mainWindow.makeTextureBuffer("hello", 256, 256)

        # Now we have to set up a new scene graph to render this scene.
        altRender = NodePath("new render")

        # This takes care of setting up the camera properly.
        self.altCam = base.makeCamera(altBuffer)
        self.altCam.reparentTo(altRender)
        self.altCam.setPos(0, -10, 0)

        # Load the teapot and rotate it for a simple animation.
        self.teapot = loader.loadModel('models/teapot')
        self.teapot.reparentTo(altRender)
        self.teapot.setPos(0, 0, -1)
        self.teapot.hprInterval(1.5, Point3(360, 360, 360)).loop()
        
        #put some lighting on the teapot
        dlight = DirectionalLight('dlight')
        alight = AmbientLight('alight')
        dlnp = altRender.attachNewNode(dlight) 
        alnp = altRender.attachNewNode(alight)
        dlight.setColor(Vec4(0.8, 0.8, 0.5, 1))
        alight.setColor(Vec4(0.2, 0.2, 0.2, 1))
        dlnp.setHpr(0, -60, 0) 
        altRender.setLight(dlnp)
        altRender.setLight(alnp)
        
        # Panda contains a built-in viewer that lets you view the results of
        # your render-to-texture operations.  This code configures the viewer.

        self.accept("v", base.bufferViewer.toggleEnable)
        self.accept("V", base.bufferViewer.toggleEnable)
        base.bufferViewer.setPosition("llcorner")
        base.bufferViewer.setCardSize(1.0, 0.0)

        # Create the tv-men. Each TV-man will display the
        # offscreen-texture on his TV screen.
        self.tvMen = []
        self.makeTvMan(-5,30, 1,altBuffer.getTexture(),0.9)
        self.makeTvMan( 5,30, 1,altBuffer.getTexture(),1.4)
        self.makeTvMan( 0,23,-3,altBuffer.getTexture(),2.0)
        self.makeTvMan(-5,20,-6,altBuffer.getTexture(),1.1)
        self.makeTvMan( 5,18,-5,altBuffer.getTexture(),1.7)

        self.accept("escape", sys.exit, [0])
        self.accept("arrow_up", self.zoomIn)
        self.accept("arrow_down", self.zoomOut)
        self.accept("arrow_left", self.moveLeft)
        self.accept("arrow_right", self.moveRight)
Example 12
    def __init__(self):

        # Preliminary capabilities check.

        if (base.win.getGsg().getSupportsBasicShaders()==0):
            self.t=addTitle("Firefly Demo: Video driver reports that shaders are not supported.")
            return
        if (base.win.getGsg().getSupportsDepthTexture()==0):
            self.t=addTitle("Firefly Demo: Video driver reports that depth textures are not supported.")
            return
        
        # This algorithm uses two offscreen buffers, one of which has
        # an auxiliary bitplane, and the offscreen buffers share a single
        # depth buffer.  This is a heck of a complicated buffer setup.

        self.modelbuffer = self.makeFBO("model buffer",1)
        self.lightbuffer = self.makeFBO("light buffer",0)
        
        # Creation of a high-powered buffer can fail, if the graphics card
        # doesn't support the necessary OpenGL extensions.

        if self.modelbuffer is None or self.lightbuffer is None:
            self.t = addTitle("Firefly Demo: Video driver does not support multiple render targets")
            return

        # Create four render textures: depth, normal, albedo, and final.
        # attach them to the various bitplanes of the offscreen buffers.

        self.texDepth = Texture()
        self.texDepth.setFormat(Texture.FDepthStencil)
        self.texAlbedo = Texture()
        self.texNormal = Texture()
        self.texFinal = Texture()

        self.modelbuffer.addRenderTexture(self.texDepth,  GraphicsOutput.RTMBindOrCopy, GraphicsOutput.RTPDepthStencil)
        self.modelbuffer.addRenderTexture(self.texAlbedo, GraphicsOutput.RTMBindOrCopy, GraphicsOutput.RTPColor)
        self.modelbuffer.addRenderTexture(self.texNormal, GraphicsOutput.RTMBindOrCopy, GraphicsOutput.RTPAuxRgba0)

        self.lightbuffer.addRenderTexture(self.texFinal,  GraphicsOutput.RTMBindOrCopy, GraphicsOutput.RTPColor)

        # Set the near and far clipping planes.
        
        base.cam.node().getLens().setNear(50.0)
        base.cam.node().getLens().setFar(500.0)
        lens = base.cam.node().getLens()

        # This algorithm uses three cameras: one to render the models into the
        # model buffer, one to render the lights into the light buffer, and
        # one to render "plain" (non-deferred-shaded) stuff into the light
        # buffer.  Each camera has a bitmask to identify it.

        self.modelMask = 1
        self.lightMask = 2
        self.plainMask = 4
        
        self.modelcam=base.makeCamera(self.modelbuffer, lens=lens, scene=render, mask=self.modelMask)
        self.lightcam=base.makeCamera(self.lightbuffer, lens=lens, scene=render, mask=self.lightMask)
        self.plaincam=base.makeCamera(self.lightbuffer, lens=lens, scene=render, mask=self.plainMask)

        # Panda's main camera is not used.
        
        base.cam.node().setActive(0)
        
        # Take explicit control over the order in which the three
        # buffers are rendered.

        self.modelbuffer.setSort(1)
        self.lightbuffer.setSort(2)
        base.win.setSort(3)

        # Within the light buffer, control the order of the two cams.

        self.lightcam.node().getDisplayRegion(0).setSort(1)
        self.plaincam.node().getDisplayRegion(0).setSort(2)

        # By default, panda usually clears the screen before every
        # camera and before every window.  Tell it not to do that.
        # Then, tell it specifically when to clear and what to clear.

        self.modelcam.node().getDisplayRegion(0).disableClears()
        self.lightcam.node().getDisplayRegion(0).disableClears()
        self.plaincam.node().getDisplayRegion(0).disableClears()
        base.cam.node().getDisplayRegion(0).disableClears()
        base.cam2d.node().getDisplayRegion(0).disableClears()
        self.modelbuffer.disableClears()
        base.win.disableClears()

        self.modelbuffer.setClearColorActive(1)
        self.modelbuffer.setClearDepthActive(1)
        self.lightbuffer.setClearColorActive(1)
        self.lightbuffer.setClearColor(Vec4(0,0,0,1))

        # Miscellaneous stuff.
        
        base.disableMouse()
        base.camera.setPos(-9.112,-211.077,46.951)
        base.camera.setHpr(0, -7.5, 2.4)
        base.setBackgroundColor(Vec4(0,0,0,0))
        random.seed()

        # Calculate the projection parameters for the final shader.
        # The math here is too complex to explain in an inline comment;
        # a full explanation is in the HTML intro.

        proj = base.cam.node().getLens().getProjectionMat()
        proj_x = 0.5 * proj.getCell(3,2) / proj.getCell(0,0)
        proj_y = 0.5 * proj.getCell(3,2)
        proj_z = 0.5 * proj.getCell(3,2) / proj.getCell(2,1)
        proj_w = -0.5 - 0.5*proj.getCell(1,2)
        
        # Configure the render state of the model camera.
        
        tempnode = NodePath(PandaNode("temp node"))
        tempnode.setAttrib(AlphaTestAttrib.make(RenderAttrib.MGreaterEqual, 0.5))
        tempnode.setShader(Shader.load(os.path.join(PANDA_SHADER_PATH, \
          "samples/fireflies/fireflies_model.sha")))
        tempnode.setAttrib(DepthTestAttrib.make(RenderAttrib.MLessEqual))
        self.modelcam.node().setInitialState(tempnode.getState())
        
        # Configure the render state of the light camera.

        tempnode = NodePath(PandaNode("temp node"))
        tempnode.setShader(Shader.load(os.path.join(PANDA_SHADER_PATH, \
          "samples/fireflies/fireflies_lighting.sha")))
        tempnode.setShaderInput("texnormal",self.texNormal)
        tempnode.setShaderInput("texalbedo",self.texAlbedo)
        tempnode.setShaderInput("texdepth",self.texDepth)
        tempnode.setShaderInput("proj",Vec4(proj_x,proj_y,proj_z,proj_w))
        tempnode.setAttrib(ColorBlendAttrib.make(ColorBlendAttrib.MAdd, ColorBlendAttrib.OOne, ColorBlendAttrib.OOne))
        tempnode.setAttrib(CullFaceAttrib.make(CullFaceAttrib.MCullCounterClockwise))
        # The next line causes problems on Linux.
        #tempnode.setAttrib(DepthTestAttrib.make(RenderAttrib.MGreaterEqual))
        tempnode.setAttrib(DepthWriteAttrib.make(DepthWriteAttrib.MOff))
        self.lightcam.node().setInitialState(tempnode.getState())

        # Configure the render state of the plain camera.
        
        rs = RenderState.makeEmpty()
        self.plaincam.node().setInitialState(rs)
        
        # Clear any render attribs on the root node. This is necessary
        # because by default, panda assigns some attribs to the root
        # node.  These default attribs will override the
        # carefully-configured render attribs that we just attached
        # to the cameras.  The simplest solution is to just clear
        # them all out.

        render.setState(RenderState.makeEmpty())

        # My artist created a model in which some of the polygons
        # don't have textures.  This confuses the shader I wrote. 
        # This little hack guarantees that everything has a texture.
        
        white = loader.loadTexture("models/samples/fireflies/white.jpg")
        render.setTexture(white,0)

        # Create two subroots, to help speed cull traversal.
        
        self.lightroot = NodePath(PandaNode("lightroot"))
        self.lightroot.reparentTo(render)
        self.modelroot = NodePath(PandaNode("modelroot"))
        self.modelroot.reparentTo(render) 
        self.lightroot.hide(BitMask32(self.modelMask))
        self.modelroot.hide(BitMask32(self.lightMask))
        self.modelroot.hide(BitMask32(self.plainMask))

        # Load the model of a forest. Make it visible to the model camera.

        self.forest=NodePath(PandaNode("Forest Root"))
        self.forest.reparentTo(render)

        loader.loadModel( \
          "models/samples/fireflies/background").reparentTo(self.forest)
        loader.loadModel( \
          "models/samples/fireflies/foliage01").reparentTo(self.forest)
        loader.loadModel( \
          "models/samples/fireflies/foliage02").reparentTo(self.forest)
        loader.loadModel( \
          "models/samples/fireflies/foliage03").reparentTo(self.forest)
        loader.loadModel( \
          "models/samples/fireflies/foliage04").reparentTo(self.forest)
        loader.loadModel( \
          "models/samples/fireflies/foliage05").reparentTo(self.forest)
        loader.loadModel( \
          "models/samples/fireflies/foliage06").reparentTo(self.forest)
        loader.loadModel( \
          "models/samples/fireflies/foliage07").reparentTo(self.forest)
        loader.loadModel( \
          "models/samples/fireflies/foliage08").reparentTo(self.forest)
        loader.loadModel( \
          "models/samples/fireflies/foliage09").reparentTo(self.forest)
        self.forest.hide(BitMask32(self.lightMask | self.plainMask))

        # Cause the final results to be rendered into the main window on a card.
        
        cm = CardMaker("card")
        cm.setFrameFullscreenQuad()
        self.card = render2d.attachNewNode(cm.generate())
        self.card.setTexture(self.texFinal)
        
        # Post the instructions.

        self.title = addTitle("Panda3D: Tutorial - Fireflies using Deferred Shading")
        self.inst1 = addInstructions(0.95,"ESC: Quit")
        self.inst2 = addInstructions(0.90,"Up/Down: More / Fewer Fireflies (Count: unknown)")
        self.inst3 = addInstructions(0.85,"Right/Left: Bigger / Smaller Fireflies (Radius: unknown)")
        self.inst4 = addInstructions(0.80,"V: View the render-to-texture results")

        # Panda contains a built-in viewer that lets you view the results of
        # your render-to-texture operations.  This code configures the viewer.

        base.bufferViewer.setPosition("llcorner")
        base.bufferViewer.setCardSize(0,0.40)
        base.bufferViewer.setLayout("vline")
        self.toggleCards()
        self.toggleCards()

        # Firefly parameters

        self.fireflies = []
        self.sequences = []
        self.scaleseqs = []
        self.glowspheres = []
        self.fireflysize = 1.0
        self.spheremodel = loader.loadModel("models/misc/sphere.flt")
        self.setFireflySize(25.0)
        while len(self.fireflies) < 5:
            self.addFirefly()
        self.updateReadout()

        # these allow you to change parameters in realtime

        self.accept("escape", sys.exit, [0])
        self.accept("arrow_up",   self.incFireflyCount, [1.1111111])
        self.accept("arrow_down", self.decFireflyCount, [0.9000000])
        self.accept("arrow_right", self.setFireflySize, [1.1111111])
        self.accept("arrow_left",  self.setFireflySize, [0.9000000])
        self.accept("v", self.toggleCards)
        self.accept("V", self.toggleCards)

        self.nextadd = 0
        taskMgr.add(self.spawnTask, "spawner")
Example 13
class FireflyDemo(DirectObject):
    def __init__(self):

        # Preliminary capabilities check.

        if (base.win.getGsg().getSupportsBasicShaders() == 0):
            self.t = addTitle(
                "Firefly Demo: Video driver reports that shaders are not supported."
            )
            return
        if (base.win.getGsg().getSupportsDepthTexture() == 0):
            self.t = addTitle(
                "Firefly Demo: Video driver reports that depth textures are not supported."
            )
            return

        # This algorithm uses two offscreen buffers, one of which has
        # an auxiliary bitplane, and the offscreen buffers share a single
        # depth buffer.  This is a heck of a complicated buffer setup.

        self.modelbuffer = self.makeFBO("model buffer", 1)
        self.lightbuffer = self.makeFBO("light buffer", 0)

        # Creation of a high-powered buffer can fail, if the graphics card
        # doesn't support the necessary OpenGL extensions.

        if self.modelbuffer is None or self.lightbuffer is None:
            self.t = addTitle(
                "Firefly Demo: Video driver does not support multiple render targets"
            )
            return

        # Create four render textures: depth, normal, albedo, and final.
        # attach them to the various bitplanes of the offscreen buffers.

        self.texDepth = Texture()
        self.texDepth.setFormat(Texture.FDepthStencil)
        self.texAlbedo = Texture()
        self.texNormal = Texture()
        self.texFinal = Texture()

        self.modelbuffer.addRenderTexture(self.texDepth,
                                          GraphicsOutput.RTMBindOrCopy,
                                          GraphicsOutput.RTPDepthStencil)
        self.modelbuffer.addRenderTexture(self.texAlbedo,
                                          GraphicsOutput.RTMBindOrCopy,
                                          GraphicsOutput.RTPColor)
        self.modelbuffer.addRenderTexture(self.texNormal,
                                          GraphicsOutput.RTMBindOrCopy,
                                          GraphicsOutput.RTPAuxRgba0)

        self.lightbuffer.addRenderTexture(self.texFinal,
                                          GraphicsOutput.RTMBindOrCopy,
                                          GraphicsOutput.RTPColor)

        # Set the near and far clipping planes.

        base.cam.node().getLens().setNear(50.0)
        base.cam.node().getLens().setFar(500.0)
        lens = base.cam.node().getLens()

        # This algorithm uses three cameras: one to render the models into the
        # model buffer, one to render the lights into the light buffer, and
        # one to render "plain" (non-deferred-shaded) stuff into the light
        # buffer.  Each camera has a bitmask to identify it.

        self.modelMask = 1
        self.lightMask = 2
        self.plainMask = 4

        self.modelcam = base.makeCamera(self.modelbuffer,
                                        lens=lens,
                                        scene=render,
                                        mask=self.modelMask)
        self.lightcam = base.makeCamera(self.lightbuffer,
                                        lens=lens,
                                        scene=render,
                                        mask=self.lightMask)
        self.plaincam = base.makeCamera(self.lightbuffer,
                                        lens=lens,
                                        scene=render,
                                        mask=self.plainMask)

        # Panda's main camera is not used.

        base.cam.node().setActive(0)

        # Take explicit control over the order in which the three
        # buffers are rendered.

        self.modelbuffer.setSort(1)
        self.lightbuffer.setSort(2)
        base.win.setSort(3)

        # Within the light buffer, control the order of the two cams.

        self.lightcam.node().getDisplayRegion(0).setSort(1)
        self.plaincam.node().getDisplayRegion(0).setSort(2)

        # By default, panda usually clears the screen before every
        # camera and before every window.  Tell it not to do that.
        # Then, tell it specifically when to clear and what to clear.

        self.modelcam.node().getDisplayRegion(0).disableClears()
        self.lightcam.node().getDisplayRegion(0).disableClears()
        self.plaincam.node().getDisplayRegion(0).disableClears()
        base.cam.node().getDisplayRegion(0).disableClears()
        base.cam2d.node().getDisplayRegion(0).disableClears()
        self.modelbuffer.disableClears()
        base.win.disableClears()

        self.modelbuffer.setClearColorActive(1)
        self.modelbuffer.setClearDepthActive(1)
        self.lightbuffer.setClearColorActive(1)
        self.lightbuffer.setClearColor(Vec4(0, 0, 0, 1))

        # Miscellaneous stuff.

        base.disableMouse()
        base.camera.setPos(-9.112, -211.077, 46.951)
        base.camera.setHpr(0, -7.5, 2.4)
        base.setBackgroundColor(Vec4(0, 0, 0, 0))
        random.seed()

        # Calculate the projection parameters for the final shader.
        # The math here is too complex to explain in an inline comment;
        # a full explanation is in the HTML intro.

        proj = base.cam.node().getLens().getProjectionMat()
        proj_x = 0.5 * proj.getCell(3, 2) / proj.getCell(0, 0)
        proj_y = 0.5 * proj.getCell(3, 2)
        proj_z = 0.5 * proj.getCell(3, 2) / proj.getCell(2, 1)
        proj_w = -0.5 - 0.5 * proj.getCell(1, 2)

        # Configure the render state of the model camera.

        tempnode = NodePath(PandaNode("temp node"))
        tempnode.setAttrib(
            AlphaTestAttrib.make(RenderAttrib.MGreaterEqual, 0.5))
        tempnode.setShader(Shader.load(os.path.join(PANDA_SHADER_PATH, \
          "samples/fireflies/fireflies_model.sha")))
        tempnode.setAttrib(DepthTestAttrib.make(RenderAttrib.MLessEqual))
        self.modelcam.node().setInitialState(tempnode.getState())

        # Configure the render state of the light camera.

        tempnode = NodePath(PandaNode("temp node"))
        tempnode.setShader(Shader.load(os.path.join(PANDA_SHADER_PATH, \
          "samples/fireflies/fireflies_lighting.sha")))
        tempnode.setShaderInput("texnormal", self.texNormal)
        tempnode.setShaderInput("texalbedo", self.texAlbedo)
        tempnode.setShaderInput("texdepth", self.texDepth)
        tempnode.setShaderInput("proj", Vec4(proj_x, proj_y, proj_z, proj_w))
        tempnode.setAttrib(
            ColorBlendAttrib.make(ColorBlendAttrib.MAdd, ColorBlendAttrib.OOne,
                                  ColorBlendAttrib.OOne))
        tempnode.setAttrib(
            CullFaceAttrib.make(CullFaceAttrib.MCullCounterClockwise))
        # The next line causes problems on Linux.
        #tempnode.setAttrib(DepthTestAttrib.make(RenderAttrib.MGreaterEqual))
        tempnode.setAttrib(DepthWriteAttrib.make(DepthWriteAttrib.MOff))
        self.lightcam.node().setInitialState(tempnode.getState())

        # Configure the render state of the plain camera.

        rs = RenderState.makeEmpty()
        self.plaincam.node().setInitialState(rs)

        # Clear any render attribs on the root node. This is necessary
        # because by default, panda assigns some attribs to the root
        # node.  These default attribs will override the
        # carefully-configured render attribs that we just attached
        # to the cameras.  The simplest solution is to just clear
        # them all out.

        render.setState(RenderState.makeEmpty())

        # My artist created a model in which some of the polygons
        # don't have textures.  This confuses the shader I wrote.
        # This little hack guarantees that everything has a texture.

        white = loader.loadTexture("models/samples/fireflies/white.jpg")
        render.setTexture(white, 0)

        # Create two subroots, to help speed cull traversal.

        self.lightroot = NodePath(PandaNode("lightroot"))
        self.lightroot.reparentTo(render)
        self.modelroot = NodePath(PandaNode("modelroot"))
        self.modelroot.reparentTo(render)
        self.lightroot.hide(BitMask32(self.modelMask))
        self.modelroot.hide(BitMask32(self.lightMask))
        self.modelroot.hide(BitMask32(self.plainMask))

        # Load the model of a forest. Make it visible to the model camera.

        self.forest = NodePath(PandaNode("Forest Root"))
        self.forest.reparentTo(render)

        loader.loadModel( \
          "models/samples/fireflies/background").reparentTo(self.forest)
        loader.loadModel( \
          "models/samples/fireflies/foliage01").reparentTo(self.forest)
        loader.loadModel( \
          "models/samples/fireflies/foliage02").reparentTo(self.forest)
        loader.loadModel( \
          "models/samples/fireflies/foliage03").reparentTo(self.forest)
        loader.loadModel( \
          "models/samples/fireflies/foliage04").reparentTo(self.forest)
        loader.loadModel( \
          "models/samples/fireflies/foliage05").reparentTo(self.forest)
        loader.loadModel( \
          "models/samples/fireflies/foliage06").reparentTo(self.forest)
        loader.loadModel( \
          "models/samples/fireflies/foliage07").reparentTo(self.forest)
        loader.loadModel( \
          "models/samples/fireflies/foliage08").reparentTo(self.forest)
        loader.loadModel( \
          "models/samples/fireflies/foliage09").reparentTo(self.forest)
        self.forest.hide(BitMask32(self.lightMask | self.plainMask))

        # Cause the final results to be rendered into the main window on a card.

        cm = CardMaker("card")
        cm.setFrameFullscreenQuad()
        self.card = render2d.attachNewNode(cm.generate())
        self.card.setTexture(self.texFinal)

        # Post the instructions.

        self.title = addTitle(
            "Panda3D: Tutorial - Fireflies using Deferred Shading")
        self.inst1 = addInstructions(0.95, "ESC: Quit")
        self.inst2 = addInstructions(
            0.90, "Up/Down: More / Fewer Fireflies (Count: unknown)")
        self.inst3 = addInstructions(
            0.85, "Right/Left: Bigger / Smaller Fireflies (Radius: unknown)")
        self.inst4 = addInstructions(0.80,
                                     "V: View the render-to-texture results")

        # Panda contains a built-in viewer that lets you view the results of
        # your render-to-texture operations.  This code configures the viewer.

        base.bufferViewer.setPosition("llcorner")
        base.bufferViewer.setCardSize(0, 0.40)
        base.bufferViewer.setLayout("vline")
        self.toggleCards()
        self.toggleCards()

        # Firefly parameters

        self.fireflies = []
        self.sequences = []
        self.scaleseqs = []
        self.glowspheres = []
        self.fireflysize = 1.0
        self.spheremodel = loader.loadModel("models/misc/sphere.flt")
        self.setFireflySize(25.0)
        while (len(self.fireflies) < 5):
            self.addFirefly()
        self.updateReadout()

        # these allow you to change parameters in realtime

        self.accept("escape", sys.exit, [0])
        self.accept("arrow_up", self.incFireflyCount, [1.1111111])
        self.accept("arrow_down", self.decFireflyCount, [0.9000000])
        self.accept("arrow_right", self.setFireflySize, [1.1111111])
        self.accept("arrow_left", self.setFireflySize, [0.9000000])
        self.accept("v", self.toggleCards)
        self.accept("V", self.toggleCards)

        self.nextadd = 0
        taskMgr.add(self.spawnTask, "spawner")

    def makeFBO(self, name, auxrgba):
        # This routine creates an offscreen buffer.  All the complicated
        # parameters are basically demanding capabilities from the offscreen
        # buffer - we demand that it be able to render to texture on every
        # bitplane, that it can support aux bitplanes, that it track
        # the size of the host window, that it can render to texture
        # cumulatively, and so forth.
        winprops = WindowProperties()
        props = FrameBufferProperties()
        props.setRgbColor(1)
        props.setAlphaBits(1)
        props.setDepthBits(1)
        props.setAuxRgba(auxrgba)
        return base.graphicsEngine.makeOutput(
            base.pipe, "model buffer", -2, props, winprops,
            GraphicsPipe.BFSizeTrackHost | GraphicsPipe.BFCanBindEvery
            | GraphicsPipe.BFRttCumulative | GraphicsPipe.BFRefuseWindow,
            base.win.getGsg(), base.win)

    def addFirefly(self):
        pos1 = Point3(random.uniform(-50, 50), random.uniform(-100, 150),
                      random.uniform(-10, 80))
        dir = Vec3(random.uniform(-1, 1), random.uniform(-1, 1),
                   random.uniform(-1, 1))
        dir.normalize()
        pos2 = pos1 + (dir * 20)
        fly = self.lightroot.attachNewNode(PandaNode("fly"))
        glow = fly.attachNewNode(PandaNode("glow"))
        dot = fly.attachNewNode(PandaNode("dot"))
        color_r = random.uniform(0.7, 1.0)
        color_g = 1.0
        color_b = 0.8
        fly.setShaderInput("lightcolor", color_r, color_g, color_b, 1.0)
        int1 = fly.posInterval(random.uniform(7, 12), pos1, pos2)
        int2 = fly.posInterval(random.uniform(7, 12), pos2, pos1)
        si1 = fly.scaleInterval(random.uniform(0.8, 1.5),
                                Point3(0.2, 0.2, 0.2), Point3(0.2, 0.2, 0.2))
        si2 = fly.scaleInterval(random.uniform(1.5, 0.8),
                                Point3(1.0, 1.0, 1.0), Point3(0.2, 0.2, 0.2))
        si3 = fly.scaleInterval(random.uniform(1.0, 2.0),
                                Point3(0.2, 0.2, 0.2), Point3(1.0, 1.0, 1.0))
        siseq = Sequence(si1, si2, si3)
        siseq.loop()
        siseq.setT(random.uniform(0, 1000))
        seq = Sequence(int1, int2)
        seq.loop()
        self.spheremodel.instanceTo(glow)
        self.spheremodel.instanceTo(dot)
        glow.setScale(self.fireflysize * 1.1)
        glow.hide(BitMask32(self.modelMask | self.plainMask))
        dot.setScale(0.6)
        dot.hide(BitMask32(self.modelMask | self.lightMask))
        dot.setColor(color_r, color_g, color_b, 1.0)
        self.fireflies.append(fly)
        self.sequences.append(seq)
        self.glowspheres.append(glow)
        self.scaleseqs.append(siseq)

    def updateReadout(self):
        self.inst2.destroy()
        self.inst2 = addInstructions(
            0.90, "Up/Down: More / Fewer Fireflies (Currently: " +
            str(len(self.fireflies)) + ")")
        self.inst3.destroy()
        self.inst3 = addInstructions(
            0.85, "Right/Left: Bigger / Smaller Fireflies (Radius: " +
            str(self.fireflysize) + " ft)")

    def toggleCards(self):
        base.bufferViewer.toggleEnable()
        # When the cards are not visible, I also disable the color clear.
        # This color-clear is actually not necessary; the depth-clear is
        # sufficient for the purposes of the algorithm.
        if (base.bufferViewer.isEnabled()):
            self.modelbuffer.setClearColorActive(1)
        else:
            self.modelbuffer.setClearColorActive(0)

    def incFireflyCount(self, scale):
        n = int((len(self.fireflies) * scale) + 1)
        while (n > len(self.fireflies)):
            self.addFirefly()
        self.updateReadout()

    def decFireflyCount(self, scale):
        n = int(len(self.fireflies) * scale)
        if (n < 1): n = 1
        while (len(self.fireflies) > n):
            self.glowspheres.pop()
            self.sequences.pop().finish()
            self.scaleseqs.pop().finish()
            self.fireflies.pop().removeNode()
        self.updateReadout()

    def setFireflySize(self, n):
        n = n * self.fireflysize
        self.fireflysize = n
        for x in self.glowspheres:
            x.setScale(self.fireflysize * 1.1)
        self.updateReadout()

    def spawnTask(self, task):
        if task.time > self.nextadd:
            self.nextadd = task.time + 1.0
            if (len(self.fireflies) < 300):
                self.incFireflyCount(1.03)
        return Task.cont
Example 15
    def __init__(self):
        base.disableMouse()
        base.setBackgroundColor(0, 0, 0)
        camera.setPos(0, -50, 0)

        # Check video card capabilities.

        if (base.win.getGsg().getSupportsBasicShaders() == 0):
            addTitle(
                "Glow Filter: Video driver reports that shaders are not supported."
            )
            return

        # Post the instructions
        self.title = addTitle("Panda3D: Tutorial - Glow Filter")
        self.inst1 = addInstructions(0.95, "ESC: Quit")
        self.inst2 = addInstructions(0.90, "Space: Toggle Glow Filter On/Off")
        self.inst3 = addInstructions(0.85, "Enter: Toggle Running/Spinning")
        self.inst4 = addInstructions(0.80,
                                     "V: View the render-to-texture results")

        # Create the shader that will determine what parts of the scene will glow
        glowShader=Shader.load(os.path.join(PANDA_SHADER_PATH, \
          "samples/glow/glow_shader.sha"))

        # load our model
        self.tron = Actor()
        self.tron.loadModel("models/samples/glow/tron")
        self.tron.loadAnims({"running": "models/samples/glow/tron_anim"})
        self.tron.reparentTo(render)
        self.interval = self.tron.hprInterval(60, Point3(360, 0, 0))
        self.interval.loop()
        self.isRunning = False

        #put some lighting on the tron model
        dlight = DirectionalLight('dlight')
        alight = AmbientLight('alight')
        dlnp = render.attachNewNode(dlight)
        alnp = render.attachNewNode(alight)
        dlight.setColor(Vec4(1.0, 0.7, 0.2, 1))
        alight.setColor(Vec4(0.2, 0.2, 0.2, 1))
        dlnp.setHpr(0, -60, 0)
        render.setLight(dlnp)
        render.setLight(alnp)

        # create the glow buffer. This buffer renders like a normal scene,
        # except that only the glowing materials should show up nonblack.
        glowBuffer = base.win.makeTextureBuffer("Glow scene", 512, 512)
        glowBuffer.setSort(-3)
        glowBuffer.setClearColor(Vec4(0, 0, 0, 1))

        # We have to attach a camera to the glow buffer. The glow camera
        # must have the same frustum as the main camera. As long as the aspect
        # ratios match, the rest will take care of itself.
        glowCamera = base.makeCamera(glowBuffer,
                                     lens=base.cam.node().getLens())

        # Tell the glow camera to use the glow shader
        tempnode = NodePath(PandaNode("temp node"))
        tempnode.setShader(glowShader)
        glowCamera.node().setInitialState(tempnode.getState())

        # set up the pipeline: from glow scene to blur x to blur y to main window.
        blurXBuffer=makeFilterBuffer(glowBuffer,  "Blur X", -2, \
          os.path.join(PANDA_SHADER_PATH, "samples/glow/glow_xblur.sha"))
        blurYBuffer=makeFilterBuffer(blurXBuffer, "Blur Y", -1, \
          os.path.join(PANDA_SHADER_PATH, "samples/glow/glow_yblur.sha"))
        self.finalcard = blurYBuffer.getTextureCard()
        self.finalcard.reparentTo(render2d)
        self.finalcard.setAttrib(ColorBlendAttrib.make(ColorBlendAttrib.MAdd))

        # Panda contains a built-in viewer that lets you view the results of
        # your render-to-texture operations.  This code configures the viewer.

        self.accept("v", base.bufferViewer.toggleEnable)
        self.accept("V", base.bufferViewer.toggleEnable)
        base.bufferViewer.setPosition("llcorner")
        base.bufferViewer.setLayout("hline")
        base.bufferViewer.setCardSize(0.652, 0)

        # event handling
        self.accept("space", self.toggleGlow)
        self.accept("enter", self.toggleDisplay)
        self.accept("escape", sys.exit, [0])

        self.glowOn = True
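
The space and enter handlers wired above (self.toggleGlow, self.toggleDisplay) are not part of this excerpt; a plausible sketch, assuming they only need to show or hide the additively-blended blur card and switch the actor between spinning and running:

    def toggleGlow(self):
        # Sketch: attach or detach the card that composites the blurred glow.
        self.glowOn = not self.glowOn
        if self.glowOn:
            self.finalcard.reparentTo(render2d)
        else:
            self.finalcard.detachNode()

    def toggleDisplay(self):
        # Sketch: switch between the spinning interval and the run animation.
        self.isRunning = not self.isRunning
        if self.isRunning:
            self.interval.finish()
            self.tron.loop("running")
        else:
            self.tron.stop()
            self.interval.loop()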
Example 16
    def __init__(self):

        self.keyMap = {
            "left": 0,
            "right": 0,
            "forward": 0,
            "cam-left": 0,
            "cam-right": 0
        }
        base.win.setClearColor(Vec4(0, 0, 0, 1))

        # Post the instructions

        self.title = addTitle(
            "Panda3D Tutorial: Roaming Ralph (Walking on Uneven Terrain)")
        self.inst1 = addInstructions(0.95, "[ESC]: Quit")
        self.inst2 = addInstructions(0.90, "[Left Arrow]: Rotate Ralph Left")
        self.inst3 = addInstructions(0.85, "[Right Arrow]: Rotate Ralph Right")
        self.inst4 = addInstructions(0.80, "[Up Arrow]: Run Ralph Forward")
        self.inst6 = addInstructions(0.70, "[A]: Rotate Camera Left")
        self.inst7 = addInstructions(0.65, "[S]: Rotate Camera Right")

        # Set up the environment
        #
        # This environment model contains collision meshes.  If you look
        # in the egg file, you will see the following:
        #
        #    <Collide> { Polyset keep descend }
        #
        # This tag causes the following mesh to be converted to a collision
        # mesh -- a mesh which is optimized for collision, not rendering.
        # It also keeps the original mesh, so there are now two copies ---
        # one optimized for rendering, one for collisions.

        self.environ = loader.loadModel("models/samples/roaming_ralph/world")
        self.environ.reparentTo(render)
        self.environ.setPos(0, 0, 0)

        # Create the main character, Ralph

        ralphStartPos = self.environ.find("**/start_point").getPos()
        self.ralph = Actor(
            "models/samples/roaming_ralph/ralph", {
                "run": "models/samples/roaming_ralph/ralph_run",
                "walk": "models/samples/roaming_ralph/ralph_walk"
            })
        self.ralph.reparentTo(render)
        self.ralph.setScale(.2)
        self.ralph.setPos(ralphStartPos)

        # Create a floater object.  We use the "floater" as a temporary
        # variable in a variety of calculations.

        self.floater = NodePath(PandaNode("floater"))
        self.floater.reparentTo(render)

        # Accept the control keys for movement and rotation

        self.accept("escape", sys.exit)
        self.accept("arrow_left", self.setKey, ["left", 1])
        self.accept("arrow_right", self.setKey, ["right", 1])
        self.accept("arrow_up", self.setKey, ["forward", 1])
        self.accept("a", self.setKey, ["cam-left", 1])
        self.accept("s", self.setKey, ["cam-right", 1])
        self.accept("arrow_left-up", self.setKey, ["left", 0])
        self.accept("arrow_right-up", self.setKey, ["right", 0])
        self.accept("arrow_up-up", self.setKey, ["forward", 0])
        self.accept("a-up", self.setKey, ["cam-left", 0])
        self.accept("s-up", self.setKey, ["cam-right", 0])

        taskMgr.add(self.move, "moveTask")

        # Game state variables
        self.isMoving = False

        # Set up the camera

        base.disableMouse()
        base.camera.setPos(self.ralph.getX(), self.ralph.getY() + 10, 2)

        # We will detect the height of the terrain by creating a collision
        # ray and casting it downward toward the terrain.  One ray will
        # start above ralph's head, and the other will start above the camera.
        # A ray may hit the terrain, or it may hit a rock or a tree.  If it
        # hits the terrain, we can detect the height.  If it hits anything
        # else, we rule that the move is illegal.

        self.cTrav = CollisionTraverser()

        self.ralphGroundRay = CollisionRay()
        self.ralphGroundRay.setOrigin(0, 0, 1000)
        self.ralphGroundRay.setDirection(0, 0, -1)
        self.ralphGroundCol = CollisionNode('ralphRay')
        self.ralphGroundCol.addSolid(self.ralphGroundRay)
        self.ralphGroundCol.setFromCollideMask(BitMask32.bit(0))
        self.ralphGroundCol.setIntoCollideMask(BitMask32.allOff())
        self.ralphGroundColNp = self.ralph.attachNewNode(self.ralphGroundCol)
        self.ralphGroundHandler = CollisionHandlerQueue()
        self.cTrav.addCollider(self.ralphGroundColNp, self.ralphGroundHandler)

        self.camGroundRay = CollisionRay()
        self.camGroundRay.setOrigin(0, 0, 1000)
        self.camGroundRay.setDirection(0, 0, -1)
        self.camGroundCol = CollisionNode('camRay')
        self.camGroundCol.addSolid(self.camGroundRay)
        self.camGroundCol.setFromCollideMask(BitMask32.bit(0))
        self.camGroundCol.setIntoCollideMask(BitMask32.allOff())
        self.camGroundColNp = base.camera.attachNewNode(self.camGroundCol)
        self.camGroundHandler = CollisionHandlerQueue()
        self.cTrav.addCollider(self.camGroundColNp, self.camGroundHandler)
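
The rays above only matter once the traverser runs and its queue is read back; a minimal per-frame sketch of that query, assuming it runs inside the move task registered above (all of the names come from this __init__):

        self.cTrav.traverse(render)
        if self.ralphGroundHandler.getNumEntries() > 0:
            # Nearest entry first; for a downward ray this is the highest surface.
            self.ralphGroundHandler.sortEntries()
            entry = self.ralphGroundHandler.getEntry(0)
            if entry.getIntoNode().getName() == "terrain":
                self.ralph.setZ(entry.getSurfacePoint(render).getZ())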
Example 17
    def renderSceneInto(self,
                        depthtex=None,
                        colortex=None,
                        auxtex=None,
                        auxbits=0,
                        textures=None):
        """ Causes the scene to be rendered into the supplied textures
        instead of into the original window.  Puts a fullscreen quad
        into the original window to show the render-to-texture results.
        Returns the quad.  Normally, the caller would then apply a
        shader to the quad.

        To elaborate on how this all works:

        * An offscreen buffer is created.  It is set up to mimic
          the original display region - it is the same size,
          uses the same clear colors, and contains a DisplayRegion
          that uses the original camera.

        * A fullscreen quad and an orthographic camera to render
          that quad are both created.  The original camera is
          removed from the original window, and in its place, the
          orthographic quad-camera is installed.

        * The fullscreen quad is textured with the data from the
          offscreen buffer.  A shader is applied that tints the
          results pink.

        * Automatic shader generation is NOT enabled.
          If you have a filter that depends on a render target from
          the auto-shader, you either need to set an auto-shader
          attrib on the main camera or scene, or, you need to provide
          these outputs in your own shader.

        * All clears are disabled on the original display region.
          If the display region fills the whole window, then clears
          are disabled on the original window as well.  It is
          assumed that rendering the full-screen quad eliminates
          the need to do clears.

        Hence, the original window, which used to contain the actual
        scene, now contains a pink-tinted quad with a texture of the
        scene.  It is assumed that the user will replace the shader
        on the quad with a more interesting filter. """

        if (textures):
            colortex = textures.get("color", None)
            depthtex = textures.get("depth", None)
            auxtex = textures.get("aux", None)

        if (colortex == None):
            colortex = Texture("filter-base-color")
            colortex.setWrapU(Texture.WMClamp)
            colortex.setWrapV(Texture.WMClamp)

        texgroup = (depthtex, colortex, auxtex, None)

        # Choose the size of the offscreen buffer.

        (winx, winy) = self.getScaledSize(1, 1, 1)
        buffer = self.createBuffer("filter-base", winx, winy, texgroup)

        if (buffer == None):
            return None

        cm = CardMaker("filter-base-quad")
        cm.setFrameFullscreenQuad()
        quad = NodePath(cm.generate())
        quad.setDepthTest(0)
        quad.setDepthWrite(0)
        quad.setTexture(colortex)
        quad.setColor(Vec4(1, 0.5, 0.5, 1))

        cs = NodePath("dummy")
        cs.setState(self.camstate)
        # Do we really need to turn on the Shader Generator?
        #cs.setShaderAuto()
        if (auxbits):
            cs.setAttrib(AuxBitplaneAttrib.make(auxbits))
        self.camera.node().setInitialState(cs.getState())

        quadcamnode = Camera("filter-quad-cam")
        lens = OrthographicLens()
        lens.setFilmSize(2, 2)
        lens.setFilmOffset(0, 0)
        lens.setNearFar(-1000, 1000)
        quadcamnode.setLens(lens)
        quadcam = quad.attachNewNode(quadcamnode)

        self.region.setCamera(quadcam)

        dr = buffer.getDisplayRegion(0)
        self.setStackedClears(dr, self.rclears, self.wclears)
        if (auxtex):
            dr.setClearActive(GraphicsOutput.RTPAuxRgba0, 1)
            dr.setClearValue(GraphicsOutput.RTPAuxRgba0,
                             Vec4(0.5, 0.5, 1.0, 0.0))
        self.region.disableClears()
        if (self.isFullscreen()):
            self.win.disableClears()
        dr.setCamera(self.camera)
        dr.setActive(1)

        self.buffers.append(buffer)
        self.sizes.append((1, 1, 1))

        return quad
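
The renderSceneInto shown here matches Panda3D's FilterManager API; a minimal usage sketch, assuming the modern panda3d.core module layout and a hypothetical post-process shader file my_filter.sha:

from direct.filter.FilterManager import FilterManager
from panda3d.core import Texture, Shader

manager = FilterManager(base.win, base.cam)
colortex = Texture()
quad = manager.renderSceneInto(colortex=colortex)   # the fullscreen quad returned above
quad.setShader(Shader.load("my_filter.sha"))        # hypothetical shader path
quad.setShaderInput("tex", colortex)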
Example 18
    def renderSceneInto(self, depthtex=None, colortex=None, auxtex=None, auxbits=0, textures=None):

        """ Causes the scene to be rendered into the supplied textures
        instead of into the original window.  Puts a fullscreen quad
        into the original window to show the render-to-texture results.
        Returns the quad.  Normally, the caller would then apply a
        shader to the quad.

        To elaborate on how this all works:

        * An offscreen buffer is created.  It is set up to mimic
          the original display region - it is the same size,
          uses the same clear colors, and contains a DisplayRegion
          that uses the original camera.

        * A fullscreen quad and an orthographic camera to render
          that quad are both created.  The original camera is
          removed from the original window, and in its place, the
          orthographic quad-camera is installed.

        * The fullscreen quad is textured with the data from the
          offscreen buffer.  A shader is applied that tints the
          results pink.

        * Automatic shader generation is NOT enabled.
          If you have a filter that depends on a render target from
          the auto-shader, you either need to set an auto-shader
          attrib on the main camera or scene, or, you need to provide
          these outputs in your own shader.

        * All clears are disabled on the original display region.
          If the display region fills the whole window, then clears
          are disabled on the original window as well.  It is
          assumed that rendering the full-screen quad eliminates
          the need to do clears.

        Hence, the original window, which used to contain the actual
        scene, now contains a pink-tinted quad with a texture of the
        scene.  It is assumed that the user will replace the shader
        on the quad with a more interesting filter. """

        if textures:
            colortex = textures.get("color", None)
            depthtex = textures.get("depth", None)
            auxtex = textures.get("aux", None)

        if colortex == None:
            colortex = Texture("filter-base-color")
            colortex.setWrapU(Texture.WMClamp)
            colortex.setWrapV(Texture.WMClamp)

        texgroup = (depthtex, colortex, auxtex, None)

        # Choose the size of the offscreen buffer.

        (winx, winy) = self.getScaledSize(1, 1, 1)
        buffer = self.createBuffer("filter-base", winx, winy, texgroup)

        if buffer == None:
            return None

        cm = CardMaker("filter-base-quad")
        cm.setFrameFullscreenQuad()
        quad = NodePath(cm.generate())
        quad.setDepthTest(0)
        quad.setDepthWrite(0)
        quad.setTexture(colortex)
        quad.setColor(Vec4(1, 0.5, 0.5, 1))

        cs = NodePath("dummy")
        cs.setState(self.camstate)
        # Do we really need to turn on the Shader Generator?
        # cs.setShaderAuto()
        if auxbits:
            cs.setAttrib(AuxBitplaneAttrib.make(auxbits))
        self.camera.node().setInitialState(cs.getState())

        quadcamnode = Camera("filter-quad-cam")
        lens = OrthographicLens()
        lens.setFilmSize(2, 2)
        lens.setFilmOffset(0, 0)
        lens.setNearFar(-1000, 1000)
        quadcamnode.setLens(lens)
        quadcam = quad.attachNewNode(quadcamnode)

        self.region.setCamera(quadcam)

        dr = buffer.getDisplayRegion(0)
        self.setStackedClears(dr, self.rclears, self.wclears)
        if auxtex:
            dr.setClearActive(GraphicsOutput.RTPAuxRgba0, 1)
            dr.setClearValue(GraphicsOutput.RTPAuxRgba0, Vec4(0.5, 0.5, 1.0, 0.0))
        self.region.disableClears()
        if self.isFullscreen():
            self.win.disableClears()
        dr.setCamera(self.camera)
        dr.setActive(1)

        self.buffers.append(buffer)
        self.sizes.append((1, 1, 1))

        return quad
Example 19
    def __init__(self):
        base.disableMouse()
        base.setBackgroundColor(0,0,0)
        camera.setPos(0,-50,0)
        
        # Check video card capabilities.

        if (base.win.getGsg().getSupportsBasicShaders() == 0):
            addTitle("Glow Filter: Video driver reports that shaders are not supported.")
            return

        # Post the instructions
        self.title = addTitle("Panda3D: Tutorial - Glow Filter")
        self.inst1 = addInstructions(0.95,"ESC: Quit")
        self.inst2 = addInstructions(0.90,"Space: Toggle Glow Filter On/Off")
        self.inst3 = addInstructions(0.85,"Enter: Toggle Running/Spinning")
        self.inst4 = addInstructions(0.80,"V: View the render-to-texture results")

        # Create the shader that will determine what parts of the scene will glow
        glowShader=Shader.load(os.path.join(PANDA_SHADER_PATH, \
          "samples/glow/glow_shader.sha"))

        # load our model
        self.tron=Actor()
        self.tron.loadModel("models/samples/glow/tron")
        self.tron.loadAnims({"running":"models/samples/glow/tron_anim"})
        self.tron.reparentTo(render)
        self.interval = self.tron.hprInterval(60,Point3(360,0,0))
        self.interval.loop()
        self.isRunning=False

        #put some lighting on the tron model
        dlight = DirectionalLight('dlight')
        alight = AmbientLight('alight')
        dlnp = render.attachNewNode(dlight) 
        alnp = render.attachNewNode(alight)
        dlight.setColor(Vec4(1.0, 0.7, 0.2, 1))
        alight.setColor(Vec4(0.2, 0.2, 0.2, 1))
        dlnp.setHpr(0, -60, 0) 
        render.setLight(dlnp)
        render.setLight(alnp)

        # create the glow buffer. This buffer renders like a normal scene,
        # except that only the glowing materials should show up nonblack.
        glowBuffer=base.win.makeTextureBuffer("Glow scene", 512, 512)
        glowBuffer.setSort(-3)
        glowBuffer.setClearColor(Vec4(0,0,0,1))

        # We have to attach a camera to the glow buffer. The glow camera
        # must have the same frustum as the main camera. As long as the aspect
        # ratios match, the rest will take care of itself.
        glowCamera=base.makeCamera(glowBuffer, lens=base.cam.node().getLens())

        # Tell the glow camera to use the glow shader
        tempnode = NodePath(PandaNode("temp node"))
        tempnode.setShader(glowShader)
        glowCamera.node().setInitialState(tempnode.getState())

        # set up the pipeline: from glow scene to blur x to blur y to main window.
        blurXBuffer=makeFilterBuffer(glowBuffer,  "Blur X", -2, \
          os.path.join(PANDA_SHADER_PATH, "samples/glow/glow_xblur.sha"))
        blurYBuffer=makeFilterBuffer(blurXBuffer, "Blur Y", -1, \
          os.path.join(PANDA_SHADER_PATH, "samples/glow/glow_yblur.sha"))
        self.finalcard = blurYBuffer.getTextureCard()
        self.finalcard.reparentTo(render2d)
        self.finalcard.setAttrib(ColorBlendAttrib.make(ColorBlendAttrib.MAdd))
        
        # Panda contains a built-in viewer that lets you view the results of
        # your render-to-texture operations.  This code configures the viewer.

        self.accept("v", base.bufferViewer.toggleEnable)
        self.accept("V", base.bufferViewer.toggleEnable)
        base.bufferViewer.setPosition("llcorner")
        base.bufferViewer.setLayout("hline")
        base.bufferViewer.setCardSize(0.652,0)

        # event handling
        self.accept("space", self.toggleGlow)
        self.accept("enter", self.toggleDisplay)
        self.accept("escape", sys.exit, [0])

        self.glowOn = True
Example 20
class World(DirectObject):
    def __init__(self):

        self.keyMap = {
            "left": 0,
            "right": 0,
            "forward": 0,
            "cam-left": 0,
            "cam-right": 0
        }
        base.win.setClearColor(Vec4(0, 0, 0, 1))

        # Post the instructions

        self.title = addTitle(
            "Panda3D Tutorial: Roaming Ralph (Walking on Uneven Terrain)")
        self.inst1 = addInstructions(0.95, "[ESC]: Quit")
        self.inst2 = addInstructions(0.90, "[Left Arrow]: Rotate Ralph Left")
        self.inst3 = addInstructions(0.85, "[Right Arrow]: Rotate Ralph Right")
        self.inst4 = addInstructions(0.80, "[Up Arrow]: Run Ralph Forward")
        self.inst6 = addInstructions(0.70, "[A]: Rotate Camera Left")
        self.inst7 = addInstructions(0.65, "[S]: Rotate Camera Right")

        # Set up the environment
        #
        # This environment model contains collision meshes.  If you look
        # in the egg file, you will see the following:
        #
        #    <Collide> { Polyset keep descend }
        #
        # This tag causes the following mesh to be converted to a collision
        # mesh -- a mesh which is optimized for collision, not rendering.
        # It also keeps the original mesh, so there are now two copies ---
        # one optimized for rendering, one for collisions.

        self.environ = loader.loadModel("models/samples/roaming_ralph/world")
        self.environ.reparentTo(render)
        self.environ.setPos(0, 0, 0)

        # Create the main character, Ralph

        ralphStartPos = self.environ.find("**/start_point").getPos()
        self.ralph = Actor(
            "models/samples/roaming_ralph/ralph", {
                "run": "models/samples/roaming_ralph/ralph_run",
                "walk": "models/samples/roaming_ralph/ralph_walk"
            })
        self.ralph.reparentTo(render)
        self.ralph.setScale(.2)
        self.ralph.setPos(ralphStartPos)

        # Create a floater object.  We use the "floater" as a temporary
        # variable in a variety of calculations.

        self.floater = NodePath(PandaNode("floater"))
        self.floater.reparentTo(render)

        # Accept the control keys for movement and rotation

        self.accept("escape", sys.exit)
        self.accept("arrow_left", self.setKey, ["left", 1])
        self.accept("arrow_right", self.setKey, ["right", 1])
        self.accept("arrow_up", self.setKey, ["forward", 1])
        self.accept("a", self.setKey, ["cam-left", 1])
        self.accept("s", self.setKey, ["cam-right", 1])
        self.accept("arrow_left-up", self.setKey, ["left", 0])
        self.accept("arrow_right-up", self.setKey, ["right", 0])
        self.accept("arrow_up-up", self.setKey, ["forward", 0])
        self.accept("a-up", self.setKey, ["cam-left", 0])
        self.accept("s-up", self.setKey, ["cam-right", 0])

        taskMgr.add(self.move, "moveTask")

        # Game state variables
        self.isMoving = False

        # Set up the camera

        base.disableMouse()
        base.camera.setPos(self.ralph.getX(), self.ralph.getY() + 10, 2)

        # We will detect the height of the terrain by creating a collision
        # ray and casting it downward toward the terrain.  One ray will
        # start above ralph's head, and the other will start above the camera.
        # A ray may hit the terrain, or it may hit a rock or a tree.  If it
        # hits the terrain, we can detect the height.  If it hits anything
        # else, we rule that the move is illegal.

        self.cTrav = CollisionTraverser()

        self.ralphGroundRay = CollisionRay()
        self.ralphGroundRay.setOrigin(0, 0, 1000)
        self.ralphGroundRay.setDirection(0, 0, -1)
        self.ralphGroundCol = CollisionNode('ralphRay')
        self.ralphGroundCol.addSolid(self.ralphGroundRay)
        self.ralphGroundCol.setFromCollideMask(BitMask32.bit(0))
        self.ralphGroundCol.setIntoCollideMask(BitMask32.allOff())
        self.ralphGroundColNp = self.ralph.attachNewNode(self.ralphGroundCol)
        self.ralphGroundHandler = CollisionHandlerQueue()
        self.cTrav.addCollider(self.ralphGroundColNp, self.ralphGroundHandler)

        self.camGroundRay = CollisionRay()
        self.camGroundRay.setOrigin(0, 0, 1000)
        self.camGroundRay.setDirection(0, 0, -1)
        self.camGroundCol = CollisionNode('camRay')
        self.camGroundCol.addSolid(self.camGroundRay)
        self.camGroundCol.setFromCollideMask(BitMask32.bit(0))
        self.camGroundCol.setIntoCollideMask(BitMask32.allOff())
        self.camGroundColNp = base.camera.attachNewNode(self.camGroundCol)
        self.camGroundHandler = CollisionHandlerQueue()
        self.cTrav.addCollider(self.camGroundColNp, self.camGroundHandler)

        # Uncomment these lines to see the collision rays
        #self.ralphGroundColNp.show()
        #self.camGroundColNp.show()

        # Uncomment this line to show a visual representation of the
        # collisions occurring
        #self.cTrav.showCollisions(render)

    #Records the state of the arrow keys
    def setKey(self, key, value):
        self.keyMap[key] = value

    # Accepts arrow keys to move either the player or the menu cursor.
    # Also deals with grid checking and collision detection.
    def move(self, task):

        # Get the time elapsed since last frame. We need this
        # for framerate-independent movement.
        elapsed = globalClock.getDt()

        # If the camera-left key is pressed, move camera left.
        # If the camera-right key is pressed, move camera right.

        base.camera.lookAt(self.ralph)
        if (self.keyMap["cam-left"] != 0):
            base.camera.setX(base.camera, -(elapsed * 20))
        if (self.keyMap["cam-right"] != 0):
            base.camera.setX(base.camera, +(elapsed * 20))

        # save ralph's initial position so that we can restore it,
        # in case he falls off the map or runs into something.

        startpos = self.ralph.getPos()

        # If a move-key is pressed, move ralph in the specified direction.

        if (self.keyMap["left"] != 0):
            self.ralph.setH(self.ralph.getH() + elapsed * 300)
        if (self.keyMap["right"] != 0):
            self.ralph.setH(self.ralph.getH() - elapsed * 300)
        if (self.keyMap["forward"] != 0):
            self.ralph.setY(self.ralph, -(elapsed * 25))

        # If ralph is moving, loop the run animation.
        # If he is standing still, stop the animation.

        if (self.keyMap["forward"] != 0) or (self.keyMap["left"] !=
                                             0) or (self.keyMap["right"] != 0):
            if self.isMoving is False:
                self.ralph.loop("run")
                self.isMoving = True
        else:
            if self.isMoving:
                self.ralph.stop()
                self.ralph.pose("walk", 5)
                self.isMoving = False

        # If the camera is too far from ralph, move it closer.
        # If the camera is too close to ralph, move it farther.

        camvec = self.ralph.getPos() - base.camera.getPos()
        camvec.setZ(0)
        camdist = camvec.length()
        camvec.normalize()
        if (camdist > 10.0):
            base.camera.setPos(base.camera.getPos() + camvec * (camdist - 10))
            camdist = 10.0
        if (camdist < 5.0):
            base.camera.setPos(base.camera.getPos() - camvec * (5 - camdist))
            camdist = 5.0

        # Now check for collisions.

        self.cTrav.traverse(render)

        # Adjust ralph's Z coordinate.  If ralph's ray hit terrain,
        # update his Z. If it hit anything else, or didn't hit anything, put
        # him back where he was last frame.

        entries = []
        for i in range(self.ralphGroundHandler.getNumEntries()):
            entry = self.ralphGroundHandler.getEntry(i)
            entries.append(entry)
        entries.sort(lambda x, y: cmp(
            y.getSurfacePoint(render).getZ(),
            x.getSurfacePoint(render).getZ()))
        if (len(entries) > 0) and (entries[0].getIntoNode().getName()
                                   == "terrain"):
            self.ralph.setZ(entries[0].getSurfacePoint(render).getZ())
        else:
            self.ralph.setPos(startpos)

        # Keep the camera at one foot above the terrain,
        # or two feet above ralph, whichever is greater.

        entries = []
        for i in range(self.camGroundHandler.getNumEntries()):
            entry = self.camGroundHandler.getEntry(i)
            entries.append(entry)
        entries.sort(lambda x, y: cmp(
            y.getSurfacePoint(render).getZ(),
            x.getSurfacePoint(render).getZ()))
        if (len(entries) > 0) and (entries[0].getIntoNode().getName()
                                   == "terrain"):
            base.camera.setZ(entries[0].getSurfacePoint(render).getZ() + 1.0)
        if (base.camera.getZ() < self.ralph.getZ() + 2.0):
            base.camera.setZ(self.ralph.getZ() + 2.0)

        # The camera should look in ralph's direction,
        # but it should also try to stay horizontal, so look at
        # a floater which hovers above ralph's head.

        self.floater.setPos(self.ralph.getPos())
        self.floater.setZ(self.ralph.getZ() + 2.0)
        base.camera.lookAt(self.floater)

        return Task.cont
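
One note on the two sorts above: the cmp-based comparator is Python 2 only. Under Python 3 (and equivalently in Python 2) the same highest-surface-first ordering can be expressed with a key function, for example:

        entries.sort(key=lambda e: e.getSurfacePoint(render).getZ(), reverse=True)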
Example 21
    def __init__(self):
        base.disableMouse()
        camera.setPos(0, -50, 0)
        
        # Check video card capabilities.

        if (base.win.getGsg().getSupportsBasicShaders() == 0):
            addTitle("Toon Shader: Video driver reports that shaders are not supported.")
            return

        # Post the instructions.
        self.title = addTitle("Panda3D: Tutorial - Toon Shading with Normals-Based Inking")
        self.inst1 = addInstructions(0.95,"ESC: Quit")
        self.inst2 = addInstructions(0.90,"Up/Down: Increase/Decrease Line Thickness")
        self.inst3 = addInstructions(0.85,"Left/Right: Decrease/Increase Line Darkness")
        self.inst4 = addInstructions(0.80,"V: View the render-to-texture results")

        # This shader's job is to render the model with discrete lighting
        # levels.  The lighting calculations built into the shader assume
        # a single nonattenuating point light.

        tempnode = NodePath(PandaNode("temp node"))
        tempnode.setShader(Shader.load(os.path.join(PANDA_SHADER_PATH, \
          "samples/cartoon/cartoon_lighting.sha")))
        base.cam.node().setInitialState(tempnode.getState())
        
        # This is the object that represents the single "light", as far as
        # the shader is concerned.  It's not a real Panda3D LightNode, but
        # the shader doesn't care about that.

        light = render.attachNewNode("light")
        light.setPos(30,-50,0)
                
        # this call puts the light's nodepath into the render state.
        # this enables the shader to access this light by name.

        render.setShaderInput("light", light)

        # The "normals buffer" will contain a picture of the model colorized
        # so that the color of the model is a representation of the model's
        # normal at that point.

        normalsBuffer=base.win.makeTextureBuffer("normalsBuffer", 0, 0)
        normalsBuffer.setClearColor(Vec4(0.5,0.5,0.5,1))
        self.normalsBuffer=normalsBuffer
        normalsCamera=base.makeCamera(normalsBuffer, lens=base.cam.node().getLens())
        normalsCamera.node().setScene(render)
        tempnode = NodePath(PandaNode("temp node"))
        tempnode.setShader(Shader.load(os.path.join(PANDA_SHADER_PATH, \
          "samples/cartoon/cartoon_normal.sha")))
        normalsCamera.node().setInitialState(tempnode.getState())

        #what we actually do to put edges on screen is apply them as a texture to 
        #a transparent screen-fitted card

        drawnScene=normalsBuffer.getTextureCard()
        drawnScene.setTransparency(1)
        drawnScene.setColor(1,1,1,0)
        drawnScene.reparentTo(render2d)
        self.drawnScene = drawnScene

        # this shader accepts, as input, the picture from the normals buffer.
        # it compares each adjacent pixel, looking for discontinuities.
        # wherever a discontinuity exists, it emits black ink.
                
        self.separation = 0.001
        self.cutoff = 0.3
        inkGen=Shader.load(os.path.join(PANDA_SHADER_PATH, \
          "samples/cartoon/cartoon_ink.sha"))
        drawnScene.setShader(inkGen)
        drawnScene.setShaderInput("separation", Vec4(self.separation,0,self.separation,0))
        drawnScene.setShaderInput("cutoff", Vec4(self.cutoff,self.cutoff,self.cutoff,self.cutoff))
        
        # Panda contains a built-in viewer that lets you view the results of
        # your render-to-texture operations.  This code configures the viewer.

        self.accept("v", base.bufferViewer.toggleEnable)
        self.accept("V", base.bufferViewer.toggleEnable)
        base.bufferViewer.setPosition("llcorner")

        # Load a dragon model and animate it.

        self.character=Actor()
        self.character.loadModel('models/samples/cartoon/nik_dragon')
        self.character.reparentTo(render)
        self.character.loadAnims({'win': 'models/samples/cartoon/nik_dragon'})
        self.character.loop('win')
        self.character.hprInterval(15, Point3(360, 0,0)).loop()

        # these allow you to change cartooning parameters in realtime

        self.accept("escape", sys.exit, [0])
        self.accept("arrow_up", self.increaseSeparation)
        self.accept("arrow_down", self.decreaseSeparation)
        self.accept("arrow_left", self.increaseCutoff)
        self.accept("arrow_right", self.decreaseCutoff)
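
The four arrow-key handlers wired above are not included in this excerpt; a plausible sketch, assuming they simply rescale the two shader inputs the ink shader reads (the scale factors mirror the ones used by the other demos in this file):

    def increaseSeparation(self):
        self.separation = self.separation * 1.11111111
        self.drawnScene.setShaderInput(
            "separation", Vec4(self.separation, 0, self.separation, 0))

    def decreaseSeparation(self):
        self.separation = self.separation * 0.90000000
        self.drawnScene.setShaderInput(
            "separation", Vec4(self.separation, 0, self.separation, 0))

    def increaseCutoff(self):
        self.cutoff = self.cutoff * 1.11111111
        self.drawnScene.setShaderInput(
            "cutoff", Vec4(self.cutoff, self.cutoff, self.cutoff, self.cutoff))

    def decreaseCutoff(self):
        self.cutoff = self.cutoff * 0.90000000
        self.drawnScene.setShaderInput(
            "cutoff", Vec4(self.cutoff, self.cutoff, self.cutoff, self.cutoff))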
Example 22
    def __init__(self):

        # Preliminary capabilities check.

        if (base.win.getGsg().getSupportsBasicShaders() == 0):
            self.t = addTitle(
                "Firefly Demo: Video driver reports that shaders are not supported."
            )
            return
        if (base.win.getGsg().getSupportsDepthTexture() == 0):
            self.t = addTitle(
                "Firefly Demo: Video driver reports that depth textures are not supported."
            )
            return

        # This algorithm uses two offscreen buffers, one of which has
        # an auxiliary bitplane, and the offscreen buffers share a single
        # depth buffer.  This is a heck of a complicated buffer setup.

        self.modelbuffer = self.makeFBO("model buffer", 1)
        self.lightbuffer = self.makeFBO("light buffer", 0)

        # Creation of a high-powered buffer can fail, if the graphics card
        # doesn't support the necessary OpenGL extensions.

        if (self.modelbuffer == None) or (self.lightbuffer == None):
            self.t = addTitle(
                "Firefly Demo: Video driver does not support multiple render targets"
            )
            return

        # Create four render textures: depth, normal, albedo, and final.
        # attach them to the various bitplanes of the offscreen buffers.

        self.texDepth = Texture()
        self.texDepth.setFormat(Texture.FDepthStencil)
        self.texAlbedo = Texture()
        self.texNormal = Texture()
        self.texFinal = Texture()

        self.modelbuffer.addRenderTexture(self.texDepth,
                                          GraphicsOutput.RTMBindOrCopy,
                                          GraphicsOutput.RTPDepthStencil)
        self.modelbuffer.addRenderTexture(self.texAlbedo,
                                          GraphicsOutput.RTMBindOrCopy,
                                          GraphicsOutput.RTPColor)
        self.modelbuffer.addRenderTexture(self.texNormal,
                                          GraphicsOutput.RTMBindOrCopy,
                                          GraphicsOutput.RTPAuxRgba0)

        self.lightbuffer.addRenderTexture(self.texFinal,
                                          GraphicsOutput.RTMBindOrCopy,
                                          GraphicsOutput.RTPColor)

        # Set the near and far clipping planes.

        base.cam.node().getLens().setNear(50.0)
        base.cam.node().getLens().setFar(500.0)
        lens = base.cam.node().getLens()

        # This algorithm uses three cameras: one to render the models into the
        # model buffer, one to render the lights into the light buffer, and
        # one to render "plain" (non-deferred-shaded) stuff into the light
        # buffer.  Each camera has a bitmask to identify it.

        self.modelMask = 1
        self.lightMask = 2
        self.plainMask = 4

        self.modelcam = base.makeCamera(self.modelbuffer,
                                        lens=lens,
                                        scene=render,
                                        mask=self.modelMask)
        self.lightcam = base.makeCamera(self.lightbuffer,
                                        lens=lens,
                                        scene=render,
                                        mask=self.lightMask)
        self.plaincam = base.makeCamera(self.lightbuffer,
                                        lens=lens,
                                        scene=render,
                                        mask=self.plainMask)

        # Panda's main camera is not used.

        base.cam.node().setActive(0)

        # Take explicit control over the order in which the three
        # buffers are rendered.

        self.modelbuffer.setSort(1)
        self.lightbuffer.setSort(2)
        base.win.setSort(3)

        # Within the light buffer, control the order of the two cams.

        self.lightcam.node().getDisplayRegion(0).setSort(1)
        self.plaincam.node().getDisplayRegion(0).setSort(2)

        # By default, panda usually clears the screen before every
        # camera and before every window.  Tell it not to do that.
        # Then, tell it specifically when to clear and what to clear.

        self.modelcam.node().getDisplayRegion(0).disableClears()
        self.lightcam.node().getDisplayRegion(0).disableClears()
        self.plaincam.node().getDisplayRegion(0).disableClears()
        base.cam.node().getDisplayRegion(0).disableClears()
        base.cam2d.node().getDisplayRegion(0).disableClears()
        self.modelbuffer.disableClears()
        base.win.disableClears()

        self.modelbuffer.setClearColorActive(1)
        self.modelbuffer.setClearDepthActive(1)
        self.lightbuffer.setClearColorActive(1)
        self.lightbuffer.setClearColor(Vec4(0, 0, 0, 1))

        # Miscellaneous stuff.

        base.disableMouse()
        base.camera.setPos(-9.112, -211.077, 46.951)
        base.camera.setHpr(0, -7.5, 2.4)
        base.setBackgroundColor(Vec4(0, 0, 0, 0))
        random.seed()

        # Calculate the projection parameters for the final shader.
        # The math here is too complex to explain in an inline comment;
        # a full explanation is in the HTML intro.

        proj = base.cam.node().getLens().getProjectionMat()
        proj_x = 0.5 * proj.getCell(3, 2) / proj.getCell(0, 0)
        proj_y = 0.5 * proj.getCell(3, 2)
        proj_z = 0.5 * proj.getCell(3, 2) / proj.getCell(2, 1)
        proj_w = -0.5 - 0.5 * proj.getCell(1, 2)

        # Configure the render state of the model camera.

        tempnode = NodePath(PandaNode("temp node"))
        tempnode.setAttrib(
            AlphaTestAttrib.make(RenderAttrib.MGreaterEqual, 0.5))
        tempnode.setShader(Shader.load(os.path.join(PANDA_SHADER_PATH, \
          "samples/fireflies/fireflies_model.sha")))
        tempnode.setAttrib(DepthTestAttrib.make(RenderAttrib.MLessEqual))
        self.modelcam.node().setInitialState(tempnode.getState())

        # Configure the render state of the light camera.

        tempnode = NodePath(PandaNode("temp node"))
        tempnode.setShader(Shader.load(os.path.join(PANDA_SHADER_PATH, \
          "samples/fireflies/fireflies_lighting.sha")))
        tempnode.setShaderInput("texnormal", self.texNormal)
        tempnode.setShaderInput("texalbedo", self.texAlbedo)
        tempnode.setShaderInput("texdepth", self.texDepth)
        tempnode.setShaderInput("proj", Vec4(proj_x, proj_y, proj_z, proj_w))
        tempnode.setAttrib(
            ColorBlendAttrib.make(ColorBlendAttrib.MAdd, ColorBlendAttrib.OOne,
                                  ColorBlendAttrib.OOne))
        tempnode.setAttrib(
            CullFaceAttrib.make(CullFaceAttrib.MCullCounterClockwise))
        # The next line causes problems on Linux.
        #tempnode.setAttrib(DepthTestAttrib.make(RenderAttrib.MGreaterEqual))
        tempnode.setAttrib(DepthWriteAttrib.make(DepthWriteAttrib.MOff))
        self.lightcam.node().setInitialState(tempnode.getState())

        # Configure the render state of the plain camera.

        rs = RenderState.makeEmpty()
        self.plaincam.node().setInitialState(rs)

        # Clear any render attribs on the root node. This is necessary
        # because by default, panda assigns some attribs to the root
        # node.  These default attribs will override the
        # carefully-configured render attribs that we just attached
        # to the cameras.  The simplest solution is to just clear
        # them all out.

        render.setState(RenderState.makeEmpty())

        # My artist created a model in which some of the polygons
        # don't have textures.  This confuses the shader I wrote.
        # This little hack guarantees that everything has a texture.

        white = loader.loadTexture("models/samples/fireflies/white.jpg")
        render.setTexture(white, 0)

        # Create two subroots, to help speed cull traversal.

        self.lightroot = NodePath(PandaNode("lightroot"))
        self.lightroot.reparentTo(render)
        self.modelroot = NodePath(PandaNode("modelroot"))
        self.modelroot.reparentTo(render)
        self.lightroot.hide(BitMask32(self.modelMask))
        self.modelroot.hide(BitMask32(self.lightMask))
        self.modelroot.hide(BitMask32(self.plainMask))

        # Load the model of a forest. Make it visible to the model camera.

        self.forest = NodePath(PandaNode("Forest Root"))
        self.forest.reparentTo(render)

        loader.loadModel( \
          "models/samples/fireflies/background").reparentTo(self.forest)
        loader.loadModel( \
          "models/samples/fireflies/foliage01").reparentTo(self.forest)
        loader.loadModel( \
          "models/samples/fireflies/foliage02").reparentTo(self.forest)
        loader.loadModel( \
          "models/samples/fireflies/foliage03").reparentTo(self.forest)
        loader.loadModel( \
          "models/samples/fireflies/foliage04").reparentTo(self.forest)
        loader.loadModel( \
          "models/samples/fireflies/foliage05").reparentTo(self.forest)
        loader.loadModel( \
          "models/samples/fireflies/foliage06").reparentTo(self.forest)
        loader.loadModel( \
          "models/samples/fireflies/foliage07").reparentTo(self.forest)
        loader.loadModel( \
          "models/samples/fireflies/foliage08").reparentTo(self.forest)
        loader.loadModel( \
          "models/samples/fireflies/foliage09").reparentTo(self.forest)
        self.forest.hide(BitMask32(self.lightMask | self.plainMask))

        # Cause the final results to be rendered into the main window on a card.

        cm = CardMaker("card")
        cm.setFrameFullscreenQuad()
        self.card = render2d.attachNewNode(cm.generate())
        self.card.setTexture(self.texFinal)

        # Post the instructions.

        self.title = addTitle(
            "Panda3D: Tutorial - Fireflies using Deferred Shading")
        self.inst1 = addInstructions(0.95, "ESC: Quit")
        self.inst2 = addInstructions(
            0.90, "Up/Down: More / Fewer Fireflies (Count: unknown)")
        self.inst3 = addInstructions(
            0.85, "Right/Left: Bigger / Smaller Fireflies (Radius: unknown)")
        self.inst4 = addInstructions(0.80,
                                     "V: View the render-to-texture results")

        # Panda contains a built-in viewer that lets you view the results of
        # your render-to-texture operations.  This code configures the viewer.

        base.bufferViewer.setPosition("llcorner")
        base.bufferViewer.setCardSize(0, 0.40)
        base.bufferViewer.setLayout("vline")
        self.toggleCards()
        self.toggleCards()

        # Firefly parameters

        self.fireflies = []
        self.sequences = []
        self.scaleseqs = []
        self.glowspheres = []
        self.fireflysize = 1.0
        self.spheremodel = loader.loadModel("models/misc/sphere.flt")
        self.setFireflySize(25.0)
        while (len(self.fireflies) < 5):
            self.addFirefly()
        self.updateReadout()

        # these allow you to change parameters in realtime

        self.accept("escape", sys.exit, [0])
        self.accept("arrow_up", self.incFireflyCount, [1.1111111])
        self.accept("arrow_down", self.decFireflyCount, [0.9000000])
        self.accept("arrow_right", self.setFireflySize, [1.1111111])
        self.accept("arrow_left", self.setFireflySize, [0.9000000])
        self.accept("v", self.toggleCards)
        self.accept("V", self.toggleCards)

        self.nextadd = 0
        taskMgr.add(self.spawnTask, "spawner")
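
This excerpt ends before the class is instantiated; these samples are normally driven by the classic Panda3D boilerplate, sketched here using the FireflyDemo class name that appears in the later copy of this demo:

import direct.directbase.DirectStart   # creates base, render, render2d, taskMgr, ...
demo = FireflyDemo()
run()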
Example 23
    def __init__(self):
        
        # create a texture into which we can copy the main window.

        self.tex = Texture()
        self.tex.setMinfilter(Texture.FTLinear)
        base.win.addRenderTexture(self.tex, GraphicsOutput.RTMTriggeredCopyTexture)
        
        # Create another 2D camera. Tell it to render before the main camera.

        self.backcam = base.makeCamera2d(base.win, sort=-10)
        self.background = NodePath("background")
        self.backcam.reparentTo(self.background)
        self.background.setDepthTest(0)
        self.background.setDepthWrite(0)
        self.backcam.node().getDisplayRegion(0).setClearDepthActive(0)

        # Obtain two texture cards. One renders before the dragon, the other after.
        self.bcard = base.win.getTextureCard()
        self.bcard.reparentTo(self.background)
        self.bcard.setTransparency(1)
        self.fcard = base.win.getTextureCard()
        self.fcard.reparentTo(render2d)
        self.fcard.setTransparency(1)

        # Initialize one of the nice effects.
        self.chooseEffectGhost()
        
        # Add the task that initiates the screenshots.
        taskMgr.add(self.takeSnapShot, "takeSnapShot")

        # Create some black squares on top of which we will
        # place the instructions.
        blackmaker = CardMaker("blackmaker")
        blackmaker.setColor(0,0,0,1)
        blackmaker.setFrame(-1.00, -0.50, 0.65, 1.00)
        instcard = NodePath(blackmaker.generate())
        instcard.reparentTo(render2d)
        blackmaker.setFrame(-0.5, 0.5, -1.00, -0.85)
        titlecard = NodePath(blackmaker.generate())
        titlecard.reparentTo(render2d)

        # Panda does its best to hide the differences between DirectX and
        # OpenGL.  But there are a few differences that it cannot hide.
        # One such difference is that when OpenGL copies from a
        # visible window to a texture, it gets it right-side-up.  When
        # DirectX does it, it gets it upside-down.  There is nothing panda
        # can do to compensate except to expose a flag and let the 
        # application programmer deal with it.  You should only do this
        # in the rare event that you're copying from a visible window
        # to a texture.

        if (base.win.getGsg().getCopyTextureInverted()):
            print "Copy texture is inverted."
            self.bcard.setScale(1,1,-1)
            self.fcard.setScale(1,1,-1)

        # Put up the instructions
        title = OnscreenText(text="Panda3D: Tutorial - Motion Trails",
                             style=1, fg=(1,1,1,1), font = font,
                             pos=(0,-0.95), scale = .07)
        
        instr0 = addInstructions(0.95, "Press ESC to exit")
        instr1 = addInstructions(0.90, "Press 1: Ghost effect")
        instr2 = addInstructions(0.85, "Press 2: PaintBrush effect")
        instr3 = addInstructions(0.80, "Press 3: Double Vision effect")
        instr4 = addInstructions(0.75, "Press 4: Wings of Blue effect")
        instr5 = addInstructions(0.70, "Press 5: Whirlpool effect")
        
        # enable the key events
        self.accept("escape", sys.exit, [0])
        self.accept("1", self.chooseEffectGhost)
        self.accept("2", self.chooseEffectPaintBrush)
        self.accept("3", self.chooseEffectDoubleVision)
        self.accept("4", self.chooseEffectWingsOfBlue)
        self.accept("5", self.chooseEffectWhirlpool)
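
The takeSnapShot task and the chooseEffect* handlers referenced above fall outside this excerpt; a minimal sketch of the snapshot trigger, assuming the class keeps nextclick and clickrate attributes:

    def takeSnapShot(self, task):
        # Sketch: ask the window to copy itself into self.tex (registered with
        # RTMTriggeredCopyTexture above) on the next frame.
        if task.time > self.nextclick:
            self.nextclick += 1.0 / self.clickrate   # assumed attributes
            if self.nextclick < task.time:
                self.nextclick = task.time
            base.win.triggerCopy()
        return Task.cont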
Example 24
class FireflyDemo(DirectObject):
    def __init__(self):

        # Preliminary capabilities check.

        if (base.win.getGsg().getSupportsBasicShaders()==0):
            self.t=addTitle("Firefly Demo: Video driver reports that shaders are not supported.")
            return
        if (base.win.getGsg().getSupportsDepthTexture()==0):
            self.t=addTitle("Firefly Demo: Video driver reports that depth textures are not supported.")
            return
        
        # This algorithm uses two offscreen buffers, one of which has
        # an auxiliary bitplane, and the offscreen buffers share a single
        # depth buffer.  This is a heck of a complicated buffer setup.

        self.modelbuffer = self.makeFBO("model buffer",1)
        self.lightbuffer = self.makeFBO("light buffer",0)
        
        # Creation of a high-powered buffer can fail, if the graphics card
        # doesn't support the necessary OpenGL extensions.

        if (self.modelbuffer == None) or (self.lightbuffer == None):
            self.t=addTitle("Firefly Demo: Video driver does not support multiple render targets")
            return

        # Create four render textures: depth, normal, albedo, and final.
        # attach them to the various bitplanes of the offscreen buffers.

        self.texDepth = Texture()
        self.texDepth.setFormat(Texture.FDepthStencil)
        self.texAlbedo = Texture()
        self.texNormal = Texture()
        self.texFinal = Texture()

        self.modelbuffer.addRenderTexture(self.texDepth,  GraphicsOutput.RTMBindOrCopy, GraphicsOutput.RTPDepthStencil)
        self.modelbuffer.addRenderTexture(self.texAlbedo, GraphicsOutput.RTMBindOrCopy, GraphicsOutput.RTPColor)
        self.modelbuffer.addRenderTexture(self.texNormal, GraphicsOutput.RTMBindOrCopy, GraphicsOutput.RTPAuxRgba0)

        self.lightbuffer.addRenderTexture(self.texFinal,  GraphicsOutput.RTMBindOrCopy, GraphicsOutput.RTPColor)

        # Set the near and far clipping planes.
        
        base.cam.node().getLens().setNear(50.0)
        base.cam.node().getLens().setFar(500.0)
        lens = base.cam.node().getLens()

        # This algorithm uses three cameras: one to render the models into the
        # model buffer, one to render the lights into the light buffer, and
        # one to render "plain" (non-deferred-shaded) stuff into the light
        # buffer.  Each camera has a bitmask to identify it.

        self.modelMask = 1
        self.lightMask = 2
        self.plainMask = 4
        
        self.modelcam=base.makeCamera(self.modelbuffer, lens=lens, scene=render, mask=self.modelMask)
        self.lightcam=base.makeCamera(self.lightbuffer, lens=lens, scene=render, mask=self.lightMask)
        self.plaincam=base.makeCamera(self.lightbuffer, lens=lens, scene=render, mask=self.plainMask)

        # Panda's main camera is not used.
        
        base.cam.node().setActive(0)
        
        # Take explicit control over the order in which the three
        # buffers are rendered.

        self.modelbuffer.setSort(1)
        self.lightbuffer.setSort(2)
        base.win.setSort(3)

        # Within the light buffer, control the order of the two cams.

        self.lightcam.node().getDisplayRegion(0).setSort(1)
        self.plaincam.node().getDisplayRegion(0).setSort(2)

        # By default, panda usually clears the screen before every
        # camera and before every window.  Tell it not to do that.
        # Then, tell it specifically when to clear and what to clear.

        self.modelcam.node().getDisplayRegion(0).disableClears()
        self.lightcam.node().getDisplayRegion(0).disableClears()
        self.plaincam.node().getDisplayRegion(0).disableClears()
        base.cam.node().getDisplayRegion(0).disableClears()
        base.cam2d.node().getDisplayRegion(0).disableClears()
        self.modelbuffer.disableClears()
        base.win.disableClears()

        self.modelbuffer.setClearColorActive(1)
        self.modelbuffer.setClearDepthActive(1)
        self.lightbuffer.setClearColorActive(1)
        self.lightbuffer.setClearColor(Vec4(0,0,0,1))

        # Miscellaneous stuff.
        
        base.disableMouse()
        base.camera.setPos(-9.112,-211.077,46.951)
        base.camera.setHpr(0, -7.5, 2.4)
        base.setBackgroundColor(Vec4(0,0,0,0))
        random.seed()

        # Calculate the projection parameters for the final shader.
        # The math here is too complex to explain in an inline comment;
        # a full explanation is in the HTML intro.

        proj = base.cam.node().getLens().getProjectionMat()
        proj_x = 0.5 * proj.getCell(3,2) / proj.getCell(0,0)
        proj_y = 0.5 * proj.getCell(3,2)
        proj_z = 0.5 * proj.getCell(3,2) / proj.getCell(2,1)
        proj_w = -0.5 - 0.5*proj.getCell(1,2)
        
        # Configure the render state of the model camera.
        
        tempnode = NodePath(PandaNode("temp node"))
        tempnode.setAttrib(AlphaTestAttrib.make(RenderAttrib.MGreaterEqual, 0.5))
        tempnode.setShader(Shader.load(os.path.join(PANDA_SHADER_PATH, \
          "samples/fireflies/fireflies_model.sha")))
        tempnode.setAttrib(DepthTestAttrib.make(RenderAttrib.MLessEqual))
        self.modelcam.node().setInitialState(tempnode.getState())
        
        # Configure the render state of the light camera.

        tempnode = NodePath(PandaNode("temp node"))
        tempnode.setShader(Shader.load(os.path.join(PANDA_SHADER_PATH, \
          "samples/fireflies/fireflies_lighting.sha")))
        tempnode.setShaderInput("texnormal",self.texNormal)
        tempnode.setShaderInput("texalbedo",self.texAlbedo)
        tempnode.setShaderInput("texdepth",self.texDepth)
        tempnode.setShaderInput("proj",Vec4(proj_x,proj_y,proj_z,proj_w))
        tempnode.setAttrib(ColorBlendAttrib.make(ColorBlendAttrib.MAdd, ColorBlendAttrib.OOne, ColorBlendAttrib.OOne))
        tempnode.setAttrib(CullFaceAttrib.make(CullFaceAttrib.MCullCounterClockwise))
        # The next line causes problems on Linux.
        #tempnode.setAttrib(DepthTestAttrib.make(RenderAttrib.MGreaterEqual))
        tempnode.setAttrib(DepthWriteAttrib.make(DepthWriteAttrib.MOff))
        self.lightcam.node().setInitialState(tempnode.getState())

        # Configure the render state of the plain camera.
        
        rs = RenderState.makeEmpty()
        self.plaincam.node().setInitialState(rs)
        
        # Clear any render attribs on the root node. This is necessary
        # because by default, panda assigns some attribs to the root
        # node.  These default attribs will override the
        # carefully-configured render attribs that we just attached
        # to the cameras.  The simplest solution is to just clear
        # them all out.

        render.setState(RenderState.makeEmpty())

        # My artist created a model in which some of the polygons
        # don't have textures.  This confuses the shader I wrote. 
        # This little hack guarantees that everything has a texture.
        
        white = loader.loadTexture("models/samples/fireflies/white.jpg")
        render.setTexture(white,0)

        # Create two subroots, to help speed cull traversal.
        
        self.lightroot = NodePath(PandaNode("lightroot"))
        self.lightroot.reparentTo(render)
        self.modelroot = NodePath(PandaNode("modelroot"))
        self.modelroot.reparentTo(render) 
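        # Hide each subroot from the cameras that should not draw it:
        # nodes under lightroot are not drawn by the model camera, and
        # nodes under modelroot are drawn only by the model camera.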
        self.lightroot.hide(BitMask32(self.modelMask))
        self.modelroot.hide(BitMask32(self.lightMask))
        self.modelroot.hide(BitMask32(self.plainMask))

        # Load the model of a forest. Make it visible to the model camera.

        self.forest=NodePath(PandaNode("Forest Root"))
        self.forest.reparentTo(render)

        for model in ("background", "foliage01", "foliage02", "foliage03",
                      "foliage04", "foliage05", "foliage06", "foliage07",
                      "foliage08", "foliage09"):
            loader.loadModel(
              "models/samples/fireflies/" + model).reparentTo(self.forest)
        self.forest.hide(BitMask32(self.lightMask | self.plainMask))

        # Cause the final results to be rendered into the main window on a card.
        
        cm = CardMaker("card")
        cm.setFrameFullscreenQuad()
        self.card = render2d.attachNewNode(cm.generate())
        self.card.setTexture(self.texFinal)
        
        # Post the instructions.

        self.title = addTitle("Panda3D: Tutorial - Fireflies using Deferred Shading")
        self.inst1 = addInstructions(0.95,"ESC: Quit")
        self.inst2 = addInstructions(0.90,"Up/Down: More / Fewer Fireflies (Count: unknown)")
        self.inst3 = addInstructions(0.85,"Right/Left: Bigger / Smaller Fireflies (Radius: unknown)")
        self.inst4 = addInstructions(0.80,"V: View the render-to-texture results")

        # Panda contains a built-in viewer that lets you view the results of
        # your render-to-texture operations.  This code configures the viewer.

        base.bufferViewer.setPosition("llcorner")
        base.bufferViewer.setCardSize(0,0.40)
        base.bufferViewer.setLayout("vline")
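        # Calling toggleCards twice leaves the buffer viewer hidden but runs
        # its clear logic, so the model buffer starts with its (unnecessary)
        # color clear turned off.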
        self.toggleCards()
        self.toggleCards()

        # Firefly parameters

        self.fireflies = []
        self.sequences = []
        self.scaleseqs = []
        self.glowspheres = []
        self.fireflysize = 1.0
        self.spheremodel = loader.loadModel("models/misc/sphere.flt")
        self.setFireflySize(25.0)
        while (len(self.fireflies)<5): self.addFirefly()
        self.updateReadout()

        # these allow you to change parameters in realtime

        self.accept("escape", sys.exit, [0])
        self.accept("arrow_up",   self.incFireflyCount, [1.1111111])
        self.accept("arrow_down", self.decFireflyCount, [0.9000000])
        self.accept("arrow_right", self.setFireflySize, [1.1111111])
        self.accept("arrow_left",  self.setFireflySize, [0.9000000])
        self.accept("v", self.toggleCards)
        self.accept("V", self.toggleCards)

        self.nextadd = 0
        taskMgr.add(self.spawnTask, "spawner")

    def makeFBO(self, name, auxrgba):
        # This routine creates an offscreen buffer.  All the complicated
        # parameters are basically demanding capabilities from the offscreen
        # buffer - we demand that it be able to render to texture on every
        # bitplane, that it can support aux bitplanes, that it track
        # the size of the host window, that it can render to texture
        # cumulatively, and so forth.
        winprops = WindowProperties()
        props = FrameBufferProperties()
        props.setRgbColor(1)
        props.setAlphaBits(1)
        props.setDepthBits(1)
        props.setAuxRgba(auxrgba)
        return base.graphicsEngine.makeOutput(
             base.pipe, "model buffer", -2,
             props, winprops,
             GraphicsPipe.BFSizeTrackHost | GraphicsPipe.BFCanBindEvery | 
             GraphicsPipe.BFRttCumulative | GraphicsPipe.BFRefuseWindow,
             base.win.getGsg(), base.win)

    def addFirefly(self):
        pos1 = Point3(random.uniform(-50, 50), random.uniform(-100, 150), random.uniform(-10,80))
        dir = Vec3(random.uniform(-1, 1), random.uniform(-1, 1), random.uniform(-1, 1))
        dir.normalize()
        pos2 = pos1 + (dir*20)
        fly = self.lightroot.attachNewNode(PandaNode("fly"))
        glow = fly.attachNewNode(PandaNode("glow"))
        dot  = fly.attachNewNode(PandaNode("dot"))
        color_r = random.uniform(0.7,1.0)
        color_g = 1.0
        color_b = 0.8
        fly.setShaderInput("lightcolor", color_r, color_g, color_b, 1.0)
        int1 = fly.posInterval(random.uniform(7,12), pos1, pos2)
        int2 = fly.posInterval(random.uniform(7,12), pos2, pos1)
        si1 = fly.scaleInterval(random.uniform(0.8,1.5), Point3(0.2,0.2,0.2), Point3(0.2,0.2,0.2))
        si2 = fly.scaleInterval(random.uniform(1.5,0.8), Point3(1.0,1.0,1.0), Point3(0.2,0.2,0.2))
        si3 = fly.scaleInterval(random.uniform(1.0,2.0), Point3(0.2,0.2,0.2), Point3(1.0,1.0,1.0))
        siseq = Sequence(si1, si2, si3)
        siseq.loop()
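        # Start each firefly's pulse at a random point in its cycle so the
        # fireflies do not all blink in unison.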
        siseq.setT(random.uniform(0,1000))
        seq = Sequence(int1, int2)
        seq.loop()
        self.spheremodel.instanceTo(glow)
        self.spheremodel.instanceTo(dot)
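        # 'glow' is the large light-volume sphere drawn only by the light
        # camera; 'dot' is the small visible sphere drawn only by the
        # plain camera.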
        glow.setScale(self.fireflysize*1.1)
        glow.hide(BitMask32(self.modelMask | self.plainMask))
        dot.setScale(0.6)
        dot.hide(BitMask32(self.modelMask | self.lightMask))
        dot.setColor(color_r, color_g, color_b, 1.0)
        self.fireflies.append(fly)
        self.sequences.append(seq)
        self.glowspheres.append(glow)
        self.scaleseqs.append(siseq)

    def updateReadout(self):
        self.inst2.destroy()
        self.inst2 = addInstructions(0.90,"Up/Down: More / Fewer Fireflies (Currently: "+str(len(self.fireflies))+")")
        self.inst3.destroy()
        self.inst3 = addInstructions(0.85,"Right/Left: Bigger / Smaller Fireflies (Radius: "+str(self.fireflysize)+" ft)")

    def toggleCards(self):
        base.bufferViewer.toggleEnable()
        # When the cards are not visible, I also disable the color clear.
        # This color-clear is not actually necessary; the depth-clear is
        # sufficient for the purposes of the algorithm.
        if (base.bufferViewer.isEnabled()):
            self.modelbuffer.setClearColorActive(1)
        else:
            self.modelbuffer.setClearColorActive(0)

    def incFireflyCount(self, scale):
        n = int((len(self.fireflies) * scale) + 1)
        while (n > len(self.fireflies)):
            self.addFirefly()
        self.updateReadout()

    def decFireflyCount(self, scale):
        n = int(len(self.fireflies) * scale)
        if (n < 1): n=1
        while (len(self.fireflies) > n):
            self.glowspheres.pop()
            self.sequences.pop().finish()
            self.scaleseqs.pop().finish()
            self.fireflies.pop().removeNode()
        self.updateReadout()

    def setFireflySize(self, n):
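        # Note that 'n' is a scale factor applied to the current size, not
        # an absolute radius; the arrow keys pass factors close to 1.0.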
        n = n * self.fireflysize
        self.fireflysize = n
        for x in self.glowspheres:
            x.setScale(self.fireflysize * 1.1)
        self.updateReadout()

    def spawnTask(self, task):
        if task.time > self.nextadd:
            self.nextadd = task.time + 1.0
            if (len(self.fireflies) < 300):
                self.incFireflyCount(1.03)
        return Task.cont
Example n. 25
0
    def __init__(self):
        base.disableMouse()
        base.cam.node().getLens().setNear(10.0)
        base.cam.node().getLens().setFar(200.0)
        camera.setPos(0, -50, 0)

        # Check video card capabilities.

        if (base.win.getGsg().getSupportsBasicShaders() == 0):
            addTitle(
                "Toon Shader: Video driver reports that shaders are not supported."
            )
            return

        # Enable a 'light ramp' - this discretizes the lighting,
        # which is half of what makes a model look like a cartoon.
        # Light ramps only work if shader generation is enabled,
        # so we call 'setShaderAuto'.
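        # With makeSingleThreshold(0.5, 0.4), roughly speaking, diffuse
        # lighting below the 0.5 threshold goes dark and lighting above it
        # is held at a flat 0.4, producing the two-tone cartoon look.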

        tempnode = NodePath(PandaNode("temp node"))
        tempnode.setAttrib(LightRampAttrib.makeSingleThreshold(0.5, 0.4))
        tempnode.setShaderAuto()
        base.cam.node().setInitialState(tempnode.getState())

        # Use class 'CommonFilters' to enable a cartoon inking filter.
        # This can fail if the video card is not powerful enough, if so,
        # display an error and exit.

        self.separation = 1  # Pixels
        self.filters = CommonFilters(base.win, base.cam)
        filterok = self.filters.setCartoonInk(separation=self.separation)
        if (filterok == False):
            addTitle(
                "Toon Shader: Video card not powerful enough to do image postprocessing"
            )
            return

        # Post the instructions.

        self.title = addTitle(
            "Panda3D: Tutorial - Toon Shading with Normals-Based Inking")
        self.inst1 = addInstructions(0.95, "ESC: Quit")
        self.inst2 = addInstructions(
            0.90, "Up/Down: Increase/Decrease Line Thickness")
        self.inst3 = addInstructions(0.85,
                                     "V: View the render-to-texture results")

        # Load a dragon model and animate it.

        self.character = Actor()
        self.character.loadModel('models/samples/cartoon/nik_dragon')
        self.character.reparentTo(render)
        self.character.loadAnims({'win': 'models/samples/cartoon/nik_dragon'})
        self.character.loop('win')
        self.character.hprInterval(15, Point3(360, 0, 0)).loop()

        # Create a non-attenuating point light and an ambient light.

        plightnode = PointLight("point light")
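        # setAttenuation takes (constant, linear, quadratic) coefficients;
        # with only the constant term, the light does not fall off with
        # distance.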
        plightnode.setAttenuation(Vec3(1, 0, 0))
        plight = render.attachNewNode(plightnode)
        plight.setPos(30, -50, 0)
        alightnode = AmbientLight("ambient light")
        alightnode.setColor(Vec4(0.8, 0.8, 0.8, 1))
        alight = render.attachNewNode(alightnode)
        render.setLight(alight)
        render.setLight(plight)

        # Panda contains a built-in viewer that lets you view the
        # results of all render-to-texture operations.  This lets you
        # see what class CommonFilters is doing behind the scenes.

        self.accept("v", base.bufferViewer.toggleEnable)
        self.accept("V", base.bufferViewer.toggleEnable)
        base.bufferViewer.setPosition("llcorner")
        self.accept("s", self.filters.manager.resizeBuffers)

        # These allow you to change cartooning parameters in realtime

        self.accept("escape", sys.exit, [0])
        self.accept("arrow_up", self.increaseSeparation)
        self.accept("arrow_down", self.decreaseSeparation)
Example n. 26
0
    def __init__(self):

        # create a texture into which we can copy the main window.

        self.tex = Texture()
        self.tex.setMinfilter(Texture.FTLinear)
        base.win.addRenderTexture(self.tex,
                                  GraphicsOutput.RTMTriggeredCopyTexture)
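        # RTMTriggeredCopyTexture means the window is copied into the
        # texture only when triggerCopy() is called, which the takeSnapShot
        # task presumably does each time it wants to capture a frame.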

        # Create another 2D camera. Tell it to render before the main camera.

        self.backcam = base.makeCamera2d(base.win, sort=-10)
        self.background = NodePath("background")
        self.backcam.reparentTo(self.background)
        self.background.setDepthTest(0)
        self.background.setDepthWrite(0)
        self.backcam.node().getDisplayRegion(0).setClearDepthActive(0)

        # Obtain two texture cards. One renders before the dragon, the other after.
        self.bcard = base.win.getTextureCard()
        self.bcard.reparentTo(self.background)
        self.bcard.setTransparency(1)
        self.fcard = base.win.getTextureCard()
        self.fcard.reparentTo(render2d)
        self.fcard.setTransparency(1)

        # Initialize one of the nice effects.
        self.chooseEffectGhost()

        # Add the task that initiates the screenshots.
        taskMgr.add(self.takeSnapShot, "takeSnapShot")

        # Create some black squares on top of which we will
        # place the instructions.
        blackmaker = CardMaker("blackmaker")
        blackmaker.setColor(0, 0, 0, 1)
        blackmaker.setFrame(-1.00, -0.50, 0.65, 1.00)
        instcard = NodePath(blackmaker.generate())
        instcard.reparentTo(render2d)
        blackmaker.setFrame(-0.5, 0.5, -1.00, -0.85)
        titlecard = NodePath(blackmaker.generate())
        titlecard.reparentTo(render2d)

        # Panda does its best to hide the differences between DirectX and
        # OpenGL.  But there are a few differences that it cannot hide.
        # One such difference is that when OpenGL copies from a
        # visible window to a texture, it gets it right-side-up.  When
        # DirectX does it, it gets it upside-down.  There is nothing panda
        # can do to compensate except to expose a flag and let the
        # application programmer deal with it.  You should only do this
        # in the rare event that you're copying from a visible window
        # to a texture.

        if (base.win.getGsg().getCopyTextureInverted()):
            print "Copy texture is inverted."
            self.bcard.setScale(1, 1, -1)
            self.fcard.setScale(1, 1, -1)

        # Put up the instructions
        title = OnscreenText(text="Panda3D: Tutorial - Motion Trails",
                             style=1,
                             fg=(1, 1, 1, 1),
                             font=font,
                             pos=(0, -0.95),
                             scale=.07)

        instr0 = addInstructions(0.95, "Press ESC to exit")
        instr1 = addInstructions(0.90, "Press 1: Ghost effect")
        instr2 = addInstructions(0.85, "Press 2: PaintBrush effect")
        instr3 = addInstructions(0.80, "Press 3: Double Vision effect")
        instr4 = addInstructions(0.75, "Press 4: Wings of Blue effect")
        instr5 = addInstructions(0.70, "Press 5: Whirlpool effect")

        # enable the key events
        self.accept("escape", sys.exit, [0])
        self.accept("1", self.chooseEffectGhost)
        self.accept("2", self.chooseEffectPaintBrush)
        self.accept("3", self.chooseEffectDoubleVision)
        self.accept("4", self.chooseEffectWingsOfBlue)
        self.accept("5", self.chooseEffectWhirlpool)