def renderQuadInto(self, mul=1, div=1, align=1, depthtex=None, colortex=None, auxtex0=None, auxtex1=None):
    """
    Creates an offscreen buffer for an intermediate computation and
    installs a fullscreen quad into it.  Returns the quad, or None if
    the buffer could not be created.

    The buffer size starts equal to the main window's size; 'mul',
    'div', and 'align' adjust that size (see getScaledSize).  The
    four optional textures become the buffer's render targets.
    """
    texgroup = (depthtex, colortex, auxtex0, auxtex1)
    (winx, winy) = self.getScaledSize(mul, div, align)
    # Request depth bits only when a depth render target was supplied.
    depthbits = depthtex is not None
    buffer = self.createBuffer('filter-stage', winx, winy, texgroup, depthbits)
    if buffer is None:
        return None
    # Fullscreen card that the next stage's shader will run over.
    cm = CardMaker('filter-stage-quad')
    cm.setFrameFullscreenQuad()
    quad = NodePath(cm.generate())
    quad.setDepthTest(0)
    quad.setDepthWrite(0)
    quad.setColor(Vec4(1, 0.5, 0.5, 1))
    # Orthographic camera that sees exactly the 2x2 fullscreen quad.
    quadcamnode = Camera('filter-quad-cam')
    lens = OrthographicLens()
    lens.setFilmSize(2, 2)
    lens.setFilmOffset(0, 0)
    lens.setNearFar(-1000, 1000)
    quadcamnode.setLens(lens)
    quadcam = quad.attachNewNode(quadcamnode)
    buffer.getDisplayRegion(0).setCamera(quadcam)
    buffer.getDisplayRegion(0).setActive(1)
    # Track the buffer so it can be resized/cleaned up with the others.
    self.buffers.append(buffer)
    self.sizes.append((mul, div, align))
    return quad
def renderSceneInto(self, depthtex=None, colortex=None, auxtex=None, auxbits=0, textures=None):
    """
    Causes the scene to be rendered into the supplied textures instead
    of into the original window, and puts a fullscreen quad into the
    original window to show the render-to-texture result.  Returns the
    quad, or None if the offscreen buffer could not be created.

    'textures', if given, is a dict that may supply 'color', 'depth'
    and 'aux' textures, overriding the corresponding arguments.
    'auxbits' selects extra bitplanes via AuxBitplaneAttrib.
    """
    if textures:
        colortex = textures.get('color', None)
        depthtex = textures.get('depth', None)
        auxtex = textures.get('aux', None)
    if colortex is None:
        # No color target supplied: make one, clamped so filters don't
        # sample past the edge of the rendered image.
        colortex = Texture('filter-base-color')
        colortex.setWrapU(Texture.WMClamp)
        colortex.setWrapV(Texture.WMClamp)
    texgroup = (depthtex, colortex, auxtex, None)
    (winx, winy) = self.getScaledSize(1, 1, 1)
    buffer = self.createBuffer('filter-base', winx, winy, texgroup)
    if buffer is None:
        return None
    # Fullscreen quad textured with the offscreen color result.
    cm = CardMaker('filter-base-quad')
    cm.setFrameFullscreenQuad()
    quad = NodePath(cm.generate())
    quad.setDepthTest(0)
    quad.setDepthWrite(0)
    quad.setTexture(colortex)
    quad.setColor(Vec4(1, 0.5, 0.5, 1))
    # Rebuild the scene camera's initial state, adding any requested
    # auxiliary bitplanes.
    cs = NodePath('dummy')
    cs.setState(self.camstate)
    if auxbits:
        cs.setAttrib(AuxBitplaneAttrib.make(auxbits))
    self.camera.node().setInitialState(cs.getState())
    # Orthographic camera that renders the quad into the original region.
    quadcamnode = Camera('filter-quad-cam')
    lens = OrthographicLens()
    lens.setFilmSize(2, 2)
    lens.setFilmOffset(0, 0)
    lens.setNearFar(-1000, 1000)
    quadcamnode.setLens(lens)
    quadcam = quad.attachNewNode(quadcamnode)
    self.region.setCamera(quadcam)
    # The offscreen buffer takes over the original region's clears.
    dr = buffer.getDisplayRegion(0)
    self.setStackedClears(dr, self.rclears, self.wclears)
    if auxtex:
        dr.setClearActive(GraphicsOutput.RTPAuxRgba0, 1)
        dr.setClearValue(GraphicsOutput.RTPAuxRgba0, Vec4(0.5, 0.5, 1.0, 0.0))
    self.region.disableClears()
    if self.isFullscreen():
        self.win.disableClears()
    dr.setCamera(self.camera)
    dr.setActive(1)
    self.buffers.append(buffer)
    self.sizes.append((1, 1, 1))
    return quad
def make_camera(self, output, sort=0, dr_dims=(0, 1, 0, 1), aspect_ratio=None, clear_depth=False, clear_color=None, lens=None, cam_name='camera0', mask=None):
    """
    Makes a new 3-d camera associated with the indicated window, and
    creates a display region in the indicated subrectangle.

    output       -- the window/buffer to attach the display region to
    sort         -- sort order for the new display region
    dr_dims      -- (left, right, bottom, top) fractions of the window
    aspect_ratio -- aspect ratio for the default lens; computed from
                    the output when omitted (unused if *lens* is given)
    clear_depth  -- if True, the region clears the depth buffer itself
    clear_color  -- if not None, the region clears to this color
    lens         -- lens to install; a PerspectiveLens is built when
                    omitted
    cam_name     -- name for the new Camera node
    mask         -- optional camera mask (BitMask32 or int) masking
                    part of the scene out of this camera

    Returns the NodePath of the new camera.
    """
    # self.cameras is the parent node of all cameras: a node that
    # we can move around to move all cameras as a group.
    if self.cameras is None:
        # We make it a ModelNode with the PTLocal flag, so that a
        # wayward flatten operation won't attempt to mangle the
        # camera.
        self.cameras = self.rootnode.attachNewNode(ModelNode('cameras'))
        self.cameras.node().setPreserveTransform(ModelNode.PTLocal)
    # Make a new Camera node; build and configure a default lens only
    # when the caller did not supply one.
    cam_node = Camera(cam_name)
    if lens is None:
        lens = PerspectiveLens()
        if aspect_ratio is None:
            aspect_ratio = self.get_aspect_ratio(output)
        lens.setAspectRatio(aspect_ratio)
        lens.setNear(0.1)
        lens.setFar(1000.0)
    # lens is always set here, so install it unconditionally (the old
    # 'if lens is not None' guard was dead).
    cam_node.setLens(lens)
    camera = self.cameras.attachNewNode(cam_node)
    # Masks out part of the scene from this camera.
    if mask is not None:
        if isinstance(mask, int):
            mask = BitMask32(mask)
        cam_node.setCameraMask(mask)
    # Make a display region.
    dr = output.makeDisplayRegion(*dr_dims)
    # By default, we do not clear 3-d display regions (the entire
    # window will be cleared, which is normally sufficient).  But
    # we will if clear_depth is specified.
    if clear_depth:
        dr.setClearDepthActive(1)
    # Test against None (not truthiness) so an all-zero clear color
    # is still honoured.
    if clear_color is not None:
        dr.setClearColorActive(1)
        dr.setClearColor(clear_color)
    dr.setSort(sort)
    dr.setCamera(camera)
    dr.setActive(True)
    return camera
def renderQuadInto(self, mul=1, div=1, align=1, depthtex=None, colortex=None, auxtex0=None, auxtex1=None):
    """
    Creates an offscreen buffer for an intermediate computation.
    Installs a quad into the buffer.  Returns the fullscreen quad,
    or None if the buffer could not be created.  The size of the
    buffer is initially equal to the size of the main window.  The
    parameters 'mul', 'div', and 'align' can be used to adjust that
    size.
    """
    texgroup = (depthtex, colortex, auxtex0, auxtex1)
    winx, winy = self.getScaledSize(mul, div, align)
    # Request depth bits only when a depth render target was supplied.
    depthbits = depthtex is not None
    buffer = self.createBuffer("filter-stage", winx, winy, texgroup, depthbits)
    if buffer is None:
        return None
    cm = CardMaker("filter-stage-quad")
    cm.setFrameFullscreenQuad()
    quad = NodePath(cm.generate())
    quad.setDepthTest(0)
    quad.setDepthWrite(0)
    quad.setColor(Vec4(1, 0.5, 0.5, 1))
    # Orthographic camera that sees exactly the 2x2 fullscreen quad.
    quadcamnode = Camera("filter-quad-cam")
    lens = OrthographicLens()
    lens.setFilmSize(2, 2)
    lens.setFilmOffset(0, 0)
    lens.setNearFar(-1000, 1000)
    quadcamnode.setLens(lens)
    quadcam = quad.attachNewNode(quadcamnode)
    dr = buffer.makeDisplayRegion((0, 1, 0, 1))
    dr.disableClears()
    dr.setCamera(quadcam)
    dr.setActive(True)
    dr.setScissorEnabled(False)
    # This clear stage is important if the buffer is padded, so that
    # any pixels accidentally sampled in the padded region won't
    # be reading from uninitialised memory.
    buffer.setClearColor((0, 0, 0, 1))
    buffer.setClearColorActive(True)
    self.buffers.append(buffer)
    self.sizes.append((mul, div, align))
    return quad
def renderQuadInto(self, mul=1, div=1, align=1, depthtex=None, colortex=None, auxtex0=None, auxtex1=None):
    """
    Creates an offscreen buffer for an intermediate computation.
    Installs a quad into the buffer.  Returns the fullscreen quad,
    or None if the buffer could not be created.  The size of the
    buffer is initially equal to the size of the main window.  The
    parameters 'mul', 'div', and 'align' can be used to adjust that
    size.
    """
    texgroup = (depthtex, colortex, auxtex0, auxtex1)
    winx, winy = self.getScaledSize(mul, div, align)
    # Request depth bits only when a depth render target was supplied.
    depthbits = depthtex is not None
    buffer = self.createBuffer("filter-stage", winx, winy, texgroup, depthbits)
    if buffer is None:
        return None
    cm = CardMaker("filter-stage-quad")
    cm.setFrameFullscreenQuad()
    quad = NodePath(cm.generate())
    quad.setDepthTest(0)
    quad.setDepthWrite(0)
    quad.setColor(Vec4(1, 0.5, 0.5, 1))
    # Orthographic camera that sees exactly the 2x2 fullscreen quad.
    quadcamnode = Camera("filter-quad-cam")
    lens = OrthographicLens()
    lens.setFilmSize(2, 2)
    lens.setFilmOffset(0, 0)
    lens.setNearFar(-1000, 1000)
    quadcamnode.setLens(lens)
    quadcam = quad.attachNewNode(quadcamnode)
    buffer.getDisplayRegion(0).setCamera(quadcam)
    buffer.getDisplayRegion(0).setActive(1)
    # Track the buffer so it can be resized/cleaned up with the others.
    self.buffers.append(buffer)
    self.sizes.append((mul, div, align))
    return quad
def renderQuadInto(self, mul=1, div=1, align=1, depthtex=None, colortex=None, auxtex0=None, auxtex1=None):
    """
    Creates an offscreen buffer for an intermediate computation.
    Installs a quad into the buffer.  Returns the fullscreen quad,
    or None if the buffer could not be created.  The size of the
    buffer is initially equal to the size of the main window.  The
    parameters 'mul', 'div', and 'align' can be used to adjust that
    size.
    """
    texgroup = (depthtex, colortex, auxtex0, auxtex1)
    winx, winy = self.getScaledSize(mul, div, align)
    # Request depth bits only when a depth render target was supplied.
    depthbits = depthtex is not None
    buffer = self.createBuffer("filter-stage", winx, winy, texgroup, depthbits)
    if buffer is None:
        return None
    cm = CardMaker("filter-stage-quad")
    cm.setFrameFullscreenQuad()
    quad = NodePath(cm.generate())
    quad.setDepthTest(0)
    quad.setDepthWrite(0)
    quad.setColor(Vec4(1, 0.5, 0.5, 1))
    # Orthographic camera that sees exactly the 2x2 fullscreen quad.
    quadcamnode = Camera("filter-quad-cam")
    lens = OrthographicLens()
    lens.setFilmSize(2, 2)
    lens.setFilmOffset(0, 0)
    lens.setNearFar(-1000, 1000)
    quadcamnode.setLens(lens)
    quadcam = quad.attachNewNode(quadcamnode)
    buffer.getDisplayRegion(0).setCamera(quadcam)
    buffer.getDisplayRegion(0).setActive(1)
    # Track the buffer so it can be resized/cleaned up with the others.
    self.buffers.append(buffer)
    self.sizes.append((mul, div, align))
    return quad
class SmartCam(object):
    """Camera wrapper that follows a focus node from an adjustable
    height below it, delegating everything else to the wrapped node."""

    def __init__(self, nameorcam):
        """Wrap an existing NodePath, or build a Camera from a name."""
        self.node = nameorcam if isinstance(nameorcam, NodePath) else Camera(nameorcam)

    def __getattr__(self, name):
        # Anything we don't define ourselves is forwarded to the node.
        return getattr(self.node, name)

    @property
    def fpos(self):
        """World position of the node currently in focus."""
        return self.focus.getPos()

    def sync_focus(self):
        """Place the camera zoom_height below the focus and aim at it."""
        target = self.fpos
        self.node.setPos(target.x, target.y, target.z - self.zoom_height)
        self.node.lookAt(target)

    def set_focus(self, focus):
        """Switch to a new focus node; seed the zoom height on first use."""
        self.focus = focus
        if not hasattr(self, 'zoom_height'):
            self.zoom_height = self.focal_radius / 2
        self.sync_focus()

    @property
    def focal_radius(self):
        """Bounding radius of the focus node."""
        return self.focus.getBounds().getRadius()

    def zoom_in(self, n=1):
        """Move closer by n steps, never below a height of 0.5."""
        self.zoom_height = max(
            0.5, self.zoom_height * 0.9 - self.focal_radius * n / 10)
        self.sync_focus()

    def zoom_out(self, n=1):
        """Move farther by n steps, clamped at the same minimum."""
        self.zoom_height = max(
            0.5, self.zoom_height * 1.1 + self.focal_radius * n / 10)
        self.sync_focus()
def startGame(self):
    """Initialise the game environment and characters.

    Hides the login UI, loads the world model, creates the player
    character with its follow camera, and binds the control keys.
    """
    # NOTE: the docstring used to appear AFTER self.hideLogin(), which
    # made it a discarded string expression instead of documentation.
    self.hideLogin()
    # Post some onscreen instructions.
    title = addTitle("Nexus Demo")
    # inst1 = addInstructions(0.95, "[ESC]: Quit")
    # inst2 = addInstructions(0.90, "[Left Arrow]: Rotate Ralph Left")
    # inst3 = addInstructions(0.85, "[Right Arrow]: Rotate Ralph Right")
    # inst4 = addInstructions(0.80, "[Up Arrow]: Run Ralph Forward")
    # inst4 = addInstructions(0.75, "[Down Arrow]: Run Ralph Backward")
    # inst6 = addInstructions(0.65, "[A]: Rotate Camera Left")
    # inst7 = addInstructions(0.60, "[S]: Rotate Camera Right")
    # Initialise the environment.
    base.win.setClearColor(Vec4(0, 0, 0, 1))
    environ = loader.loadModel(Config.MYDIR + "/models/world/world")
    environ.reparentTo(render)
    environ.setPos(0, 0, 0)
    environ.setCollideMask(BitMask32.bit(1))
    # Create a character for the player, spawned at the level's
    # designated start point.
    player = Character("/models/ralph/ralph", "/models/ralph/ralph-run", "/models/ralph/ralph-walk", environ.find("**/start_point").getPos(), .2)
    # Hook up some control keys to the character.
    self.accept("arrow_left", player.setControl, ["left", 1])
    self.accept("arrow_right", player.setControl, ["right", 1])
    self.accept("arrow_up", player.setControl, ["up", 1])
    self.accept("arrow_down", player.setControl, ["down", 1])
    # Stop event handling if the keys are lifted up.
    self.accept("arrow_left-up", player.setControl, ["left", 0])
    self.accept("arrow_right-up", player.setControl, ["right", 0])
    self.accept("arrow_up-up", player.setControl, ["up", 0])
    self.accept("arrow_down-up", player.setControl, ["down", 0])
    # Create a camera to follow the player.
    camera = Camera(player.actor)
    # Accept some keys to move the camera.
    self.accept("a-up", camera.setControl, ["left", 0])
    self.accept("s-up", camera.setControl, ["right", 0])
    self.accept("a", camera.setControl, ["left", 1])
    self.accept("s", camera.setControl, ["right", 1])
def __init__(self, scene_file, pedestrian_file, dir, mode):
    """Set up the simulator: load the scene and pedestrians, build the
    HUD labels, and install the default top-down camera.

    scene_file      -- path of the scene description to load
    pedestrian_file -- path of the pedestrian description to load
    dir             -- data directory handed to the Model
    mode            -- mode flag forwarded to the Controller
    """
    ShowBase.__init__(self)
    # Slave the clock so frame time is driven externally via
    # setFrameTime (see below) rather than by the wall clock.
    self.globalClock = ClockObject.getGlobalClock()
    self.globalClock.setMode(ClockObject.MSlave)
    self.directory = dir
    self.model = Model(dir)
    self.loadScene(scene_file)
    self.loadPedestrians(pedestrian_file)
    # On-screen labels: camera name (top centre) and sim time (top left).
    self.cam_label = OST("Top Down", pos=(0, 0.95), fg=(1, 1, 1, 1), scale=0.05, mayChange=True)
    self.time_label = OST("Time: 0.0", pos=(-1.3, 0.95), fg=(1, 1, 1, 1), scale=0.06, mayChange=True, align=TextNode.ALeft)
    # Keyboard and window event bindings.
    self.accept("arrow_right", self.changeCamera, [1])
    self.accept("arrow_left", self.changeCamera, [-1])
    self.accept("escape", self.exit)
    self.accept("aspectRatioChanged", self.setAspectRatio)
    self.accept("window-event", self.windowChanged)
    #base.disableMouse()
    # Orthographic top-down view positioned high above the scene,
    # pitched straight down.
    lens = OrthographicLens()
    lens.setFilmSize(1550, 1000)
    self.display_region = base.win.makeDisplayRegion()
    self.default_camera = render.attachNewNode(Camera("top down"))
    self.default_camera.node().setLens(lens)
    self.default_camera.setPosHpr(Vec3(-75, 0, 2200), Vec3(0, -90, 0))
    self.setCamera(0)
    self.controller = Controller(self, mode)
    self.taskMgr.add(self.updateCameraModules, "Update Camera Modules", 80)
    self.globalClock.setFrameTime(0.0)
    # WIDTH/HEIGHT are module-level constants defined elsewhere in the file.
    self.width = WIDTH
    self.height = HEIGHT
    # Retitle the main window.
    props = WindowProperties()
    props.setTitle('Virtual Vision Simulator')
    base.win.requestProperties(props)
class SmartCam(object):
    """A thin camera proxy that keeps itself positioned at an
    adjustable height ("zoom") beneath a focus node."""

    def __init__(self, nameorcam):
        """Accept either a ready NodePath or a name for a new Camera."""
        if isinstance(nameorcam, NodePath):
            cam = nameorcam
        else:
            cam = Camera(nameorcam)
        self.node = cam

    def __getattr__(self, attr):
        # Unknown attributes fall through to the wrapped camera node.
        return getattr(self.node, attr)

    @property
    def fpos(self):
        """Position of the node currently in focus."""
        return self.focus.getPos()

    @property
    def focal_radius(self):
        """Bounding radius of the focus node."""
        return self.focus.getBounds().getRadius()

    def sync_focus(self):
        """Drop below the focus by zoom_height and look at it."""
        self.node.setPos(self.fpos.x, self.fpos.y, self.fpos.z - self.zoom_height)
        self.node.lookAt(self.fpos)

    def set_focus(self, focus):
        """Track a new focus node; initialise the zoom on first call."""
        self.focus = focus
        if not hasattr(self, 'zoom_height'):
            self.zoom_height = self.focal_radius / 2
        self.sync_focus()

    def zoom_in(self, n=1):
        """Shrink the height (move closer), floored at 0.5."""
        candidate = self.zoom_height * 0.9 - self.focal_radius * n / 10
        if candidate < 0.5:
            candidate = 0.5
        self.zoom_height = candidate
        self.sync_focus()

    def zoom_out(self, n=1):
        """Grow the height (move away), floored at the same minimum."""
        candidate = self.zoom_height * 1.1 + self.focal_radius * n / 10
        if candidate < 0.5:
            candidate = 0.5
        self.zoom_height = candidate
        self.sync_focus()
def setupEnviroCamera(self):
    """Create (or recreate) the environment DisplayRegion and its
    camera, wiring up the sort orders and clears so the environment
    draws behind the main scene.  Handles both stereo and mono modes.
    """
    # Preserve the existing clear color if we are rebuilding.
    clearColor = VBase4(0, 0, 0, 1)
    if self.enviroDR:
        clearColor = self.enviroDR.getClearColor()
        self.win.removeDisplayRegion(self.enviroDR)
    if not self.enviroCam:
        self.enviroCam = self.cam.attachNewNode(Camera('enviroCam'))
    mainDR = self.camNode.getDisplayRegion(0)
    if self.stereoEnabled:
        # Stereo: draw both left channels, clear depth once, then draw
        # both right channels (see the sort values below).
        self.enviroDR = self.win.makeStereoDisplayRegion()
        if not mainDR.isStereo():
            # The main DR must match: replace it with a stereo region.
            self.win.removeDisplayRegion(mainDR)
            mainDR = self.win.makeStereoDisplayRegion()
            mainDR.setCamera(self.cam)
        ml = mainDR.getLeftEye()
        mr = mainDR.getRightEye()
        el = self.enviroDR.getLeftEye()
        er = self.enviroDR.getRightEye()
        # Order: enviro-left, main-left, enviro-right (clears depth),
        # main-right (does not clear depth).
        el.setSort(-8)
        ml.setSort(-6)
        er.setSort(-4)
        er.setClearDepthActive(True)
        mr.setSort(-2)
        mr.setClearDepthActive(False)
    else:
        # Mono: make sure the main DR is mono as well.
        self.enviroDR = self.win.makeMonoDisplayRegion()
        if mainDR.isStereo():
            self.win.removeDisplayRegion(mainDR)
            mainDR = self.win.makeMonoDisplayRegion()
            mainDR.setCamera(self.cam)
    # Environment draws first (lowest sort), using the preserved clears.
    self.enviroDR.setSort(-10)
    self.enviroDR.setClearColor(clearColor)
    self.win.setClearColor(clearColor)
    self.enviroDR.setCamera(self.enviroCam)
    self.enviroCamNode = self.enviroCam.node()
    # Share the main camera's lens; only enviro-masked nodes are seen.
    self.enviroCamNode.setLens(self.cam.node().getLens())
    self.enviroCamNode.setCameraMask(OTPRender.EnviroCameraBitmask)
    render.hide(OTPRender.EnviroCameraBitmask)
    self.camList.append(self.enviroCam)
    self.backgroundDrawable = self.enviroDR
    # Background texture reloads are lowest priority.
    self.enviroDR.setTextureReloadPriority(-10)
    if self.pixelZoomSetup:
        self.setupAutoPixelZoom()
def __init__(self, objectPath, groundPath, lightPos=None):
    """Set up a shadow caster.

    objectPath -- the shadow-casting object
    groundPath -- the shadow-receiving object
    lightPos   -- the light's position relative to objectPath
                  (defaults to Vec3(0, 0, 1); None avoids sharing a
                  mutable default Vec3 across all instances)
    """
    if lightPos is None:
        lightPos = Vec3(0, 0, 1)
    # Unique id-number for each shadow.
    global shadowCasterObjectCounter
    shadowCasterObjectCounter += 1
    self.objectShadowId = shadowCasterObjectCounter
    # The object which will cast shadows.
    self.objectPath = objectPath
    # Get the object's bounds center and radius to define the
    # shadow-rendering camera position and film size.
    try:
        objectBoundRadius = self.objectPath.getBounds().getRadius()
        objectBoundCenter = self.objectPath.getBounds().getCenter()
    except Exception:
        # Narrowed from a bare 'except:' so SystemExit/KeyboardInterrupt
        # are no longer swallowed; fall back to a unit-radius bound.
        print("failed")
        objectBoundCenter = Point3(0, 0, 0)
        objectBoundRadius = 1
    lightPath = objectPath.attachNewNode('lightPath%i' % self.objectShadowId)
    # We can change this position at will to change the angle of the sun.
    lightPath.setPos(objectPath.getParent(), objectBoundCenter)
    self.lightPath = lightPath
    # The film size is the diameter of the object.
    self.filmSize = objectBoundRadius * 2
    # Create an offscreen buffer to render the view of the avatar
    # into a texture.
    self.buffer = base.win.makeTextureBuffer(
        'shadowBuffer%i' % self.objectShadowId, self.texXSize, self.texYSize)
    # The background of this buffer--and the border of the
    # texture--is pure white.
    clearColor = VBase4(1, 1, 1, 1)
    self.buffer.setClearColor(clearColor)
    self.tex = self.buffer.getTexture()
    self.tex.setBorderColor(clearColor)
    self.tex.setWrapU(Texture.WMBorderColor)
    self.tex.setWrapV(Texture.WMBorderColor)
    # Set up a display region on this buffer, and create a camera.
    dr = self.buffer.makeDisplayRegion()
    self.camera = Camera('shadowCamera%i' % self.objectShadowId)
    self.cameraPath = self.lightPath.attachNewNode(self.camera)
    self.camera.setScene(self.objectPath)
    dr.setCamera(self.cameraPath)
    self.setLightPos(lightPos)
    # Use a temporary NodePath to define the initial state for the
    # camera.  The initial state will render everything in a
    # flat-shaded gray, as if it were a shadow.
    initial = NodePath('initial%i' % self.objectShadowId)
    initial.setColor(*SHADOWCOLOR)
    initial.setTextureOff(2)
    self.camera.setInitialState(initial.getState())
    # Use an orthographic lens for this camera instead of the
    # usual perspective lens.  An orthographic lens is better to
    # simulate sunlight, which is (almost) orthographic.  We set
    # the film size large enough to render a typical avatar (but
    # not so large that we lose detail in the texture).
    self.lens = OrthographicLens()
    self.lens.setFilmSize(self.filmSize, self.filmSize)
    self.camera.setLens(self.lens)
    # Finally, we'll need a unique TextureStage to apply this
    # shadow texture to the world.
    self.stage = TextureStage('shadow%i' % self.objectShadowId)
    # Make sure the shadowing object doesn't get its own shadow
    # applied to it.
    self.objectPath.setTextureOff(self.stage)
    # The object which will receive shadows.
    self.setGround(groundPath)
def setupEnviroCamera(self):
    """
    Set up a special DisplayRegion and camera for rendering
    environments, especially big, lush environments like the islands
    in Pirates.  It's mainly useful in conjunction with
    setupAutoPixelZoom, which then allows us to zoom down the
    resolution of this DisplayRegion, independently of the smaller
    objects in the scene like avatars and their nametags, when we
    need better frame rate (this is useful only with the tinydisplay
    software renderer).
    """
    # Preserve the existing clear color if we are rebuilding.
    clearColor = VBase4(0, 0, 0, 1)
    if self.enviroDR:
        clearColor = self.enviroDR.getClearColor()
        self.win.removeDisplayRegion(self.enviroDR)
    if not self.enviroCam:
        self.enviroCam = self.cam.attachNewNode(Camera('enviroCam'))
    mainDR = self.camNode.getDisplayRegion(0)
    if self.stereoEnabled:
        # If we are in stereo mode, set up the left and right eyes
        # properly w.r.t. the main display region.  We need to
        # draw both left channels, clear the depth once, then draw
        # both right channels.
        self.enviroDR = self.win.makeStereoDisplayRegion()
        if not mainDR.isStereo():
            # If the main DR isn't a stereo DisplayRegion, make it
            # one.
            self.win.removeDisplayRegion(mainDR)
            mainDR = self.win.makeStereoDisplayRegion()
            mainDR.setCamera(self.cam)
        ml = mainDR.getLeftEye()
        mr = mainDR.getRightEye()
        el = self.enviroDR.getLeftEye()
        er = self.enviroDR.getRightEye()
        el.setSort(-8)
        ml.setSort(-6)
        er.setSort(-4)
        er.setClearDepthActive(True)
        mr.setSort(-2)
        mr.setClearDepthActive(False)
    else:
        # If we're not in stereo mode, make sure our main DR isn't
        # either.
        self.enviroDR = self.win.makeMonoDisplayRegion()
        if mainDR.isStereo():
            self.win.removeDisplayRegion(mainDR)
            mainDR = self.win.makeMonoDisplayRegion()
            mainDR.setCamera(self.cam)
    # Environment draws first (lowest sort), with the preserved clears.
    self.enviroDR.setSort(-10)
    self.enviroDR.setClearColor(clearColor)
    self.win.setClearColor(clearColor)
    self.enviroDR.setCamera(self.enviroCam)
    self.enviroCamNode = self.enviroCam.node()
    # Share the main camera's lens; only enviro-masked nodes are drawn.
    self.enviroCamNode.setLens(self.cam.node().getLens())
    self.enviroCamNode.setCameraMask(OTPRender.EnviroCameraBitmask)
    render.hide(OTPRender.EnviroCameraBitmask)
    self.camList.append(self.enviroCam)
    self.backgroundDrawable = self.enviroDR
    # Texture reloads for things in the environment
    # (i.e. background objects) are the lowest priority.  Load
    # gui items, foreground object textures, and animations first.
    self.enviroDR.setTextureReloadPriority(-10)
    if self.pixelZoomSetup:
        # If we want pixel zoom, enable it for the new display
        # region.
        self.setupAutoPixelZoom()
def renderSceneInto(self, depthtex=None, colortex=None, auxtex=None, auxbits=0, textures=None):
    """
    Causes the scene to be rendered into the supplied textures
    instead of into the original window.  Puts a fullscreen quad
    into the original window to show the render-to-texture results.
    Returns the quad.  Normally, the caller would then apply a
    shader to the quad.

    To elaborate on how this all works:

    * An offscreen buffer is created.  It is set up to mimic
      the original display region - it is the same size,
      uses the same clear colors, and contains a
      DisplayRegion that uses the original camera.

    * A fullscreen quad and an orthographic camera to render
      that quad are both created.  The original camera is
      removed from the original window, and in its place, the
      orthographic quad-camera is installed.

    * The fullscreen quad is textured with the data from the
      offscreen buffer.  A shader is applied that tints the
      results pink.

    * Automatic shader generation NOT enabled.
      If you have a filter that depends on a render target from
      the auto-shader, you either need to set an auto-shader
      attrib on the main camera or scene, or, you need to provide
      these outputs in your own shader.

    * All clears are disabled on the original display
      region.  If the display region fills the whole window,
      then clears are disabled on the original window as
      well.  It is assumed that rendering the full-screen
      quad eliminates the need to do clears.

    Hence, the original window which used to contain the actual
    scene, now contains a pink-tinted quad with a texture of the
    scene.  It is assumed that the user will replace the shader
    on the quad with a more interesting filter.
    """
    if (textures):
        colortex = textures.get("color", None)
        depthtex = textures.get("depth", None)
        auxtex = textures.get("aux", None)
    if (colortex == None):
        # No color target supplied: make one, clamped so filters
        # don't sample past the edge of the rendered image.
        colortex = Texture("filter-base-color")
        colortex.setWrapU(Texture.WMClamp)
        colortex.setWrapV(Texture.WMClamp)
    texgroup = (depthtex, colortex, auxtex, None)
    # Choose the size of the offscreen buffer.
    (winx, winy) = self.getScaledSize(1,1,1)
    buffer = self.createBuffer("filter-base", winx, winy, texgroup)
    if (buffer == None):
        return None
    # Fullscreen quad textured with the offscreen color result.
    cm = CardMaker("filter-base-quad")
    cm.setFrameFullscreenQuad()
    quad = NodePath(cm.generate())
    quad.setDepthTest(0)
    quad.setDepthWrite(0)
    quad.setTexture(colortex)
    quad.setColor(Vec4(1,0.5,0.5,1))
    # Rebuild the scene camera's initial state, adding any requested
    # auxiliary bitplanes.
    cs = NodePath("dummy")
    cs.setState(self.camstate)
    # Do we really need to turn on the Shader Generator?
    #cs.setShaderAuto()
    if (auxbits):
        cs.setAttrib(AuxBitplaneAttrib.make(auxbits))
    self.camera.node().setInitialState(cs.getState())
    # Orthographic camera that renders the quad into the original
    # display region.
    quadcamnode = Camera("filter-quad-cam")
    lens = OrthographicLens()
    lens.setFilmSize(2, 2)
    lens.setFilmOffset(0, 0)
    lens.setNearFar(-1000, 1000)
    quadcamnode.setLens(lens)
    quadcam = quad.attachNewNode(quadcamnode)
    self.region.setCamera(quadcam)
    # The offscreen buffer takes over the original region's clears.
    dr = buffer.getDisplayRegion(0)
    self.setStackedClears(dr, self.rclears, self.wclears)
    if (auxtex):
        dr.setClearActive(GraphicsOutput.RTPAuxRgba0, 1)
        dr.setClearValue(GraphicsOutput.RTPAuxRgba0, Vec4(0.5,0.5,1.0,0.0))
    self.region.disableClears()
    if (self.isFullscreen()):
        self.win.disableClears()
    dr.setCamera(self.camera)
    dr.setActive(1)
    self.buffers.append(buffer)
    self.sizes.append((1, 1, 1))
    return quad
def renderSceneInto(self, depthtex=None, colortex=None, auxtex=None, auxbits=0, textures=None):
    """
    Causes the scene to be rendered into the supplied textures
    instead of into the original window.  Puts a fullscreen quad
    into the original window to show the render-to-texture results.
    Returns the quad.  Normally, the caller would then apply a
    shader to the quad.

    To elaborate on how this all works:

    * An offscreen buffer is created.  It is set up to mimic
      the original display region - it is the same size,
      uses the same clear colors, and contains a
      DisplayRegion that uses the original camera.

    * A fullscreen quad and an orthographic camera to render
      that quad are both created.  The original camera is
      removed from the original window, and in its place, the
      orthographic quad-camera is installed.

    * The fullscreen quad is textured with the data from the
      offscreen buffer.  A shader is applied that tints the
      results pink.

    * Automatic shader generation NOT enabled.
      If you have a filter that depends on a render target from
      the auto-shader, you either need to set an auto-shader
      attrib on the main camera or scene, or, you need to provide
      these outputs in your own shader.

    * All clears are disabled on the original display
      region.  If the display region fills the whole window,
      then clears are disabled on the original window as
      well.  It is assumed that rendering the full-screen
      quad eliminates the need to do clears.

    Hence, the original window which used to contain the actual
    scene, now contains a pink-tinted quad with a texture of the
    scene.  It is assumed that the user will replace the shader
    on the quad with a more interesting filter.
    """
    if (textures):
        colortex = textures.get("color", None)
        depthtex = textures.get("depth", None)
        auxtex = textures.get("aux", None)
        # 'aux0' falls back to the plain 'aux' texture when absent.
        auxtex0 = textures.get("aux0", auxtex)
        auxtex1 = textures.get("aux1", None)
    else:
        auxtex0 = auxtex
        auxtex1 = None
    if (colortex == None):
        # No color target supplied: make one, clamped so filters
        # don't sample past the edge of the rendered image.
        colortex = Texture("filter-base-color")
        colortex.setWrapU(Texture.WMClamp)
        colortex.setWrapV(Texture.WMClamp)
    texgroup = (depthtex, colortex, auxtex0, auxtex1)
    # Choose the size of the offscreen buffer.
    (winx, winy) = self.getScaledSize(1,1,1)
    buffer = self.createBuffer("filter-base", winx, winy, texgroup)
    if (buffer == None):
        return None
    # Fullscreen quad textured with the offscreen color result.
    cm = CardMaker("filter-base-quad")
    cm.setFrameFullscreenQuad()
    quad = NodePath(cm.generate())
    quad.setDepthTest(0)
    quad.setDepthWrite(0)
    quad.setTexture(colortex)
    quad.setColor(Vec4(1,0.5,0.5,1))
    # Rebuild the scene camera's initial state, adding any requested
    # auxiliary bitplanes.
    cs = NodePath("dummy")
    cs.setState(self.camstate)
    # Do we really need to turn on the Shader Generator?
    #cs.setShaderAuto()
    if (auxbits):
        cs.setAttrib(AuxBitplaneAttrib.make(auxbits))
    self.camera.node().setInitialState(cs.getState())
    # Orthographic camera that renders the quad into the original
    # display region.
    quadcamnode = Camera("filter-quad-cam")
    lens = OrthographicLens()
    lens.setFilmSize(2, 2)
    lens.setFilmOffset(0, 0)
    lens.setNearFar(-1000, 1000)
    quadcamnode.setLens(lens)
    quadcam = quad.attachNewNode(quadcamnode)
    self.region.setCamera(quadcam)
    # Clears are applied to the buffer itself in this variant; aux
    # bitplanes clear to a neutral "up" normal color.
    self.setStackedClears(buffer, self.rclears, self.wclears)
    if (auxtex0):
        buffer.setClearActive(GraphicsOutput.RTPAuxRgba0, 1)
        buffer.setClearValue(GraphicsOutput.RTPAuxRgba0, Vec4(0.5, 0.5, 1.0, 0.0))
    if (auxtex1):
        buffer.setClearActive(GraphicsOutput.RTPAuxRgba1, 1)
    self.region.disableClears()
    if (self.isFullscreen()):
        self.win.disableClears()
    # The scene camera renders into a fresh region on the buffer.
    dr = buffer.makeDisplayRegion()
    dr.disableClears()
    dr.setCamera(self.camera)
    dr.setActive(1)
    self.buffers.append(buffer)
    self.sizes.append((1, 1, 1))
    return quad
def __init__(self, scene_file, pedestrian_file, dir, mode):
    """Set up the multi-viewport simulator: load the scene and
    pedestrians, open a second window for the top-down view, and tile
    the main window into camera display regions with border strips.

    scene_file      -- path of the scene description to load
    pedestrian_file -- path of the pedestrian description to load
    dir             -- data directory handed to the Model
    mode            -- mode flag forwarded to the Controller
    """
    ShowBase.__init__(self)
    # Slave the clock so frame time is driven externally via
    # setFrameTime (see below) rather than by the wall clock.
    self.globalClock = ClockObject.getGlobalClock()
    self.globalClock.setMode(ClockObject.MSlave)
    self.directory = dir
    self.model = Model(dir)
    self.loadScene(scene_file)
    self.loadPedestrians(pedestrian_file)
    #self.cam_label = OST("Top Down", pos=(0, 0.95), fg=(1,1,1,1),
    #    scale=0.05, mayChange=True)
    #self.time_label = OST("Time: 0.0", pos=(-1.3, 0.95), fg=(1,1,1,1),
    #    scale=0.06, mayChange=True, align=TextNode.ALeft)
    #self.accept("arrow_right", self.changeCamera, [1])
    #self.accept("arrow_left", self.changeCamera, [-1])
    self.accept("escape", self.exit)
    self.accept("aspectRatioChanged", self.setAspectRatio)
    self.accept("window-event", self.windowChanged)
    # Open a dedicated second window for the top-down view.
    new_window_fbp = FrameBufferProperties.getDefault()
    new_window_properties = WindowProperties.getDefault()
    self.new_window = base.graphicsEngine.makeOutput(base.pipe, 'Top Down View Window', 0, new_window_fbp, new_window_properties, GraphicsPipe.BFRequireWindow)
    self.new_window_display_region = self.new_window.makeDisplayRegion()
    #base.disableMouse()
    # Orthographic top-down camera looking straight down.
    lens = OrthographicLens()
    lens.setFilmSize(1500, 1500)
    lens.setNearFar(-5000, 5000)
    self.default_camera = render.attachNewNode(Camera("top down"))
    self.default_camera.node().setLens(lens)
    #self.default_camera.setPosHpr(Vec3( -75, 0, 2200), Vec3(0, -90, 0))
    self.default_camera.setPosHpr(Vec3(-75, 0, 0), Vec3(0, -90, 0))
    #self.new_window = base.openWindow()
    # Six tiled regions on the main window (2 rows x 3 columns) plus
    # the second window's region at index 0.
    self.display_regions = []
    self.display_regions.append(self.new_window_display_region)
    self.display_regions.append(
        base.win.makeDisplayRegion(0, 0.32, 0.52, 1))
    self.display_regions.append(
        base.win.makeDisplayRegion(0.34, 0.66, 0.52, 1))
    self.display_regions.append(
        base.win.makeDisplayRegion(0.68, 1, 0.52, 1))
    self.display_regions.append(
        base.win.makeDisplayRegion(0, 0.32, 0, 0.48))
    self.display_regions.append(
        base.win.makeDisplayRegion(0.34, 0.66, 0, 0.48))
    self.display_regions.append(
        base.win.makeDisplayRegion(0.68, 1, 0, 0.48))
    self.display_regions[0].setCamera(self.default_camera)
    # Thin black strips between the tiled regions.
    self.border_regions = []
    self.border_regions.append(
        base.win.makeDisplayRegion(0.32, 0.34, 0.52, 1))
    self.border_regions.append(
        base.win.makeDisplayRegion(0.66, 0.68, 0.52, 1))
    self.border_regions.append(base.win.makeDisplayRegion(
        0, 1, 0.48, 0.52))
    self.border_regions.append(
        base.win.makeDisplayRegion(0.32, 0.34, 0, 0.48))
    self.border_regions.append(
        base.win.makeDisplayRegion(0.66, 0.68, 0, 0.48))
    for i in range(0, len(self.border_regions)):
        border_region = self.border_regions[i]
        border_region.setClearColor(VBase4(0, 0, 0, 1))
        border_region.setClearColorActive(True)
        border_region.setClearDepthActive(True)
    #self.setCamera(0)
    self.controller = Controller(self, mode)
    self.taskMgr.add(self.updateCameraModules, "Update Camera Modules", 80)
    self.globalClock.setFrameTime(0.0)
    # WIDTH/HEIGHT are module-level constants defined elsewhere in the file.
    self.width = WIDTH
    self.height = HEIGHT
    props = WindowProperties()
    props.setTitle('Virtual Vision Simulator')
    base.win.requestProperties(props)
    # NOTE(review): the following triple-quoted strings are disabled
    # experiments (2-d overlay for the new window), kept as inert
    # string expressions.
    """new_window_2d_display_region = self.new_window.makeDisplayRegion()
    new_window_2d_display_region.setSort(20)
    new_window_camera_2d = NodePath(Camera('2d camera of new window'))
    lens_2d = OrthographicLens()
    lens_2d.setFilmSize(2, 2)
    lens_2d.setNearFar(-1000, 1000)
    new_window_camera_2d.node().setLens(lens_2d)
    new_window_render_2d = NodePath('render2d of new window')
    new_window_render_2d.setDepthTest(False)
    new_window_render_2d.setDepthWrite(False)
    new_window_camera_2d.reparentTo(new_window_render_2d)
    new_window_2d_display_region.setCamera(new_window_camera_2d)"""
    """aspectRatio = base.getAspectRatio()
    self.new_window_aspect2d = new_window_render_2d.attachNewNode(PGTop('Aspect2d of new window'))
    self.new_window_aspect2d.setScale(1.0 / aspectRatio, 1.0, 1.0)"""
    # Dump a scene-graph summary for debugging.
    render.analyze()
def __init__(self, nameorcam):
    """Wrap an existing NodePath as-is, or build a Camera from a name."""
    self.node = nameorcam if isinstance(nameorcam, NodePath) else Camera(nameorcam)