def setViewPlatform(self, vp):
    """Adopt *vp* as our view platform and refresh cached matrices.

    Stores single-precision copies of the platform's projection
    (viewMatrix) and model-view (modelMatrix) matrices, pre-computes
    their product, and clears the per-path matrix so it is recalculated
    on the next traversal.
    """
    view, model = vp.viewMatrix(), vp.modelMatrix()
    self.viewPlatform = vp
    self.projection = view.astype('f')
    self.modelView = model.astype('f')
    # pre-multiplied model-view x projection, handy for shader uniforms
    self.modelproj = dot(self.modelView, self.projection)
    # invalidate any previously-cached per-path matrix
    self.matrix = None
def legacyLightRender(self, matrix): """Do legacy light-rendering operation""" # okay, now visible presentations for remaining in range(0, self.MAX_LIGHTS - 1): glDisable(GL_LIGHT0 + remaining) id = 0 for path in self.paths.get(nodetypes.Light, ()): tmatrix = path.transformMatrix() localMatrix = dot(tmatrix, matrix) self.matrix = localMatrix self.renderPath = path glLoadMatrixf(localMatrix) path[-1].Light(GL_LIGHT0 + id, mode=self) id += 1 if id >= (self.MAX_LIGHTS - 1): break if not id: # default VRML lighting... from OpenGLContext.scenegraph import light l = light.DirectionalLight(direction=(0, 0, -1.0)) glLoadMatrixf(matrix) l.Light(GL_LIGHT0, mode=self) self.matrix = matrix
def setViewPlatform(self, vp):
    """Set our view platform, caching its matrices as float32.

    Also pre-computes the model-view x projection product and resets
    the per-path matrix so the next render recomputes it.
    """
    self.viewPlatform = vp
    projection = vp.viewMatrix().astype('f')
    modelView = vp.modelMatrix().astype('f')
    self.projection = projection
    self.modelView = modelView
    self.modelproj = dot(modelView, projection)
    self.matrix = None
def legacyLightRender( self, matrix ): """Do legacy light-rendering operation""" # okay, now visible presentations for remaining in range(0,self.MAX_LIGHTS-1): glDisable( GL_LIGHT0 + remaining ) id = 0 for path in self.paths.get( nodetypes.Light, ()): tmatrix = path.transformMatrix() localMatrix = dot(tmatrix,matrix) self.matrix = localMatrix self.renderPath = path glLoadMatrixf( localMatrix ) path[-1].Light( GL_LIGHT0+id, mode=self ) id += 1 if id >= (self.MAX_LIGHTS-1): break if not id: # default VRML lighting... from OpenGLContext.scenegraph import light l = light.DirectionalLight( direction = (0,0,-1.0)) glLoadMatrixf( matrix ) l.Light( GL_LIGHT0, mode = self ) self.matrix = matrix
def modelMatrix(self, direction=None, inverse=False):
    """Calculate our model-side matrix.

    direction -- explicit direction override; falls back to our
        ``direction`` attribute when omitted
    inverse -- when true, compose the opposite-order (inverse) product

    The transformmatrix helpers return a pair of matrices; the boolean
    index flips between them -- presumably (inverse, forward) -- TODO
    confirm against transformmatrix's documentation.
    """
    if direction is None:
        direction = getattr(self, 'direction', None)
    which = bool(not inverse)
    if not direction:
        # *inverse* of translation matrix is forward
        return transformmatrix.transMatrix(self.location)[which]
    rot = vectorutilities.orientToXYZR((0, 0, -1), direction)
    # inverse of rotation matrix, hmm...
    rotate = transformmatrix.rotMatrix(rot)[which]
    # inverse of translation matrix...
    translate = transformmatrix.transMatrix(self.location)[which]
    if inverse:
        return dot(rotate, translate)
    return dot(translate, rotate)
def modelMatrix(self, direction=None, inverse=False):
    """Compute the model-side matrix for this node.

    direction -- optional direction override; when None we use our
        ``direction`` attribute if present
    inverse -- selects the inverse transform (reversed multiply order
        and the other element of each helper's matrix pair)
    """
    if direction is None:
        direction = getattr(self, 'direction', None)
    # boolean index into the (pair of) matrices the helpers return;
    # presumably selects forward vs. inverse -- TODO confirm
    idx = bool(not inverse)
    if direction:
        xyzr = vectorutilities.orientToXYZR((0, 0, -1), direction)
        # inverse of rotation matrix, hmm...
        rotate = transformmatrix.rotMatrix(xyzr)[idx]
        # inverse of translation matrix...
        translate = transformmatrix.transMatrix(self.location)[idx]
        first, second = (rotate, translate) if inverse else (translate, rotate)
        return dot(first, second)
    # *inverse* of translation matrix is forward
    return transformmatrix.transMatrix(self.location)[idx]
def test_matrix(self):
    """Test that a texture matrix can produce a proper scale/offset

    Adds two 64x64 RGBA maps to the atlas manager and checks that each
    map's matrix() transforms unit texture coordinates into that map's
    sub-rectangle of the atlas (each map covering a .25 x .25 region,
    which assumes a 256x256 atlas -- TODO confirm atlas size).
    """
    # Renamed locals 'map' and 'set' so they no longer shadow builtins.
    tex_map = self.atlasManager.add(NumpyAdapter(zeros((64, 64, 4), 'B')))
    matrix = tex_map.matrix()
    assert matrix is not None
    # first map sits at the atlas origin...
    bottom_left = dot(array([0, 0, 0, 1], 'f'), matrix)
    assert allclose(bottom_left, [0, 0, 0, 1]), bottom_left
    top_right = dot(array([1, 1, 0, 1], 'f'), matrix)
    assert allclose(top_right, [.25, .25, 0, 1]), top_right
    # second map should land in the next cell over...
    tex_map = self.atlasManager.add(NumpyAdapter(zeros((64, 64, 4), 'B')))
    matrix = tex_map.matrix()
    assert matrix is not None
    bottom_left = dot(array([0, 0, 0, 1], 'f'), matrix)
    assert allclose(bottom_left, [.25, 0, 0, 1]), (bottom_left, matrix)
    top_right = dot(array([1, 1, 0, 1], 'f'), matrix)
    assert allclose(top_right, [.5, .25, 0, 1]), (top_right, matrix)
    # both corners transformed in a single batched dot...
    corners = dot(array([[0, 0, 0, 1], [1, 1, 0, 1]], 'f'), matrix)
    assert allclose(corners, [[.25, 0, 0, 1], [.5, .25, 0, 1]]), (corners, matrix)
def greatestDepth(self, toRender):
    """Find the furthest (most negative) projected z among bounding volumes

    toRender -- sequence of (sortKey, modelViewMatrix, transformMatrix,
        boundingVolume, path) records as produced by renderSet

    Returns the smallest z value seen (<= 0), or 0 when any volume is
    unbounded or lacks getPoints, telling the caller to keep the full
    frustum depth.
    """
    # experimental: adjust our frustum to smaller depth based on
    # the projected z-depth of bbox points...
    maxDepth = 0
    for (key, mv, tm, bv, path) in toRender:
        try:
            points = bv.getPoints()
        # FIX: modern except syntax (was Python-2-only "except ..., err");
        # the bound exception was never used.
        except (AttributeError, boundingvolume.UnboundedObject):
            # unbounded/unknown volume: cannot safely tighten the frustum
            return 0
        else:
            # project corner points into eye space; column 2 is depth
            translated = dot(points, mv)
            maxDepth = min((maxDepth, min(translated[:, 2])))
    # FIX: original fell off the end and implicitly returned None
    return maxDepth
def greatestDepth(self, toRender):
    """Return the furthest projected z among toRender's bounding volumes

    toRender -- renderSet-style records
        (sortKey, modelView, transform, boundingVolume, path)

    Returns the minimum eye-space z (<= 0) over all bounding-volume
    points, or 0 if any volume is unbounded (caller then keeps the
    default frustum depth).
    """
    # experimental: adjust our frustum to smaller depth based on
    # the projected z-depth of bbox points...
    maxDepth = 0
    for (key, mv, tm, bv, path) in toRender:
        try:
            points = bv.getPoints()
        # FIX: was Python-2-only "except (...), err" with unused err
        except (AttributeError, boundingvolume.UnboundedObject):
            # can't bound this volume; abandon the optimisation
            return 0
        else:
            translated = dot(points, mv)
            maxDepth = min((maxDepth, min(translated[:, 2])))
    # FIX: the computed depth was never returned (implicit None)
    return maxDepth
def legacyBackgroundRender(self, vp, matrix):
    """Do legacy background rendering

    vp -- active view platform, supplies the orientation quaternion
    matrix -- current model-view matrix (not used directly here; the
        background orientation is rebuilt from vp's quaternion and the
        background path's rotation-only transform)

    Renders the current Background node (if any) with clear=True, or
    falls back to clearing to the VRML default black.
    """
    bPath = self.currentBackground()
    if bPath is not None:
        # legacy...
        # Rotation-only matrix: the background follows the viewer's
        # orientation but must never translate or scale with the camera.
        self.matrix = dot(
            vp.quaternion.matrix(dtype='f'),
            bPath.transformMatrix(translate=0, scale=0, rotate=1))
        bPath[-1].Render(mode=self, clear=True)
    else:
        ### default VRML background is black
        glClearColor(0.0, 0.0, 0.0, 1.0)
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
def legacyBackgroundRender( self, vp, matrix ):
    """Do legacy background rendering

    vp -- view platform providing the viewer-orientation quaternion
    matrix -- current model-view matrix (unused here: orientation is
        taken from vp, and the background path contributes a
        rotation-only transform)
    """
    bPath = self.currentBackground( )
    if bPath is not None:
        # legacy...
        # rotation-only: backgrounds track viewer orientation, never
        # position or scale
        self.matrix = dot(
            vp.quaternion.matrix( dtype='f'),
            bPath.transformMatrix(translate=0,scale=0, rotate=1 )
        )
        bPath[-1].Render( mode=self, clear=True )
    else:
        ### default VRML background is black
        glClearColor(0.0,0.0,0.0,1.0)
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT )
def moveTo(cls, path, context):
    """Given a node-path to a viewpoint, move context's platform there

    path -- node-path whose final node is the viewpoint
    context -- rendering context owning (or able to create) the
        view platform
    """
    matrix = path.transformMatrix()
    viewpoint = path[-1]
    platform = context.platform
    if not platform:
        # lazily create and cache the context's view platform
        platform = context.platform = context.getViewPlatform()
    if viewpoint.jump:
        # homogeneous position pushed through the path's transform
        worldPosition = arrays.dot(list(viewpoint.position) + [1], matrix)
        # path's accumulated rotation composed with the node's own
        combined = path.quaternion() * quaternion.fromXYZR(*viewpoint.orientation)
        platform.setPosition(worldPosition)
        platform.setOrientation(combined.XYZR())
        platform.setFrustum(viewpoint.fieldOfView)
def renderSet(self, matrix):
    """Calculate ordered rendering set to display

    matrix -- base model-view matrix composed with each rendering
        path's own transform matrix

    Returns frustum-filtered records sorted by sort key, each record:
        (sortKey, modelViewMatrix, transformMatrix, boundingVolume, path)
    """
    records = []
    for path in self.paths.get(nodetypes.Rendering, ()):
        node = path[-1]
        tmatrix = path.transformMatrix()
        mvmatrix = dot(tmatrix, matrix)
        key = node.sortKey(self, tmatrix)
        if hasattr(node, 'boundingVolume'):
            volume = node.boundingVolume(self)
        else:
            # node cannot report a bounding volume; never culled
            volume = None
        records.append((key, mvmatrix, tmatrix, volume, path))
    visible = self.frustumVisibilityFilter(records)
    visible.sort(key=lambda record: record[0])
    return visible
def renderSet(self, matrix):
    """Build the ordered set of rendering records for this pass.

    matrix -- base model-view matrix; each path's transform matrix is
        composed with it to give the record's model-view matrix

    Returns the frustum-filtered records in ascending sort-key order.
    """
    collected = []
    for path in self.paths.get(nodetypes.Rendering, ()):
        terminal = path[-1]
        pathMatrix = path.transformMatrix()
        modelView = dot(pathMatrix, matrix)
        sortKey = terminal.sortKey(self, pathMatrix)
        getVolume = getattr(terminal, 'boundingVolume', None)
        volume = getVolume(self) if getVolume is not None else None
        collected.append((sortKey, modelView, pathMatrix, volume, path))
    survivors = self.frustumVisibilityFilter(collected)
    survivors.sort(key=lambda item: item[0])
    return survivors
def modelMatrix(self, direction=None):
    """Calculate our model-side matrix

    direction -- optional direction override; otherwise we use our
        ``direction`` attribute when present (may remain None -- the
        helpers are presumed to tolerate that; TODO confirm)

    Returns the composed (inverse translation x inverse rotation)
    matrix, a single factor when the other is None, or a float32
    identity when both are None.
    """
    if direction is None and hasattr(self, 'direction'):
        direction = self.direction
    rot = vectorutilities.orientToXYZR((0, 0, -1), direction)
    # inverse of rotation matrix, hmm...
    rotate = transformmatrix.rotMatrix(rot)[1]
    # inverse of translation matrix...
    translate = transformmatrix.transMatrix(self.location)[1]
    if rotate is not None and translate is not None:
        return dot(translate, rotate)
    elif rotate is not None:
        return rotate
    elif translate is not None:
        return translate
    else:
        # FIX: identity((4,4), type='f') is the legacy Numeric API and
        # raises TypeError under numpy; numpy.identity takes an integer
        # size and a dtype keyword.
        return identity(4, dtype='f')
def modelMatrix(self, direction=None):
    """Calculate our model-side matrix

    direction -- optional override for our ``direction`` attribute
        (may stay None; the matrix helpers are assumed to cope --
        TODO confirm)

    Combines the inverse translation and inverse rotation matrices,
    degrading gracefully to whichever factor exists, or to a float32
    identity matrix when neither does.
    """
    if direction is None and hasattr(self, 'direction'):
        direction = self.direction
    rot = vectorutilities.orientToXYZR((0, 0, -1), direction)
    # inverse of rotation matrix, hmm...
    rotate = transformmatrix.rotMatrix(rot)[1]
    # inverse of translation matrix...
    translate = transformmatrix.transMatrix(self.location)[1]
    if rotate is not None and translate is not None:
        return dot(translate, rotate)
    elif rotate is not None:
        return rotate
    elif translate is not None:
        return translate
    else:
        # FIX: was identity((4,4), type='f') -- Numeric-era call that
        # fails under numpy; numpy.identity wants (n, dtype=...).
        return identity(4, dtype='f')
def moveTo(cls, path, context):
    """Given a node-path to a viewpoint, move context's platform there

    path -- node-path ending in the viewpoint node to move to
    context -- rendering context whose platform is (re)positioned
    """
    matrix = path.transformMatrix()
    node = path[-1]
    if not context.platform:
        # create-and-cache the view platform on first use
        context.platform = context.getViewPlatform()
    platform = context.platform
    if node.jump:
        # build homogeneous coordinate for the node's position
        homogeneous = list(node.position)
        homogeneous.append(1)
        platform.setPosition(arrays.dot(homogeneous, matrix))
        # total orientation: path rotation composed with node's own
        orientation = path.quaternion() * quaternion.fromXYZR(*node.orientation)
        platform.setOrientation(orientation.XYZR())
        platform.setFrustum(node.fieldOfView)
def renderLightTexture(
    self, light, mode, direction=None, fov=None, textureKey=None
):
    """Render ourselves into a texture for the given light

    light -- light node for which to produce the depth texture
    mode -- the rendering pass being processed (another FlatPass);
        supplies viewport and cache
    direction -- optional direction override for the light's
        modelMatrix calculation
    fov -- optional field of view; halved to give the light frustum's
        cutoff angle (units presumed to match light.viewMatrix --
        TODO confirm)
    textureKey -- unused here; presumably a caller-side cache key

    Returns (texture, textureMatrix) where textureMatrix maps camera
    eye-space into the light's biased clip space.
    """
    '''We're going to render our scene into the depth buffer,
    so we'll explicitly specify the depth operation. The use of
    GL_LEQUAL means that we can rewrite the same geometry to the
    depth buffer multiple times and (save for floating-point
    artefacts), should see the geometry render each time.
    '''
    glDepthFunc(GL_LEQUAL)
    glEnable(GL_DEPTH_TEST)
    '''Our setupShadowContext method will reset our viewport to
    match the size of the depth-texture we're creating.'''
    glPushAttrib(GL_VIEWPORT_BIT)
    '''We invoke our setupShadowContext method to establish the
    texture we'll use as our target. This tutorial is just going
    to reset the viewport to a subset of the back-buffer (the
    regular rendering target for OpenGL). Later tutorials will
    set up an off-screen rendering target (a Frame Buffer Object)
    by overriding this method-call.'''
    texture = self.setupShadowContext(light, mode)
    '''==Setup Scene with Light as Camera==

    The algorithm requires us to set up the scene to render from
    the point of view of our light. We're going to use a pair of
    methods on the light to do the calculations. These do the
    same calculations as "gluPerspective" for the viewMatrix, and
    a pair of rotation,translation transformations for the
    model-view matrix.

    Note:

        For VRML97 scenegraphs, this wouldn't be sufficient, as
        we can have multiple lights, and lights can be children
        of arbitrary Transforms, and can appear multiple times
        within the same scenegraph. We would have to calculate
        the matrices for each path that leads to a light, not
        just for each light. The node-paths have methods to
        retrieve their matrices, so we would simply dot those
        matrices with the matrices we retrieve here. The
        complexity of supporting these features doesn't
        particularly suit an introductory tutorial.
    '''
    if fov:
        cutoff = fov / 2.0
    else:
        cutoff = None
    lightView = light.viewMatrix(cutoff, near=.3, far=30.0)
    lightModel = light.modelMatrix(direction=direction)
    '''The texture matrix translates from camera eye-space into
    light eye-space. See the original tutorial for an explanation
    of how the mapping is done, and how it interacts with the
    current projection matrix.

    Things to observe about the calculation of the matrix
    compared to the values in the original tutorial:

     * we are explicitly taking the transpose of the result matrix
     * the order of operations is the reverse of the calculations
       in the tutorial
     * we take the transpose of the matrix so that matrix[0] is a
       row in the sense that the tutorial uses it

    This pattern of reversing order-of-operations and taking the
    transpose happens frequently in PyOpenGL when working with
    matrix code from C sources.

    Note:

        A number of fixes to matrix multiply order came from
        comparing results with
        [http://www.geometrian.com/Programs.php Ian Mallett's
        OpenGL Library v1.4].
    '''
    lightMatrix = dot(lightModel, lightView)
    textureMatrix = transpose(dot(lightMatrix, self.BIAS_MATRIX))
    '''This is a bit wasteful, as we've already loaded our
    projection and model-view matrices for our view-platform into
    the GL. Real-world implementations would normally do the
    light-rendering pass before doing their world-view setup.
    We'll restore the platform values later on.
    '''
    glMatrixMode(GL_PROJECTION)
    glLoadMatrixf(lightView)
    glMatrixMode(GL_MODELVIEW)
    glLoadMatrixf(lightModel)
    '''Our geometryPasses object needs to have the same setup as
    the mode (another FlatPass instance) which we are
    processing.'''
    self.geometryPasses.matrix = lightModel
    self.geometryPasses.modelView = lightModel
    self.geometryPasses.projection = lightView
    self.geometryPasses.viewport = mode.viewport
    self.geometryPasses.calculateFrustum()
    self.geometryPasses.context = self
    self.geometryPasses.cache = mode.cache
    try:
        '''Because we *only* care about the depth buffer, we can
        mask out the color buffer entirely. We can use
        frustum-culling to only render those objects which
        intersect with the light's frustum (this is done
        automatically by the render-visiting code we use for
        drawing).

        Note:

            The glColorMask call does not prevent OpenGL from
            ever attempting to write to the color buffer, it just
            masks regular drawing operations. A call to glClear()
            for instance, could still clear the colour buffer.
        '''
        if not self.lightViewDebug:
            glColorMask(0, 0, 0, 0)
        '''We reconfigure the mode to tell the geometry to
        optimize its rendering process, for instance by disabling
        normal generation, and excluding color and texture
        information.'''
        self.geometryPasses.lighting = False
        self.geometryPasses.textured = False
        self.geometryPasses.visible = False
        '''==Offset Polygons to avoid Artefacts==

        We want to avoid depth-buffer artefacts where the
        front-face appears to be ever-so-slightly behind itself
        due to multiplication and transformation artefacts. The
        original tutorial uses rendering of the *back* faces of
        objects into the depth buffer, but with "open" geometry
        such as the Utah Teapot, we wind up with nasty artefacts
        where e.g. the area on the body around the spout isn't
        shadowed because there's no back-faces in front of it.

        Even with the original approach, using a polygon offset
        will tend to avoid "moire" effects in the shadows where
        precision issues cause the depths in the buffer to pass
        back and forth across the LEQUAL threshold as they cross
        the surface of the object.

        To avoid these problems, we use a polygon-offset
        operation. The first 1.0 gives us the raw fragment
        depth-value, the second 1.0, the parameter "units" says
        to take 1.0 depth-buffer units and add it to the
        depth-value from the previous step, making the depth
        buffer record values 1.0 units less than the geometry's
        natural value.
        '''
        glEnable(GL_POLYGON_OFFSET_FILL)
        glPolygonOffset(1.0, self.offset)
        '''Don't render front-faces, so that we avoid moire
        effects in the rendering of shadows'''
        glCullFace(GL_FRONT)
        glEnable(GL_CULL_FACE)
        '''And now we draw our scene into the depth-buffer.'''
        self.drawScene(mode, lightModel)
        '''Our closeShadowContext will copy the current depth
        buffer into our depth texture and deactivate the
        texture.'''
        self.closeShadowContext(texture)
        '''Return the configured texture into which we will render'''
        return texture, textureMatrix
    finally:
        '''Restore "regular" rendering...'''
        glDisable(GL_POLYGON_OFFSET_FILL)
        glShadeModel(GL_SMOOTH)
        glCullFace(GL_BACK)
        glDisable(GL_CULL_FACE)
        glColorMask(1, 1, 1, 1)
        '''Now restore the viewport.'''
        glPopAttrib()
def renderLightTexture(
    self, light, mode, direction=None, fov=None, textureKey=None
):
    """Render ourselves into a texture for the given light

    light -- the light node whose point of view is rendered
    mode -- rendering pass being processed (a FlatPass); provides the
        viewport and geometry cache
    direction -- optional direction passed through to the light's
        modelMatrix
    fov -- optional field of view, halved for the frustum cutoff
        (units assumed to match light.viewMatrix -- TODO confirm)
    textureKey -- not used in this method; presumably a cache key

    Returns (texture, textureMatrix): the depth texture and the matrix
    mapping camera eye-space into the light's biased clip space.
    """
    '''We're going to render our scene into the depth buffer,
    so we'll explicitly specify the depth operation. The use of
    GL_LEQUAL means that we can rewrite the same geometry to the
    depth buffer multiple times and (save for floating-point
    artefacts), should see the geometry render each time.
    '''
    glDepthFunc(GL_LEQUAL)
    glEnable(GL_DEPTH_TEST)
    '''Our setupShadowContext method will reset our viewport to
    match the size of the depth-texture we're creating.'''
    glPushAttrib(GL_VIEWPORT_BIT)
    '''We invoke our setupShadowContext method to establish the
    texture we'll use as our target. This tutorial is just going
    to reset the viewport to a subset of the back-buffer (the
    regular rendering target for OpenGL). Later tutorials will
    set up an off-screen rendering target (a Frame Buffer Object)
    by overriding this method-call.'''
    texture = self.setupShadowContext(light, mode)
    '''==Setup Scene with Light as Camera==

    The algorithm requires us to set up the scene to render from
    the point of view of our light. We're going to use a pair of
    methods on the light to do the calculations. These do the
    same calculations as "gluPerspective" for the viewMatrix, and
    a pair of rotation,translation transformations for the
    model-view matrix.

    Note:

        For VRML97 scenegraphs, this wouldn't be sufficient, as
        we can have multiple lights, and lights can be children
        of arbitrary Transforms, and can appear multiple times
        within the same scenegraph. We would have to calculate
        the matrices for each path that leads to a light, not
        just for each light. The node-paths have methods to
        retrieve their matrices, so we would simply dot those
        matrices with the matrices we retrieve here. The
        complexity of supporting these features doesn't
        particularly suit an introductory tutorial.
    '''
    if fov:
        cutoff = fov / 2.0
    else:
        cutoff = None
    lightView = light.viewMatrix(cutoff, near=.3, far=30.0)
    lightModel = light.modelMatrix(direction=direction)
    '''The texture matrix translates from camera eye-space into
    light eye-space. See the original tutorial for an explanation
    of how the mapping is done, and how it interacts with the
    current projection matrix.

    Things to observe about the calculation of the matrix
    compared to the values in the original tutorial:

     * we are explicitly taking the transpose of the result matrix
     * the order of operations is the reverse of the calculations
       in the tutorial
     * we take the transpose of the matrix so that matrix[0] is a
       row in the sense that the tutorial uses it

    This pattern of reversing order-of-operations and taking the
    transpose happens frequently in PyOpenGL when working with
    matrix code from C sources.

    Note:

        A number of fixes to matrix multiply order came from
        comparing results with
        [http://www.geometrian.com/Programs.php Ian Mallett's
        OpenGL Library v1.4].
    '''
    lightMatrix = dot(lightModel, lightView)
    textureMatrix = transpose(dot(lightMatrix, self.BIAS_MATRIX))
    '''This is a bit wasteful, as we've already loaded our
    projection and model-view matrices for our view-platform into
    the GL. Real-world implementations would normally do the
    light-rendering pass before doing their world-view setup.
    We'll restore the platform values later on.
    '''
    glMatrixMode(GL_PROJECTION)
    glLoadMatrixf(lightView)
    glMatrixMode(GL_MODELVIEW)
    glLoadMatrixf(lightModel)
    '''Our geometryPasses object needs to have the same setup as
    the mode (another FlatPass instance) which we are
    processing.'''
    self.geometryPasses.matrix = lightModel
    self.geometryPasses.modelView = lightModel
    self.geometryPasses.projection = lightView
    self.geometryPasses.viewport = mode.viewport
    self.geometryPasses.calculateFrustum()
    self.geometryPasses.context = self
    self.geometryPasses.cache = mode.cache
    try:
        '''Because we *only* care about the depth buffer, we can
        mask out the color buffer entirely. We can use
        frustum-culling to only render those objects which
        intersect with the light's frustum (this is done
        automatically by the render-visiting code we use for
        drawing).

        Note:

            The glColorMask call does not prevent OpenGL from
            ever attempting to write to the color buffer, it just
            masks regular drawing operations. A call to glClear()
            for instance, could still clear the colour buffer.
        '''
        if not self.lightViewDebug:
            glColorMask(0, 0, 0, 0)
        '''We reconfigure the mode to tell the geometry to
        optimize its rendering process, for instance by disabling
        normal generation, and excluding color and texture
        information.'''
        self.geometryPasses.lighting = False
        self.geometryPasses.textured = False
        self.geometryPasses.visible = False
        '''==Offset Polygons to avoid Artefacts==

        We want to avoid depth-buffer artefacts where the
        front-face appears to be ever-so-slightly behind itself
        due to multiplication and transformation artefacts. The
        original tutorial uses rendering of the *back* faces of
        objects into the depth buffer, but with "open" geometry
        such as the Utah Teapot, we wind up with nasty artefacts
        where e.g. the area on the body around the spout isn't
        shadowed because there's no back-faces in front of it.

        Even with the original approach, using a polygon offset
        will tend to avoid "moire" effects in the shadows where
        precision issues cause the depths in the buffer to pass
        back and forth across the LEQUAL threshold as they cross
        the surface of the object.

        To avoid these problems, we use a polygon-offset
        operation. The first 1.0 gives us the raw fragment
        depth-value, the second 1.0, the parameter "units" says
        to take 1.0 depth-buffer units and add it to the
        depth-value from the previous step, making the depth
        buffer record values 1.0 units less than the geometry's
        natural value.
        '''
        glEnable(GL_POLYGON_OFFSET_FILL)
        glPolygonOffset(1.0, self.offset)
        '''Don't render front-faces, so that we avoid moire
        effects in the rendering of shadows'''
        glCullFace(GL_FRONT)
        glEnable(GL_CULL_FACE)
        '''And now we draw our scene into the depth-buffer.'''
        self.drawScene(mode, lightModel)
        '''Our closeShadowContext will copy the current depth
        buffer into our depth texture and deactivate the
        texture.'''
        self.closeShadowContext(texture)
        '''Return the configured texture into which we will render'''
        return texture, textureMatrix
    finally:
        '''Restore "regular" rendering...'''
        glDisable(GL_POLYGON_OFFSET_FILL)
        glShadeModel(GL_SMOOTH)
        glCullFace(GL_BACK)
        glDisable(GL_CULL_FACE)
        glColorMask(1, 1, 1, 1)
        '''Now restore the viewport.'''
        glPopAttrib()
def get_modelproj(shader, mode):
    """Uniform callback: combined model-view x projection matrix.

    shader -- unused; present to match the uniform-callback signature
    mode -- rendering mode providing ``matrix`` and ``projection``
    """
    modelview = mode.matrix
    projection = mode.projection
    return dot(modelview, projection)
def get_inv_modelview(shader, mode):
    """Uniform callback: inverse model-view matrix.

    Composes the view platform's inverse model matrix with the current
    render path's inverse transform matrix.
    """
    invPlatform = mode.viewPlatform.modelMatrix(inverse=True)
    invPath = mode.renderPath.transformMatrix(inverse=True)
    return dot(invPlatform, invPath)
def get_inv_modelproj(shader, mode):
    """Uniform callback: inverse of the model-view x projection product.

    Built from the sibling inverse-projection and inverse-model-view
    callbacks, multiplied in reversed order (as inverses compose).
    """
    inverse_modelview = get_inv_modelview(shader, mode)
    inverse_projection = get_inv_projection(shader, mode)
    return dot(inverse_projection, inverse_modelview)
def get_inv_modelview(shader, mode):
    """Uniform callback producing the inverse model-view matrix.

    shader -- unused; callback-signature placeholder
    mode -- supplies the view platform and current render path
    """
    return dot(
        mode.viewPlatform.modelMatrix(inverse=True),
        mode.renderPath.transformMatrix(inverse=True),
    )
def get_modelproj(shader, mode):
    """Compute the model-view/projection product for shader upload.

    shader -- ignored; kept for the uniform-callback signature
    mode -- rendering mode whose ``matrix`` (model-view) and
        ``projection`` attributes are combined
    """
    return dot(mode.matrix, mode.projection)
def get_inv_modelproj(shader, mode):
    """Uniform callback: inverse model-view/projection matrix.

    Delegates to the sibling inverse callbacks and multiplies their
    results with the projection inverse first (reversed order, since
    the inverse of a product reverses its factors).
    """
    inv_mv = get_inv_modelview(shader, mode)
    inv_proj = get_inv_projection(shader, mode)
    return dot(inv_proj, inv_mv)