Example 1
def main(argv):

    
    app = QApplication(argv)

    # prepare scene.
    center = osg.Vec3(0.0,0.0,0.0)
    radius = 1.0
    
    # create the hud.
    camera = osg.Camera()
    camera.setReferenceFrame(osg.Transform.ABSOLUTE_RF)
    camera.setProjectionMatrixAsOrtho2D(0,1280,0,1024)
    camera.setViewMatrix(osg.Matrix.identity())
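    # clear only the depth buffer so the HUD does not wipe out the main view's colour buffer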
    camera.setClearMask(GL_DEPTH_BUFFER_BIT)
    camera.addChild(createHUDText())
    camera.getOrCreateStateSet().setMode(GL_LIGHTING,osg.StateAttribute.OFF)
    
    # make sure the root node is a group so we can add extra nodes to it.
    group = osg.Group()
    group.addChild(camera)
    group.addChild(create3DText(center, radius))

    # The qt window
    widget = MainWindow()
    
    # set the scene to render
    widget.setSceneData(group)
    widget.setCameraManipulator(osgGA.TrackballManipulator())

    widget.setGeometry(100, 100, 800, 600)
    widget.show()

    return app.exec()
Example 2
class DepthPeeling(osg.Referenced):

    def __init__(self, width, height): pass
    def setSolidScene(self, scene): pass
    def setTransparentScene(self, scene): pass
    def getRoot(self): pass
    def resize(self, width, height): pass
    def setNumPasses(self, numPasses): pass
    def getNumPasses(self): pass
    def setTexUnit(self, texUnit): pass
    def setShowAllLayers(self, showAllLayers): pass
    def getShowAllLayers(self): pass
    def setOffsetValue(self, offsetValue): pass
    def getOffsetValue(self): pass

    # use this to support depth peeling in GLSL shaders of transparent objects
    PeelingShader = None

    class EventHandler(osgGA.GUIEventHandler):
        def __init__(self, depthPeeling): pass

        # Handle events, return True if handled, False otherwise.
        def handle(self, ea, aa, obj, nv): pass
        _depthPeeling = None    # the DepthPeeling instance being controlled

    def createQuad(self, layerNumber, numTiles): pass
    def createPeeling(self): pass

    class CullCallback(osg.NodeCallback):
        def __init__(self, texUnit, texWidth, texHeight, offsetValue): pass
        def __call__(self, node, nv): pass
        _texUnit = 0
        _texWidth = 0
        _texHeight = 0
        _offsetValue = 0

    _numPasses = 0
    _texUnit = 0
    _texWidth = 0
    _texHeight = 0
    _showAllLayers = False
    _offsetValue = 0

    # The root node that is handed over to the viewer
    _root = osg.Group()

    # The scene that is displayed
    _solidscene = osg.Group()
    _transparentscene = osg.Group()

    # The final camera that composites the pre-rendered textures into the final picture
    _compositeCamera = osg.Camera()

    # Per-layer textures: osg.TextureRectangle when USE_TEXTURE_RECTANGLE is defined,
    # osg.Texture2D otherwise.
    _depthTextures = []
    _colorTextures = []
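
# A hedged usage sketch for the DepthPeeling interface above; the model names are
# illustrative and the class itself is only sketched, not implemented, in this listing.
viewer = osgViewer.Viewer()
depthPeeling = DepthPeeling(1024, 768)
depthPeeling.setSolidScene(osgDB.readNodeFile("cessna.osgt"))        # opaque geometry
depthPeeling.setTransparentScene(osgDB.readNodeFile("glider.osgt"))  # geometry to be peeled
depthPeeling.setNumPasses(8)
viewer.setSceneData(depthPeeling.getRoot())   # the root node handed over to the viewer
viewer.addEventHandler(DepthPeeling.EventHandler(depthPeeling))
viewer.run()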
Example 3
def createShadowedScene(reflectedSubgraph, reflectorNodePath, unit, clearColor, tex_width, tex_height, renderImplementation):


    

    group = osg.Group()
    
    texture = osg.TextureCubeMap()
    texture.setTextureSize(tex_width, tex_height)

    texture.setInternalFormat(GL_RGB)
    texture.setWrap(osg.Texture.WRAP_S, osg.Texture.CLAMP_TO_EDGE)
    texture.setWrap(osg.Texture.WRAP_T, osg.Texture.CLAMP_TO_EDGE)
    texture.setWrap(osg.Texture.WRAP_R, osg.Texture.CLAMP_TO_EDGE)
    texture.setFilter(osg.TextureCubeMap.MIN_FILTER,osg.TextureCubeMap.LINEAR)
    texture.setFilter(osg.TextureCubeMap.MAG_FILTER,osg.TextureCubeMap.LINEAR)
    
    # set up the render to texture cameras.
    Cameras = UpdateCameraAndTexGenCallback.CameraList()
    for i in range(6):
        # create the camera
        camera = osg.Camera()

        camera.setClearMask(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
        camera.setClearColor(clearColor)

        # set viewport
        camera.setViewport(0,0,tex_width,tex_height)

        # set the camera to render before the main camera.
        camera.setRenderOrder(osg.Camera.PRE_RENDER)

        # tell the camera to use OpenGL frame buffer object where supported.
        camera.setRenderTargetImplementation(renderImplementation)

        # attach the texture and use it as the color buffer.
        camera.attach(osg.Camera.COLOR_BUFFER, texture, 0, i)

        # add subgraph to render
        camera.addChild(reflectedSubgraph)
        
        group.addChild(camera)
        
        Cameras.push_back(camera)
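
    # Assumed continuation (not shown in the snippet): the collected cameras would typically be
    # handed to the UpdateCameraAndTexGenCallback referenced above, so each cube-map face is
    # re-oriented around the reflector every frame, and the group returned to the caller.
    # The callback's constructor arguments here are an assumption.
    group.setUpdateCallback(UpdateCameraAndTexGenCallback(reflectorNodePath, Cameras))
    return group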
Example 4
def createHUD(label):

    
    # create a camera to set up the projection and model view matrices, and the subgraph to be drawn in the HUD
    camera = osg.Camera()

    # set the projection matrix
    camera.setProjectionMatrix(osg.Matrix.ortho2D(0,1280,0,1024))

    # set the view matrix    
    camera.setReferenceFrame(osg.Transform.ABSOLUTE_RF)
    camera.setViewMatrix(osg.Matrix.identity())

    # only clear the depth buffer
    camera.setClearMask(GL_DEPTH_BUFFER_BIT)

    # draw subgraph after main camera view.
    camera.setRenderOrder(osg.Camera.POST_RENDER)

    # we don't want the camera to grab event focus from the viewer's main camera(s).
    camera.setAllowEventFocus(False)

    # add to this camera a subgraph to render

        geode = osg.Geode()

        font = str("fonts/arial.ttf")

        # turn lighting off for the text and disable depth test to ensure it's always on top.
        stateset = geode.getOrCreateStateSet()
        stateset.setMode(GL_LIGHTING,osg.StateAttribute.OFF)

        position = osg.Vec3(150.0,150.0,0.0)

        text = osgText.Text()
        geode.addDrawable( text )

        text.setFont(font)
        text.setPosition(position)
        text.setCharacterSize(100.0)
        text.setText(label)

        camera.addChild(geode)
Example 5
def createOrthoCamera(width, height):

    
    camera = osg.Camera()

    camera.getOrCreateStateSet().setMode(
        GL_LIGHTING,
        osg.StateAttribute.PROTECTED | osg.StateAttribute.OFF
    )

    m = osg.Matrix.ortho2D(0.0, width, 0.0, height)

    camera.setProjectionMatrix(m)
    camera.setReferenceFrame(osg.Transform.ABSOLUTE_RF)
    camera.setViewMatrix(osg.Matrix.identity())
    camera.setClearMask(GL_DEPTH_BUFFER_BIT)
    camera.setRenderOrder(osg.Camera.POST_RENDER)

    return camera
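
# A hedged usage sketch for createOrthoCamera(): wire the returned camera in as a HUD next to
# an existing scene. The helper name and the text content are illustrative, not from the original.
def addHUD(scene, width, height):
    hud = createOrthoCamera(width, height)

    geode = osg.Geode()
    text = osgText.Text()
    text.setPosition(osg.Vec3(10.0, 10.0, 0.0))
    text.setText("HUD overlay")
    geode.addDrawable(text)
    hud.addChild(geode)

    root = osg.Group()
    root.addChild(scene)
    root.addChild(hud)
    return root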
Example 6
def createHUD(w, h):


    
    # create a camera to set up the projection and model view matrices, and the subgraph to draw in the HUD
    camera = osg.Camera()

    # set the projection matrix
    camera.setProjectionMatrix(osg.Matrix.ortho2D(0,w,0,h))

    # set the view matrix    
    camera.setReferenceFrame(osg.Transform.ABSOLUTE_RF)
    camera.setViewMatrix(osg.Matrix.identity())

    # only clear the depth buffer
    camera.setClearMask(GL_DEPTH_BUFFER_BIT)

    # draw subgraph after main camera view.
    camera.setRenderOrder(osg.Camera.POST_RENDER)

    # we don't want the camera to grab event focus from the viewer's main camera(s).
    camera.setAllowEventFocus(False)
    


    # add to this camera a subgraph to render

        geode = osg.Geode()

        timesFont = str("fonts/arial.ttf")

        # turn lighting off for the text and disable depth test to ensure it's always on top.
        stateset = geode.getOrCreateStateSet()
        stateset.setMode(GL_LIGHTING,osg.StateAttribute.OFF)

        position = osg.Vec3(50.0,h-50,0.0)

            text = osgText.Text()
            geode.addDrawable( text )

            text.setFont(timesFont)
            text.setPosition(position)
            text.setText("A simple multi-touch-example\n1 touch = rotate, \n2 touches = drag + scale, \n3 touches = home")
Example 7
def createHUD(updateText):

    

    # create the hud. derived from osgHud.cpp
    # adds a set of quads, each in a separate Geode - which can be picked individually
    # e.g. to be used as a menuing/help system!
    # Can pick texts too!

    hudCamera = osg.Camera()
    hudCamera.setReferenceFrame(osg.Transform.ABSOLUTE_RF)
    hudCamera.setProjectionMatrixAsOrtho2D(0,1280,0,1024)
    hudCamera.setViewMatrix(osg.Matrix.identity())
    hudCamera.setRenderOrder(osg.Camera.POST_RENDER)
    hudCamera.setClearMask(GL_DEPTH_BUFFER_BIT)

    timesFont = str("fonts/times.ttf")

    # turn lighting off for the text and disable depth test to ensure it's always on top.
    position = osg.Vec3(150.0,800.0,0.0)
    delta = osg.Vec3(0.0,-60.0,0.0)

        geode = osg.Geode()
        stateset = geode.getOrCreateStateSet()
        stateset.setMode(GL_LIGHTING,osg.StateAttribute.OFF)
        stateset.setMode(GL_DEPTH_TEST,osg.StateAttribute.OFF)
        geode.setName("simple")
        hudCamera.addChild(geode)

        text = osgText.Text()
        geode.addDrawable( text )

        text.setFont(timesFont)
        text.setText("Picking in Head Up Displays is simple not ")
        text.setPosition(position)

        position += delta
Example 8
    width, height = 800, 450
    version = str("3.1")
    traits = osg.GraphicsContext.Traits()
    traits.x = 20
    traits.y = 30
    traits.width = width
    traits.height = height
    traits.windowDecoration = True
    traits.doubleBuffer = True
    traits.glContextVersion = version
     gc = osg.GraphicsContext.createGraphicsContext( traits )
    if   not gc.valid()  :
        osg.notify( osg.FATAL ), "Unable to create OpenGL v", version, " context."
        return( 1 )

    # Create a Camera that uses the above OpenGL context.
    cam = osg.Camera()
    cam.setGraphicsContext( gc )
    # Must set perspective projection for fovy and aspect.
    cam.setProjectionMatrix(osg.Matrix.perspective(30., float(width)/float(height), 1., 100.))
    # Unlike OpenGL, OSG viewport does *not* default to window dimensions.
    cam.setViewport( osg.Viewport( 0, 0, width, height ) )

    viewer = osgViewer.Viewer()
    viewer.setCamera( cam )
    viewer.setSceneData( root )

    # for non GL3/GL4 and non GLES2 platforms we need to enable the osg_ uniforms that the shaders will use;
    # you don't need these two lines on GL3/GL4 and GLES2 specific builds as they will be enabled by default.
    gc.getState().setUseModelViewAndProjectionUniforms(True)
    gc.getState().setUseVertexAttributeAliasing(True)
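
    # Assumed continuation (not part of the original fragment): start the standard render loop.
    return viewer.run()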
Example 9
def main(argv):


    
    # Qt requires that we construct the global QApplication before creating any widgets.
    app = QApplication(argv)

    # use an ArgumentParser object to manage the program arguments.
    arguments = osg.ArgumentParser(argv)

    # True = run osgViewer in a separate thread than Qt
    # False = interleave osgViewer and Qt in the main thread
    useFrameLoopThread = False
    if arguments.read("--no-frame-thread") : useFrameLoopThread = False
    if arguments.read("--frame-thread") : useFrameLoopThread = True

    # True = use QWidgetImage
    # False = use QWebViewImage
    useWidgetImage = False
    if arguments.read("--useWidgetImage") : useWidgetImage = True

    # True = use QWebView in a QWidgetImage to compare to QWebViewImage
    # False = make an interesting widget
    useBrowser = False
    if arguments.read("--useBrowser") : useBrowser = True

    # True = use a QLabel for text
    # False = use a QTextEdit for text
    # (only applies if useWidgetImage == True and useBrowser == False)
    useLabel = False
    if arguments.read("--useLabel") : useLabel = True

    # True = make a Qt window with the same content to compare to 
    # QWebViewImage/QWidgetImage
    # False = use QWebViewImage/QWidgetImage (depending on useWidgetImage)
    sanityCheck = False
    if arguments.read("--sanityCheck") : sanityCheck = True

    # Add n floating windows inside the QGraphicsScene.
    numFloatingWindows = 0
    while arguments.read("--numFloatingWindows", numFloatingWindows) :

    # True = Qt widgets will be displayed on a quad inside the 3D scene
    # False = Qt widgets will be an overlay over the scene (like a HUD)
    inScene = True
    if arguments.read("--fullscreen") :  inScene = False 


    root = osg.Group()

    if  not useWidgetImage :
        #-------------------------------------------------------------------
        # QWebViewImage test
        #-------------------------------------------------------------------
        # Note: When the last few issues with QWidgetImage are fixed, 
        # QWebViewImage and this if  :  section can be removed since 
        # QWidgetImage can display a QWebView just like QWebViewImage. Use 
        # --useWidgetImage --useBrowser to see that in action.

        if  not sanityCheck :
            image = osgQt.QWebViewImage()

            if arguments.argc() > 1: image.navigateTo(arguments[1])
            else: image.navigateTo("http://www.youtube.com/")

            hints = osgWidget.GeometryHints(osg.Vec3(0.0,0.0,0.0),
                                           osg.Vec3(1.0,0.0,0.0),
                                           osg.Vec3(0.0,0.0,1.0),
                                           osg.Vec4(1.0,1.0,1.0,1.0),
                                           osgWidget.GeometryHints.RESIZE_HEIGHT_TO_MAINTAINCE_ASPECT_RATIO)

            browser = osgWidget.Browser()
            browser.assign(image, hints)

            root.addChild(browser)
        else:
            # Sanity check, do the same thing as QGraphicsViewAdapter but in 
            # a separate Qt window.
            webPage = QWebPage()
            webPage.settings().setAttribute(QWebSettings.JavascriptEnabled, True)
            webPage.settings().setAttribute(QWebSettings.PluginsEnabled, True)

            webView = QWebView()
            webView.setPage(webPage)

            if arguments.argc() > 1: webView.load(QUrl(arguments[1]))
            else: webView.load(QUrl("http://www.youtube.com/"))

            graphicsScene = QGraphicsScene()
            graphicsScene.addWidget(webView)

            graphicsView = QGraphicsView()
            graphicsView.setScene(graphicsScene)

            mainWindow = QMainWindow()
            #mainWindow.setLayout(QVBoxLayout)()
            mainWindow.setCentralWidget(graphicsView)
            mainWindow.setGeometry(50, 50, 1024, 768)
            mainWindow.show()
            mainWindow.raise_()
    else:
        #-------------------------------------------------------------------
        # QWidgetImage test
        #-------------------------------------------------------------------
        # QWidgetImage still has some issues, some examples are:
        # 
        # 1. Editing in the QTextEdit doesn't work. Also when started with 
        #    --useBrowser, editing in the search field on YouTube doesn't 
        #    work. But that same search field when using QWebViewImage 
        #    works... And editing in the text field in the pop-up getInteger 
        #    dialog works too. All these cases use QGraphicsViewAdapter 
        #    under the hood, so why do some work and others don't?
        #
        #    a) osgQtBrowser --useWidgetImage [--fullscreen] (optional)
        #    b) Try to click in the QTextEdit and type, or to select text
        #       and drag-and-drop it somewhere else in the QTextEdit. These
        #       don't work.
        #    c) osgQtBrowser --useWidgetImage --sanityCheck
        #    d) Try the operations in b), they all work.
        #    e) osgQtBrowser --useWidgetImage --useBrowser [--fullscreen]
        #    f) Try to click in the search field and type, it doesn't work.
        #    g) osgQtBrowser
        #    h) Try the operation in f), it works.
        #
        # 2. Operations on floating windows (--numFloatingWindows 1 or more). 
        #    Moving by dragging the titlebar, clicking the close button, 
        #    resizing them, none of these work. I wonder if it's because the 
        #    OS manages those functions (they're functions of the window 
        #    decorations) so we need to do something special for that? But 
        #    in --sanityCheck mode they work.
        #
        #    a) osgQtBrowser --useWidgetImage --numFloatingWindows 1 [--fullscreen]
        #    b) Try to drag the floating window, click the close button, or
        #       drag its sides to resize it. None of these work.
        #    c) osgQtBrowser --useWidgetImage --numFloatingWindows 1 --sanityCheck
        #    d) Try the operations in b), they all work.
        #    e) osgQtBrowser --useWidgetImage [--fullscreen]
        #    f) Click the button so that the getInteger() dialog is 
        #       displayed, then try to move that dialog or close it with the 
        #       close button, these don't work.
        #    g) osgQtBrowser --useWidgetImage --sanityCheck
        #    h) Try the operation in f), it works.
        #
        # 3. (Minor) The QGraphicsView's scrollbars don't appear when 
        #    using QWidgetImage or QWebViewImage. QGraphicsView is a 
        #    QAbstractScrollArea and it should display scrollbars as soon as
        #    the scene is too large to fit the view.
        #
        #    a) osgQtBrowser --useWidgetImage --fullscreen
        #    b) Resize the OSG window so it's smaller than the QTextEdit.
        #       Scrollbars should appear but don't.
        #    c) osgQtBrowser --useWidgetImage --sanityCheck
        #    d) Try the operation in b), scrollbars appear. Even if you have 
        #       floating windows (by clicking the button or by adding 
        #       --numFloatingWindows 1) and move them outside the view, 
        #       scrollbars appear too. You can't test that case in OSG for 
        #       now because of problem 2 above, but that's pretty cool.
        #
        # 4. (Minor) In sanity check mode, the widget added to the 
        #    QGraphicsView is centered. With QGraphicsViewAdapter, it is not.
        #
        #    a) osgQtBrowser --useWidgetImage [--fullscreen]
        #    b) The QTextEdit and button are not in the center of the image
        #       generated by the QGraphicsViewAdapter.
        #    c) osgQtBrowser --useWidgetImage --sanityCheck
        #    d) The QTextEdit and button are in the center of the 
        #       QGraphicsView.


        widget = 0
        if useBrowser :
            webPage = QWebPage()
            webPage.settings().setAttribute(QWebSettings.JavascriptEnabled, True)
            webPage.settings().setAttribute(QWebSettings.PluginsEnabled, True)

            webView = QWebView()
            webView.setPage(webPage)

            if arguments.argc() > 1: webView.load(QUrl(arguments[1]))
            else: webView.load(QUrl("http://www.youtube.com/"))

            widget = webView
        else:
            widget = QWidget()
            widget.setLayout(QVBoxLayout())

            text = QString("Lorem ipsum dolor sit amet, consectetur adipiscing elit. Quisque velit turpis, euismod ac ultrices et, molestie non nisi. Nullam egestas dignissim enim, quis placerat nulla suscipit sed. Donec molestie elementum risus sit amet sodales. Nunc consectetur congue neque, at viverra massa pharetra fringilla. Integer vitae mi sem. Donec dapibus semper elit nec sollicitudin. Vivamus egestas ultricies felis, in mollis mi facilisis quis. Nam suscipit bibendum eros sed cursus. Suspendisse mollis suscipit hendrerit. Etiam magna eros, convallis non congue vel, faucibus ac augue. Integer ante ante, porta in ornare ullamcorper, congue nec nibh. Etiam congue enim vitae enim sollicitudin fringilla. Mauris mattis, urna in fringilla dapibus, ipsum sem feugiat purus, ac hendrerit felis arcu sed sapien. Integer id velit quam, sit amet dignissim tortor. Sed mi tortor, placerat ac luctus id, tincidunt et urna. Nulla sed nunc ante.Sed ut sodales enim. Ut sollicitudin ultricies magna, vel ultricies ante venenatis id. Cras luctus mi in lectus rhoncus malesuada. Sed ac sollicitudin nisi. Nunc venenatis congue quam, et suscipit diam consectetur id. Donec vel enim ac enim elementum bibendum ut quis augue. Nulla posuere suscipit dolor, id convallis tortor congue eu. Vivamus sagittis consectetur dictum. Duis a ante quis dui varius fermentum. In hac habitasse platea dictumst. Nam dapibus dolor eu felis eleifend in scelerisque dolor ultrices. Donec arcu lectus, fringilla ut interdum non, tristique id dolor. Morbi sagittis sagittis volutpat. Pellentesque habitant morbi tristique senectus et netus et malesuada fames ac turpis egestas. Duis venenatis ultrices euismod.Nam sit amet convallis libero. Integer lectus urna, eleifend et sollicitudin non, porttitor vel erat. Vestibulum pulvinar egestas leo, a porttitor turpis ullamcorper et. Vestibulum in ornare turpis. Ut nec libero a sem mattis iaculis quis id purus. Praesent ante neque, dictum vitae pretium vel, iaculis luctus dui. Etiam luctus tellus vel nunc suscipit a ullamcorper nisl semper. Nunc dapibus, eros in sodales dignissim, orci lectus egestas felis, sit amet vehicula tortor dolor eu quam. Vivamus pellentesque convallis quam aliquet pellentesque. Phasellus facilisis arcu ac orci fringilla aliquet. Donec sed euismod augue. Duis eget orci sit amet neque tempor fringilla. Vestibulum ante ipsum primis in faucibus orci luctus et ultrices posuere cubilia Curae In hac habitasse platea dictumst. Duis sollicitudin, lacus ac pellentesque lacinia, lacus magna pulvinar purus, pulvinar porttitor est nibh quis augue.Duis eleifend, massa sit amet mattis fringilla, elit turpis venenatis libero, sed convallis turpis diam sit amet ligula. Morbi non dictum turpis. Integer porttitor condimentum elit, sit amet sagittis nibh ultrices sit amet. Mauris ac arcu augue, id aliquet mauris. Donec ultricies urna id enim accumsan at pharetra dui adipiscing. Nunc luctus rutrum molestie. Curabitur libero ipsum, viverra at pulvinar ut, porttitor et neque. Aliquam sit amet dolor et purus sagittis adipiscing. Nam sit amet hendrerit sem. Etiam varius, ligula non ultricies dignissim, sapien dui commodo urna, eu vehicula enim nunc molestie augue. Fusce euismod, erat vitae pharetra tempor, quam eros tincidunt lorem, ut iaculis ligula erat vitae nibh. Aenean eu ultricies dolor. Curabitur suscipit viverra bibendum.Sed egestas adipiscing mi in egestas. Proin in neque in nibh blandit consequat nec quis tortor. Vestibulum sed interdum justo. 
Sed volutpat velit vitae elit pulvinar aliquam egestas elit rutrum. Proin lorem nibh, bibendum vitae sollicitudin condimentum, pulvinar ut turpis. Maecenas iaculis, mauris in consequat ultrices, ante erat blandit mi, vel fermentum lorem turpis eget sem. Integer ultrices tristique erat sit amet volutpat. In sit amet diam et nunc congue pellentesque at in dolor. Mauris eget orci orci. Integer posuere augue ornare tortor tempus elementum. Quisque iaculis, nunc ac cursus fringilla, magna elit cursus eros, id feugiat diam eros et tellus. Etiam consectetur ultrices erat quis rhoncus. Mauris eu lacinia neque. Curabitur suscipit feugiat tellus in dictum. Class aptent taciti sociosqu ad litora torquent per conubia nostra, per inceptos himenaeos. Sed aliquam tempus ante a tempor. Praesent viverra erat quis sapien pretium rutrum. Praesent dictum scelerisque venenatis.Proin bibendum lectus eget nisl lacinia porta. Morbi eu erat in sapien malesuada vulputate. Cras non elit quam. Ut dictum urna quis nisl feugiat ac sollicitudin libero luctus. Donec leo mauris, varius at luctus eget, placerat quis arcu. Vestibulum ante ipsum primis in faucibus orci luctus et ultrices posuere cubilia Curae Etiam tristique, mauris ut lacinia elementum, mauris erat consequat massa, ac gravida nisi tellus vitae purus. Curabitur consectetur ultricies commodo. Cras pulvinar orci nec enim adipiscing tristique. Ut ornare orci id est fringilla sit amet blandit libero pellentesque. Vestibulum tincidunt sapien ut enim venenatis vestibulum ultricies ipsum tristique. Mauris tempus eleifend varius. Lorem ipsum dolor sit amet, consectetur adipiscing elit. Suspendisse vitae dui ac quam gravida semper. In ac enim ac ligula rutrum porttitor.Integer dictum sagittis leo, at convallis sapien facilisis eget. Etiam cursus bibendum tortor, faucibus aliquam lectus ullamcorper sed. Nulla pulvinar posuere quam, ut sagittis ligula tincidunt ut. Nulla convallis velit ut enim condimentum pulvinar. Quisque gravida accumsan scelerisque. Proin pellentesque nisi cursus tortor aliquet dapibus. Duis vel eros orci. Sed eget purus ligula. Lorem ipsum dolor sit amet, consectetur adipiscing elit. Donec ullamcorper porta congue. Nunc id velit ut neque malesuada consequat in eu nisi. Nulla facilisi. Quisque pellentesque magna vitae nisl euismod ac accumsan tellus feugiat.Nulla facilisi. Integer quis orci lectus, non aliquam nisi. Vivamus varius porta est, ac porttitor orci blandit mattis. Sed dapibus facilisis dapibus. Duis tincidunt leo ac tortor faucibus hendrerit. Morbi sit amet sapien risus, vel luctus enim. Aliquam sagittis nunc id purus aliquam lobortis. Duis posuere viverra dui, sit amet convallis sem vulputate at. Lorem ipsum dolor sit amet, consectetur adipiscing elit. Quisque pellentesque, lectus id imperdiet commodo, diam diam faucibus lectus, sit amet vestibulum tortor lacus viverra eros.Maecenas nec augue lectus. Duis nec arcu eget lorem tempus sollicitudin suscipit vitae arcu. Nullam vitae mauris lectus. Vivamus id risus neque, dignissim vehicula diam. Cras rhoncus velit sed velit iaculis ac dignissim turpis luctus. Suspendisse potenti. Sed vitae ligula a ligula ornare rutrum sit amet ut quam. Duis tincidunt, nibh vitae iaculis adipiscing, dolor orci cursus arcu, vel congue tortor quam eget arcu. Suspendisse tellus felis, blandit ac accumsan vitae, fringilla id lorem. Duis tempor lorem mollis est congue ut imperdiet velit laoreet. Nullam interdum cursus mollis. Pellentesque non mauris accumsan elit laoreet viverra ut at risus. 
Proin rutrum sollicitudin sem, vitae ultricies augue sagittis vel. Cras quis vehicula neque. Aliquam erat volutpat. Aliquam erat volutpat. Praesent non est erat, accumsan rutrum lacus. Pellentesque tristique molestie aliquet. Cras ullamcorper facilisis faucibus. In non lorem quis velit lobortis pulvinar.Phasellus non sem ipsum. Praesent ut libero quis turpis viverra semper. Class aptent taciti sociosqu ad litora torquent per conubia nostra, per inceptos himenaeos. In hac habitasse platea dictumst. Donec at velit tellus. Fusce commodo pharetra tincidunt. Proin lacus enim, fringilla a fermentum ut, vestibulum ut nibh. Duis commodo dolor vel felis vehicula at egestas neque bibendum. Phasellus malesuada dictum ante in aliquam. Curabitur interdum semper urna, nec placerat justo gravida in. Praesent quis mauris massa. Pellentesque porttitor lacinia tincidunt. Phasellus egestas viverra elit vel blandit. Sed dapibus nisi et lectus pharetra dignissim. Mauris hendrerit lectus nec purus dapibus condimentum. Sed ac eros nulla. Aenean semper sapien a nibh aliquam lobortis. Aliquam elementum euismod sapien, in dapibus leo dictum et. Pellentesque augue neque, ultricies non viverra eu, tincidunt ac arcu. Morbi ut porttitor lectus.")

            if useLabel :
                label = QLabel(text)
                label.setWordWrap(True)
                label.setTextInteractionFlags(Qt.TextEditorInteraction)

                palette = label.palette()
                palette.setColor(QPalette.Highlight, Qt.darkBlue)
                palette.setColor(QPalette.HighlightedText, Qt.white)
                label.setPalette(palette)

                scrollArea = QScrollArea()
                scrollArea.setWidget(label)

                widget.layout().addWidget(scrollArea)
            else:
                textEdit = QTextEdit(text)
                textEdit.setReadOnly(False)
                textEdit.setTextInteractionFlags(Qt.TextEditable)

                palette = textEdit.palette()
                palette.setColor(QPalette.Highlight, Qt.darkBlue)
                palette.setColor(QPalette.HighlightedText, Qt.white)
                textEdit.setPalette(palette)

                widget.layout().addWidget(textEdit)

            button = MyPushButton("Button")
            widget.layout().addWidget(button)

            widget.setGeometry(0, 0, 800, 600)

        graphicsScene = 0

        if  not sanityCheck :
            widgetImage = osgQt.QWidgetImage(widget)
#if QT_VERSION >= QT_VERSION_CHECK(4, 5, 0) :
            widgetImage.getQWidget().setAttribute(Qt.WA_TranslucentBackground)
#endif
            widgetImage.getQGraphicsViewAdapter().setBackgroundColor(QColor(0, 0, 0, 0))
            #widgetImage.getQGraphicsViewAdapter().resize(800, 600)
            graphicsScene = widgetImage.getQGraphicsViewAdapter().getQGraphicsScene()

            camera = 0        # Will stay NULL in the inScene case.
            quad = osg.createTexturedQuadGeometry(osg.Vec3(0,0,0), osg.Vec3(1,0,0), osg.Vec3(0,1,0), 1, 1)
            geode = osg.Geode()
            geode.addDrawable(quad)

            mt = osg.MatrixTransform()

            texture = osg.Texture2D(widgetImage)
            texture.setResizeNonPowerOfTwoHint(False)
            texture.setFilter(osg.Texture.MIN_FILTER,osg.Texture.LINEAR)
            texture.setWrap(osg.Texture.WRAP_S, osg.Texture.CLAMP_TO_EDGE)
            texture.setWrap(osg.Texture.WRAP_T, osg.Texture.CLAMP_TO_EDGE)
            mt.getOrCreateStateSet().setTextureAttributeAndModes(0, texture, osg.StateAttribute.ON)

            handler = None  # will hold an osgViewer.InteractiveImageHandler
            if inScene :
                mt.setMatrix(osg.Matrix.rotate(osg.Vec3(0,1,0), osg.Vec3(0,0,1)))
                mt.addChild(geode)

                handler = osgViewer.InteractiveImageHandler(widgetImage)
            else:    # fullscreen
                # The HUD camera's viewport needs to follow the size of the 
                # window. MyInteractiveImageHandler will make sure of this.
                # As for the quad and the camera's projection, setting the 
                # projection resize policy to FIXED takes care of them, so
                # they can stay the same: (0,1,0,1) with a quad that fits.

                # Set the HUD camera's projection and viewport to match the screen.
                camera = osg.Camera()
                camera.setProjectionResizePolicy(osg.Camera.FIXED)
                camera.setProjectionMatrix(osg.Matrix.ortho2D(0,1,0,1))
                camera.setReferenceFrame(osg.Transform.ABSOLUTE_RF)
                camera.setViewMatrix(osg.Matrix.identity())
                camera.setClearMask(GL_DEPTH_BUFFER_BIT)
                camera.setRenderOrder(osg.Camera.POST_RENDER)
                camera.addChild(geode)
                camera.setViewport(0, 0, 1024, 768)

                mt.addChild(camera)

                handler = osgViewer.InteractiveImageHandler(widgetImage, texture, camera)

            mt.getOrCreateStateSet().setMode(GL_LIGHTING, osg.StateAttribute.OFF)
            mt.getOrCreateStateSet().setMode(GL_BLEND, osg.StateAttribute.ON)
            mt.getOrCreateStateSet().setRenderingHint(osg.StateSet.TRANSPARENT_BIN)
            mt.getOrCreateStateSet().setAttribute(osg.Program())

            overlay = osg.Group()
            overlay.addChild(mt)

            root.addChild(overlay)
            
            quad.setEventCallback(handler)
            quad.setCullCallback(handler)
        else:
            # Sanity check, do the same thing as QWidgetImage and 
            # QGraphicsViewAdapter but in a separate Qt window.

            graphicsScene = QGraphicsScene()
            graphicsScene.addWidget(widget)

            graphicsView = QGraphicsView()
            graphicsView.setScene(graphicsScene)

            mainWindow = QMainWindow()
            mainWindow.setCentralWidget(graphicsView)
            mainWindow.setGeometry(50, 50, 1024, 768)
            mainWindow.show()
            mainWindow.raise_()

        # Add numFloatingWindows windows to the graphicsScene.
        for i in range(numFloatingWindows):
            window = QWidget(0, Qt.Window)
            window.setWindowTitle(QString("Window %1").arg(i))
            window.setLayout(QVBoxLayout())
            window.layout().addWidget(QLabel(QString("This window %1").arg(i)))
            window.layout().addWidget(MyPushButton(QString("Button in window %1").arg(i)))
            window.setGeometry(100, 100, 300, 300)

            proxy = QGraphicsProxyWidget(0, Qt.Window)
            proxy.setWidget(window)
            proxy.setFlag(QGraphicsItem.ItemIsMovable, True)

            graphicsScene.addItem(proxy)


    root.addChild(osgDB.readNodeFile("cow.osg.(15,0,5).trans.(0.1,0.1,0.1).scale"))

    viewer = osgViewer.Viewer(arguments)
    viewer.setSceneData(root)
    viewer.setCameraManipulator(osgGA.TrackballManipulator())
    viewer.addEventHandler(osgGA.StateSetManipulator(root.getOrCreateStateSet()))
    viewer.addEventHandler(osgViewer.StatsHandler())
    viewer.addEventHandler(osgViewer.WindowSizeHandler())

    viewer.setUpViewInWindow(50, 50, 1024, 768)
    viewer.getEventQueue().windowResize(0, 0, 1024, 768)

    if useFrameLoopThread :
        # create a thread to run the viewer's frame loop
        viewerThread = ViewerFrameThread(viewer, True)
        viewerThread.startThread()

        # now start the standard Qt event loop, which exits when the viewerThread sends the QApplication.exit() signal.
        return QApplication.exec()

    else:
        # run the frame loop, interleaving Qt and the main OSG frame loop
        while  not viewer.done() :
            # process Qt events - this handles both events and paints the browser image
            QCoreApplication.processEvents(QEventLoop.AllEvents, 100)

            viewer.frame()

        return 0
Example 10
def main(argv):

    
    # use an ArgumentParser object to manage the program arguments.
    arguments = osg.ArgumentParser(argv)

    # read the scene from the list of file specified commandline args.
    scene = osgDB.readNodeFiles(arguments)

    if  not scene  and  arguments.read("--relative-camera-scene") :
        # Create a test scene with a camera that has a relative reference frame.
        group = osg.Group()

        sphere = osg.Geode()
        sphere.setName("Sphere")
        sphere.addDrawable(osg.ShapeDrawable(osg.Sphere()))

        cube = osg.Geode()
        cube.setName("Cube")
        cube.addDrawable(osg.ShapeDrawable(osg.Box()))

        camera = osg.Camera()
        camera.setRenderOrder(osg.Camera.POST_RENDER)
        camera.setClearMask(GL_DEPTH_BUFFER_BIT)
        camera.setReferenceFrame(osg.Transform.RELATIVE_RF)
        camera.setViewMatrix(osg.Matrix.translate(-2, 0, 0))

        xform = osg.MatrixTransform(osg.Matrix.translate(1, 1, 1))
        xform.addChild(camera)

        group.addChild(sphere)
        group.addChild(xform)
        camera.addChild(cube)

        scene = group

    # if nothing was loaded, assume no arguments were passed in and try to use the default model instead.
    if  not scene : scene = osgDB.readNodeFile("fountain.osgt")

    group = scene.asGroup()  # stands in for the original dynamic_cast<osg.Group*>(scene)
    if  not group :
        group = osg.Group()
        group.addChild(scene)

    updateText = osgText.Text()

    # add the HUD subgraph.
    group.addChild(createHUD(updateText))

    if arguments.read("--CompositeViewer") :
        view = osgViewer.View()
        # add the handler for doing the picking
        view.addEventHandler(PickHandler(updateText))

        # set the scene to render
        view.setSceneData(group)

        view.setUpViewAcrossAllScreens()

        viewer = osgViewer.CompositeViewer()
        viewer.addView(view)

        return viewer.run()

    else:
        viewer = osgViewer.Viewer()


        # add all the camera manipulators
            keyswitchManipulator = osgGA.KeySwitchMatrixManipulator()

            keyswitchManipulator.addMatrixManipulator( ord("1"), "Trackball", osgGA.TrackballManipulator() )
            keyswitchManipulator.addMatrixManipulator( ord("2"), "Flight", osgGA.FlightManipulator() )
            keyswitchManipulator.addMatrixManipulator( ord("3"), "Drive", osgGA.DriveManipulator() )

            num = keyswitchManipulator.getNumMatrixManipulators()
            keyswitchManipulator.addMatrixManipulator( ord("4"), "Terrain", osgGA.TerrainManipulator() )

            pathfile = str()
            keyForAnimationPath = ord("5")
            while arguments.read("-p",pathfile) :
                apm = osgGA.AnimationPathManipulator(pathfile)
                if apm and apm.valid():
                    num = keyswitchManipulator.getNumMatrixManipulators()
                    keyswitchManipulator.addMatrixManipulator( keyForAnimationPath, "Path", apm )
                    keyForAnimationPath += 1

            keyswitchManipulator.selectMatrixManipulator(num)

            viewer.setCameraManipulator( keyswitchManipulator )

        # add the handler for doing the picking
        viewer.addEventHandler(PickHandler(updateText))

        # set the scene to render
        viewer.setSceneData(group)

        return viewer.run()
Example 11
def createDistortionSubgraph(subgraph, clearColour):

    
    distortionNode = osg.Group()

    tex_width = 1024
    tex_height = 1024

    texture = osg.Texture2D()
    texture.setTextureSize(tex_width, tex_height)
    texture.setInternalFormat(GL_RGBA)
    texture.setFilter(osg.Texture2D.MIN_FILTER,osg.Texture2D.LINEAR)
    texture.setFilter(osg.Texture2D.MAG_FILTER,osg.Texture2D.LINEAR)

    # set up the render to texture camera.
        camera = osg.Camera()

        # clear the color and depth buffers
        camera.setClearColor(clearColour)
        camera.setClearMask(GL_COLOR_BUFFER_BIT|GL_DEPTH_BUFFER_BIT)

        # just inherit the main cameras view
        camera.setReferenceFrame(osg.Transform.RELATIVE_RF)
        camera.setProjectionMatrix(osg.Matrixd.identity())
        camera.setViewMatrix(osg.Matrixd.identity())

        # set viewport
        camera.setViewport(0,0,tex_width,tex_height)

        # set the camera to render before the main camera.
        camera.setRenderOrder(osg.Camera.PRE_RENDER)

        # tell the camera to use OpenGL frame buffer object where supported.
        camera.setRenderTargetImplementation(osg.Camera.FRAME_BUFFER_OBJECT)

        # attach the texture and use it as the color buffer.
        camera.attach(osg.Camera.COLOR_BUFFER, texture)

        # add subgraph to render
        camera.addChild(subgraph)

        distortionNode.addChild(camera)

    # set up the hud camera
        # create the quad to visualize.
        polyGeom = osg.Geometry()

        polyGeom.setSupportsDisplayList(False)

        origin = osg.Vec3(0.0,0.0,0.0)
        xAxis = osg.Vec3(1.0,0.0,0.0)
        yAxis = osg.Vec3(0.0,1.0,0.0)
        height = 1024.0
        width = 1280.0
        noSteps = 50

        vertices = osg.Vec3Array()
        texcoords = osg.Vec2Array()
        colors = osg.Vec4Array()

        bottom = origin
        dx = xAxis * (width / float(noSteps - 1))
        dy = yAxis * (height / float(noSteps - 1))

        bottom_texcoord = osg.Vec2(0.0,0.0)
        dx_texcoord = osg.Vec2(1.0 / float(noSteps - 1), 0.0)
        dy_texcoord = osg.Vec2(0.0, 1.0 / float(noSteps - 1))

        for i in range(noSteps):
            cursor = bottom + dy * float(i)
            texcoord = bottom_texcoord + dy_texcoord * float(i)
            for j in range(noSteps):
Example 12
def createHUD():


    
    # create a camera to set up the projection and model view matrices, and the subgraph to draw in the HUD
    camera = osg.Camera()

    # set the projection matrix
    camera.setProjectionMatrix(osg.Matrix.ortho2D(0,1280,0,1024))

    # set the view matrix
    camera.setReferenceFrame(osg.Transform.ABSOLUTE_RF)
    camera.setViewMatrix(osg.Matrix.identity())

    # only clear the depth buffer
    camera.setClearMask(GL_DEPTH_BUFFER_BIT)

    # draw subgraph after main camera view.
    camera.setRenderOrder(osg.Camera.POST_RENDER)

    # we don't want the camera to grab event focus from the viewer's main camera(s).
    camera.setAllowEventFocus(False)



    # add to this camera a subgraph to render

        geode = osg.Geode()

        timesFont = str("fonts/arial.ttf")

        # turn lighting off for the text and disable depth test to ensure it's always on top.
        stateset = geode.getOrCreateStateSet()
        stateset.setMode(GL_LIGHTING,osg.StateAttribute.OFF)

        position = osg.Vec3(150.0,800.0,0.0)
        delta = osg.Vec3(0.0,-120.0,0.0)

            text = osgText.Text()
            geode.addDrawable( text )

            text.setFont(timesFont)
            text.setPosition(position)
            text.setText("Head Up Displays are simple :-)")

            position += delta


            text = osgText.Text()
            geode.addDrawable( text )

            text.setFont(timesFont)
            text.setPosition(position)
            text.setText("All you need to do is create your text in a subgraph.")

            position += delta


            text = osgText.Text()
            geode.addDrawable( text )

            text.setFont(timesFont)
            text.setPosition(position)
            text.setText("Then place an osg.Camera above the subgraph\n"
                          "to create an orthographic projection.\n")

            position += delta

            text = osgText.Text()
            geode.addDrawable( text )

            text.setFont(timesFont)
            text.setPosition(position)
            text.setText("Set the Camera's ReferenceFrame to ABSOLUTE_RF to ensure\n"
                          "it remains independent from any external model view matrices.")

            position += delta

            text = osgText.Text()
            geode.addDrawable( text )

            text.setFont(timesFont)
            text.setPosition(position)
            text.setText("And set the Camera's clear mask to just clear the depth buffer.")

            position += delta

            text = osgText.Text()
            geode.addDrawable( text )

            text.setFont(timesFont)
            text.setPosition(position)
            text.setText("And finally set the Camera's RenderOrder to POST_RENDER\n"
                          "to make sure it's drawn last.")

            position += delta


            bb = osg.BoundingBox()
            for i in range(geode.getNumDrawables()):
                bb.expandBy(geode.getDrawable(i).getBound())
Example 13
def setDomeFaces(viewer, arguments):

    

    wsi = osg.GraphicsContext.getWindowingSystemInterface()
    if  not wsi :
        osg.notify(osg.NOTICE), "Error, no WindowSystemInterface available, cannot create windows."
        return

    # (assumes the Python binding returns the resolution rather than filling C++ out-parameters)
    width, height = wsi.getScreenResolution(osg.GraphicsContext.ScreenIdentifier(0))

    while arguments.read("--width",width) : 
    while arguments.read("--height",height) : 

    traits = osg.GraphicsContext.Traits()
    traits.x = 0
    traits.y = 0
    traits.width = width
    traits.height = height
    traits.windowDecoration = True
    traits.doubleBuffer = True
    traits.sharedContext = 0

    gc = osg.GraphicsContext.createGraphicsContext(traits)
    if  not gc :
        osg.notify(osg.NOTICE), "GraphicsWindow has not been created successfully."
        return


    center_x = width/2
    center_y = height/2
    camera_width = 256
    camera_height = 256

    # front face
        camera = osg.Camera()
        camera.setGraphicsContext(gc)
        camera.setViewport(osg.Viewport(center_x-camera_width/2, center_y, camera_width, camera_height))

        buffer =  GL_BACK if (traits.doubleBuffer) else  GL_FRONT
        camera.setDrawBuffer(buffer)
        camera.setReadBuffer(buffer)

        viewer.addSlave(camera, osg.Matrixd(), osg.Matrixd())

    # top face
        camera = osg.Camera()
        camera.setGraphicsContext(gc)
        camera.setViewport(osg.Viewport(center_x-camera_width/2, center_y+camera_height, camera_width, camera_height))
        buffer =  GL_BACK if (traits.doubleBuffer) else  GL_FRONT
        camera.setDrawBuffer(buffer)
        camera.setReadBuffer(buffer)

        viewer.addSlave(camera, osg.Matrixd(), osg.Matrixd.rotate(osg.inDegrees(-90.0), 1.0,0.0,0.0))

    # left face
        camera = osg.Camera()
        camera.setGraphicsContext(gc)
        camera.setViewport(osg.Viewport(center_x-camera_width*3/2, center_y, camera_width, camera_height))
        buffer =  GL_BACK if (traits.doubleBuffer) else  GL_FRONT
        camera.setDrawBuffer(buffer)
        camera.setReadBuffer(buffer)

        viewer.addSlave(camera, osg.Matrixd(), osg.Matrixd.rotate(osg.inDegrees(-90.0), 0.0,1.0,0.0))

    # right face
        camera = osg.Camera()
        camera.setGraphicsContext(gc)
        camera.setViewport(osg.Viewport(center_x+camera_width/2, center_y, camera_width, camera_height))
        buffer =  GL_BACK if (traits.doubleBuffer) else  GL_FRONT
        camera.setDrawBuffer(buffer)
        camera.setReadBuffer(buffer)

        viewer.addSlave(camera, osg.Matrixd(), osg.Matrixd.rotate(osg.inDegrees(90.0), 0.0,1.0,0.0))

    # bottom face
        camera = osg.Camera()
        camera.setGraphicsContext(gc)
        camera.setViewport(osg.Viewport(center_x-camera_width/2, center_y-camera_height, camera_width, camera_height))
        buffer =  GL_BACK if (traits.doubleBuffer) else  GL_FRONT
        camera.setDrawBuffer(buffer)
        camera.setReadBuffer(buffer)

        viewer.addSlave(camera, osg.Matrixd(), osg.Matrixd.rotate(osg.inDegrees(90.0), 1.0,0.0,0.0))

    # back face
        camera = osg.Camera()
        camera.setGraphicsContext(gc)
        camera.setViewport(osg.Viewport(center_x-camera_width/2, center_y-2*camera_height, camera_width, camera_height))
        buffer =  GL_BACK if (traits.doubleBuffer) else  GL_FRONT
        camera.setDrawBuffer(buffer)
        camera.setReadBuffer(buffer)

        viewer.addSlave(camera, osg.Matrixd(), osg.Matrixd.rotate(osg.inDegrees(-180.0), 1.0,0.0,0.0))

    viewer.getCamera().setProjectionMatrixAsPerspective(90.0, 1.0, 1, 1000.0)

    viewer.assignSceneDataToCameras()
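
# A hedged usage sketch for setDomeFaces(): the function only configures slave cameras on an
# existing viewer, so a caller still has to load a model and run the frame loop. The helper
# name and model file are illustrative.
def runDomeFacesDemo(arguments):
    viewer = osgViewer.Viewer(arguments)
    setDomeFaces(viewer, arguments)
    viewer.setSceneData(osgDB.readNodeFile("cessna.osgt"))
    return viewer.run()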

def createDomeDistortionMesh(origin, widthVector, heightVector, arguments):

    
    sphere_radius = 1.0
    if arguments.read("--radius", sphere_radius) : 

    collar_radius = 0.45
    if arguments.read("--collar", collar_radius) : 

    center = osg.Vec3d(0.0,0.0,0.0)
    eye = osg.Vec3d(0.0,0.0,0.0)

    distance = sqrt(sphere_radius*sphere_radius - collar_radius*collar_radius)
    if arguments.read("--distance", distance) : 

    centerProjection = False

    projector = eye - osg.Vec3d(0.0,0.0, distance)


    osg.notify(osg.NOTICE), "Projector position = ", projector
    osg.notify(osg.NOTICE), "distance = ", distance


    # create the quad to visualize.
    geometry = osg.Geometry()

    geometry.setSupportsDisplayList(False)

    xAxis = osg.Vec3(widthVector)
    width = widthVector.length()
    xAxis /= width

    yAxis = osg.Vec3(heightVector)
    height = heightVector.length()
    yAxis /= height

    noSteps = 50

    vertices = osg.Vec3Array()
    texcoords = osg.Vec3Array()
    colors = osg.Vec4Array()

    bottom = origin
    dx = xAxis * (width / float(noSteps - 1))
    dy = yAxis * (height / float(noSteps - 1))

    screenCenter = origin + widthVector*0.5 + heightVector*0.5
    screenRadius = heightVector.length() * 0.5

    if centerProjection:
        for i in range(noSteps):
            cursor = bottom + dy * float(i)
            for j in range(noSteps):
                delta = osg.Vec2(cursor.x() - screenCenter.x(), cursor.y() - screenCenter.y())
                theta = atan2(-delta.y(), delta.x())
                phi = osg.PI_2 * delta.length() / screenRadius
                if phi > osg.PI_2 : phi = osg.PI_2

                phi *= 2.0

                # osg.notify(osg.NOTICE), "theta = ", theta, "phi=", phi

                texcoord = osg.Vec3(sin(phi) * cos(theta),
                                   sin(phi) * sin(theta),
                                   cos(phi))

                vertices.push_back(cursor)
                colors.push_back(osg.Vec4(1.0,1.0,1.0,1.0))
                texcoords.push_back(texcoord)

                cursor += dx
            # osg.notify(osg.NOTICE)
    else:
Example 14
def setDomeCorrection(viewer, arguments):

    

    wsi = osg.GraphicsContext.getWindowingSystemInterface()
    if  not wsi :
        osg.notify(osg.NOTICE), "Error, no WindowSystemInterface available, cannot create windows."
        return

    # (assumes the Python binding returns the resolution rather than filling C++ out-parameters)
    width, height = wsi.getScreenResolution(osg.GraphicsContext.ScreenIdentifier(0))

    while arguments.read("--width",width) : 
    while arguments.read("--height",height) : 

    traits = osg.GraphicsContext.Traits()
    traits.x = 0
    traits.y = 0
    traits.width = width
    traits.height = height
    traits.windowDecoration = False
    traits.doubleBuffer = True
    traits.sharedContext = 0



    gc = osg.GraphicsContext.createGraphicsContext(traits)
    if  not gc :
        osg.notify(osg.NOTICE), "GraphicsWindow has not been created successfully."
        return

    tex_width = 512
    tex_height = 512

    camera_width = tex_width
    camera_height = tex_height

    texture = osg.TextureCubeMap()

    texture.setTextureSize(tex_width, tex_height)
    texture.setInternalFormat(GL_RGB)
    texture.setFilter(osg.Texture.MIN_FILTER,osg.Texture.LINEAR)
    texture.setFilter(osg.Texture.MAG_FILTER,osg.Texture.LINEAR)

#if 0
    renderTargetImplementation = osg.Camera.SEPERATE_WINDOW
    buffer = GL_FRONT
#else:
    renderTargetImplementation = osg.Camera.FRAME_BUFFER_OBJECT
    buffer = GL_FRONT
#endif

    # front face
        camera = osg.Camera()
        camera.setName("Front face camera")
        camera.setGraphicsContext(gc)
        camera.setViewport(osg.Viewport(0,0,camera_width, camera_height))
        camera.setDrawBuffer(buffer)
        camera.setReadBuffer(buffer)
        camera.setAllowEventFocus(False)
        # tell the camera to use OpenGL frame buffer object where supported.
        camera.setRenderTargetImplementation(renderTargetImplementation)

        # attach the texture and use it as the color buffer.
        camera.attach(osg.Camera.COLOR_BUFFER, texture, 0, osg.TextureCubeMap.POSITIVE_Y)

        viewer.addSlave(camera, osg.Matrixd(), osg.Matrixd())


    # top face
        camera = osg.Camera()
        camera.setName("Top face camera")
        camera.setGraphicsContext(gc)
        camera.setViewport(osg.Viewport(0,0,camera_width, camera_height))
        buffer =  GL_BACK if (traits.doubleBuffer) else  GL_FRONT
        camera.setDrawBuffer(buffer)
        camera.setReadBuffer(buffer)
        camera.setAllowEventFocus(False)

        # tell the camera to use OpenGL frame buffer object where supported.
        camera.setRenderTargetImplementation(renderTargetImplementation)

        # attach the texture and use it as the color buffer.
        camera.attach(osg.Camera.COLOR_BUFFER, texture, 0, osg.TextureCubeMap.POSITIVE_Z)

        viewer.addSlave(camera, osg.Matrixd(), osg.Matrixd.rotate(osg.inDegrees(-90.0), 1.0,0.0,0.0))

    # left face
        camera = osg.Camera()
        camera.setName("Left face camera")
        camera.setGraphicsContext(gc)
        camera.setViewport(osg.Viewport(0,0,camera_width, camera_height))
        camera.setDrawBuffer(buffer)
        camera.setReadBuffer(buffer)
        camera.setAllowEventFocus(False)

        # tell the camera to use OpenGL frame buffer object where supported.
        camera.setRenderTargetImplementation(renderTargetImplementation)

        # attach the texture and use it as the color buffer.
        camera.attach(osg.Camera.COLOR_BUFFER, texture, 0, osg.TextureCubeMap.NEGATIVE_X)

        viewer.addSlave(camera, osg.Matrixd(), osg.Matrixd.rotate(osg.inDegrees(-90.0), 0.0,1.0,0.0) * osg.Matrixd.rotate(osg.inDegrees(-90.0), 0.0,0.0,1.0))

    # right face
        camera = osg.Camera()
        camera.setName("Right face camera")
        camera.setGraphicsContext(gc)
        camera.setViewport(osg.Viewport(0,0,camera_width, camera_height))
        camera.setDrawBuffer(buffer)
        camera.setReadBuffer(buffer)
        camera.setAllowEventFocus(False)

        # tell the camera to use OpenGL frame buffer object where supported.
        camera.setRenderTargetImplementation(renderTargetImplementation)

        # attach the texture and use it as the color buffer.
        camera.attach(osg.Camera.COLOR_BUFFER, texture, 0, osg.TextureCubeMap.POSITIVE_X)

        viewer.addSlave(camera, osg.Matrixd(), osg.Matrixd.rotate(osg.inDegrees(90.0), 0.0,1.0,0.0 ) * osg.Matrixd.rotate(osg.inDegrees(90.0), 0.0,0.0,1.0))

    # bottom face
        camera = osg.Camera()
        camera.setGraphicsContext(gc)
        camera.setName("Bottom face camera")
        camera.setViewport(osg.Viewport(0,0,camera_width, camera_height))
        camera.setDrawBuffer(buffer)
        camera.setReadBuffer(buffer)
        camera.setAllowEventFocus(False)

        # tell the camera to use OpenGL frame buffer object where supported.
        camera.setRenderTargetImplementation(renderTargetImplementation)

        # attach the texture and use it as the color buffer.
        camera.attach(osg.Camera.COLOR_BUFFER, texture, 0, osg.TextureCubeMap.NEGATIVE_Z)

        viewer.addSlave(camera, osg.Matrixd(), osg.Matrixd.rotate(osg.inDegrees(90.0), 1.0,0.0,0.0) * osg.Matrixd.rotate(osg.inDegrees(180.0), 0.0,0.0,1.0))

    # back face
        camera = osg.Camera()
        camera.setName("Back face camera")
        camera.setGraphicsContext(gc)
        camera.setViewport(osg.Viewport(0,0,camera_width, camera_height))
        camera.setDrawBuffer(buffer)
        camera.setReadBuffer(buffer)
        camera.setAllowEventFocus(False)

        # tell the camera to use OpenGL frame buffer object where supported.
        camera.setRenderTargetImplementation(renderTargetImplementation)

        # attach the texture and use it as the color buffer.
        camera.attach(osg.Camera.COLOR_BUFFER, texture, 0, osg.TextureCubeMap.NEGATIVE_Y)

        viewer.addSlave(camera, osg.Matrixd(), osg.Matrixd.rotate(osg.inDegrees(180.0), 1.0,0.0,0.0))

    viewer.getCamera().setProjectionMatrixAsPerspective(90.0, 1.0, 1, 1000.0)



    # distortion correction set up.
        geode = osg.Geode()
        geode.addDrawable(createDomeDistortionMesh(osg.Vec3(0.0,0.0,0.0), osg.Vec3(width,0.0,0.0), osg.Vec3(0.0,height,0.0), arguments))

        # we need to add the texture to the mesh, we do so by creating a
        # StateSet to contain the Texture StateAttribute.
        stateset = geode.getOrCreateStateSet()
        stateset.setTextureAttributeAndModes(0, texture,osg.StateAttribute.ON)
        stateset.setMode(GL_LIGHTING,osg.StateAttribute.OFF)

        camera = osg.Camera()
        camera.setGraphicsContext(gc)
        camera.setClearMask(GL_DEPTH_BUFFER_BIT | GL_COLOR_BUFFER_BIT )
        camera.setClearColor( osg.Vec4(0.1,0.1,1.0,1.0) )
        camera.setViewport(osg.Viewport(0, 0, width, height))
        buffer =  GL_BACK if (traits.doubleBuffer) else  GL_FRONT
        camera.setDrawBuffer(buffer)
        camera.setReadBuffer(buffer)
        camera.setReferenceFrame(osg.Camera.ABSOLUTE_RF)
        camera.setAllowEventFocus(False)
        #camera.setInheritanceMask(camera.getInheritanceMask()  ~osg.CullSettings.CLEAR_COLOR  ~osg.CullSettings.COMPUTE_NEAR_FAR_MODE)
        #camera.setComputeNearFarMode(osg.CullSettings.DO_NOT_COMPUTE_NEAR_FAR)

        camera.setProjectionMatrixAsOrtho2D(0,width,0,height)
        camera.setViewMatrix(osg.Matrix.identity())

        # add subgraph to render
        camera.addChild(geode)

        camera.setName("DistortionCorrectionCamera")

        viewer.addSlave(camera, osg.Matrixd(), osg.Matrixd(), False)

    viewer.getCamera().setNearFarRatio(0.0001)
Example 15
def singleWindowSideBySideCameras(viewer):

    
    wsi = osg.GraphicsContext.getWindowingSystemInterface()
    if  not wsi : 
        osg.notify(osg.NOTICE), "Error, no WindowSystemInterface available, cannot create windows."
        return
    
    # (assumes the Python binding returns the resolution rather than filling C++ out-parameters)
    width, height = wsi.getScreenResolution(osg.GraphicsContext.ScreenIdentifier(0))



    # Not fullscreen
    width /= 2
    height /= 2

    traits = osg.GraphicsContext.Traits()
    traits.x = 100
    traits.y = 100
    traits.width = width
    traits.height = height
    traits.windowDecoration = True
    traits.doubleBuffer = True
    traits.sharedContext = 0

    gc = osg.GraphicsContext.createGraphicsContext(traits)
    if gc.valid() :
        osg.notify(osg.INFO), "  GraphicsWindow has been created successfully."

        # need to ensure that the window is cleared; make sure that the complete window is set to the correct
        # colour rather than just the parts of the window that are under the cameras' viewports
        gc.setClearColor(osg.Vec4f(0.2,0.2,0.6,1.0))
        gc.setClearMask(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
    else:
        osg.notify(osg.NOTICE), "  GraphicsWindow has not been created successfully."


    master = viewer.getCamera()

    # get the default settings for the camera
    fovy, aspectRatio, zNear, zFar = 0.0, 0.0, 0.0, 0.0
    master.getProjectionMatrixAsPerspective(fovy, aspectRatio, zNear, zFar)

    # reset this for the actual aspect ratio of our created window
    windowAspectRatio = float(width)/float(height)
    master.setProjectionMatrixAsPerspective(fovy, windowAspectRatio, 1.0, 10000.0)

    master.setName("MasterCam")

    camera = osg.Camera()
    camera.setCullMask(1)
    camera.setName("Left")
    camera.setGraphicsContext(gc)
    camera.setViewport(osg.Viewport(0, 0, width/2, height))
    buffer =  GL_BACK if (traits.doubleBuffer) else  GL_FRONT
    camera.setDrawBuffer(buffer)
    camera.setReadBuffer(buffer)
    viewer.addSlave(camera, osg.Matrixd.scale(1.0,0.5,1.0), osg.Matrixd())

    camera = osg.Camera()
    camera.setCullMask(2)
    camera.setName("Right")
    camera.setGraphicsContext(gc)
    camera.setViewport(osg.Viewport(width/2, 0, width/2, height))
    buffer =  GL_BACK if (traits.doubleBuffer) else  GL_FRONT
    camera.setDrawBuffer(buffer)
    camera.setReadBuffer(buffer)
    viewer.addSlave(camera, osg.Matrixd.scale(1.0,0.5,1.0), osg.Matrixd())
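# A hedged usage sketch (an assumption, not from the original example): the two slave cameras above
# cull with masks 1 and 2, so the caller can split the scene between the left and right views by
# giving each subgraph a matching node mask before calling singleWindowSideBySideCameras(viewer).
def assignSideBySideMasks(leftSubgraph, rightSubgraph):
    leftSubgraph.setNodeMask(1)    # drawn only by the "Left" camera (cull mask 1)
    rightSubgraph.setNodeMask(2)   # drawn only by the "Right" camera (cull mask 2)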
Esempio n. 16
                elements.push_back(j+(i+1)*noSteps)
                elements.push_back(j+(i)*noSteps)
            polyGeom.addPrimitiveSet(elements)


        # we need to add the texture to the Drawable, we do so by creating a
        # StateSet to contain the Texture StateAttribute.
        stateset = polyGeom.getOrCreateStateSet()
        stateset.setTextureAttributeAndModes(0, texture,osg.StateAttribute.ON)
        stateset.setMode(GL_LIGHTING,osg.StateAttribute.OFF)

        geode = osg.Geode()
        geode.addDrawable(polyGeom)

        # set up the camera to render the textured quad
        camera = osg.Camera()

        # override the main camera's view: absolute reference frame with an identity view and a 2D ortho projection
        camera.setReferenceFrame(osg.Transform.ABSOLUTE_RF)
        camera.setViewMatrix(osg.Matrix.identity())
        camera.setProjectionMatrixAsOrtho2D(0,1280,0,1024)

        # render this camera nested within the main camera's render stage.
        camera.setRenderOrder(osg.Camera.NESTED_RENDER)

        # add subgraph to render
        camera.addChild(geode)

        distortionNode.addChild(camera)
    return distortionNode
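# A hedged usage sketch: the enclosing function (whose opening lines are not shown in this excerpt)
# returns 'distortionNode'; a typical caller pairs it with the pre-render pass that fills 'texture'
# and hands the combined graph to the viewer. 'createPreRenderGraph' is a hypothetical helper name.
def buildDistortedScene(viewer, loadedModel, texture, distortionNode):
    root = osg.Group()
    root.addChild(createPreRenderGraph(loadedModel, texture))   # hypothetical: renders the scene into 'texture'
    root.addChild(distortionNode)                                # draws the warped, textured mesh
    viewer.setSceneData(root)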
Esempio n. 17
            traits.height = src_traits.height
            traits.red = src_traits.red
            traits.green = src_traits.green
            traits.blue = src_traits.blue
            traits.alpha = src_traits.alpha
            traits.depth = src_traits.depth
            traits.pbuffer = True
         else:
            # viewer would use the fullscreen size (unknown here); the pbuffer will use 4096 x 4096 (or the best available)
            traits.width = 1 << 12
            traits.height = 1 << 12
            traits.pbuffer = True
        pbuffer = osg.GraphicsContext.createGraphicsContext(traits)
        if pbuffer.valid() :
            osg.notify(osg.NOTICE), "Pixel buffer has been created successfully."
            camera = osg.Camera(viewer.getCamera())  # copy-construct from the viewer's master camera
            camera.setGraphicsContext(pbuffer)
            camera.setViewport(osg.Viewport(0,0,traits.width,traits.height))
            buffer =  GL_BACK if (pbuffer.getTraits().doubleBuffer) else  GL_FRONT
            camera.setDrawBuffer(buffer)
            camera.setReadBuffer(buffer)
            viewer.setCamera(camera)
        else:
            osg.notify(osg.NOTICE), "Pixel buffer has not been created successfully."

    # Read camera settings for screenshot
    lat = 50
    lon = 10
    alt = 2000
    heading = 0
    incline = 45
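# A hedged sketch (not from the original source): one way to capture the pbuffer camera's output is
# to attach an osg.Image to its colour buffer before rendering and write the image out afterwards.
# The file name is illustrative only.
def captureScreenshot(viewer, width, height):
    image = osg.Image()
    image.allocateImage(width, height, 1, GL_RGBA, GL_UNSIGNED_BYTE)
    viewer.getCamera().attach(osg.Camera.COLOR_BUFFER, image)   # must be attached before the frame is drawn
    viewer.frame()
    osgDB.writeImageFile(image, "screenshot.png")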
Esempio n. 18
    # depth and color attachments will be switched as needed.
    fbos = [osg.FrameBufferObject(), osg.FrameBufferObject()]

    # create the cameras for the individual depth peel layers
    for i in range(_numPasses) :

        # get the pointers to the required fbo, color and depth textures for each camera instance
        # we perform ping ponging between two depth textures
        fbo0 = fbos[0] if i >= 1 else None
        fbo = fbos[1] if i >= 1 else fbos[0]
        colorTexture = _colorTextures[i]
        depthTexture = _depthTextures[1+(i-1)%2] if i >= 1 else _depthTextures[i]
        prevDepthTexture = _depthTextures[1+(i-2)%2] if i >= 2 else None

        # all our peeling layer cameras are post render
        camera = osg.Camera()
        camera.setDataVariance(osg.Object.DYNAMIC)
        camera.setInheritanceMask(osg.Camera.ALL_VARIABLES)
        camera.setRenderOrder(osg.Camera.POST_RENDER, i)
        camera.setClearMask(GL_DEPTH_BUFFER_BIT | GL_COLOR_BUFFER_BIT)
        camera.setClearColor(osg.Vec4f(0, 0, 0, 0))
        camera.setComputeNearFarMode(osg.Camera.DO_NOT_COMPUTE_NEAR_FAR)
        camera.setPreDrawCallback(PreDrawFBOCallback(fbo, fbo0, _texWidth, _texHeight, depthTexture, colorTexture))
        camera.setPostDrawCallback(PostDrawFBOCallback(i == _numPasses - 1))
        camera.setDrawBuffer(GL_COLOR_ATTACHMENT0_EXT)
        camera.setReadBuffer(GL_COLOR_ATTACHMENT0_EXT)
        camera.setAllowEventFocus(False)

        # the peeled layers are rendered with blending forced off
        # and the depth buffer is directly taken from camera 0 via framebuffer blit
        if i > 0 : 
Esempio n. 19
class PosterPrinter (osg.Referenced) :
    # TilePosition: a pair of tile indices (row, column);  TileImages: a map from TilePosition to osg.Image
    
    PosterPrinter()
    
    #* Set to output each sub-image-tile to disk 
    def setOutputTiles(b):
         _outputTiles = b 
    def getOutputTiles():
         return _outputTiles 
    
    #* Set the output sub-image-tile extension, e.g. bmp 
    def setOutputTileExtension(ext):
         _outputTileExt = ext 
    def getOutputTileExtension():
         return _outputTileExt 
    
    #* Set the output poster name, e.g. output.bmp 
    def setOutputPosterName(name):
         _outputPosterName = name 
    def getOutputPosterName():
         return _outputPosterName 
    
    #* Set the size of each sub-image-tile, e.g. 640x480 
    def setTileSize(w, h):
         _tileSize.set(w, h) 
    def getTileSize():
         return _tileSize 
    
    #* Set the final size of the high-res poster, e.g. 6400x4800 
    def setPosterSize(w, h):
         _posterSize.set(w, h) 
    def getPosterSize():
         return _posterSize 
    
    #* Set the capturing camera 
    def setCamera(camera):
         _camera = camera 
    def getCamera():
         return _camera 
    
    #* Set the final poster image, should be already allocated 
    def setFinalPoster(image):
         _finalPoster = image 
    def getFinalPoster():
         return _finalPoster 
    
    def getPosterVisitor():
         return _visitor 
    
    def done():
    
         return  not _isRunning  and   not _isFinishing 
    
    init = void(  osg.Camera* camera )
    init = void(  osg.Matrixd view,  osg.Matrixd proj )
    frame = void(  osg.FrameStamp* fs, osg.Node* node )
    virtual ~PosterPrinter() 
    
    addCullCallbacks = bool(  osg.FrameStamp* fs, osg.Node* node )
    removeCullCallbacks = void( osg.Node* node )
    bindCameraToImage = void( osg.Camera* camera, int row, int col )
    recordImages = void()
    
    _outputTiles = bool()
    _outputTileExt = str()
    _outputPosterName = str()
    _tileSize = osg.Vec2()
    _posterSize = osg.Vec2()
    
    _isRunning = bool()
    _isFinishing = bool()
    _lastBindingFrame = unsigned int()
    _tileRows = int()
    _tileColumns = int()
    _currentRow = int()
    _currentColumn = int()
    _intersector = PosterIntersector()
    _visitor = PosterVisitor()
    
    _currentViewMatrix = osg.Matrixd()
    _currentProjectionMatrix = osg.Matrixd()
    _camera = osg.Camera()
    _finalPoster = osg.Image()
    _images = TileImages()
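# A hedged sketch of how the recorded tiles declared above (TileImages keyed by tile position) could
# be pasted into the final poster, assuming the osg.copyImage helper is exposed by the bindings; the
# real logic belongs to recordImages(), whose translation is not shown in full in this excerpt.
def pasteTiles(images, finalPoster, tileWidth, tileHeight):
    for (row, col), tile in images.items():
        # each tile's origin inside the poster is (col * tileWidth, row * tileHeight)
        osg.copyImage(tile, 0, 0, 0, tile.s(), tile.t(), 1,
                      finalPoster, col * tileWidth, row * tileHeight, 0, False)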
Esempio n. 20
def main(argv):
    
    arguments = osg.ArgumentParser( argv )
    usage = arguments.getApplicationUsage()
    usage.setDescription( arguments.getApplicationName() +
                           " is the example which demonstrates how to render high-resolution images (posters).")
    usage.setCommandLineUsage( arguments.getApplicationName() + " [options] scene_file" )
    usage.addCommandLineOption( "-h or --help", "Display this information." )
    usage.addCommandLineOption( "--color <r> <g> <b>", "The background color." )
    usage.addCommandLineOption( "--ext <ext>", "The output tiles' extension (Default: bmp)." )
    usage.addCommandLineOption( "--poster <filename>", "The output poster's name (Default: poster.bmp)." )
    usage.addCommandLineOption( "--tilesize <w> <h>", "Size of each image tile (Default: 640 480)." )
    usage.addCommandLineOption( "--finalsize <w> <h>", "Size of the poster (Default: 6400 4800)." )
    usage.addCommandLineOption( "--enable-output-poster", "Output the final poster file (Default)." )
    usage.addCommandLineOption( "--disable-output-poster", "Don't output the final poster file." )
    #usage.addCommandLineOption( "--enable-output-tiles", "Output all tile files." )
    #usage.addCommandLineOption( "--disable-output-tiles", "Don't output all tile files (Default)." )
    usage.addCommandLineOption( "--use-fb", "Use Frame Buffer for rendering tiles (Default, recommended).")
    usage.addCommandLineOption( "--use-fbo", "Use Frame Buffer Object for rendering tiles.")
    usage.addCommandLineOption( "--use-pbuffer","Use Pixel Buffer for rendering tiles.")
    usage.addCommandLineOption( "--use-pbuffer-rtt","Use Pixel Buffer RTT for rendering tiles.")
    usage.addCommandLineOption( "--inactive", "Inactive capturing mode." )
    usage.addCommandLineOption( "--camera-eye <x> <y> <z>", "Set eye position in inactive mode." )
    usage.addCommandLineOption( "--camera-latlongheight <lat> <lon> <h>", "Set eye position on earth in inactive mode." )
    usage.addCommandLineOption( "--camera-hpr <h> <p> <r>", "Set eye rotation in inactive mode." )
    
    if  arguments.read("-h")  or  arguments.read("--help")  :
        usage.write( std.cout )
        return 1
    
    # Poster arguments
    activeMode = True
    outputPoster = True
    #bool outputTiles = False
    tileWidth, tileHeight = 640, 480
    posterWidth, posterHeight = 640*2, 480*2
    posterName, extName = "poster.bmp", "bmp"
    bgColor = osg.Vec4(0.2, 0.2, 0.6, 1.0)
    renderImplementation = osg.Camera.FRAME_BUFFER_OBJECT
    
    while  arguments.read("--inactive")  :  activeMode = False 
    while  arguments.read("--color", bgColor.r(), bgColor.g(), bgColor.b())  : 
    while  arguments.read("--tilesize", tileWidth, tileHeight)  : 
    while  arguments.read("--finalsize", posterWidth, posterHeight)  : 
    while  arguments.read("--poster", posterName)  : 
    while  arguments.read("--ext", extName)  : 
    while  arguments.read("--enable-output-poster")  :  outputPoster = True 
    while  arguments.read("--disable-output-poster")  :  outputPoster = False 
    #while  arguments.read("--enable-output-tiles")  :  outputTiles = True 
    #while  arguments.read("--disable-output-tiles")  :  outputTiles = False 
    while  arguments.read("--use-fbo") :  renderImplementation = osg.Camera.FRAME_BUFFER_OBJECT 
    while  arguments.read("--use-pbuffer") :  renderImplementation = osg.Camera.PIXEL_BUFFER 
    while  arguments.read("--use-pbuffer-rtt") :  renderImplementation = osg.Camera.PIXEL_BUFFER_RTT 
    while  arguments.read("--use-fb") :  renderImplementation = osg.Camera.FRAME_BUFFER 
    
    # Camera settings for inactive screenshot
    useLatLongHeight = True
    eye = osg.Vec3d()
    latLongHeight = osg.Vec3d( 50.0, 10.0, 2000.0 )
    hpr = osg.Vec3d( 0.0, 0.0, 0.0 )
    if  arguments.read("--camera-eye", eye.x(), eye.y(), eye.z())  :
        useLatLongHeight = False
        activeMode = False
    elif  arguments.read("--camera-latlongheight", latLongHeight.x(), latLongHeight.y(), latLongHeight.z())  :
        activeMode = False
        latLongHeight.set( osg.DegreesToRadians(latLongHeight.x()),
                           osg.DegreesToRadians(latLongHeight.y()),
                           latLongHeight.z() )
    if  arguments.read("--camera-hpr", hpr.x(), hpr.y(), hpr.z())  :
        activeMode = False
        hpr.set( osg.DegreesToRadians(hpr.x()),
                 osg.DegreesToRadians(hpr.y()),
                 osg.DegreesToRadians(hpr.z()) )
    
    # Construct scene graph
    scene = osgDB.readNodeFiles( arguments )
    if   not scene  : scene = osgDB.readNodeFile( "cow.osgt" )
    if   not scene  :
        print(arguments.getApplicationName() + ": No data loaded")
        return 1
    
    # Create camera for rendering tiles offscreen. FrameBuffer is recommended because it requires less memory.
    camera = osg.Camera()
    camera.setClearColor( bgColor )
    camera.setClearMask( GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT )
    camera.setReferenceFrame( osg.Transform.ABSOLUTE_RF )
    camera.setRenderOrder( osg.Camera.PRE_RENDER )
    camera.setRenderTargetImplementation( renderImplementation )
    camera.setViewport( 0, 0, tileWidth, tileHeight )
    camera.addChild( scene )
    
    # Set the printer
    printer = PosterPrinter()
    printer.setTileSize( tileWidth, tileHeight )
    printer.setPosterSize( posterWidth, posterHeight )
    printer.setCamera( camera )
    
    posterImage = 0
    if  outputPoster  :
        posterImage = osg.Image()
        posterImage.allocateImage( posterWidth, posterHeight, 1, GL_RGBA, GL_UNSIGNED_BYTE )
        printer.setFinalPoster( posterImage )
        printer.setOutputPosterName( posterName )
    
#if 0
    # While recording sub-images of the poster, the scene will always be traversed twice, once from each of
    # its two parent nodes: the root and the camera. Sometimes this is undesirable.
    # To prevent this behaviour, we can use a switch node to enable one parent and disable the other.
    # However, this solution also needs to be used with care, as the window will go blank while taking
    # snapshots and only recover afterwards.
    root = osg.Switch()
    root.addChild( scene, True )
    root.addChild( camera, False )
#else:
    root = osg.Group()
    root.addChild( scene )
    root.addChild( camera )
#endif
    
    viewer = osgViewer.Viewer()
    viewer.setSceneData( root )
    viewer.getDatabasePager().setDoPreCompile( False )
    
    if  renderImplementation==osg.Camera.FRAME_BUFFER  :
        # FRAME_BUFFER requires the window resolution to be equal to or greater than the size being copied
        viewer.setUpViewInWindow( 100, 100, tileWidth, tileHeight )
    else:
        # We want to see the console output, so just render in a window
        viewer.setUpViewInWindow( 100, 100, 800, 600 )
    
    if  activeMode  :
        viewer.addEventHandler( PrintPosterHandler(printer) )
        viewer.addEventHandler( osgViewer.StatsHandler() )
        viewer.addEventHandler( osgGA.StateSetManipulator(viewer.getCamera().getOrCreateStateSet()) )
        viewer.setCameraManipulator( osgGA.TrackballManipulator() )
        viewer.run()
    else:
        camera = viewer.getCamera()
        if not useLatLongHeight :
            computeViewMatrix( camera, eye, hpr )
        else:
            computeViewMatrixOnEarth( camera, scene, latLongHeight, hpr )
        
        renderer = CustomRenderer( camera )
        camera.setRenderer( renderer )
        viewer.setThreadingModel( osgViewer.Viewer.SingleThreaded )
        
        # Realize and initiate the first PagedLOD request
        viewer.realize()
        viewer.frame()
        
        printer.init( camera )
        while   not printer.done()  :
            viewer.advance()
            
            # Keep updating and culling until full level of detail is reached
            renderer.setCullOnly( True )
            while  viewer.getDatabasePager().getRequestsInProgress()  :
                viewer.updateTraversal()
                viewer.renderingTraversals()
            
            renderer.setCullOnly( False )
            printer.frame( viewer.getFrameStamp(), viewer.getSceneData() )
            viewer.renderingTraversals()
    return 0
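# 'computeViewMatrix' used above is not defined in this excerpt; a plausible hedged sketch of it
# builds a world transform from the eye position and the heading/pitch/roll angles (already in
# radians) and inverts it to obtain the camera's view matrix.
def computeViewMatrix(camera, eye, hpr):
    matrix = osg.Matrixd()
    matrix.makeTranslate(eye)
    matrix.preMult(osg.Matrixd.rotate(hpr.x(), 0.0, 0.0, 1.0))   # heading about Z
    matrix.preMult(osg.Matrixd.rotate(hpr.y(), 1.0, 0.0, 0.0))   # pitch about X
    matrix.preMult(osg.Matrixd.rotate(hpr.z(), 0.0, 1.0, 0.0))   # roll about Y
    camera.setViewMatrix(osg.Matrixd.inverse(matrix))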

# Translated from file 'PosterPrinter.cpp'

#include <osg/ClusterCullingCallback>
#include <osgDB/ReadFile>
#include <osgDB/WriteFile>
#include <string.h>
#include <iostream>
#include <sstream>
#include "PosterPrinter.h"

# PagedLoadingCallback: Callback for loading paged nodes while doing intersecting test 
class PagedLoadingCallback (osgUtil.IntersectionVisitor.ReadCallback) :
    def readNodeFile(filename):
        return osgDB.readNodeFile( filename )

g_pagedLoadingCallback = PagedLoadingCallback()
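# A hedged usage sketch: the read callback is intended to be installed on an
# osgUtil.IntersectionVisitor so that PagedLOD children are loaded on demand while the
# intersection test runs.
def makePagedIntersectionVisitor(intersector):
    iv = osgUtil.IntersectionVisitor(intersector)
    iv.setReadCallback(g_pagedLoadingCallback)
    return iv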

# LodCullingCallback: Callback for culling LODs and selecting the highest level 
class LodCullingCallback (osg.NodeCallback) :
    virtual void operator()( osg.Node* node, osg.NodeVisitor* nv )
        lod = static_cast<osg.LOD*>(node)
        if  lod  and  lod.getNumChildren()>0  :
            lod.getChild(lod.getNumChildren()-1).accept(*nv)

g_lodCullingCallback = LodCullingCallback()

# PagedCullingCallback: Callback for culling paged nodes and selecting the highest level 
class PagedCullingCallback (osg.NodeCallback) :
    virtual void operator()( osg.Node* node, osg.NodeVisitor* nv )
        pagedLOD = static_cast<osg.PagedLOD*>(node)
        if  pagedLOD  and  pagedLOD.getNumChildren()>0  :
            numChildren = pagedLOD.getNumChildren()
            updateTimeStamp = nv.getVisitorType()==osg.NodeVisitor.CULL_VISITOR
            if  nv.getFrameStamp()  and  updateTimeStamp  :
                timeStamp =  nv.getFrameStamp().getReferenceTime() if (nv.getFrameStamp()) else 0.0
                frameNumber =  nv.getFrameStamp().getFrameNumber() if (nv.getFrameStamp()) else 0
                
                pagedLOD.setFrameNumberOfLastTraversal( frameNumber )
                pagedLOD.setTimeStamp( numChildren-1, timeStamp )
                pagedLOD.setFrameNumber( numChildren-1, frameNumber )
                pagedLOD.getChild(numChildren-1).accept(*nv)
            
            # Request for child
            if ( not pagedLOD.getDisableExternalChildrenPaging()  and 
                 nv.getDatabaseRequestHandler()  and 
                 numChildren < pagedLOD.getNumRanges() ) :
                if  pagedLOD.getDatabasePath().empty()  :
                    nv.getDatabaseRequestHandler().requestNodeFile(
                        pagedLOD.getFileName(numChildren), nv.getNodePath(),
                        1.0, nv.getFrameStamp(),
                        pagedLOD.getDatabaseRequest(numChildren), pagedLOD.getDatabaseOptions() )
                else:
                    nv.getDatabaseRequestHandler().requestNodeFile(
                        pagedLOD.getDatabasePath()+pagedLOD.getFileName(numChildren), nv.getNodePath(),
                        1.0, nv.getFrameStamp(),
                        pagedLOD.getDatabaseRequest(numChildren), pagedLOD.getDatabaseOptions() )
        #node.traverse(*nv)

g_pagedCullingCallback = PagedCullingCallback()

# PosterVisitor: A visitor for adding culling callbacks to newly allocated paged nodes 
PosterVisitor.PosterVisitor() :
    osg.NodeVisitor(osg.NodeVisitor.TRAVERSE_ALL_CHILDREN)
    _appliedCount = 0
    _needToApplyCount = 0
    _addingCallbacks = True

void PosterVisitor.apply( osg.LOD node )
    #if   not hasCullCallback(node.getCullCallback(), g_lodCullingCallback)  :
#    
#        if   not node.getName().empty()  :
#        
#            itr = _pagedNodeNames.find( node.getName() )
#            itr = _pagedNodeNames.find( node.getName() )
#            if  itr != _pagedNodeNames.end()  :
#            
#                insertCullCallback( node, g_lodCullingCallback )
#                _appliedCount += 1
#            
#        
#    
#    elif not _addingCallbacks :
#        
#        node.removeCullCallback( g_lodCullingCallback )
#        _appliedCount -= 1
#    
    traverse( node )

void PosterVisitor.apply( osg.PagedLOD node )
    if   not hasCullCallback(node.getCullCallback(), g_pagedCullingCallback)  :
        for i in range(node.getNumFileNames()) :
            if  node.getFileName(i).empty()  : continue
            
            itr = _pagedNodeNames.find( node.getFileName(i) )
            if  itr != _pagedNodeNames.end()  :
                node.addCullCallback( g_pagedCullingCallback )
                _appliedCount += 1
            break
def main(argv):


    
    # use an ArgumentParser object to manage the program arguments.
    arguments = osg.ArgumentParser(argv)

    arguments.getApplicationUsage().addCommandLineOption("--fbo","Use Frame Buffer Object for render to texture, where supported.")
    arguments.getApplicationUsage().addCommandLineOption("--pbuffer-rtt","Use Pixel Buffer for render to texture, where supported.")
    arguments.getApplicationUsage().addCommandLineOption("--nopds", "Don't use packed depth stencil.")
    arguments.getApplicationUsage().addCommandLineOption("--fbo-samples","")
    arguments.getApplicationUsage().addCommandLineOption("--color-samples", "")

    # construct the viewer.
    viewer = osgViewer.Viewer(arguments)

    # add stats
    viewer.addEventHandler( osgViewer.StatsHandler() )

    # if user request help write it out to cout.
    if arguments.read("-h")  or  arguments.read("--help") :
        arguments.getApplicationUsage().write(std.cout)
        return 1

    renderImplementation = osg.Camera.FRAME_BUFFER_OBJECT
    colorSamples, samples = 0, 0
    usePDS = True

    while arguments.read("--fbo") :  renderImplementation = osg.Camera.FRAME_BUFFER_OBJECT 
    while arguments.read("--pbuffer-rtt") :  renderImplementation = osg.Camera.PIXEL_BUFFER_RTT 
    while arguments.read("--nopds") :  usePDS = False  
    while arguments.read("--fbo-samples", samples) : 
    while arguments.read("--color-samples", colorSamples) : 
    

    rootNode = osg.Group()
    rootNode.getOrCreateStateSet().setMode(GL_LIGHTING, osg.StateAttribute.OFF)

    # creates texture to be rendered
    texture = osg.Texture2D()
    texture.setTextureSize(1024, 1024)
    texture.setInternalFormat(GL_RGBA)
    texture.setFilter(osg.Texture2D.MIN_FILTER, osg.Texture2D.LINEAR)
    texture.setFilter(osg.Texture2D.MAG_FILTER, osg.Texture2D.LINEAR)
    texture.setWrap(osg.Texture.WRAP_S, osg.Texture.CLAMP_TO_EDGE)
    texture.setWrap(osg.Texture.WRAP_T, osg.Texture.CLAMP_TO_EDGE)
    texture.setBorderColor(osg.Vec4(0, 0, 0, 0))

    # creates rtt camera
    rttCamera = osg.Camera()
    rttCamera.setClearMask(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT | GL_STENCIL_BUFFER_BIT)
    rttCamera.setClearColor(osg.Vec4(0.0, 0.4, 0.5, 0.0))
    rttCamera.setClearStencil(0)
    rttCamera.setReferenceFrame(osg.Transform.ABSOLUTE_RF)
    rttCamera.setViewMatrix(osg.Matrixd.identity())
    rttCamera.setViewport(0, 0, 1024, 1024)
    rttCamera.setRenderOrder(osg.Camera.PRE_RENDER)
    rttCamera.setRenderTargetImplementation(renderImplementation)

    if usePDS :
        rttCamera.attach(osg.Camera.PACKED_DEPTH_STENCIL_BUFFER, GL_DEPTH_STENCIL_EXT)
    else:
        # this doesn't work on NVIDIA/Vista 64bit
        # FBO status = 0x8cd6 (FRAMEBUFFER_INCOMPLETE_ATTACHMENT_EXT)
        rttCamera.attach(osg.Camera.DEPTH_BUFFER, GL_DEPTH_COMPONENT)
        rttCamera.attach(osg.Camera.STENCIL_BUFFER, GL_STENCIL_INDEX8_EXT)

    rttCamera.attach(osg.Camera.COLOR_BUFFER, texture, 0, 0, False, samples, colorSamples)
    rttCamera.setCullingMode(osg.Camera.VIEW_FRUSTUM_SIDES_CULLING)

    # creates rtt subtree
    g0 = osg.Group()
    g0.addChild(createMask())
    g0.addChild(createGeometry())
    rttCamera.addChild(g0)
    rootNode.addChild(rttCamera)

    # creates textured quad with result
    rootNode.addChild(createTextureQuad(texture))
    
    # add model to the viewer.
    viewer.setSceneData( rootNode )

    return viewer.run()
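# 'createTextureQuad' used above is not shown in this excerpt; a minimal hedged sketch of such a
# helper builds a textured quad geode that displays the render-to-texture result.
def createTextureQuad(texture):
    quad = osg.createTexturedQuadGeometry(osg.Vec3(0.0, 0.0, 0.0),
                                          osg.Vec3(1.0, 0.0, 0.0),
                                          osg.Vec3(0.0, 0.0, 1.0))
    geode = osg.Geode()
    geode.addDrawable(quad)
    stateset = geode.getOrCreateStateSet()
    stateset.setTextureAttributeAndModes(0, texture, osg.StateAttribute.ON)
    return geode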


if __name__ == "__main__":
    main(sys.argv)