def paintGL(self):
    newTime = time.time()
    deltaTime = newTime - self._prevTime
    # work around double repaint events collecting in the queue
    if deltaTime == 0.0:
        return
    self._prevTime = newTime

    # letterbox the viewport to the target aspect ratio
    width, height = self.calculateAspect(self.width(), self.height())
    viewport = (int((self.width() - width) * 0.5),
                int((self.height() - height) * 0.5),
                width,
                height)

    if self._scene:
        # evaluate the animation channels at the current time (in beats)
        uniforms = self._animator.evaluate(self._timer.time)
        textureUniforms = self._animator.additionalTextures(self._timer.time)

        # expose the frame state to an optional per-project hook script
        cameraData = self._cameraData
        scene = self._scene
        modifier = currentProjectDirectory().join('animationprocessor.py')
        if modifier.exists():
            beats = self._timer.time
            execfile(modifier, globals(), locals())

        for name in self._textures:
            uniforms[name] = self._textures[name]._id

        self._scene.drawToScreen(self._timer.beatsToSeconds(self._timer.time),
                                 self._timer.time,
                                 uniforms,
                                 viewport,
                                 additionalTextureUniforms=textureUniforms)
    else:
        # no scene active, time cursor outside any enabled shots?
        global _noSignalImage
        if _noSignalImage is None:
            _noSignalImage = loadImage(FilePath(__file__).parent().join('icons', 'nosignal.png'))
        glDisable(GL_DEPTH_TEST)
        if _noSignalImage:
            glEnable(GL_BLEND)
            glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
            Scene.drawColorBufferToScreen(_noSignalImage, viewport)
            glDisable(GL_BLEND)

    # blend the 2D overlay buffer on top of the final image
    if self.__overlays:
        image = self.__overlays.colorBuffer()
        if image:
            color = (self.__overlays.overlayColor().red() / 255.0,
                     self.__overlays.overlayColor().green() / 255.0,
                     self.__overlays.overlayColor().blue() / 255.0,
                     self.__overlays.overlayColor().alpha() / 255.0)
            glDisable(GL_DEPTH_TEST)
            glEnable(GL_BLEND)
            glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
            Scene.drawColorBufferToScreen(image, viewport, color)
            glDisable(GL_BLEND)
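# Hedged sketch of a project-local animationprocessor.py hook as consumed by paintGL()
# above: the script is exec'd with the frame's locals in scope, so `beats`, `uniforms`,
# `cameraData` and `scene` are readable here, and mutating the `uniforms` dict in place
# is what actually reaches the shader. The uniform name `uFlash` is purely illustrative.
import math

# pulse once per beat on top of whatever the animation channels evaluated to
uniforms['uFlash'] = max(0.0, 1.0 - math.fmod(beats, 1.0))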
def initializeGL(self):
    glEnable(GL_DEPTH_TEST)
    glDepthFunc(GL_LEQUAL)
    # glDepthMask(GL_TRUE)

    IMAGE_EXTENSIONS = '.png', '.bmp', '.tga'
    textureFolder = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'Textures')
    if os.path.exists(textureFolder):
        for texture in os.listdir(textureFolder):
            fname, ext = os.path.splitext(texture)
            if ext.lower() in IMAGE_EXTENSIONS:
                self._textures[fname] = loadImage(os.path.join(textureFolder, texture))

    self._prevTime = time.time()
    self._timer.kick()
def initializeGL(self):
    print(glGetString(GL_VERSION))

    glEnable(GL_DEPTH_TEST)
    glDepthFunc(GL_LEQUAL)
    # glDepthMask(GL_TRUE)

    IMAGE_EXTENSIONS = '.png', '.bmp', '.tga'
    textureFolder = FilePath(__file__).join('..', 'Textures').abs()
    if textureFolder.exists():
        for texture in textureFolder.iter():
            if texture.ext() in IMAGE_EXTENSIONS:
                self._textures[texture.name()] = loadImage(textureFolder.join(texture))

    self._prevTime = time.time()
    self._timer.kick()
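# Minimal sketch of the FilePath semantics the texture scan above relies on; the real
# helper lives elsewhere in the codebase, and this only illustrates the assumed behaviour
# of the methods used here (join, abs, exists, iter, ext, name), mirroring the older
# os.path version of initializeGL.
import os

class FilePathSketch(str):
    def join(self, *parts):
        return FilePathSketch(os.path.join(self, *parts))

    def abs(self):
        # resolve '..' segments into an absolute path
        return FilePathSketch(os.path.abspath(self))

    def exists(self):
        return os.path.exists(self)

    def iter(self):
        # yield the directory's entries as FilePathSketch instances
        for entry in os.listdir(self):
            yield FilePathSketch(entry)

    def ext(self):
        # lower-case extension including the dot, e.g. '.png'
        return os.path.splitext(self)[1].lower()

    def name(self):
        # file name without its extension
        return os.path.splitext(os.path.basename(self))[0]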
def initializeGL(self):
    # TODO: Handle re-parenting of the widget in PySide6: it invalidates the GL context, so we would
    #  have to dirty every cache, or simply setCentralWidget and never dock the 3D view. Undocking is
    #  a fundamental dual-monitor / beamer feature though, so we may just have to deal with it. A lazy
    #  choice on Qt's part either way: we have to reload EVERY model, texture and buffer.
    print(glGetString(GL_VERSION))

    glEnable(GL_DEPTH_TEST)
    glDepthFunc(GL_LEQUAL)
    # glDepthMask(GL_TRUE)

    IMAGE_EXTENSIONS = '.png', '.bmp', '.tga'
    textureFolder = FilePath(__file__).join('..', 'Textures').abs()
    if textureFolder.exists():
        for texture in textureFolder.iter():
            if texture.ext() in IMAGE_EXTENSIONS:
                self._textures[texture.name()] = loadImage(textureFolder.join(texture))

    self._prevTime = time.time()
    self._timer.kick()

    if qt_wrapper == 'PySide6':
        SceneView.screenFBO = self.defaultFramebufferObject()
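# Hedged sketch of why screenFBO is captured above: under PySide6 a QOpenGLWidget renders into
# its own framebuffer object, so rebinding framebuffer 0 after an off-screen pass no longer
# targets the widget. Render code that used to bind 0 is assumed to bind the stored handle
# instead; the helper name below is illustrative, not part of the original code.
from OpenGL.GL import GL_FRAMEBUFFER, glBindFramebuffer

def bindScreenFramebuffer():
    # fall back to 0 for wrappers where the default framebuffer really is the window surface
    glBindFramebuffer(GL_FRAMEBUFFER, getattr(SceneView, 'screenFBO', 0))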