def playVideo(self, video):
    # Check whether the video is loadable.
    try:
        # Load the video texture.
        self.tex = MovieTexture("MovieTexture")
        # print video
        self.tex.read(video)
        # Set up a fullscreen card to set the video texture on it.
        cm = CardMaker("Movie Card")
        cm.setFrameFullscreenQuad()
        cm.setUvRange(self.tex)
        self.card = NodePath(cm.generate())
        self.card.reparentTo(base.render2d)
        self.card.setTexture(self.tex)
        self.card.setTexScale(TextureStage.getDefault(), self.tex.getTexScale())
        # Load the audio track.
        self.sound = loader.loadSfx(video)
        # Synchronize the video to the sound.
        self.tex.synchronizeTo(self.sound)
        # Play the video and audio.
        self.sound.play()
        # Start the task which checks whether the video has finished.
        taskMgr.add(self.isVideoFinish, "task_isVideoFinished")
    except Exception:
        logging.error("Failed to load video: %s %s", video, sys.exc_info())
        self.stopVideo()
        base.messenger.send(self.vidFinEvt)
def renderQuadInto(self, xsize, ysize, colortex=None,
                   cmode=GraphicsOutput.RTMBindOrCopy, auxtex=None):
    buffer = self.createBuffer("filter-stage", xsize, ysize, colortex, cmode, auxtex)
    if buffer is None:
        return None
    cm = CardMaker("filter-stage-quad")
    cm.setFrameFullscreenQuad()
    quad = NodePath(cm.generate())
    quad.setDepthTest(0)
    quad.setDepthWrite(0)
    quad.setColor(Vec4(1, 0.5, 0.5, 1))
    quadcamnode = Camera("filter-quad-cam")
    lens = OrthographicLens()
    lens.setFilmSize(2, 2)
    lens.setFilmOffset(0, 0)
    lens.setNearFar(-1000, 1000)
    quadcamnode.setLens(lens)
    quadcam = quad.attachNewNode(quadcamnode)
    buffer.getDisplayRegion(0).setCamera(quadcam)
    buffer.getDisplayRegion(0).setActive(1)
    return quad, buffer
def __init__(self, texture_cube, window_size=512, texture_size=512):
    super().__init__()
    self.cube_ind = 0
    self.num_slices = texture_cube.shape[0]
    self.cube = texture_cube

    # Set frame rate
    ShowBaseGlobal.globalClock.setMode(ClockObject.MLimited)
    ShowBaseGlobal.globalClock.setFrameRate(40)  # can lock this at

    # Create texture stage
    self.texture = Texture("Stimulus")
    # self.texture.setMagfilter(SamplerState.FT_nearest)
    # self.texture.setMinfilter(SamplerState.FT_nearest)
    self.texture.setup2dTexture(texture_size, texture_size,
                                Texture.T_unsigned_byte, Texture.F_luminance)
    self.texture.setRamImageAs(self.cube[0, :, :], "L")
    self.textureStage = TextureStage("Stimulus")

    # Create scenegraph
    cm = CardMaker('card1')
    cm.setFrameFullscreenQuad()
    self.card1 = self.aspect2d.attachNewNode(cm.generate())
    self.card1.setTexture(self.textureStage, self.texture)  # ts, tx

    ShowBaseGlobal.base.setFrameRateMeter(True)
    self.taskMgr.add(self.setTextureTask, "setTextureTask")
def create_cards(self):
    """
    Create cards: these are panda3d objects that are required for displaying
    textures. You can't just have a disembodied texture. In pandastim (at
    least for now) we are only showing 2d projections of textures, so we use
    cards.
    """
    cardmaker = CardMaker("stimcard")
    cardmaker.setFrameFullscreenQuad()

    # Binocular cards
    if self.current_stim_params['stim_type'] == 'b':
        self.setBackgroundColor((0, 0, 0, 1))  # without this the cards will appear washed out
        self.left_card = self.aspect2d.attachNewNode(cardmaker.generate())
        self.left_card.setAttrib(
            ColorBlendAttrib.make(ColorBlendAttrib.M_add))  # otherwise only right card shows
        self.right_card = self.aspect2d.attachNewNode(cardmaker.generate())
        self.right_card.setAttrib(ColorBlendAttrib.make(ColorBlendAttrib.M_add))
        if self.profile_on:
            self.center_indicator = OnscreenText("x",
                                                 style=1,
                                                 fg=(1, 1, 1, 1),
                                                 bg=(0, 0, 0, .8),
                                                 pos=self.current_stim_params['position'],
                                                 scale=0.05)
    # Tex card
    elif self.current_stim_params['stim_type'] == 's':
        self.card = self.aspect2d.attachNewNode(cardmaker.generate())
        self.card.setColor((1, 1, 1, 1))  # ?
        self.card.setScale(self.scale)
    return
def __init__(self, experiment_structure, profile_on=False):
    super().__init__()
    self.current_stim_num = 0
    self.stim_classes = experiment_structure['stim_classes']
    self.stim_values = experiment_structure['stim_values']
    self.stim_durations = experiment_structure['stim_durations']
    self.stim_change_times = np.cumsum(self.stim_durations)  # times to switch
    self.bgcolor = (1, 1, 1, 1)

    # Set up profiling if desired
    if profile_on:
        PStatClient.connect()

    # Window properties
    self.windowProps = WindowProperties()
    self.windowProps.setSize(512, 512)

    # Create scenegraph
    cm = CardMaker('card')
    cm.setFrameFullscreenQuad()
    self.card = self.aspect2d.attachNewNode(cm.generate())
    self.card.setScale(np.sqrt(8))
    self.card.setColor(self.bgcolor)  # make this an add mode

    # Set initial texture
    self.taskMgr.add(self.set_stim_task, "set_stimulus_class")
def renderQuadInto(self, mul=1, div=1, align=1, depthtex=None, colortex=None,
                   auxtex0=None, auxtex1=None):
    texgroup = (depthtex, colortex, auxtex0, auxtex1)
    winx, winy = self.getScaledSize(mul, div, align)
    depthbits = bool(depthtex is not None)
    buffer = self.createBuffer('filter-stage', winx, winy, texgroup, depthbits)
    if buffer is None:
        return None
    cm = CardMaker('filter-stage-quad')
    cm.setFrameFullscreenQuad()
    quad = NodePath(cm.generate())
    quad.setDepthTest(0)
    quad.setDepthWrite(0)
    quad.setColor(1, 0.5, 0.5, 1)
    quadcamnode = Camera('filter-quad-cam')
    lens = OrthographicLens()
    lens.setFilmSize(2, 2)
    lens.setFilmOffset(0, 0)
    lens.setNearFar(-1000, 1000)
    quadcamnode.setLens(lens)
    quadcam = quad.attachNewNode(quadcamnode)
    dr = buffer.makeDisplayRegion((0, 1, 0, 1))
    dr.disableClears()
    dr.setCamera(quadcam)
    dr.setActive(True)
    dr.setScissorEnabled(False)
    buffer.setClearColor((0, 0, 0, 1))
    buffer.setClearColorActive(True)
    self.buffers.append(buffer)
    self.sizes.append((mul, div, align))
    return quad
def __init__(self, texture_array, scale=0.2, window_size=512, texture_size=512):
    super().__init__()
    self.scale = scale
    self.current_scale = 1
    self.texture_array = texture_array
    self.texture_dtype = type(self.texture_array.flat[0])
    self.ndims = self.texture_array.ndim
    self.center_shift = TransformState.make_pos2d((-0.5, -0.5))
    self.shift_back = TransformState.make_pos2d((0.5, 0.5))

    # Create texture stage
    self.texture = Texture("Stimulus")
    self.texture.setup2dTexture(texture_size, texture_size,
                                Texture.T_unsigned_byte, Texture.F_luminance)
    self.texture.setWrapU(Texture.WM_clamp)
    self.texture.setWrapV(Texture.WM_clamp)
    self.texture.setRamImageAs(self.texture_array, "L")
    self.textureStage = TextureStage("Stimulus")

    # Create scenegraph
    cm = CardMaker('card1')
    cm.setFrameFullscreenQuad()
    self.card1 = self.aspect2d.attachNewNode(cm.generate())
    self.card1.setTexture(self.textureStage, self.texture)  # ts, tx

    if self.scale != 0:
        self.taskMgr.add(self.scaleTextureTask, "scaleTextureTask")
def __initSceneGraph(self):
    self.point_path = self.host_planet.point_path.attachNewNode("unit_center_node")
    self.model_path = self.point_path.attachNewNode("unit_node")
    self.model_path.reparentTo(self.point_path)
    self.model_path.setPos(Vec3(0, 6, 0))
    self.model_path.setPythonTag('pyUnit', self)

    rad = 1
    cnode = CollisionNode("coll_sphere_node")
    cnode.addSolid(CollisionBox(Point3(-rad, -rad, -rad), Point3(rad, rad, rad)))
    cnode.setIntoCollideMask(BitMask32.bit(1))
    cnode.setTag('unit', str(id(self)))
    self.cnode_path = self.model_path.attachNewNode(cnode)
    # self.cnode_path.show()

    tex = loader.loadTexture("models/billboards/flare.png")
    cm = CardMaker('quad')
    cm.setFrameFullscreenQuad()
    self.quad_path = self.model_path.attachNewNode(cm.generate())
    self.quad_path.setTexture(tex)
    self.quad_path.setTransparency(TransparencyAttrib.MAlpha)
    self.quad_path.setBillboardPointEye()
    self.quad_path.setColor(self.player.color)
def loadVideo(videoFileName, loop=False):
    videoPathStr = 'Video/{}'
    videoPathStr = videoPathStr.format(videoFileName)
    try:
        tex = MovieTexture(videoFileName)
        success = tex.read(videoPathStr)
        assert success, "Failed to load video!"
        # Set up a fullscreen card to set the video texture on.
        cm = CardMaker("My Fullscreen Card")
        cm.setFrameFullscreenQuad()
        # Tell the CardMaker to create texture coordinates that take into
        # account the padding region of the texture.
        cm.setUvRange(tex)
        # Now place the card in the scene graph and apply the texture to it.
        card = render2d.attachNewNode(cm.generate())
        card.setTexture(tex)
        card.hide()
        sound = loader.loadMusic(videoPathStr)
        sound.setLoop(loop)
        # Synchronize the video to the sound.
        tex.synchronizeTo(sound)
        return sound, card
    except Exception as e:
        # logging.debug("loadvideo: {}".format(traceback.format_exc()))
        pass
    # If loading failed, sound and card were never assigned, so return None for both.
    return None, None
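# A minimal usage sketch for the loadVideo() helper above (the file name
# "intro.mp4" is only an illustrative placeholder): the card comes back hidden,
# so show it before starting playback; the MovieTexture stays in sync with the
# AudioSound that drives it.
sound, card = loadVideo("intro.mp4", loop=False)
if sound is not None:
    card.show()
    sound.play()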
class FirstTry(visual):

    def setup(self):
        self.tex1 = MovieTexture('videos/saturn5_apollo_launch.mp4')
        assert self.tex1.read('videos/saturn5_apollo_launch.mp4')
        self.tex2 = MovieTexture('videos/boards_eye_view.mp4')
        assert self.tex2.read('videos/boards_eye_view.mp4')

        self.cm1 = CardMaker('saturn')
        self.cm1.setFrameFullscreenQuad()
        self.cm1.setUvRange(self.tex1)
        self.card1 = NodePath(self.cm1.generate())
        self.card1.reparentTo(self.path)
        self.card1.setPos(0, 0, 10)
        self.card1.setP(50)

        self.cm2 = CardMaker('board')
        self.cm2.setFrameFullscreenQuad()
        self.cm2.setUvRange(self.tex2)
        self.card2 = NodePath(self.cm2.generate())
        self.card2.reparentTo(self.path)
        self.card2.setPos(0, 0, -10)
        self.card2.setP(-50)

        self.card1.setTexture(self.tex1)
        self.card1.setTexScale(TextureStage.getDefault(), self.tex1.getTexScale())
        self.card2.setTexture(self.tex2)
        self.card2.setTexScale(TextureStage.getDefault(), self.tex2.getTexScale())
        self.card1.setScale(10)
        self.card2.setScale(10)

    def getBeat(self):
        pass
def __init__(self,manager,xml): self.updateTask = None self.sun = base.cam.attachNewNode('sun') loader.loadModel(manager.get('paths').getConfig().find('misc').get('path')+'/sphere').reparentTo(self.sun) self.sun.setScale(0.1) self.sun.setTwoSided(True) self.sun.setColorScale(10.0, 10.0, 10.0, 1.0, 10001) self.sun.setLightOff(1) self.sun.setShaderOff(1) self.sun.setFogOff(1) self.sun.setCompass() self.sun.setBin('background', 10) self.sun.setDepthWrite(False) self.sun.setDepthTest(False) # Workaround an annoyance in Panda. No idea why it's needed. self.sun.node().setBounds(OmniBoundingVolume()) isa = xml.find('isa') inst = xml.find('instance') if isa != None or inst != None: if inst != None: orig = Vec3(float(inst.get('x', '0')), float(inst.get('y', '0')), float(inst.get('z', '0'))) else: level = manager.get(isa.get('source')) orig = Vec3(level.getByIsA(isa.get('name'))[0].getPos(render)) orig.normalize() self.sun.setPos(orig) godrays = xml.find('godrays') if godrays != None: self.vlbuffer = base.win.makeTextureBuffer('volumetric-lighting', base.win.getXSize()/2, base.win.getYSize()/2) self.vlbuffer.setClearColor(Vec4(0, 0, 0, 1)) cam = base.makeCamera(self.vlbuffer) cam.node().setLens(base.camLens) cam.reparentTo(base.cam) initstatenode = NodePath('InitialState') initstatenode.setColorScale(0, 0, 0, 1, 10000) initstatenode.setShaderOff(10000) initstatenode.setLightOff(10000) initstatenode.setMaterialOff(10000) initstatenode.setTransparency(TransparencyAttrib.MBinary, 10000) cam.node().setCameraMask(BitMask32.bit(2)) cam.node().setInitialState(initstatenode.getState()) self.vltexture = self.vlbuffer.getTexture() self.vltexture.setWrapU(Texture.WMClamp) self.vltexture.setWrapV(Texture.WMClamp) card = CardMaker('VolumetricLightingCard') card.setFrameFullscreenQuad() self.finalQuad = render2d.attachNewNode(card.generate()) self.finalQuad.setAttrib(ColorBlendAttrib.make(ColorBlendAttrib.MAdd, ColorBlendAttrib.OIncomingColor, ColorBlendAttrib.OFbufferColor)) self.finalQuad.setShader(Shader.load(posixpath.join(manager.get('paths').getConfig().find('shaders').get('path'), 'filter-vlight.cg'))) self.finalQuad.setShaderInput('src', self.vltexture) self.finalQuad.setShaderInput('vlparams', 32, 0.9/32.0, 0.97, 0.5) # Note - first 32 is now hardcoded into shader for cards that don't support variable sized loops. self.finalQuad.setShaderInput('casterpos', 0.5, 0.5, 0, 0) # Last parameter to vlcolor is the exposure vlcolor = Vec4(float(godrays.get('r', '1')), float(godrays.get('g', '1')), float(godrays.get('b', '1')), 0.04) self.finalQuad.setShaderInput('vlcolor', vlcolor) else: self.finalQuad = None
def renderQuadInto(self, mul=1, div=1, align=1, depthtex=None, colortex=None,
                   auxtex0=None, auxtex1=None):
    """ Creates an offscreen buffer for an intermediate computation.
    Installs a quad into the buffer. Returns the fullscreen quad. The
    size of the buffer is initially equal to the size of the main
    window. The parameters 'mul', 'div', and 'align' can be used to
    adjust that size. """

    texgroup = (depthtex, colortex, auxtex0, auxtex1)
    winx, winy = self.getScaledSize(mul, div, align)
    depthbits = bool(depthtex is not None)
    buffer = self.createBuffer("filter-stage", winx, winy, texgroup, depthbits)
    if buffer is None:
        return None

    cm = CardMaker("filter-stage-quad")
    cm.setFrameFullscreenQuad()
    quad = NodePath(cm.generate())
    quad.setDepthTest(0)
    quad.setDepthWrite(0)
    quad.setColor(1, 0.5, 0.5, 1)

    quadcamnode = Camera("filter-quad-cam")
    lens = OrthographicLens()
    lens.setFilmSize(2, 2)
    lens.setFilmOffset(0, 0)
    lens.setNearFar(-1000, 1000)
    quadcamnode.setLens(lens)
    quadcam = quad.attachNewNode(quadcamnode)

    dr = buffer.makeDisplayRegion((0, 1, 0, 1))
    dr.disableClears()
    dr.setCamera(quadcam)
    dr.setActive(True)
    dr.setScissorEnabled(False)

    # This clear stage is important if the buffer is padded, so that
    # any pixels accidentally sampled in the padded region won't
    # be reading from uninitialised memory.
    buffer.setClearColor((0, 0, 0, 1))
    buffer.setClearColorActive(True)

    self.buffers.append(buffer)
    self.sizes.append((mul, div, align))
    return quad
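# A minimal usage sketch of the FilterManager pattern that renderSceneInto()/
# renderQuadInto() above implement, following the standard Panda3D filter setup.
# The shader file name "postprocess.sha" is only an illustrative placeholder.
from direct.showbase.ShowBase import ShowBase
from direct.filter.FilterManager import FilterManager
from panda3d.core import Texture, Shader

base = ShowBase()
manager = FilterManager(base.win, base.cam)
scene_tex = Texture()
# Render the scene into a texture and get back a fullscreen quad...
quad = manager.renderSceneInto(colortex=scene_tex)
# ...then apply a post-processing shader to that quad.
quad.setShader(Shader.load("postprocess.sha"))
quad.setShaderInput("tex", scene_tex)
base.run()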
def __init__(self, texture_array, scale=0.2, window_size=512, texture_size=512):
    super().__init__()
    self.scale = scale
    self.texture_array = texture_array
    self.texture_dtype = type(self.texture_array.flat[0])
    self.ndims = self.texture_array.ndim

    # Set window title
    self.window_properties = WindowProperties()
    self.window_properties.setSize(window_size, window_size)
    self.window_properties.setTitle("FullFieldDrift")
    ShowBaseGlobal.base.win.requestProperties(self.window_properties)

    # Create texture stage
    self.texture = Texture("Stimulus")
    # Select texture component type (e.g., uint8 or whatever)
    if self.texture_dtype == np.uint8:
        self.texture_component_type = Texture.T_unsigned_byte
    elif self.texture_dtype == np.uint16:
        self.texture_component_type = Texture.T_unsigned_short
    # Select texture format (color or b/w etc)
    if self.ndims == 2:
        self.texture_format = Texture.F_luminance  # grayscale
        self.texture.setup2dTexture(texture_size, texture_size,
                                    self.texture_component_type, self.texture_format)
        self.texture.setRamImageAs(self.texture_array, "L")
    elif self.ndims == 3:
        self.texture_format = Texture.F_rgb8
        self.texture.setup2dTexture(texture_size, texture_size,
                                    self.texture_component_type, self.texture_format)
        self.texture.setRamImageAs(self.texture_array, "RGB")
    else:
        raise ValueError("Texture needs to be 2d or 3d")
    self.textureStage = TextureStage("Stimulus")

    # Create scenegraph
    cm = CardMaker('card')
    cm.setFrameFullscreenQuad()
    self.card = self.aspect2d.attachNewNode(cm.generate())
    self.card.setTexture(self.textureStage, self.texture)  # ts, tx
    # Set the scale on the card (note this is different from scaling the texture)
    self.card.setScale(np.sqrt(2))

    if self.scale != 0:
        # Add task to taskmgr to translate texture
        self.taskMgr.add(self.scaleTextureTask, "scaleTextureTask")
def renderSceneInto(self, depthtex=None, colortex=None, auxtex=None, auxbits=0, textures=None): if textures: colortex = textures.get('color', None) depthtex = textures.get('depth', None) auxtex = textures.get('aux', None) auxtex0 = textures.get('aux0', auxtex) auxtex1 = textures.get('aux1', None) else: auxtex0 = auxtex auxtex1 = None if colortex == None: colortex = Texture('filter-base-color') colortex.setWrapU(Texture.WMClamp) colortex.setWrapV(Texture.WMClamp) texgroup = (depthtex, colortex, auxtex0, auxtex1) winx, winy = self.getScaledSize(1, 1, 1) buffer = self.createBuffer('filter-base', winx, winy, texgroup) if buffer == None: return cm = CardMaker('filter-base-quad') cm.setFrameFullscreenQuad() quad = NodePath(cm.generate()) quad.setDepthTest(0) quad.setDepthWrite(0) quad.setTexture(colortex) quad.setColor(1, 0.5, 0.5, 1) cs = NodePath('dummy') cs.setState(self.camstate) if auxbits: cs.setAttrib(AuxBitplaneAttrib.make(auxbits)) self.camera.node().setInitialState(cs.getState()) quadcamnode = Camera('filter-quad-cam') lens = OrthographicLens() lens.setFilmSize(2, 2) lens.setFilmOffset(0, 0) lens.setNearFar(-1000, 1000) quadcamnode.setLens(lens) quadcam = quad.attachNewNode(quadcamnode) self.region.setCamera(quadcam) self.setStackedClears(buffer, self.rclears, self.wclears) if auxtex0: buffer.setClearActive(GraphicsOutput.RTPAuxRgba0, 1) buffer.setClearValue(GraphicsOutput.RTPAuxRgba0, (0.5, 0.5, 1.0, 0.0)) if auxtex1: buffer.setClearActive(GraphicsOutput.RTPAuxRgba1, 1) self.region.disableClears() if self.isFullscreen(): self.win.disableClears() dr = buffer.makeDisplayRegion() dr.disableClears() dr.setCamera(self.camera) dr.setActive(1) self.buffers.append(buffer) self.sizes.append((1, 1, 1)) return quad
def _activateSunflare(self):
    self.deactivateHighlight()
    flare_tex = base.loader.loadTexture("models/billboards/sunflare.png")
    cm = CardMaker('flare')
    cm.setFrameFullscreenQuad()  # so that the center acts as the origin (from -1 to 1)
    self.flare_path = self.point_path.attachNewNode(cm.generate())
    self.flare_path.setTransparency(TransparencyAttrib.MAlpha)
    self.flare_path.setTexture(self.flare_ts, flare_tex)
    self.flare_path.setColor(Vec4(1.0, 1.0, 1.0, 1))
    self.flare_path.setScale(50)
    self.flare_path.setPos(Vec3(0, 0, 0))
    self.flare_path.setBillboardPointEye()
    self.flare_path.setLightOff()
def __init__(self, tex, angle=0, velocity=0.1, fps=30,
             window_name="ShowTexMoving", window_size=None, profile_on=False):
    super().__init__()
    self.tex = tex
    if window_size is None:
        self.window_size = self.tex.texture_size
    else:
        self.window_size = window_size
    self.angle = angle
    self.velocity = velocity
    self.texture_stage = TextureStage("texture_stage")
    self.window_name = window_name

    # Set frame rate (fps)
    ShowBaseGlobal.globalClock.setMode(ClockObject.MLimited)
    ShowBaseGlobal.globalClock.setFrameRate(fps)

    # Set up profiling if desired
    if profile_on:
        PStatClient.connect()  # this will only work if pstats is running: see readme
        ShowBaseGlobal.base.setFrameRateMeter(True)  # Show frame rate
        self.center_indicator = None

    # Window properties set up
    self.window_properties = WindowProperties()
    self.window_properties.setSize(self.window_size, self.window_size)
    self.window_properties.setTitle(window_name)
    ShowBaseGlobal.base.win.requestProperties(self.window_properties)

    # Create scenegraph, attach stimulus to card.
    cm = CardMaker('card')
    cm.setFrameFullscreenQuad()
    self.card = self.aspect2d.attachNewNode(cm.generate())
    # Scale is so it can handle arbitrary rotations and shifts in binocular case
    self.card.setScale(np.sqrt(8))
    self.card.setColor((1, 1, 1, 1))  # makes it bright when bright (default combination with card is add)
    self.card.setTexture(self.texture_stage, self.tex.texture)
    self.card.setTexRotate(self.texture_stage, self.angle)

    if self.velocity != 0:
        # Add task to taskmgr to translate texture
        self.taskMgr.add(self.moveTextureTask, "moveTextureTask")
def __init__(self, tex_classes, stim_params, window_size=512,
             profile_on=False, fps=30, save_path=None):
    super().__init__()
    self.tex_classes = tex_classes
    self.current_tex_num = 0
    self.stim_params = stim_params
    self.window_size = window_size
    self.stimulus_initialized = False  # to handle the case from -1 (uninitialized) to 0 (first stim)
    self.fps = fps
    self.save_path = save_path
    if self.save_path:
        self.filestream = utils.save_initialize(save_path, tex_classes, stim_params)
    else:
        self.filestream = None

    # Window properties
    self.windowProps = WindowProperties()
    self.windowProps.setSize(self.window_size, self.window_size)
    self.set_title("Initializing")

    # Create scenegraph
    cm = CardMaker('card')
    cm.setFrameFullscreenQuad()
    self.card = self.aspect2d.attachNewNode(cm.generate())
    self.card.setScale(np.sqrt(8))
    self.texture_stage = TextureStage("texture_stage")

    # Set frame rate
    ShowBaseGlobal.globalClock.setMode(ClockObject.MLimited)
    ShowBaseGlobal.globalClock.setFrameRate(self.fps)  # can lock this at whatever

    if profile_on:
        PStatClient.connect()
        ShowBaseGlobal.base.setFrameRateMeter(True)

    # Set initial texture
    self.set_stimulus(str(self.current_tex_num))

    # Set up event handlers and tasks
    self.accept('0', self.set_stimulus, ['0'])  # event handler
    self.accept('1', self.set_stimulus, ['1'])
    self.taskMgr.add(self.move_texture_task, "move_texture")  # task
def __init__(self, stim):
    super().__init__()
    self.stim = stim
    self.bgcolor = (1, 1, 1, 1)

    # Window properties
    self.windowProps = WindowProperties()
    self.windowProps.setSize(512, 512)

    # Create scenegraph
    cm = CardMaker('card')
    cm.setFrameFullscreenQuad()
    self.card = self.aspect2d.attachNewNode(cm.generate())
    self.card.setScale(np.sqrt(8))
    self.card.setColor(self.bgcolor)  # make this an add mode
    self.card.setTexture(self.stim.texture_stage, self.stim.texture)
def _makeFullscreenQuad(self):
    cm = CardMaker("BufferQuad")
    cm.setFrameFullscreenQuad()
    quad = NodePath(cm.generate())

    quad.setDepthTest(0)
    quad.setDepthWrite(0)
    quad.setAttrib(TransparencyAttrib.make(TransparencyAttrib.MNone))
    quad.setColor(Vec4(1, 0.5, 0.5, 1))

    # No culling check
    quad.node().setFinal(True)
    quad.node().setBounds(OmniBoundingVolume())
    quad.setBin("unsorted", 10)
    return quad
def _makeFullscreenQuad(self):
    """ Create a quad which fills the full screen """
    cm = CardMaker("BufferQuad")
    cm.setFrameFullscreenQuad()
    quad = NodePath(cm.generate())

    quad.setDepthTest(0)
    quad.setDepthWrite(0)
    quad.setAttrib(TransparencyAttrib.make(TransparencyAttrib.MNone), 1000)
    quad.setColor(Vec4(1, 0.5, 0.5, 1))

    # No culling check
    quad.node().setFinal(True)
    quad.node().setBounds(OmniBoundingVolume())
    quad.setBin("unsorted", 10)
    return quad
def _makeFullscreenQuad(self):
    """ Create a quad which fills the whole screen """
    cm = CardMaker("BufferQuad")
    cm.setFrameFullscreenQuad()
    quad = NodePath(cm.generate())

    quad.setDepthTest(False)
    quad.setDepthWrite(False)
    quad.setAttrib(TransparencyAttrib.make(TransparencyAttrib.MNone), 1000)
    quad.setColor(Vec4(1, 0.5, 0.5, 1))

    # Disable culling
    quad.node().setFinal(True)
    quad.node().setBounds(OmniBoundingVolume())
    quad.setBin("unsorted", 10)
    return quad
def activateHighlight(self, thin):
    if thin:
        flare_tex = base.loader.loadTexture("models/billboards/thin_ring.png")
    else:
        flare_tex = base.loader.loadTexture("models/billboards/ring.png")
    cm = CardMaker('quad')
    cm.setFrameFullscreenQuad()  # so that the center acts as the origin (from -1 to 1)
    self.quad_path = self.point_path.attachNewNode(cm.generate())
    self.quad_path.setTransparency(TransparencyAttrib.MAlpha)
    self.quad_path.setTexture(flare_tex)
    if thin:
        self.quad_path.setColor(Vec4(1, 1, 1, 1))
    else:
        self.quad_path.setColor(Vec4(0.2, 0.3, 1.0, 1))
    self.quad_path.setScale(5)
    self.quad_path.setPos(Vec3(0, 0, 0))
    self.quad_path.setBillboardPointEye()
def create(self, window, settings=None, transparent=True):
    """
    Creates the browser and returns a NodePath which can be used to display the browser

    :type window: libpanda.GraphicsWindow
    :type settings: dict
    :type transparent: bool
    :return: The new nodepath
    """
    if not settings:
        settings = {}
    windowInfo = cefpython.WindowInfo()
    if window is not None:
        windowHandle = window.getWindowHandle().getIntHandle()
        windowInfo.SetAsOffscreen(windowHandle)
    else:
        windowInfo.SetAsChild(0)
    windowInfo.SetTransparentPainting(transparent)

    if self.texture is None:
        if window is None:
            raise RuntimeError("Texture is not initialized and no window was given!")
        else:
            self.setSize(window.getXSize(), window.getYSize())

    self.browser = cefpython.CreateBrowserSync(windowInfo, settings, self.initialURL)
    self.browser.SendFocusEvent(True)
    self.browser.SetClientHandler(ClientHandler(self.browser, self.texture))
    self.browser.WasResized()

    self.jsBindings = cefpython.JavascriptBindings(bindToFrames=False, bindToPopups=True)
    self.browser.SetJavascriptBindings(self.jsBindings)

    # Now create the node
    cardMaker = CardMaker("browser2d")
    cardMaker.setFrameFullscreenQuad()
    node = cardMaker.generate()
    nodePath = NodePath(node)
    nodePath.setTexture(self.texture)
    return nodePath
def playVideo(self):
    self.t.fadeIn(0)
    self.pickAToon.removeGui()
    self.movieTex = MovieTexture("tutorial")
    assert self.movieTex.read("tutorial.avi"), "Failed to load tutorial video"
    cm = CardMaker("tutorialCard")
    cm.setFrameFullscreenQuad()
    self.card = NodePath(cm.generate())
    self.card.reparentTo(render2d)
    self.card.setTexture(self.movieTex)
    self.card.setTexScale(TextureStage.getDefault(), self.movieTex.getTexScale())
    self.movieSound = loader.loadSfx("tutorial.avi")
    self.movieTex.synchronizeTo(self.movieSound)
    self.movieSound.play()
    taskMgr.add(self.checkMovieStatus, "checkMovieStatus")
def __init__(self, texture_cube, window_size=512, texture_size=512):
    super().__init__()
    self.num_slices = texture_cube.shape[0]
    self.texture_cube = texture_cube
    self.cube_ind = 0

    # Create texture stage
    self.texture = Texture("Stimulus")
    self.texture.setup2dTexture(texture_size, texture_size,
                                Texture.T_unsigned_byte, Texture.F_luminance)
    self.texture.setRamImageAs(self.texture_cube[0, :, :], "L")
    self.textureStage = TextureStage("Stimulus")

    # Create scenegraph
    cm = CardMaker('card1')
    cm.setFrameFullscreenQuad()
    self.card1 = self.aspect2d.attachNewNode(cm.generate())
    self.card1.setTexture(self.textureStage, self.texture)  # ts, tx

    ShowBaseGlobal.base.setFrameRateMeter(True)
    self.taskMgr.add(self.setTextureTask, "setTextureTask")
def create(self):
    self.target = RenderTarget("ApplyLights")
    self.target.addColorTexture()
    self.target.setColorBits(16)
    self.target.prepareOffscreenBuffer()
    self.target.setClearColor(True)
    self.target.getQuad().removeNode()
    self.target.getNode().setAttrib(
        TransparencyAttrib.make(TransparencyAttrib.MNone), 1000)
    self.target.getNode().setAttrib(
        ColorBlendAttrib.make(ColorBlendAttrib.MAdd), 1000)

    self.quads = {}
    numInstances = self.tileCount.x * self.tileCount.y

    for lightType in ["DirectionalLightShadow"]:
        cm = CardMaker("BufferQuad-" + lightType)
        cm.setFrameFullscreenQuad()
        quad = NodePath(cm.generate())
        quad.setDepthTest(0)
        quad.setDepthWrite(0)
        quad.setAttrib(TransparencyAttrib.make(TransparencyAttrib.MNone), 1000)
        quad.setColor(Vec4(1, 0.5, 0.5, 1))

        # Disable culling
        quad.node().setFinal(True)
        quad.node().setBounds(OmniBoundingVolume())
        quad.setBin("unsorted", 10)
        quad.setInstanceCount(numInstances)
        quad.reparentTo(self.target.getNode())
        self.quads[lightType] = quad

    self.target.getNode().setShaderInput("tileCount", self.tileCount)
def _updateDebugNode(self):
    """ Internal method to generate new debug geometry. """
    debugNode = NodePath("PointLightDebugNode")
    debugNode.setPos(self.position)

    # Create the inner image
    cm = CardMaker("PointLightDebug")
    cm.setFrameFullscreenQuad()
    innerNode = NodePath(cm.generate())
    innerNode.setTexture(Globals.loader.loadTexture("Data/GUI/Visualization/PointLight.png"))
    innerNode.setBillboardPointEye()
    innerNode.reparentTo(debugNode)

    # Create the outer lines
    lineNode = debugNode.attachNewNode("lines")

    # Generate outer circles
    points1 = []
    points2 = []
    points3 = []
    for i in range(self.visualizationNumSteps + 1):
        angle = float(i) / float(self.visualizationNumSteps) * math.pi * 2.0
        points1.append(Vec3(0, math.sin(angle), math.cos(angle)))
        points2.append(Vec3(math.sin(angle), math.cos(angle), 0))
        points3.append(Vec3(math.sin(angle), 0, math.cos(angle)))

    self._createDebugLine(points1, False).reparentTo(lineNode)
    self._createDebugLine(points2, False).reparentTo(lineNode)
    self._createDebugLine(points3, False).reparentTo(lineNode)
    lineNode.setScale(self.radius)

    # Remove the old debug node
    self.debugNode.node().removeAllChildren()

    # Attach the new debug node
    debugNode.reparentTo(self.debugNode)
def attackAnimation(self, time, task):
    try:
        if self.shockwave_path != None:
            self.shock_scale += 0.1
            self.shockwave_path.setScale(self.shock_scale)
    except AttributeError:
        cm = CardMaker('quad')
        cm.setFrameFullscreenQuad()
        tex = loader.loadTexture("models/billboards/attack.png")
        self.shockwave_path = self.model_path.attachNewNode(cm.generate())
        self.shockwave_path.setTexture(tex)
        self.shockwave_path.setTransparency(TransparencyAttrib.MAlpha)
        self.shock_scale = 0.2
        self.shockwave_path.setScale(self.shock_scale)
        self.shockwave_path.setPos(Vec3(0, 0, -0.5))
        self.shockwave_path.setP(-90)
        self.shockwave_path.setColor(Vec4(1.0, 0.65, 0, 0.6))
    if task.time > time:
        self.shockwave_path.removeNode()
        self.shockwave_path = None
        del self.shock_scale
        del self.shockwave_path
        return task.done
    return task.cont
def _setupTextureBuffer(self):
    if self.buffer:
        self.card.remove()
        self.camera2d.remove()
        base.graphicsEngine.removeWindow(self.buffer)

    self.buffer = base.win.makeTextureBuffer(
        "textureBuffer-%s-%s" % (self.__id, self.getName()),
        base.win.getXSize(), base.win.getYSize())
    self.buffer.setSort(100)
    self.buffer.setOneShot(True)
    self.buffer.setActive(True)
    self.buffer.getTexture().setOrigFileSize(base.win.getXSize(), base.win.getYSize())

    self.camera2d = base.makeCamera2d(self.buffer)
    self.camera2d.reparentTo(self.fakeRender2d)

    cm = CardMaker('card-%s-%s' % (self.__id, self.getName()))
    cm.setFrameFullscreenQuad()
    cm.setUvRange(self.buffer.getTexture())
    self.card = base.render2d.attachNewNode(cm.generate())
    self.card.setTransparency(TransparencyAttrib.MAlpha)
    self.card.setTexture(self.buffer.getTexture())
def __init__(self, texture_array, mask_position, mask_scale, debug): super().__init__() self.debug = debug if self.debug >= 1: import matplotlib.pyplot as plt texture_size = 1024 self.mask_scale = mask_scale self.texture_array = texture_array self.mask_position_ndc = mask_position self.mask_position_uv = (self.ndc2uv(self.mask_position_ndc[0]), self.ndc2uv(self.mask_position_ndc[1])) #CREATE MASK (zeros on left, 255s on right) self.right_mask = 255*np.ones((texture_size,texture_size), dtype=np.uint8) #should set to 0/1 not 255? self.right_mask[:, texture_size//2: ] = 0 self.right_mask[texture_size//2 - 400:texture_size//2-300, -40:] = 120 #gray notch in RHS of zeros self.right_mask[texture_size//2 - 50:texture_size//2+50, texture_size//2: texture_size//2+80] = 180 #light notch in LHS of zeros if self.debug >= 2: plt.imshow(self.right_mask, cmap = 'gray') plt.show() #CREATE TEXTURE STAGES #Grating texture self.grating_texture = Texture("Grating") #T_unsigned_byte self.grating_texture.setup2dTexture(texture_size, texture_size, Texture.T_unsigned_byte, Texture.F_luminance) self.grating_texture.setRamImage(self.texture_array) self.left_texture_stage = TextureStage('grating') #Mask self.right_mask_texture = Texture("right_mask") self.right_mask_texture.setup2dTexture(texture_size, texture_size, Texture.T_unsigned_byte, Texture.F_luminance) self.right_mask_texture.setRamImage(self.right_mask) self.right_mask_stage = TextureStage('right_mask') #Multiply the texture stages together self.right_mask_stage.setCombineRgb(TextureStage.CMModulate, TextureStage.CSTexture, TextureStage.COSrcColor, TextureStage.CSPrevious, TextureStage.COSrcColor) #CREATE SCENE GRAPH cm = CardMaker('card') cm.setFrameFullscreenQuad() self.left_card = self.aspect2d.attachNewNode(cm.generate()) self.left_card.setTexture(self.left_texture_stage, self.grating_texture) self.left_card.setTexture(self.right_mask_stage, self.right_mask_texture) #CREATE TASKS TO MOVE MASK, AND PRINT OUT INFO ABOUT MASK ShowBaseGlobal.base.taskMgr.add(self.update) if self.debug >= 1: display_period = 2 #seconds ShowBaseGlobal.base.taskMgr.doMethodLater(display_period, self.print_updates, 'mask_display') #SCREEN TEXT FOR REFERENCE #Text mark the origin with green o self.title = OnscreenText("o", style = 1, fg = (1,1,0,1), bg = (0,1,0,0.8), pos = (0,0), scale = 0.05) #Text mark the desired location with white x self.title = OnscreenText("x", style = 1, fg = (1,1,1,1), bg = (0,0,0,0.5), pos = self.mask_position_ndc, scale = 0.08)
def renderSceneInto(self, depthtex=None, colortex=None, auxtex=None, auxbits=0, textures=None, fbprops=None): """ Causes the scene to be rendered into the supplied textures instead of into the original window. Puts a fullscreen quad into the original window to show the render-to-texture results. Returns the quad. Normally, the caller would then apply a shader to the quad. To elaborate on how this all works: * An offscreen buffer is created. It is set up to mimic the original display region - it is the same size, uses the same clear colors, and contains a DisplayRegion that uses the original camera. * A fullscreen quad and an orthographic camera to render that quad are both created. The original camera is removed from the original window, and in its place, the orthographic quad-camera is installed. * The fullscreen quad is textured with the data from the offscreen buffer. A shader is applied that tints the results pink. * Automatic shader generation NOT enabled. If you have a filter that depends on a render target from the auto-shader, you either need to set an auto-shader attrib on the main camera or scene, or, you need to provide these outputs in your own shader. * All clears are disabled on the original display region. If the display region fills the whole window, then clears are disabled on the original window as well. It is assumed that rendering the full-screen quad eliminates the need to do clears. Hence, the original window which used to contain the actual scene, now contains a pink-tinted quad with a texture of the scene. It is assumed that the user will replace the shader on the quad with a more interesting filter. """ if (textures): colortex = textures.get("color", None) depthtex = textures.get("depth", None) auxtex = textures.get("aux", None) auxtex0 = textures.get("aux0", auxtex) auxtex1 = textures.get("aux1", None) else: auxtex0 = auxtex auxtex1 = None if (colortex == None): colortex = Texture("filter-base-color") colortex.setWrapU(Texture.WMClamp) colortex.setWrapV(Texture.WMClamp) texgroup = (depthtex, colortex, auxtex0, auxtex1) # Choose the size of the offscreen buffer. (winx, winy) = self.getScaledSize(1, 1, 1) if fbprops is not None: buffer = self.createBuffer("filter-base", winx, winy, texgroup, fbprops=fbprops) else: buffer = self.createBuffer("filter-base", winx, winy, texgroup) if (buffer == None): return None cm = CardMaker("filter-base-quad") cm.setFrameFullscreenQuad() quad = NodePath(cm.generate()) quad.setDepthTest(0) quad.setDepthWrite(0) quad.setTexture(colortex) quad.setColor(1, 0.5, 0.5, 1) cs = NodePath("dummy") cs.setState(self.camstate) # Do we really need to turn on the Shader Generator? #cs.setShaderAuto() if (auxbits): cs.setAttrib(AuxBitplaneAttrib.make(auxbits)) self.camera.node().setInitialState(cs.getState()) quadcamnode = Camera("filter-quad-cam") lens = OrthographicLens() lens.setFilmSize(2, 2) lens.setFilmOffset(0, 0) lens.setNearFar(-1000, 1000) quadcamnode.setLens(lens) quadcam = quad.attachNewNode(quadcamnode) self.region.setCamera(quadcam) self.setStackedClears(buffer, self.rclears, self.wclears) if (auxtex0): buffer.setClearActive(GraphicsOutput.RTPAuxRgba0, 1) buffer.setClearValue(GraphicsOutput.RTPAuxRgba0, (0.5, 0.5, 1.0, 0.0)) if (auxtex1): buffer.setClearActive(GraphicsOutput.RTPAuxRgba1, 1) self.region.disableClears() if (self.isFullscreen()): self.win.disableClears() dr = buffer.makeDisplayRegion() dr.disableClears() dr.setCamera(self.camera) dr.setActive(1) self.buffers.append(buffer) self.sizes.append((1, 1, 1)) return quad
def __init__(self, parent=None, pos=(0, 0, 0), fading=False,
             fading_position_offset=(0, 0, 0), fading_duration=0.5,
             backgroundImage=None, backgroundColor=None, enableMask=False,
             noFocus=False, shownFunc=None, hiddenFunc=None,
             funcExtraArgs=[], sort=None):
    '''If fading is enabled, a fading effect is applied on show() and hide().

    Important attributes:
    enableMask: creates a large transparent plane (DirectButton) off screen so
        the directGui below it won't be clicked. (Because of this trick we can't
        accept mouse events directly; 'mouse3' is handled via self.__maskClick.)
    noFocus: if true, SogalBase does not need to manage this form's focus state
        (it will not affect other SogalForms' focus state).
    '''
    self.__fading = fading
    self.__fadingPositionOffset = fading_position_offset
    self.__fadingDuration = fading_duration
    self.__originPos = pos
    self.__currentInterval = None
    self.__maskEnabled = enableMask
    self.__noFocus = noFocus
    self.__shownFunc = shownFunc
    self.__hiddenFunc = hiddenFunc
    self.__eventExtraArgs = funcExtraArgs

    self.__mask = None
    if self.__maskEnabled:
        self.__mask = DialogMask()
        # self.__mask = DirectButton(parent=aspect2d, frameColor=(1, 1, 1, 0.1),
        #                            relief=DGG.FLAT, commandButtons=[DGG.RMB],
        #                            command=self.__maskClick)
        self.__mask.hide()

    self.__backgroundImage = backgroundImage
    self.__backgroundColor = backgroundColor
    self.__bgPath = None
    self.__imagePath = None
    self.__hidden = True

    NodePath.__init__(self, self.__class__.__name__)
    parent = parent or aspect2d
    if sort:
        self.reparentTo(parent, sort=sort)
    else:
        self.reparentTo(parent)
    self.setPos(pos)

    if self.__backgroundColor:
        self.__bgPath = NodePath('bgPath')
        self.__bgPath.setTransparency(TransparencyAttrib.MAlpha)
        cm = CardMaker('cm')
        cm.setFrameFullscreenQuad()
        cm.setColor(self.__backgroundColor)
        self.__bgPath.attachNewNode(cm.generate())
        self.__bgPath.reparentTo(aspect2d, self.getSort())
        self.__bgPath.hide()

    # TODO: backgroundImage

    self.setTransparency(TransparencyAttrib.MAlpha)
    self.accept('window-event', self.windowResize)
    self.windowResize(None)
    NodePath.hide(self)
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

from direct.directnotify.DirectNotify import DirectNotify
from panda3d.core import CardMaker, PNMImage, Texture, TransparencyAttrib
from panda3d.core import loadPrcFile, loadPrcFileData
from direct.showbase.ShowBase import ShowBase

loadPrcFileData('', 'frame-rate-meter-scale 0.035')
loadPrcFileData('', 'frame-rate-meter-side-margin 0.1')
loadPrcFileData('', 'show-frame-rate-meter 1')
loadPrcFileData('', 'window-title Panda3d bug test')
loadPrcFileData('', "sync-video 0")
loadPrcFileData('', 'task-timer-verbose 1')
loadPrcFileData('', 'pstats-tasks 1')
loadPrcFileData('', 'want-pstats 1')
loadPrcFileData("", "textures-power-2 none")

base = ShowBase()

cardMaker = CardMaker("browser2d")
cardMaker.setFrameFullscreenQuad()
node = cardMaker.generate()
# Attach the fullscreen card to render2d of the ShowBase instance created above.
node_path = base.render2d.attachNewNode(node)
node_path.setTransparency(TransparencyAttrib.MAlpha)

base.run()
def __init__(self, texture_array, stim_angles = (0, 0), mask_angle = 0, position = (0, 0), velocity = 0, band_radius = 3, window_size = 512, texture_size = 512, bgcolor = (0, 0, 0, 1)): super().__init__() self.mask_position_ndc = position self.mask_position_uv = (ndc2uv(self.mask_position_ndc[0]), ndc2uv(self.mask_position_ndc[1])) self.scale = np.sqrt(8) self.texture_array = texture_array self.texture_dtype = type(self.texture_array.flat[0]) self.ndims = self.texture_array.ndim self.left_texture_angle = stim_angles[0] self.right_texture_angle = stim_angles[1] self.velocity = velocity self.mask_angle = mask_angle #this will change fairly frequently #Set window title and size self.window_properties = WindowProperties() self.window_properties.setSize(window_size, window_size) self.window_properties.setTitle("BinocularStatic") ShowBaseGlobal.base.win.requestProperties(self.window_properties) #base is a panda3d global #CREATE MASKS (right mask for left tex, left mask for right tex) self.right_mask = 255*np.ones((texture_size,texture_size), dtype=np.uint8) self.right_mask[:, texture_size//2 - band_radius :] = 0 self.left_mask = 255*np.ones((texture_size,texture_size), dtype=np.uint8) self.left_mask[:, : texture_size//2 + band_radius] = 0 if False: #set to True to debug import matplotlib.pyplot as plt plt.imshow(self.left_mask, cmap = 'gray') plt.show() #CREATE TEXTURE STAGES #Grating texture self.grating_texture = Texture("Grating") #T_unsigned_byte self.grating_texture.setup2dTexture(texture_size, texture_size, Texture.T_unsigned_byte, Texture.F_luminance) self.grating_texture.setRamImage(self.texture_array) #TEXTURE STAGES FOR RIGHT CARD self.right_texture_stage = TextureStage('right_texture') #Mask self.left_mask_texture = Texture("left_mask") self.left_mask_texture.setup2dTexture(texture_size, texture_size, Texture.T_unsigned_byte, Texture.F_luminance) self.left_mask_texture.setRamImage(self.left_mask) self.left_mask_stage = TextureStage('left_mask') #Multiply the texture stages together self.left_mask_stage.setCombineRgb(TextureStage.CMModulate, TextureStage.CSTexture, TextureStage.COSrcColor, TextureStage.CSPrevious, TextureStage.COSrcColor) #CREATE CARDS/SCENEGRAPH cm = CardMaker('stimcard') cm.setFrameFullscreenQuad() self.right_card = self.aspect2d.attachNewNode(cm.generate()) self.setBackgroundColor((0,0,0,1)) #set above self.right_card.setAttrib(ColorBlendAttrib.make(ColorBlendAttrib.M_add)) #SET TEXTURE STAGES self.right_card.setTexture(self.right_texture_stage, self.grating_texture) self.right_card.setTexture(self.left_mask_stage, self.left_mask_texture) #SET static transforms #Left texture mask self.mask_transform = self.trs_transform() self.right_card.setTexTransform(self.left_mask_stage, self.mask_transform) #Right texture self.right_card.setTexScale(self.right_texture_stage, 1/self.scale) self.right_card.setTexRotate(self.right_texture_stage, self.right_texture_angle) #Set dynamic transforms if self.velocity != 0: self.taskMgr.add(self.texture_update, "moveTextureTask") self.title = OnscreenText("x", style = 1, fg = (1,1,1,1), bg = (0,0,0,.8), pos = self.mask_position_ndc, scale = 0.05)
class FaceDetection(DirectObject):

    def __init__(self):
        # Show a text label on screen using Panda3D's 2D interface. It will be
        # updated with other data later, which is why the reference is kept.
        self.title = OnscreenText(text="prueba 1", style=1, fg=(0, 0, 0, 1),
                                  pos=(0.8, -0.95), scale=.07)
        # Load the HAAR classifier configuration, in this case for detecting
        # faces in a frontal position.
        self.cascade = cv.Load("haarcascades\\haarcascade_frontalface_alt.xml")
        # By default use the camera as the image source, and do not save to a file.
        self.cameraHelper = CameraHelper(True, False, "d:\\face-detection.avi")
        # Create a texture to use as the background, where the video frames will
        # be shown. CardMaker is really a rectangular polygon, which is useful to
        # attach to the 2D renderer, since we can make it fill the whole screen
        # and show our images as the background.
        self.cardMaker = CardMaker("My Fullscreen Card")
        self.cardMaker.setFrameFullscreenQuad()
        # Attach the rectangle to the render2dp renderer. Besides render2d there
        # is an additional one that is generally used when a background (static
        # or video) needs to be shown. The standard render2d, by contrast, is
        # used for information that must always be visible to the user.
        self.card = NodePath(self.cardMaker.generate())
        self.card.reparentTo(render2dp)
        # Give it a low sort value so that objects drawn later always appear on top of it.
        base.cam2dp.node().getDisplayRegion(0).setSort(-20)
        # Create a rectangle that will be used to show the image overlaid on the face.
        self.faceMaker = CardMaker("Face Texture")
        self.faceImage = NodePath(self.faceMaker.generate())
        self.faceImage.setTexture(loader.loadTexture("margarita-glass3.png"))
        self.faceImage.reparentTo(aspect2d)
        # self.faceImage.reparentTo(render2d)
        self.faceImage.setTransparency(TransparencyAttrib.MAlpha)
        self.setup_handlers()
        self.setup_lights()
        self.count = 0
        # Set a black background
        # base.setBackgroundColor(0, 0, 0)
        # Load the teapot model (included with Panda3D) and place it in the world
        # self.teapot = loader.loadModel('models/teapot')
        # self.teapot.reparentTo(base.render)
        # self.teapot.setPos(-10, -10, -10)
        # Put the camera at the origin and make it look at the teapot
        # camera.setPos(0, 0, 0)
        # camera.lookAt(-10, -10, -10)
        taskMgr.add(self.onFrameChanged, "frameChange")

    def exitApplication(self):
        self.cameraHelper = None
        sys.exit()

    def setup_handlers(self):
        # Disable the default mouse handling.
        base.disableMouse()
        # Add a handler that exits the program when ESC is pressed.
        self.accept("escape", self.exitApplication)

    def setup_lights(self):
        # Create an ambient light so that objects have a base level of illumination.
        self.ambientLight = AmbientLight("ambientLight")
        self.ambientLight.setColor(Vec4(.8, .8, .75, 1))
        render.setLight(render.attachNewNode(self.ambientLight))
        # Create a directional light to bring out the depth of the objects.
        self.directionalLight = DirectionalLight("directionalLight")
        self.directionalLight.setDirection(Vec3(0, 0, -2.5))
        self.directionalLight.setColor(Vec4(0.9, 0.8, 0.9, 1))
        render.setLight(render.attachNewNode(self.directionalLight))

    def onFrameChanged(self, task):
        # Capture a frame from the configured source.
        image = self.cameraHelper.captureFrame()
        # If there are no more images to show (e.g. a recorded video ended), quit.
        if image == None:
            return Task.done
        nuevaImagen = self.detectFaces(image)
        # Create a texture from the image captured by OpenCV.
        texture = self.createTexture(image)
        # If an error occurred, keep using the previous image.
        if texture != None:
            self.oldTexture = texture
        # Show the captured frame as the background.
        self.card.setTexture(self.oldTexture)
        return Task.cont

    def createTexture(self, image):
        (width, height) = cv.GetSize(image)
        # Panda3D interprets images the opposite way to OpenCV (vertically
        # flipped), so this has to be taken into account.
        cv.Flip(image, image, 0)
        # OpenCV can convert its internal image representation to an
        # uncompressed format that could be written to a file. Panda3D can use
        # that data directly as a texture.
        imageString = image.tostring()
        # PTAUchar is a class that wraps a block of data for use by Panda3D
        # components (particularly useful for textures).
        imagePointer = PTAUchar.emptyArray(0)
        imagePointer.setData(imageString)
        try:
            self.count += 1
            # Create a new texture object.
            texture = Texture('image' + str(self.count))
            # Set the texture properties such as size, data type and color
            # model. The OpenCV images are handled as RGB, 8 bits (one integer)
            # per channel.
            texture.setup2dTexture(width, height, Texture.TUnsignedByte, Texture.FRgb)
            # Use the data block obtained above as the data source for the texture.
            texture.setRamImage(CPTAUchar(imagePointer), MovieTexture.CMOff)
        except:
            texture = None
        return texture

    def detectFaces(self, image):
        # Minimum size of the faces to detect.
        minFaceSize = (20, 20)
        # Scale applied to the image to reduce processing time.
        imageScale = 2
        haarScale = 1.2
        minNeighbors = 2
        haarFlags = 0
        (screenWidth, screenHeight) = cv.GetSize(image)
        aspectRatio = float(screenWidth) / screenHeight
        # Create temporary images for grayscale conversion and downscaling.
        grayImage = cv.CreateImage((image.width, image.height), 8, 1)
        smallImage = cv.CreateImage((cv.Round(image.width / imageScale),
                                     cv.Round(image.height / imageScale)), 8, 1)
        # Convert the captured image to grayscale.
        cv.CvtColor(image, grayImage, cv.CV_BGR2GRAY)
        # Create a reduced-size version to cut processing time.
        cv.Resize(grayImage, smallImage, cv.CV_INTER_LINEAR)
        # Equalize the image to improve contrast.
        cv.EqualizeHist(smallImage, smallImage)
        # Detect the faces in the image and measure how long it takes.
        t = cv.GetTickCount()
        faces = cv.HaarDetectObjects(smallImage, self.cascade, cv.CreateMemStorage(0),
                                     haarScale, minNeighbors, haarFlags, minFaceSize)
        t = cv.GetTickCount() - t
        # Create a new image where the rectangle is drawn without showing the background.
        nuevaImagen = cv.CreateImage((image.width, image.height), 8, 3)
        pt1 = (int(0 * imageScale), int(0 * imageScale))
        pt2 = (int((image.width) * imageScale), int((image.height) * imageScale))
        cv.Rectangle(nuevaImagen, pt1, pt2, cv.RGB(118, 152, 141), -1, 8, 0)
        print "detection time = %gms" % (t / (cv.GetTickFrequency() * 1000.))
        if faces:
            for ((x, y, w, h), n) in faces:
                # Compute the start and end points of the detected rectangle.
                pt1 = (int(x * imageScale), int(y * imageScale))
                pt2 = (int((x + w) * imageScale), int((y + h) * imageScale))
                # Draw a rectangle on the source image to highlight the detected face.
                # cv.Rectangle(nuevaImagen, pt1, pt2, cv.RGB(255, 0, 0), 3, 8, 0)
                cv.Rectangle(image, pt1, pt2, cv.RGB(255, 0, 0), 3, 8, 0)
                # Compute the position of the object to show in Panda3D.
                # aspect2d is used because it keeps width and height independent
                # (with render2d the image would look squashed). In aspect2d the
                # x coordinate runs from -aspectRatio to aspectRatio, and the y
                # coordinate runs from -1 to 1.
                #
                # When working in 2D like this, the need to define the "center"
                # of our models correctly becomes more obvious. That is done in
                # the modelling programs, but it affects positioning inside our
                # application.
                # position = (float(x) * imageScale, float(y + h) * imageScale)
                # position = (position[0] - screenWidth / 2, position[1] - screenHeight / 2)
                # position = (2 * aspectRatio * position[0] / screenWidth, 2 * (-position[1] / screenHeight), 0)
                position = (float(x) * imageScale, float(y) * imageScale)
                position = (position[0] - screenWidth / 2, position[1] - screenHeight / 2)
                position = (2 * aspectRatio * position[0] / screenWidth,
                            2 * (-position[1] / screenHeight), 0)
                # Place the center of the object at the right position.
                self.faceImage.setPos(position[0], 0, position[1])
                texture = self.faceImage.getTexture()
                # self.faceImage.setScale(2 * imageScale * float(w) / texture.getXSize(), 1, 2 * imageScale * float(h) / texture.getYSize())
                self.faceImage.setScale(imageScale * float(w) / texture.getXSize(), 1,
                                        imageScale * float(h) / texture.getYSize())
                print self.faceImage.getScale()
        return nuevaImagen
props.setSize(w, h)
base.win.requestProperties(props)

from panda3d.core import OrthographicLens
lens = OrthographicLens()
lens.setFilmSize(1, 1)
lens.setNearFar(1, 1000)
base.cam.node().setLens(lens)

myShader = Shader.load(Shader.SL_GLSL, vertex="default.vert", fragment="smoke.frag")

card = CardMaker('test')
card.setFrameFullscreenQuad()

class RenderToTexture:
    def __init__(self):
        self.A = Texture()
        self.B = Texture()
        self.buffer = makeOffscreenBuffer()
        self.camera = base.makeCamera(self.buffer, lens=lens)
        self.mesh = NodePath(card.generate())
        self.camera.reparentTo(self.mesh)
        self.camera.setPos(0, -1, 0)
        self.mesh.setShader(myShader)
        self.mesh.setShaderInputs(
            res=(w, h),
            bufferTexture=self.A,
def _updateDebugNode(self): """ Internal method to generate new debug geometry. """ debugNode = NodePath("SpotLightDebugNode") # Create the inner image cm = CardMaker("SpotLightDebug") cm.setFrameFullscreenQuad() innerNode = NodePath(cm.generate()) innerNode.setTexture(Globals.loader.loadTexture("Data/GUI/Visualization/SpotLight.png")) innerNode.setBillboardPointEye() innerNode.reparentTo(debugNode) innerNode.setPos(self.position) innerNode.setColorScale(1,1,0,1) # Create the outer lines lineNode = debugNode.attachNewNode("lines") currentNodeTransform = render.getTransform(self.ghostCameraNode).getMat() currentCamTransform = self.ghostLens.getProjectionMat() currentRelativeCamPos = self.ghostCameraNode.getPos(render) currentCamBounds = self.ghostLens.makeBounds() currentCamBounds.xform(self.ghostCameraNode.getMat(render)) p = lambda index: currentCamBounds.getPoint(index) # Make a circle at the bottom frustumBottomCenter = (p(0) + p(1) + p(2) + p(3)) * 0.25 upVector = (p(0) + p(1)) / 2 - frustumBottomCenter rightVector = (p(1) + p(2)) / 2 - frustumBottomCenter points = [] for idx in xrange(64): rad = idx / 64.0 * math.pi * 2.0 pos = upVector * math.sin(rad) + rightVector * math.cos(rad) pos += frustumBottomCenter points.append(pos) frustumLine = self._createDebugLine(points, True) frustumLine.setColorScale(1,1,0,1) frustumLine.reparentTo(lineNode) # Create frustum lines which connect the origin to the bottom circle pointArrays = [ [self.position, frustumBottomCenter + upVector], [self.position, frustumBottomCenter - upVector], [self.position, frustumBottomCenter + rightVector], [self.position, frustumBottomCenter - rightVector], ] for pointArray in pointArrays: frustumLine = self._createDebugLine(pointArray, False) frustumLine.setColorScale(1,1,0,1) frustumLine.reparentTo(lineNode) # Create line which is in the direction of the spot light startPoint = (p(0) + p(1) + p(2) + p(3)) * 0.25 endPoint = (p(4) + p(5) + p(6) + p(7)) * 0.25 line = self._createDebugLine([startPoint, endPoint], False) line.setColorScale(1,1,1,1) line.reparentTo(lineNode) # Remove the old debug node self.debugNode.node().removeAllChildren() # Attach the new debug node debugNode.reparentTo(self.debugNode)
def renderSceneInto(self, depthtex=None, colortex=None, auxtex=None, auxbits=0, textures=None): """ Causes the scene to be rendered into the supplied textures instead of into the original window. Puts a fullscreen quad into the original window to show the render-to-texture results. Returns the quad. Normally, the caller would then apply a shader to the quad. To elaborate on how this all works: * An offscreen buffer is created. It is set up to mimic the original display region - it is the same size, uses the same clear colors, and contains a DisplayRegion that uses the original camera. * A fullscreen quad and an orthographic camera to render that quad are both created. The original camera is removed from the original window, and in its place, the orthographic quad-camera is installed. * The fullscreen quad is textured with the data from the offscreen buffer. A shader is applied that tints the results pink. * Automatic shader generation NOT enabled. If you have a filter that depends on a render target from the auto-shader, you either need to set an auto-shader attrib on the main camera or scene, or, you need to provide these outputs in your own shader. * All clears are disabled on the original display region. If the display region fills the whole window, then clears are disabled on the original window as well. It is assumed that rendering the full-screen quad eliminates the need to do clears. Hence, the original window which used to contain the actual scene, now contains a pink-tinted quad with a texture of the scene. It is assumed that the user will replace the shader on the quad with a more interesting filter. """ if (textures): colortex = textures.get("color", None) depthtex = textures.get("depth", None) auxtex = textures.get("aux", None) auxtex0 = textures.get("aux0", auxtex) auxtex1 = textures.get("aux1", None) else: auxtex0 = auxtex auxtex1 = None if (colortex == None): colortex = Texture("filter-base-color") colortex.setWrapU(Texture.WMClamp) colortex.setWrapV(Texture.WMClamp) texgroup = (depthtex, colortex, auxtex0, auxtex1) # Choose the size of the offscreen buffer. (winx, winy) = self.getScaledSize(1,1,1) buffer = self.createBuffer("filter-base", winx, winy, texgroup) if (buffer == None): return None cm = CardMaker("filter-base-quad") cm.setFrameFullscreenQuad() quad = NodePath(cm.generate()) quad.setDepthTest(0) quad.setDepthWrite(0) quad.setTexture(colortex) quad.setColor(1, 0.5, 0.5, 1) cs = NodePath("dummy") cs.setState(self.camstate) # Do we really need to turn on the Shader Generator? #cs.setShaderAuto() if (auxbits): cs.setAttrib(AuxBitplaneAttrib.make(auxbits)) self.camera.node().setInitialState(cs.getState()) quadcamnode = Camera("filter-quad-cam") lens = OrthographicLens() lens.setFilmSize(2, 2) lens.setFilmOffset(0, 0) lens.setNearFar(-1000, 1000) quadcamnode.setLens(lens) quadcam = quad.attachNewNode(quadcamnode) self.region.setCamera(quadcam) self.setStackedClears(buffer, self.rclears, self.wclears) if (auxtex0): buffer.setClearActive(GraphicsOutput.RTPAuxRgba0, 1) buffer.setClearValue(GraphicsOutput.RTPAuxRgba0, (0.5, 0.5, 1.0, 0.0)) if (auxtex1): buffer.setClearActive(GraphicsOutput.RTPAuxRgba1, 1) self.region.disableClears() if (self.isFullscreen()): self.win.disableClears() dr = buffer.makeDisplayRegion() dr.disableClears() dr.setCamera(self.camera) dr.setActive(1) self.buffers.append(buffer) self.sizes.append((1, 1, 1)) return quad
def renderSceneInto(self, depthtex=None, colortex=None, auxtex=None, auxbits=0, textures=None, fbprops=None, clamping=None): """ overload direct.filters.FilterManager.renderSceneInto :param depthtex: :param colortex: :param auxtex: :param auxbits: :param textures: :param fbprops: :param clamping: :return: """ if (textures): colortex = textures.get("color", None) depthtex = textures.get("depth", None) auxtex = textures.get("aux", None) auxtex0 = textures.get("aux0", auxtex) auxtex1 = textures.get("aux1", None) else: auxtex0 = auxtex auxtex1 = None if (colortex == None): colortex = Texture("filter-base-color") colortex.setWrapU(Texture.WMClamp) colortex.setWrapV(Texture.WMClamp) texgroup = (depthtex, colortex, auxtex0, auxtex1) # Choose the size of the offscreen buffer. (winx, winy) = self.getScaledSize(1, 1, 1) if fbprops is not None: buffer = self.createBuffer("filter-base", winx, winy, texgroup, fbprops=fbprops) else: buffer = self.createBuffer("filter-base", winx, winy, texgroup) if (buffer == None): return None cm = CardMaker("filter-base-quad") cm.setFrameFullscreenQuad() quad = NodePath(cm.generate()) quad.setDepthTest(0) quad.setDepthWrite(0) quad.setTexture(colortex) quad.setColor(1, 0.5, 0.5, 1) cs = NodePath("dummy") cs.setState(self.camstate) # Do we really need to turn on the Shader Generator? # cs.setShaderAuto() if (auxbits): cs.setAttrib(AuxBitplaneAttrib.make(auxbits)) if clamping is False: # Disables clamping in the shader generator. cs.setAttrib(LightRampAttrib.make_identity()) self.camera.node().setInitialState(cs.getState()) quadcamnode = Camera("filter-quad-cam") lens = OrthographicLens() lens.setFilmSize(2, 2) lens.setFilmOffset(0, 0) lens.setNearFar(-1000, 1000) quadcamnode.setLens(lens) quadcam = quad.attachNewNode(quadcamnode) self.region.setCamera(quadcam) self.setStackedClears(buffer, self.rclears, self.wclears) if (auxtex0): buffer.setClearActive(GraphicsOutput.RTPAuxRgba0, 1) buffer.setClearValue(GraphicsOutput.RTPAuxRgba0, (0.5, 0.5, 1.0, 0.0)) if (auxtex1): buffer.setClearActive(GraphicsOutput.RTPAuxRgba1, 1) self.region.disableClears() if (self.isFullscreen()): self.win.disableClears() dr = buffer.makeDisplayRegion() dr.disableClears() dr.setCamera(self.camera) dr.setActive(1) self.buffers.append(buffer) self.sizes.append((1, 1, 1)) return quad
def __init__(self, tex, stim_angles=(0, 0), strip_angle=0, position=(0, 0),
             velocities=(0, 0), strip_width=4, fps=30, window_size=None,
             window_name='BinocularDrift', profile_on=False):
    super().__init__()
    self.tex = tex
    if window_size is None:
        self.window_size = tex.texture_size
    else:
        self.window_size = window_size
    self.mask_position_card = position
    self.mask_position_uv = (utils.card2uv(self.mask_position_card[0]),
                             utils.card2uv(self.mask_position_card[1]))
    self.scale = np.sqrt(8)  # so it can handle arbitrary rotations and shifts
    self.left_texture_angle = stim_angles[0]
    self.right_texture_angle = stim_angles[1]
    self.left_velocity = velocities[0]
    self.right_velocity = velocities[1]
    self.strip_angle = strip_angle  # this will change fairly frequently
    self.fps = fps
    self.window_name = window_name
    self.profile_on = profile_on

    # Set window title and size
    self.window_properties = WindowProperties()
    self.window_properties.setSize(self.window_size, self.window_size)
    self.window_properties.setTitle(self.window_name)
    ShowBaseGlobal.base.win.requestProperties(self.window_properties)  # base is a panda3d global

    # CREATE MASK ARRAYS
    self.left_mask_array = 255 * np.ones((self.tex.texture_size, self.tex.texture_size), dtype=np.uint8)
    self.left_mask_array[:, self.tex.texture_size // 2 - strip_width // 2:] = 0
    self.right_mask_array = 255 * np.ones((self.tex.texture_size, self.tex.texture_size), dtype=np.uint8)
    self.right_mask_array[:, :self.tex.texture_size // 2 + strip_width // 2] = 0

    # TEXTURE STAGES FOR LEFT CARD
    self.left_texture_stage = TextureStage('left_texture_stage')
    # Mask
    self.left_mask = Texture("left_mask_texture")
    self.left_mask.setup2dTexture(self.tex.texture_size, self.tex.texture_size,
                                  Texture.T_unsigned_byte, Texture.F_luminance)
    self.left_mask.setRamImage(self.left_mask_array)
    self.left_mask_stage = TextureStage('left_mask_array')
    # Multiply the texture stages together
    self.left_mask_stage.setCombineRgb(TextureStage.CMModulate,
                                       TextureStage.CSTexture,
                                       TextureStage.COSrcColor,
                                       TextureStage.CSPrevious,
                                       TextureStage.COSrcColor)

    # TEXTURE STAGES FOR RIGHT CARD
    self.right_texture_stage = TextureStage('right_texture_stage')
    # Mask
    self.right_mask = Texture("right_mask_texture")
    self.right_mask.setup2dTexture(self.tex.texture_size, self.tex.texture_size,
                                   Texture.T_unsigned_byte, Texture.F_luminance)
    self.right_mask.setRamImage(self.right_mask_array)
    self.right_mask_stage = TextureStage('right_mask_stage')
    # Multiply the texture stages together
    self.right_mask_stage.setCombineRgb(TextureStage.CMModulate,
                                        TextureStage.CSTexture,
                                        TextureStage.COSrcColor,
                                        TextureStage.CSPrevious,
                                        TextureStage.COSrcColor)

    # CREATE CARDS/SCENEGRAPH
    cm = CardMaker('stimcard')
    cm.setFrameFullscreenQuad()
    #self.setBackgroundColor((0,0,0,1))
    self.left_card = self.aspect2d.attachNewNode(cm.generate())
    self.right_card = self.aspect2d.attachNewNode(cm.generate())
    self.left_card.setAttrib(ColorBlendAttrib.make(ColorBlendAttrib.M_add))
    self.right_card.setAttrib(ColorBlendAttrib.make(ColorBlendAttrib.M_add))

    # ADD TEXTURE STAGES TO CARDS
    self.left_card.setTexture(self.left_texture_stage, self.tex.texture)
    self.left_card.setTexture(self.left_mask_stage, self.left_mask)
    self.right_card.setTexture(self.right_texture_stage, self.tex.texture)
    self.right_card.setTexture(self.right_mask_stage, self.right_mask)
    self.setBackgroundColor((0, 0, 0, 1))  # without this the cards will appear washed out

    # TRANSFORMS
    # Masks
    self.mask_transform = self.trs_transform()
    self.left_card.setTexTransform(self.left_mask_stage, self.mask_transform)
    self.right_card.setTexTransform(self.right_mask_stage, self.mask_transform)
    # Left texture
    self.left_card.setTexScale(self.left_texture_stage, 1 / self.scale)
    self.left_card.setTexRotate(self.left_texture_stage, self.left_texture_angle)
    # Right texture
    self.right_card.setTexScale(self.right_texture_stage, 1 / self.scale)
    self.right_card.setTexRotate(self.right_texture_stage, self.right_texture_angle)

    # Set dynamic transforms
    if self.left_velocity != 0 and self.right_velocity != 0:
        self.taskMgr.add(self.textures_update, "move_both")
    elif self.left_velocity != 0 and self.right_velocity == 0:
        self.taskMgr.add(self.left_texture_update, "move_left")
    elif self.left_velocity == 0 and self.right_velocity != 0:
        self.taskMgr.add(self.right_texture_update, "move_right")

    # Set frame rate
    ShowBaseGlobal.globalClock.setMode(ClockObject.MLimited)
    ShowBaseGlobal.globalClock.setFrameRate(self.fps)  # can lock this at whatever

    # Set up profiling if desired
    if profile_on:
        PStatClient.connect()  # this will only work if pstats is running
        ShowBaseGlobal.base.setFrameRateMeter(True)  # show frame rate
        # The following will show a small x at the center
        self.title = OnscreenText("x", style=1, fg=(1, 1, 1, 1), bg=(0, 0, 0, .8),
                                  pos=self.mask_position_card, scale=0.05)
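The class this constructor belongs to is not named in the snippet. The following instantiation sketch assumes it is pandastim's BinocularDrift and that a texture object exposing `texture` and `texture_size` attributes is available; `GratingTex` below is a placeholder name, not a confirmed pandastim class.

    from pandastim import textures   # assumed module layout

    tex = textures.GratingTex(texture_size=512)   # placeholder texture class
    stim = BinocularDrift(tex,
                          stim_angles=(45, -45),
                          strip_angle=0,
                          position=(0, 0),
                          velocities=(0.05, -0.05),
                          strip_width=8,
                          window_name='binocular demo')
    stim.run()   # start the ShowBase main loop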
def __init__(self, tex, stim_angles=(0, 0), initial_angle=0, initial_position=(0, 0),
             velocities=(0, 0), strip_width=4, fps=30, window_size=None,
             window_name='position control', profile_on=False, save_path=None):
    super().__init__()
    self.render.setAntialias(AntialiasAttrib.MMultisample)
    self.aspect2d.prepareScene(ShowBaseGlobal.base.win.getGsg())  # pre-loads world
    self.tex = tex
    if window_size is None:
        self.window_size = tex.texture_size
    else:
        self.window_size = window_size
    self.mask_position_card = initial_position
    self.strip_width = strip_width
    self.scale = np.sqrt(8)  # so it can handle arbitrary rotations and shifts
    self.strip_angle = initial_angle  # this will change fairly frequently
    self.stim_angles = stim_angles
    self.left_texture_angle = self.stim_angles[0] + self.strip_angle  # make this a property
    self.right_texture_angle = self.stim_angles[1] + self.strip_angle
    self.left_velocity = velocities[0]
    self.right_velocity = velocities[1]
    self.fps = fps
    self.window_name = window_name
    self.profile_on = profile_on
    print(save_path)
    self.save_path = save_path
    if self.save_path:
        initial_params = {'angles': stim_angles,
                          'initial_angle': self.strip_angle,
                          'velocities': velocities,
                          'strip_width': self.strip_width,
                          'initial_position': initial_position}
        print(tex, initial_params)
        self.filestream = utils.save_initialize(self.save_path, [tex], [initial_params])
        print(self.filestream)
    else:
        self.filestream = None

    # Set window title and size
    self.window_properties = WindowProperties()
    self.window_properties.setSize(self.window_size, self.window_size)
    self.window_properties.setTitle(self.window_name)
    ShowBaseGlobal.base.win.requestProperties(self.window_properties)  # base is a panda3d global

    # Set frame rate
    ShowBaseGlobal.globalClock.setMode(ClockObject.MLimited)
    ShowBaseGlobal.globalClock.setFrameRate(self.fps)  # can lock this at whatever

    # CREATE MASK ARRAYS
    self.left_mask_array = 255 * np.ones((self.tex.texture_size, self.tex.texture_size), dtype=np.uint8)
    self.left_mask_array[:, self.tex.texture_size // 2 - self.strip_width // 2:] = 0
    self.right_mask_array = 255 * np.ones((self.tex.texture_size, self.tex.texture_size), dtype=np.uint8)
    self.right_mask_array[:, :self.tex.texture_size // 2 + self.strip_width // 2] = 0

    # TEXTURE STAGES FOR LEFT CARD
    self.left_texture_stage = TextureStage('left_texture_stage')
    # Mask
    self.left_mask = Texture("left_mask_texture")
    self.left_mask.setup2dTexture(self.tex.texture_size, self.tex.texture_size,
                                  Texture.T_unsigned_byte, Texture.F_luminance)
    self.left_mask.setRamImage(self.left_mask_array)
    self.left_mask_stage = TextureStage('left_mask_array')
    # Multiply the texture stages together
    self.left_mask_stage.setCombineRgb(TextureStage.CMModulate,
                                       TextureStage.CSTexture,
                                       TextureStage.COSrcColor,
                                       TextureStage.CSPrevious,
                                       TextureStage.COSrcColor)

    # TEXTURE STAGES FOR RIGHT CARD
    self.right_texture_stage = TextureStage('right_texture_stage')
    # Mask
    self.right_mask = Texture("right_mask_texture")
    self.right_mask.setup2dTexture(self.tex.texture_size, self.tex.texture_size,
                                   Texture.T_unsigned_byte, Texture.F_luminance)
    self.right_mask.setRamImage(self.right_mask_array)
    self.right_mask_stage = TextureStage('right_mask_stage')
    # Multiply the texture stages together
    self.right_mask_stage.setCombineRgb(TextureStage.CMModulate,
                                        TextureStage.CSTexture,
                                        TextureStage.COSrcColor,
                                        TextureStage.CSPrevious,
                                        TextureStage.COSrcColor)

    # CREATE CARDS/SCENEGRAPH
    cm = CardMaker('stimcard')
    cm.setFrameFullscreenQuad()
    #self.setBackgroundColor((0,0,0,1))
    self.left_card = self.aspect2d.attachNewNode(cm.generate())
    self.right_card = self.aspect2d.attachNewNode(cm.generate())
    self.left_card.setAttrib(ColorBlendAttrib.make(ColorBlendAttrib.M_add))
    self.right_card.setAttrib(ColorBlendAttrib.make(ColorBlendAttrib.M_add))

    # ADD TEXTURE STAGES TO CARDS
    self.left_card.setTexture(self.left_texture_stage, self.tex.texture)
    self.left_card.setTexture(self.left_mask_stage, self.left_mask)
    self.right_card.setTexture(self.right_texture_stage, self.tex.texture)
    self.right_card.setTexture(self.right_mask_stage, self.right_mask)
    self.setBackgroundColor((0, 0, 0, 1))  # without this the cards will appear washed out
    #self.left_card.setAntialias(AntialiasAttrib.MMultisample)
    #self.right_card.setAntialias(AntialiasAttrib.MMultisample)

    # TRANSFORMS
    # Masks
    self.mask_transform = self.trs_transform()
    self.left_card.setTexTransform(self.left_mask_stage, self.mask_transform)
    self.right_card.setTexTransform(self.right_mask_stage, self.mask_transform)
    # Textures
    # Left
    self.left_card.setTexScale(self.left_texture_stage, 1 / self.scale)
    self.left_card.setTexRotate(self.left_texture_stage, self.left_texture_angle)
    # Right
    self.right_card.setTexScale(self.right_texture_stage, 1 / self.scale)
    self.right_card.setTexRotate(self.right_texture_stage, self.right_texture_angle)

    # Set task manager(s) for textures
    if self.left_velocity != 0 and self.right_velocity != 0:
        self.taskMgr.add(self.textures_update, "move_both")
    elif self.left_velocity != 0 and self.right_velocity == 0:
        self.taskMgr.add(self.left_texture_update, "move_left")
    elif self.left_velocity == 0 and self.right_velocity != 0:
        self.taskMgr.add(self.right_texture_update, "move_right")

    # Event handler to process the messages
    self.accept("stim", self.process_stim, [])

    # Set up profiling if desired
    if profile_on:
        PStatClient.connect()  # this will only work if pstats is running
        ShowBaseGlobal.base.setFrameRateMeter(True)  # show frame rate
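Because this variant registers self.accept("stim", self.process_stim, []), another component can retarget the stimulus at runtime by sending a "stim" event through Panda3D's messenger. The payload below is only a guess at what process_stim (not shown here) might expect; the real format depends on that handler.

    from direct.showbase import ShowBaseGlobal

    # Hypothetical payload; adapt to whatever process_stim actually parses.
    ShowBaseGlobal.base.messenger.send("stim", [{'angle': 30, 'velocity': 0.02}])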
def __init__(self):
    ShowBase.__init__(self)
    FSM.__init__(self, "FSM-Game")

    #
    # BASIC APPLICATION CONFIGURATIONS
    #
    self.disableMouse()
    self.setBackgroundColor(0, 0, 0)
    self.camLens.setFov(75)
    self.camLens.setNear(0.8)

    # check if the config file hasn't been created
    base.textWriteSpeed = 0.05
    mute = ConfigVariableBool("audio-mute", False).getValue()
    if mute:
        self.disableAllAudio()
    else:
        self.enableAllAudio()
    particles = ConfigVariableBool("particles-enabled", True).getValue()
    if particles:
        self.enableParticles()
    base.textWriteSpeed = ConfigVariableDouble("text-write-speed", 0.05).getValue()
    base.controlType = ConfigVariableString("control-type", "Gamepad").getValue()
    base.mouseSensitivity = ConfigVariableDouble("mouse-sensitivity", 1.0).getValue()
    if not os.path.exists(prcFile):
        self.__writeConfig()

    # set window properties
    # clear all properties not previously set
    self.win.clearRejectedProperties()
    # setup new window properties
    props = WindowProperties()
    # Fullscreen
    props.setFullscreen(True)
    # window icon
    print(props.hasIconFilename())
    props.setIconFilename(windowicon)
    # get the displays width and height
    w = self.pipe.getDisplayWidth()
    h = self.pipe.getDisplayHeight()
    # set the window size to the screen resolution
    props.setSize(w, h)
    # request the new properties
    self.win.requestProperties(props)
    atexit.register(self.__writeConfig)

    # enable collision handling
    base.cTrav = CollisionTraverser("base collision traverser")
    base.pusher = CollisionHandlerPusher()
    base.pusher.addInPattern('%fn-in-%in')
    base.pusher.addOutPattern('%fn-out-%in')

    self.menu = Menu()
    self.options = OptionsMenu()
    self.musicMenu = loader.loadMusic("MayanJingle6_Menu.ogg")
    self.musicMenu.setLoop(True)

    cm = CardMaker("menuFade")
    cm.setFrameFullscreenQuad()
    self.menuCoverFade = NodePath(cm.generate())
    self.menuCoverFade.setTransparency(TransparencyAttrib.MAlpha)
    self.menuCoverFade.setBin("fixed", 1000)
    self.menuCoverFade.reparentTo(render2d)
    self.menuCoverFade.hide()

    self.menuCoverFadeOutInterval = Sequence(
        Func(self.menuCoverFade.show),
        LerpColorScaleInterval(
            self.menuCoverFade,
            1,
            LVecBase4f(0.0, 0.0, 0.0, 1.0),
            LVecBase4f(0.0, 0.0, 0.0, 0.0)),
        Func(self.menuCoverFade.hide))
    self.menuCoverFadeInInterval = Sequence(
        Func(self.menuCoverFade.show),
        LerpColorScaleInterval(
            self.menuCoverFade,
            1,
            LVecBase4f(0.0, 0.0, 0.0, 0.0),
            LVecBase4f(0.0, 0.0, 0.0, 1.0)),
        Func(self.menuCoverFade.hide))

    self.lerpAudioFadeOut = LerpFunc(
        self.audioFade,
        fromData=1.0,
        toData=0.0,
        duration=0.25,
        extraArgs=[self.musicMenu])
    self.fadeMusicOut = Sequence(
        self.lerpAudioFadeOut,
        Func(self.musicMenu.stop))
    self.lerpAudioFadeIn = LerpFunc(
        self.audioFade,
        fromData=0.0,
        toData=1.0,
        duration=1,
        extraArgs=[self.musicMenu])
    self.fadeMusicIn = Sequence(
        Func(self.musicMenu.play),
        self.lerpAudioFadeIn)

    self.seqFade = None

    self.acceptAll()

    self.request("Intro")
def enterIntro(self):
    helper.hide_cursor()

    cm = CardMaker("fade")
    cm.setFrameFullscreenQuad()

    self.gfLogo = NodePath(cm.generate())
    self.gfLogo.setTransparency(TransparencyAttrib.MAlpha)
    gfLogotex = loader.loadTexture('GrimFangLogo.png')
    gfLogots = TextureStage('gfLogoTS')
    gfLogots.setMode(TextureStage.MReplace)
    self.gfLogo.setTexture(gfLogots, gfLogotex)
    self.gfLogo.setY(-50)
    self.gfLogo.reparentTo(render2d)
    self.gfLogo.hide()

    self.pandaLogo = NodePath(cm.generate())
    self.pandaLogo.setTransparency(TransparencyAttrib.MAlpha)
    pandaLogotex = loader.loadTexture('Panda3DLogo.png')
    pandaLogots = TextureStage('pandaLogoTS')
    pandaLogots.setMode(TextureStage.MReplace)
    self.pandaLogo.setTexture(pandaLogots, pandaLogotex)
    self.pandaLogo.setY(-50)
    self.pandaLogo.reparentTo(render2d)
    self.pandaLogo.hide()

    gfFadeInInterval = LerpColorScaleInterval(
        self.gfLogo,
        2,
        LVecBase4f(0.0, 0.0, 0.0, 1.0),
        LVecBase4f(0.0, 0.0, 0.0, 0.0))
    gfFadeOutInterval = LerpColorScaleInterval(
        self.gfLogo,
        2,
        LVecBase4f(0.0, 0.0, 0.0, 0.0),
        LVecBase4f(0.0, 0.0, 0.0, 1.0))
    p3dFadeInInterval = LerpColorScaleInterval(
        self.pandaLogo,
        2,
        LVecBase4f(0.0, 0.0, 0.0, 1.0),
        LVecBase4f(0.0, 0.0, 0.0, 0.0))
    p3dFadeOutInterval = LerpColorScaleInterval(
        self.pandaLogo,
        2,
        LVecBase4f(0.0, 0.0, 0.0, 0.0),
        LVecBase4f(0.0, 0.0, 0.0, 1.0))

    self.fadeInOut = Sequence(
        Func(self.pandaLogo.show),
        p3dFadeInInterval,
        Wait(1.0),
        p3dFadeOutInterval,
        Wait(0.5),
        Func(self.pandaLogo.hide),
        Func(self.gfLogo.show),
        gfFadeInInterval,
        Wait(1.0),
        gfFadeOutInterval,
        Wait(0.5),
        Func(self.gfLogo.hide),
        Func(self.request, "Menu"),
        Func(helper.show_cursor),
        name="fadeInOut")
    self.fadeInOut.start()
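The example defines only the enter side of the Intro state; the matching exit handler is not shown. A minimal sketch of what it might look like, assuming the FSM's usual enterX/exitX naming and that nothing else keeps references to the temporary logo cards:

    def exitIntro(self):
        # Stop the interval if it is still running and drop the temporary cards.
        self.fadeInOut.finish()
        self.gfLogo.removeNode()
        self.pandaLogo.removeNode()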