class FirstTry(visual):

    def setup(self):
        self.tex1 = MovieTexture('videos/saturn5_apollo_launch.mp4')
        assert self.tex1.read('videos/saturn5_apollo_launch.mp4')
        self.tex2 = MovieTexture('videos/boards_eye_view.mp4')
        assert self.tex2.read('videos/boards_eye_view.mp4')

        self.cm1 = CardMaker('saturn')
        self.cm1.setFrameFullscreenQuad()
        self.cm1.setUvRange(self.tex1)
        self.card1 = NodePath(self.cm1.generate())
        self.card1.reparentTo(self.path)
        self.card1.setPos(0, 0, 10)
        self.card1.setP(50)

        self.cm2 = CardMaker('board')
        self.cm2.setFrameFullscreenQuad()
        self.cm2.setUvRange(self.tex2)
        self.card2 = NodePath(self.cm2.generate())
        self.card2.reparentTo(self.path)
        self.card2.setPos(0, 0, -10)
        self.card2.setP(-50)

        self.card1.setTexture(self.tex1)
        self.card1.setTexScale(TextureStage.getDefault(), self.tex1.getTexScale())
        self.card2.setTexture(self.tex2)
        self.card2.setTexScale(TextureStage.getDefault(), self.tex2.getTexScale())
        self.card1.setScale(10)
        self.card2.setScale(10)

    def getBeat(self):
        pass
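# --- Sketch (not part of the original class): FirstTry.setup() builds and
# textures the two cards but does not touch playback, and it never synchronizes
# the clips to audio. MovieTexture exposes an explicit animation interface
# (play/stop/setLoop/setPlayRate) for that. Assuming setup() has already run,
# playback could be driven like this (the helper name below is ours):
def play_first_try_movies(scene):
    for tex in (scene.tex1, scene.tex2):
        tex.setLoop(True)   # keep the clip repeating instead of freezing on the last frame
        tex.play()          # start (or restart) decoding frames into the texture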
def loadVideo(videoFileName, loop=False):
    videoPathStr = 'Video/{}'.format(videoFileName)
    # Initialize both return values so a failure path never references
    # unbound names.
    sound, card = None, None
    try:
        tex = MovieTexture(videoFileName)
        success = tex.read(videoPathStr)
        assert success, "Failed to load video!"

        # Set up a fullscreen card to set the video texture on.
        cm = CardMaker("My Fullscreen Card")
        cm.setFrameFullscreenQuad()
        # Tell the CardMaker to create texture coordinates that take into
        # account the padding region of the texture.
        cm.setUvRange(tex)

        # Now place the card in the scene graph and apply the texture to it.
        card = render2d.attachNewNode(cm.generate())
        card.setTexture(tex)
        card.hide()

        sound = loader.loadMusic(videoPathStr)
        sound.setLoop(loop)
        # Synchronize the video to the sound.
        tex.synchronizeTo(sound)
    except Exception:
        # logging.debug("loadvideo: {}".format(traceback.format_exc()))
        pass
    return sound, card
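# --- Usage sketch for loadVideo (assumes a running ShowBase app, so the
# `loader` and `render2d` globals it uses exist, and a file under Video/;
# 'intro.mp4' is a placeholder name, not one from the original project):
sound, card = loadVideo('intro.mp4', loop=False)
if sound is not None:
    card.show()    # the card is created hidden by loadVideo
    sound.play()   # because tex.synchronizeTo(sound) was called, playing the
                   # sound also drives the video; pausing it pauses the video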
class MouseTunnel(ShowBase):

    def __init__(self):
        # Initialize the ShowBase class from which we inherit, which will
        # create a window and set up everything we need for rendering into it.
        ShowBase.__init__(self)
        self.stimtype = 'image_sequence'

        # session start
        self.session_start_time = datetime.datetime.now()

        # self.accept("escape", sys.exit, [0])  # don't let the user do this, because then the data isn't saved.
        self.accept('q', self.close)
        self.accept('Q', self.close)
        self.AUTO_REWARD = AUTO_REWARD

        # disable mouse control so that we can place the camera
        base.disableMouse()
        camera.setPosHpr(0, 0, 10, 0, -90, 0)
        mat = Mat4(camera.getMat())
        mat.invertInPlace()
        base.mouseInterfaceNode.setMat(mat)
        # base.enableMouse()

        props = WindowProperties()
        # props.setFullscreen(True)
        props.setOrigin(-924, 70)
        # props.setSize(1880, 1040)
        props.setCursorHidden(True)
        props.setMouseMode(WindowProperties.M_relative)
        base.win.requestProperties(props)
        base.setBackgroundColor(0, 0, 0)  # set the background color to black

        # set up the textures
        # we now get a buffer that's going to hold the texture of our new scene
        altBuffer = self.win.makeTextureBuffer("hello", 1524, 1024)
        # altBuffer.getDisplayRegion(0).setDimensions(0.5, 0.9, 0.5, 0.8)
        # altBuffer = base.win.makeDisplayRegion()
        # altBuffer.makeDisplayRegion(0, 1, 0, 1)

        # now we have to set up a new scene graph to make this scene
        self.dr2 = base.win.makeDisplayRegion(0, 0.1, 0, 0.1)
        altRender = NodePath("new render")

        # this takes care of setting up the camera properly
        self.altCam = self.makeCamera(altBuffer)
        self.dr2.setCamera(self.altCam)
        self.altCam.reparentTo(altRender)
        self.altCam.setPos(0, -10, 0)

        self.bufferViewer.setPosition("llcorner")
        # self.bufferViewer.position = (.1, .4, .1, .4)
        self.bufferViewer.setCardSize(1.0, 0.0)
        print(self.bufferViewer.position)

        self.imagesTexture = MovieTexture("image_sequence")
        # success = self.imagesTexture.read("models/natural_images.avi")
        success = self.imagesTexture.read("models/movie_5hz.mpg")
        self.imagesTexture.setPlayRate(1.0)
        self.imagesTexture.setLoopCount(10)
        # self.imageTexture = loader.loadTexture("models/NaturalImages/BSDs_8143.tiff")
        # self.imagesTexture.reparentTo(altRender)

        cm = CardMaker("stimwindow")
        cm.setFrame(-4, 4, -3, 3)
        # cm.setUvRange(self.imagesTexture)
        self.card = NodePath(cm.generate())
        self.card.reparentTo(altRender)
        if self.stimtype == 'image_sequence':
            self.card.setTexture(self.imagesTexture, 1)
        # self.imagesTexture.play()

        # self.bufferViewer.setPosition("lrcorner")
        # self.bufferViewer.setCardSize(1.0, 0.0)
        self.accept("v", self.bufferViewer.toggleEnable)
        self.accept("V", self.bufferViewer.toggleEnable)

        # Load the tunnel
        self.initTunnel()

        # initialize some things
        # for the tunnel construction:
        self.boundary_to_add_next_segment = -1 * TUNNEL_SEGMENT_LENGTH
        self.current_number_of_segments = 8

        # task flow booleans
        self.in_waiting_period = False
        self.stim_started = False
        self.looking_for_a_cue_zone = True
        self.in_reward_window = False
        self.show_stimulus = False

        # for task control
        self.interval = 0
        self.time_waiting_in_cue_zone = 0
        self.wait_time = 1.83
        self.stim_duration = 4.0  # in seconds
        self.max_stim_duration = 6.0  # in seconds
        self.stim_elapsed = 0.0  # in seconds
        self.last_position = base.camera.getZ()
        self.position_on_track = base.camera.getZ()

        # for reward control
        self.reward_window = REWARD_WINDOW  # in seconds
        self.reward_elapsed = 0.0
        # self.reward_volume = 0.008  # in mL; this is for the hardcoded 0.1 seconds of reward time
        self.reward_volume = int(REWARD_VOLUME)  # in uL, for the stepper motor
        self.reward_time = 0.1  # in sec, based on volume. hard coded right now, but should be modified
        # by (1) calibration and (2) optionally by the main loop for dynamic reward scheduling
        # self.lick_buffer = []

        # INITIALIZE NIDAQ
        self.nidevice = 'Dev2'
        self.encodervinchannel = 1
        self.encodervsigchannel = 0
        self.invertdo = False
        self.diport = 1
        self.lickline = 0
        self.doport = 0
        self.rewardline = 0
        self.rewardlines = [0]
        self._setupDAQ()
        self.do.WriteBit(1, 1)
        self.do.WriteBit(3, 1)  # set reward high, because the logic is flipped somehow,
        # possibly by haphazard wiring of the circuit (12/24/2018 djd)
        self.previous_encoder_position = self.ai.data[0][self.encodervsigchannel]
        self.encoder_gain = 30

        # INITIALIZE LICK SENSOR
        self._lickSensorSetup()

        # INITIALIZE output data
        self.lickData = []
        self.x = []
        self.t = []
        self.trialData = []
        self.rewardData = []

        # INITIALIZE KEY SENSOR, for backup inputs and other user controls
        self.keys = key.KeyStateHandler()
        self.accept('r', self._give_reward, [self.reward_volume])
        self.accept('l', self._toggle_reward)

        img_list = glob.glob('models/NaturalImages/*.tiff')[:10]
        print(img_list)
        self.imageTextures = [loader.loadTexture(img) for img in img_list]

        self._setupEyetracking()
        self._startEyetracking()

        if AUTO_MODE:
            self.gameTask = taskMgr.add(self.autoLoop2, "autoLoop2")
            self.rewardTask = taskMgr.add(self.rewardControl, "reward")
            self.cue_zone = concatenate((self.cue_zone,
                                         arange(self.current_number_of_segments * -TUNNEL_SEGMENT_LENGTH,
                                                self.current_number_of_segments * -TUNNEL_SEGMENT_LENGTH - TUNNEL_SEGMENT_LENGTH - 80,
                                                -1)))
            self.auto_position_on_track = 0
            self.auto_restart = False
            self.auto_running = True
            self.contTunnel()
        else:
            # Now we create the task. taskMgr is the task manager that actually
            # calls the function each frame. The add method creates a new task.
            # The first argument is the function to be called, and the second
            # argument is the name for the task. It returns a task object which
            # is passed to the function each frame.
            self.gameTask = taskMgr.add(self.gameLoop, "gameLoop")
            # self.stimulusTask = taskMgr.add(self.stimulusControl, "stimulus")
            self.lickTask = taskMgr.add(self.lickControl, "lick")
            self.rewardTask = taskMgr.add(self.rewardControl, "reward")

    # Code to initialize the tunnel
    def initTunnel(self):
        self.tunnel = [None] * 8
        for x in range(8):
            # Load a copy of the tunnel
            self.tunnel[x] = loader.loadModel('models/tunnel')
            # The front segment needs to be attached to render
            if x == 0:
                self.tunnel[x].reparentTo(render)
            # The rest of the segments parent to the previous one, so that by
            # moving the front segment, the entire tunnel is moved
            else:
                self.tunnel[x].reparentTo(self.tunnel[x - 1])
            # We have to offset each segment by its length so that they stack onto
            # each other. Otherwise, they would all occupy the same space.
            self.tunnel[x].setPos(0, 0, -TUNNEL_SEGMENT_LENGTH)

        # Now we have a tunnel consisting of 4 repeating segments with a
        # hierarchy like this:
        # render<-tunnel[0]<-tunnel[1]<-tunnel[2]<-tunnel[3]
        self.tunnel[0] = loader.loadModel('models/grating')
        self.tunnel[0].reparentTo(render)
        self.cue_zone = arange(0, TUNNEL_SEGMENT_LENGTH, -1)

    # This function is called to snap the front of the tunnel to the back
    # to simulate traveling through it
    def contTunnel(self):
        self.auto_position_on_track -= 50
        position_on_track = self.auto_position_on_track
        print(str(int(position_on_track)) + ' ' + str(self.cue_zone))
        if int(position_on_track) in np.array(self.cue_zone):  # check for cue zone
            if not self.auto_restart:
                print('STOP!')
                self.tunnelMove.pause()
                self.auto_presentation = True
                # self.current_number_of_segments += 1
            else:
                self.auto_restart = True
                self.tunnelMove.resume()
        else:
            self.in_waiting_period = False
            self.auto_presentation = False
            # base.setBackgroundColor([1, 0, 0])
            if self.looking_for_a_cue_zone == False:
                self.looking_for_a_cue_zone = True
            if self.stim_started == True:
                self.stop_a_presentation()

        # This line uses slices to take the front of the list and put it on the
        # back. For more information on slices check the Python manual
        self.tunnel = self.tunnel[1:] + self.tunnel[0:1]
        # Set the front segment (which was at TUNNEL_SEGMENT_LENGTH) to 0, which
        # is where the previous segment started
        self.tunnel[0].setZ(0)
        # Reparent the front to render to preserve the hierarchy outlined above
        self.tunnel[0].reparentTo(render)
        # Set the scale to be appropriate (since attributes like scale are
        # inherited, the rest of the segments have a scale of 1)
        self.tunnel[0].setScale(.155, .155, .305)
        # Set the new back to the values that the rest of the segments have
        self.tunnel[3].reparentTo(self.tunnel[2])
        self.tunnel[3].setZ(-TUNNEL_SEGMENT_LENGTH)
        self.tunnel[3].setScale(1)

        # Set up the tunnel to move one segment and then call contTunnel again
        # to make the tunnel move infinitely
        self.tunnelMove = Sequence(
            LerpFunc(self.tunnel[0].setZ,
                     duration=TUNNEL_TIME,
                     fromData=0,
                     toData=TUNNEL_SEGMENT_LENGTH * .305),
            Func(self.contTunnel))
        self.tunnelMove.start()

    def start_a_presentation(self):
        print("start")
        self.do.WriteBit(2, 1)
        # self.bufferViewer.toggleEnable()
        self.lick_buffer = []
        if self.stimtype == 'random image':
            for i in range(randint(len(self.imageTextures))):
                self.card.setTexture(self.imageTextures[i], 1)
        if self.stimtype == 'image_sequence':
            self.imagesTexture.setTime(0.)
            self.dr2.setDimensions(0.4, 0.8, 0.4, 0.70)  # floats (left, right, bottom, top)
            self.imagesTexture.play()

    def stop_a_presentation(self):
        if self.stim_started == True:
            self.dr2.setDimensions(0, 0.1, 0, 0.1)
            # self.bufferViewer.toggleEnable()
            self.stim_started = False
            self.stim_elapsed = 0.
            self.stim_duration = 0.
            # set some limits on the random duration so it is not too short or too long
            while self.stim_duration < 1. or self.stim_duration > self.max_stim_duration * 2.:
                self.stim_duration = exponential(self.max_stim_duration)
            self.stim_off_time = globalClock.getFrameTime()
            self.do.WriteBit(2, 0)

    def _lickSensorSetup(self):
        """Attempts to set up lick sensor NI task."""
        ## TODO: Make lick sensor object if necessary. Let user select port and line.
        if self.di:
            self.lickSensor = self.di  # just use DI for now
            licktest = []
            for i in range(30):
                licktest.append(self.di.Read()[self.lickline])
                time.sleep(0.01)
            licktest = np.array(licktest, dtype=np.uint8)
            if len(licktest[np.where(licktest > 0)]) > 25:
                self.lickSensor = None
                self.lickData = [np.zeros(len(self.rewardlines))]
                print("Lick sensor failed startup test.")
            else:
                print('lick sensor setup succeeded.')
                self.keycontrol = True
        else:
            print("Could not initialize lick sensor. Ensure that NIDAQ is connected properly.")
            self.keycontrol = True
            self.lickSensor = None
            self.lickData = [np.zeros(len(self.rewardlines))]
            self.keys = key.KeyStateHandler()
            # self.window.winHandle.push_handlers(self.keys)

    # def _read_licks(self):
    #     # not yet implemented; should be replaced with a check of the beam break

    def _give_reward(self, volume):
        print("reward!")
        self.rewardData.extend([globalClock.getFrameTime()])
        self.do.WriteBit(3, 0)
        time.sleep(self.reward_time)
        self.do.WriteBit(3, 1)  # put a TTL on a line to indicate that a reward was given
        s.dispense(volume)
        # pass  # not yet implemented

    def _toggle_reward(self):
        if self.AUTO_REWARD:
            self.AUTO_REWARD = False
            print('switched to lick sensing for reward.')
        else:
            self.AUTO_REWARD = True
            print('switched to automatic rewards after stimuli.')

    def autoLoop2(self, task):
        dt = globalClock.getDt()
        current_time = globalClock.getFrameTime()
        self.x.extend([self.auto_position_on_track])
        self.t.extend([globalClock.getFrameTime()])
        if self.auto_presentation:
            self.auto_running = False
            if self.in_waiting_period:
                self.time_waited += dt
            else:
                self.time_waited = 0
                self.in_waiting_period = True
            if self.time_waited > self.wait_time:  # if in cue zone, see if we have been there for long enough
                # start a trial
                self.start_position = self.auto_position_on_track
                self.start_time = current_time
                if not self.stim_started:
                    self.start_a_presentation()
                    # print(self.stim_duration)
                    self.stim_started = True
                    self.show_stimulus = True
                else:
                    self.stim_elapsed += dt
                    if self.stim_elapsed > self.stim_duration:
                        self.show_stimulus = False
                        self.in_reward_window = True
                        self.stop_a_presentation()
                        self.auto_restart = False
                        # print(self.current_number_of_segments)
                        self.current_number_of_segments += 9
                        # redefine the cue zone as the next one
                        self.cue_zone = arange(
                            self.current_number_of_segments * -TUNNEL_SEGMENT_LENGTH,
                            self.current_number_of_segments * -TUNNEL_SEGMENT_LENGTH - TUNNEL_SEGMENT_LENGTH - 80,
                            -1)
                        # extend cue zone, keeping old ones
                        # self.cue_zone = concatenate((self.cue_zone,
                        #     arange(self.current_number_of_segments*-TUNNEL_SEGMENT_LENGTH-40,
                        #            self.current_number_of_segments*-TUNNEL_SEGMENT_LENGTH-TUNNEL_SEGMENT_LENGTH-40,
                        #            -1)))
                        self.contTunnel()
                        self.time_waited = 0
                        self.looking_for_a_cue_zone = False
                        # base.setBackgroundColor([0, 0, 1])
            else:
                pass
                # base.setBackgroundColor([0, 1, 0])
        else:
            self.auto_running = True
        # Since every return is Task.cont, the task will continue indefinitely
        return Task.cont

    def gameLoop(self, task):
        # get the time elapsed since the last frame.
        dt = globalClock.getDt()
        current_time = globalClock.getFrameTime()

        # get the camera position.
        position_on_track = base.camera.getZ()

        # get the encoder position from NIDAQ Analog Inputs channel 2
        encoder_position = self.ai.data[0][self.encodervsigchannel]  # zeroth sample in buffer [0], from ai2 [2]
        # convert to track coordinates
        encoder_position_diff = (encoder_position - self.previous_encoder_position)
        if abs(encoder_position_diff) > 0.025:  # manually set threshold to remove encoder noise jittering the position
            if encoder_position_diff > 4.5:
                encoder_position_diff -= 5.
            if encoder_position_diff < -4.5:
                encoder_position_diff += 5.
            encoder_position_diff *= self.encoder_gain
            self.previous_encoder_position = encoder_position
            position_on_track = base.camera.getZ() + encoder_position_diff

        # reset the camera position
        self.camera.setPos(base.camera.getX(), base.camera.getY(), position_on_track)
        self.x.extend([position_on_track])
        self.t.extend([globalClock.getFrameTime()])

        # first check if the mouse moved on the last frame.
        if abs(self.last_position - position_on_track) < 1.5:  # the mouse didn't move more than 1.5 units on the track
            self.moved = False
            if int(position_on_track) in self.cue_zone:  # check for cue zone
                if self.looking_for_a_cue_zone:  # make sure we are transitioning from the tunnel to a cue zone
                    # increment how long we've been waiting in the cue zone.
                    if self.in_waiting_period:
                        self.time_waited += dt
                    else:
                        self.time_waited = 0
                        self.in_waiting_period = True
                    if self.time_waited > self.wait_time:  # if in cue zone, see if we have been there for long enough
                        # start a trial
                        self.start_position = position_on_track
                        self.start_time = current_time
                        if not self.stim_started:
                            self.start_a_presentation()
                            print(self.stim_duration)
                            self.stim_started = True
                            self.show_stimulus = True
                        else:
                            self.stim_elapsed += dt
                            if self.stim_elapsed > self.stim_duration:
                                self.show_stimulus = False
                                self.in_reward_window = True
                                self.stop_a_presentation()
                                self.time_waited = 0
                                self.looking_for_a_cue_zone = False
                                # base.setBackgroundColor([0, 0, 1])
                    else:
                        pass
                        # base.setBackgroundColor([0, 1, 0])
            else:
                self.in_waiting_period = False
                # base.setBackgroundColor([1, 0, 0])
                if self.looking_for_a_cue_zone == False:
                    self.looking_for_a_cue_zone = True
                if self.stim_started == True:
                    self.stop_a_presentation()
        else:  # the mouse did move
            self.moved = True
            if self.stim_started == True:  # check if it moved during a presentation
                self.stop_a_presentation()
                self.time_waited = 0
                self.looking_for_a_cue_zone = False
                self.show_stimulus = False

        # if we need to add another segment, do so
        if position_on_track < self.boundary_to_add_next_segment:
            self.tunnel.extend([None])
            x = self.current_number_of_segments
            if x % 8 == 0:
                self.tunnel[x] = loader.loadModel('models/grating')
                self.cue_zone = concatenate((self.cue_zone,
                                             arange(self.current_number_of_segments * -TUNNEL_SEGMENT_LENGTH,
                                                    self.current_number_of_segments * -TUNNEL_SEGMENT_LENGTH - TUNNEL_SEGMENT_LENGTH - 80,
                                                    -1)))
            else:
                self.tunnel[x] = loader.loadModel('models/tunnel')
            self.tunnel[x].setPos(0, 0, -TUNNEL_SEGMENT_LENGTH)
            self.tunnel[x].reparentTo(self.tunnel[x - 1])
            # increment
            self.boundary_to_add_next_segment -= TUNNEL_SEGMENT_LENGTH
            self.current_number_of_segments += 1
        else:
            pass
            # print('current:' + str(position_on_track) + ' next boundary:' + str(self.boundary_to_add_next_segment))

        self.last_position = position_on_track
        # lick_times = self._read_licks()
        # Since every return is Task.cont, the task will continue
        # indefinitely, under control of the mouse (animal)
        return Task.cont

    def stimulusControl(self, task):
        if self.show_stimulus and not self.bufferViewer.isEnabled():
            # self.bufferViewer.toggleEnable()
            self.dr2.setDimensions(0.5, 0.9, 0.5, 0.8)
        if not self.show_stimulus and self.bufferViewer.isEnabled():
            # self.bufferViewer.toggleEnable()
            self.dr2.setDimensions(0, 0.1, 0, 0.1)
        return Task.cont

    def lickControl(self, task):
        """Checks to see if a lick is occurring."""
        ## TODO: Let user select line for lick sensing.
        if self.lickSensor:
            if self.lickSensor.Read()[self.lickline]:
                self.lickData.extend([globalClock.getFrameTime()])
                print('lick happened at: ' + str(self.lickData[-1]))
        elif self.keycontrol == True:  # NO NI BOARD. KEY INPUT?
            data = []  # make sure `data` is defined even if no key is down
            if self.keys[key.SPACE]:
                data = [globalClock.getFrameTime()]
            # elif self.keys[key.NUM_1]:
            #     print(self.lickData)
            # elif self.keys[key.NUM_3]:
            #     data = [0, 1]
            # else:
            #     data = [0, 0]
            self.lickData.extend(data)
        return Task.cont

    def rewardControl(self, task):
        if self.in_reward_window:
            self.reward_elapsed += globalClock.getDt()
            if not self.AUTO_REWARD:
                if self.reward_elapsed < self.reward_window:
                    # this checks if there has been more than zero licks since the stimulus turned off
                    if len(np.where(np.array(self.lickData) > self.stim_off_time)[0]) > 1:
                        self._give_reward(self.reward_volume)
                        self.in_reward_window = False
                        self.reward_elapsed = 0.  # reset
                else:
                    self.in_reward_window = False
                    self.reward_elapsed = 0.  # reset
            else:
                self._give_reward(self.reward_volume)
                self.in_reward_window = False
                self.reward_elapsed = 0.  # reset
            # self.reward_elapsed = 0.
            # base.setBackgroundColor([1, 1, 0])
            # if self.keys[key.NUM_1]:
            #     print('reward!')
            if self.reward_elapsed > self.reward_window:
                self.in_reward_window = False
                self.reward_elapsed = 0.
        return Task.cont

    def _setupEyetracking(self):
        """Sets up eye tracking."""
        try:
            eyetrackerip = "DESKTOP-EE5KKDO"
            eyetrackerport = 10000
            trackeyepos = False
            from aibs.Eyetracking.EyetrackerClient import Client
            self.eyetracker = Client(
                outgoing_ip=eyetrackerip,
                outgoing_port=eyetrackerport,
                output_filename=str(datetime.datetime.now()).replace(
                    ':', '').replace('.', '').replace(' ', '-'))
            self.eyetracker.setup()
            # eyedatalog = []
            # if trackeyepos:
            #     eyeinitpos = None
        except:
            print("Could not initialize eyetracker:")
            self.eyetracker = None

    def _startEyetracking(self):
        if self.eyetracker:
            self.eyetracker.recordStart()

    def _setupDAQ(self):
        """Sets up some digital IO for sync and triggering."""
""" print('SETTING UP DAQ') try: if self.invertdo: istate = 'low' else: istate = 'high' self.do = DigitalOutput(self.nidevice, self.doport, initial_state='low') self.do.StartTask() except: # Exception, e: print("Error starting DigitalOutput task:") self.do = None try: self.di = DigitalInput(self.nidevice, self.diport) self.di.StartTask() except: # Exception, e: print("Error starting DigitalInput task:") self.di = None # try: #set up 8 channels, only use 2 though for now self.ai = AnalogInput(self.nidevice, range(8), buffer_size=25, terminal_config='RSE', clock_speed=6000.0, voltage_range=[-5.0, 5.0]) self.ai.StartTask() print(type(self.ai)) # except:# Exception, e: # print("Error starting AnalogInput task:") # self.ai = None # try: # self.ao = AnalogOutput(self.nidevice, channels=[0, 1],terminal_config = 'RSE', # voltage_range=[0.0, 5.0]) # self.ao.StartTask() # except:# Exception, e: # print("Error starting AnalogOutput task:") # self.ao = None def close(self): if self.eyetracker: self.eyetracker.recordStop() print('stop eyetracking') save_path = os.path.join(os.getcwd(),'data',str(MOUSE_ID)+'_'+\ str(self.session_start_time.year)+'_'+\ str(self.session_start_time.month)+'_'+\ str(self.session_start_time.day)+'-'+\ str(self.session_start_time.hour)+'_'+\ str(self.session_start_time.minute)+'_'+\ str(self.session_start_time.second)) if ~os.path.isdir(save_path): os.mkdir(save_path) print("saving data to " + save_path) np.save(os.path.join(save_path, 'licks.npy'), self.lickData) np.save(os.path.join(save_path, 'x.npy'), self.x) np.save(os.path.join(save_path, 't.npy'), self.t) np.save(os.path.join(save_path, 'trialData.npy'), self.trialData) np.save(os.path.join(save_path, 'rewardData.npy'), self.rewardData) print('rewardData:') print(np.shape(self.rewardData)) sys.exit(0)
class DistributedCinemaInterior(DistributedToonInterior.DistributedToonInterior):
    notify = directNotify.newCategory("DistributedCinemaInterior")

    def __init__(self, cr):
        DistributedToonInterior.DistributedToonInterior.__init__(self, cr)
        self.fsm = ClassicFSM.ClassicFSM('DCinemaInterior', [
            State.State('off', self.enterOff, self.exitOff),
            State.State('show', self.enterShow, self.exitShow),
            State.State('intermission', self.enterIntermission,
                        self.exitIntermission)
        ], 'off', 'off')
        self.fsm.enterInitialState()
        self.state = None
        self.cinemaIndex = None
        self.movieTex = None
        self.movieCard = None
        self.movieSound = None
        self.movieTrack = None
        self.intermissionText = None

    def makeInterior(self):
        # Always use the same room for cinemas.
        DistributedToonInterior.DistributedToonInterior.makeInterior(
            self, roomIndex=0)

    def announceGenerate(self):
        DistributedToonInterior.DistributedToonInterior.announceGenerate(self)
        self.sendUpdate('requestStateAndTimestamp')

    def disable(self):
        self.fsm.requestFinalState()
        self.fsm = None
        self.state = None
        self.cinemaIndex = None
        self.movieTex = None
        self.movieCard = None
        self.movieSound = None
        self.movieTrack = None
        self.intermissionText = None
        self.cr.playGame.hood.loader.interiorMusic.stop()
        DistributedToonInterior.DistributedToonInterior.disable(self)

    def darkenInterior(self):
        darkenIval = self.interior.colorScaleInterval(
            3.0,
            colorScale=(0.3, 0.3, 0.3, 1.0),
            startColorScale=(1.0, 1.0, 1.0, 1.0),
            blendType='easeInOut')
        darkenIval.start()

    def lightenInterior(self):
        lightenIval = self.interior.colorScaleInterval(
            3.0,
            colorScale=(1, 1, 1, 1.0),
            startColorScale=(0.3, 0.3, 0.3, 1.0),
            blendType='easeInOut')
        lightenIval.start()

    def enterShow(self, ts=0):
        self.darkenInterior()
        self.cr.playGame.hood.loader.interiorMusic.stop()
        videoFile = CinemaGlobals.Cinemas[self.cinemaIndex][0]
        audioFile = CinemaGlobals.Cinemas[self.cinemaIndex][1]
        self.movieTex = MovieTexture(self.uniqueName("movieTex"))
        self.movieTex.read(videoFile)
        card = CardMaker(self.uniqueName('movieCard'))
        card.setFrame(-1.5, 1.5, -1, 1)
        self.movieCard = NodePath(card.generate())
        self.movieCard.reparentTo(render)
        self.movieCard.setPos(
            self.interior.find('**/sign_origin;+s').getPos(render))
        # self.movieCard.setX(self.movieCard, -0.05)
        self.movieCard.setHpr(
            self.interior.find('**/sign_origin;+s').getHpr(render))
        self.movieCard.setDepthWrite(1, 1)
        self.movieCard.setTwoSided(True)
        self.movieCard.setTexture(self.movieTex)
        self.movieCard.setTexScale(TextureStage.getDefault(),
                                   self.movieTex.getTexScale())
        self.movieCard.setScale(2.5)
        self.movieSound = base.loadSfx(audioFile)
        self.movieTex.synchronizeTo(self.movieSound)
        self.movieTrack = SoundInterval(self.movieSound,
                                        name=self.uniqueName('movieTrack'))
        self.movieTrack.setDoneEvent(self.movieTrack.getName())
        self.acceptOnce(self.movieTrack.getDoneEvent(), self.fsm.request,
                        ['off'])
        self.movieTrack.start(ts)

    def exitShow(self):
        self.ignore(self.movieTrack.getDoneEvent())
        self.movieTrack.finish()
        self.movieTrack = None
        self.movieSound = None
        self.movieCard.removeNode()
        self.movieCard = None
        self.movieTex = None
        self.lightenInterior()
        self.cr.playGame.hood.loader.interiorMusic.play()

    def enterIntermission(self, ts=0):
        self.intermissionText = DirectLabel(
            relief=None,
            text_decal=True,
            text="",
            scale=0.7,
            parent=self.interior.find('**/sign_origin;+s'),
            text_font=CIGlobals.getMickeyFont(),
            text_fg=(1, 0.9, 0, 1))
        self.movieTrack = Sequence(name=self.uniqueName('intermissionTrack'))
        for second in range(CinemaGlobals.IntermissionLength + 1):
            timeRemaining = CinemaGlobals.IntermissionLength - second
            self.movieTrack.append(
                Func(self.setIntermissionText,
                     "Next show in:\n%d" % timeRemaining))
            self.movieTrack.append(Wait(1.0))
        self.movieTrack.setDoneEvent(self.movieTrack.getName())
        self.acceptOnce(self.movieTrack.getDoneEvent(), self.fsm.request,
                        ['off'])
        self.movieTrack.start(ts)

    def setIntermissionText(self, text):
        self.intermissionText['text'] = text

    def exitIntermission(self):
        self.ignore(self.movieTrack.getDoneEvent())
        self.movieTrack.finish()
        self.movieTrack = None
        self.intermissionText.destroy()
        self.intermissionText = None

    def enterOff(self):
        pass

    def exitOff(self):
        pass

    def setCinemaIndex(self, index):
        self.cinemaIndex = index

    def getCinemaIndex(self):
        return self.cinemaIndex

    def setState(self, state, timestamp):
        ts = globalClockDelta.localElapsedTime(timestamp)
        self.state = state
        self.fsm.request(state, [ts])

    def getState(self):
        return self.state
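# --- Minimal standalone sketch of the pattern enterShow() relies on:
# MovieTexture.synchronizeTo() slaves the video frames to an AudioSound, and a
# SoundInterval done-event signals when the show is over. This strips away the
# Toontown distributed-object framework; 'movie.mp4' is a placeholder file.
from direct.showbase.ShowBase import ShowBase
from direct.interval.IntervalGlobal import SoundInterval
from panda3d.core import CardMaker, MovieTexture, NodePath, TextureStage

class MoviePoster(ShowBase):
    def __init__(self):
        ShowBase.__init__(self)
        tex = MovieTexture('poster')
        assert tex.read('movie.mp4'), "Failed to load video"
        cm = CardMaker('posterCard')
        cm.setFrame(-1.5, 1.5, -1, 1)
        card = NodePath(cm.generate())
        card.reparentTo(self.render)
        card.setPos(0, 5, 0)                  # a few units in front of the default camera
        card.setTwoSided(True)
        card.setTexture(tex)
        card.setTexScale(TextureStage.getDefault(), tex.getTexScale())
        sound = self.loader.loadSfx('movie.mp4')
        tex.synchronizeTo(sound)              # video follows the sound's clock
        track = SoundInterval(sound, name='posterTrack')
        track.setDoneEvent('posterDone')      # sent when the interval finishes
        self.acceptOnce('posterDone', self.onMovieDone)
        track.start()

    def onMovieDone(self):
        print('movie finished')

if __name__ == '__main__':
    MoviePoster().run()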
class Tutorial:

    def __init__(self, pat):
        self.t = Transitions(loader)
        self.pickAToon = pat

    def askTutorial(self):
        self.firstTimeMsg = YesNoDialog(
            text=CIGlobals.FirstTimeMsg,
            text_scale=0.07,
            text_wordwrap=18,
            buttonGeomList=[
                CIGlobals.getOkayBtnGeom(),
                CIGlobals.getCancelBtnGeom()
            ],
            button_relief=None,
            button_text_pos=(0, -0.1),
            command=self.handleTutorialDecision,
            image_color=CIGlobals.DialogColor,
            fadeScreen=1)

    def handleTutorialDecision(self, value):
        if value:
            self.firstTimeMsg.destroy()
            self.startTutorial()
            base.hoodBGM.stop()
        else:
            self.firstTimeMsg.destroy()
            self.enablePatButtons()

    def enablePatButtons(self):
        for btn in self.pickAToon.btnList:
            btn['state'] = DGG.NORMAL
        self.pickAToon.quit_btn['state'] = DGG.NORMAL

    def startTutorial(self):
        self.t.fadeOut(1)
        Sequence(Wait(1.2), Func(self.playVideo)).start()

    def playVideo(self):
        self.t.fadeIn(0)
        self.pickAToon.removeGui()
        self.movieTex = MovieTexture("tutorial")
        assert self.movieTex.read("tutorial.avi"), "Failed to load tutorial video"
        cm = CardMaker("tutorialCard")
        cm.setFrameFullscreenQuad()
        self.card = NodePath(cm.generate())
        self.card.reparentTo(render2d)
        self.card.setTexture(self.movieTex)
        self.card.setTexScale(TextureStage.getDefault(),
                              self.movieTex.getTexScale())
        self.movieSound = loader.loadSfx("tutorial.avi")
        self.movieTex.synchronizeTo(self.movieSound)
        self.movieSound.play()
        taskMgr.add(self.checkMovieStatus, "checkMovieStatus")

    def checkMovieStatus(self, task):
        if self.movieSound.status() == AudioSound.READY:
            self.stopVideo()
            return task.done
        return task.cont

    def stopVideo(self):
        self.movieSound.stop()
        self.card.removeNode()
        self.t.fadeOut(0)
        self.pickAToon.createGui(1)
        Sequence(Wait(0.2), Func(self.t.fadeIn, 1)).start()
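# --- Design note (sketch, not the project's code): checkMovieStatus polls the
# synchronized sound every frame and tears the card down once it reports
# AudioSound.READY (i.e. it has stopped). If per-frame polling is not wanted, a
# one-shot timer keyed to the sound's length is an alternative; this assumes
# the audio track of tutorial.avi reports an accurate length(), and the method
# names below are hypothetical additions to Tutorial:
def playVideoWithTimer(self):
    self.movieSound.play()
    taskMgr.doMethodLater(self.movieSound.length(), self._onMovieFinished,
                          "tutorialMovieDone")

def _onMovieFinished(self, task):
    self.stopVideo()
    return task.done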