def onMouseDown(self, event):
    """Handle a press on the video-out panel.

    If the press lands inside the floating-object group's bounding
    rect (self.gr), the group counts as 'touched': the large objects
    are replaced by many small pieces that scatter away from the
    group center, and the session manager is notified shortly after.
    """
    mp = event.GetPosition()
    if (self.gr[0] <= mp[0] <= self.gr[0]+self.gr[2]) and \
       (self.gr[1] <= mp[1] <= self.gr[1]+self.gr[3]):
        # group is touched
        ### set a new destination for scattering motion
        # NOTE: floor division; plain '/' yields a float in Python 3
        # and would break range(steps) below.
        steps = self.refresh_rate // 4
        call_session_mngr_time = 1000 // 4 - 10
        # make small pieces to scatter
        number_of_fo_ = int(len(self.fo))
        for i in range(number_of_fo_):
            for j in range(10):
                x = self.fo[i]['track'][0][0]
                y = self.fo[i]['track'][0][1]
                self.fo.append(dict(
                    # list of coordinates to move along
                    track=[(randint(x - 10, x + 10),
                            randint(y - 10, y + 10))],
                    # radius; '//' keeps both randint args integers
                    rad=randint(1, self.fo_min_rad // 2),
                    rad_change=0,  # change of radius on each drawing (-1,0 or 1)
                    col='#CCCCCC'))  # fill color
        # remove the original (large) floating objects
        for i in range(number_of_fo_):
            self.fo.pop(0)
        self.calc_group_rect()
        self.flag_rad_change = False
        # set track points: each piece flies away from the group
        # center (self.gctr) along a straight line
        for i in range(len(self.fo)):
            orig_ = (self.fo[i]['track'][0][0], self.fo[i]['track'][0][1])
            dest_ = [-1, -1]
            if orig_[0] < self.gctr[0]:
                dest_[0] = randint(orig_[0] - 500, orig_[0] - 1)
            elif orig_[0] > self.gctr[0]:
                dest_[0] = randint(orig_[0] + 1, orig_[0] + 500)
            else:
                dest_[0] = orig_[0]
            if orig_[1] < self.gctr[1]:
                dest_[1] = randint(orig_[1] - 500, orig_[1] - 1)
            elif orig_[1] > self.gctr[1]:
                dest_[1] = randint(orig_[1] + 1, orig_[1] + 500)
            else:
                dest_[1] = orig_[1]
            tl = []
            xstep = abs(dest_[0] - orig_[0]) / steps
            ystep = abs(dest_[1] - orig_[1]) / steps
            for j in range(steps):
                if orig_[0] < dest_[0]: x = orig_[0] + xstep * j
                else: x = orig_[0] - xstep * j
                if orig_[1] < dest_[1]: y = orig_[1] + ystep * j
                else: y = orig_[1] - ystep * j
                tl.append((x, y))
            self.fo[i]['track'] = tl
        writeFile(self.parent.log_file_path,
                  '%s, [videoOut], The target is touched.\n' % (get_time_stamp()))
        wx.CallLater(call_session_mngr_time,
                     self.parent.mods["session_mngr"].msg_q.put,
                     'videoOut/stim_touched', True, None)
def onStartStopSession(self, event):
    '''Start/Stop a training or experimental session.

    Toggles on self.session_start_time: -1 means no session is
    running. Starting restarts the webcams (if enabled), starts all
    enabled modules, and logs the session begin; stopping tears the
    modules down, logs the end, and resets the UI timers/labels.
    '''
    if DEBUG: print('CATOSFrame.onStartStopSession()')
    if self.session_start_time == -1:
        # not in session. start a session
        # webcams opened via 'Check webcam views' must be closed first
        if flagMods["videoIn"] and self.mods["videoIn"] != []:
            self.stop_mods(mod="videoIn")
        if flagMods["videoIn"]:
            self.start_mods(mod='videoIn')  # start webcam
            # (watching over 3 screens surrounded area)
        self.start_mods(mod='all')  # start every enabled module
        self.session_start_time = time()
        e_time = time() - self.session_start_time  # elapsed session time
        log = "%s, [CATOS]," % (get_time_stamp())
        log += " %.3f, Beginning of session.\n" % (e_time)
        writeFile(self.log_file_path, log)
        self.btn_cam.Disable()  # cam-check button unusable during session
        self.btn_session.SetLabel('End session')
    else:
        # in session. stop it.
        if flagMods["videoIn"]:
            self.stop_mods(mod='videoIn')  # stop webcam
        self.stop_mods(mod='all')
        e_time = time() - self.session_start_time
        log = "%s, [CATOS]," % (get_time_stamp())
        log += " %.3f, End of session.\n" % (e_time)
        writeFile(self.log_file_path, log)
        self.session_start_time = -1
        self.last_play_time = -1  # time when the last stimulus was play
        self.sTxt_time.SetLabel('0:00:00')
        self.sTxt_s_time.SetLabel('0:00:00')
        self.btn_cam.Enable()
        self.btn_session.SetLabel('Start session')
def play(self, snd_idx, loop=True):
    """Start playback of the pre-loaded sound at index snd_idx.

    A fresh SoundSource is created per playback; loop controls
    whether the sound repeats.
    """
    src = SoundSource(gain=0.25, position=[0,0,0])
    src.looping = loop
    src.queue(self.snd_data[snd_idx])
    self.snd_src = src
    self.sink.play(src)
    self.sink.update()
    log = '%s, [audioOut], sound (%s) play starts.\n'%(get_time_stamp(),
                                            self.wav_file_paths[snd_idx])
    writeFile(self.parent.log_file_path, log)
def onClose(self, event):
    """Stop running modules in order, log shutdown, then destroy the frame."""
    self.timer.Stop()
    #for k in self.timers.keys(): self.timers[k].Stop()
    if self.session_start_time != -1:
        self.onStartStopSession(None)  # stop session if it's running
    if self.mods["videoIn"] != []:
        self.stop_mods( mod='videoIn' )  # cams were open with 'Check webcam views'
    if flag_arduino == True: self.stop_arduino()
    writeFile(self.log_file_path,
              '%s, [CATOS], End of the program\n'%(get_time_stamp()))
    # wx.CallLater replaces the deprecated wx.FutureCall alias (matches
    # the other onClose in this file); the delay lets module threads
    # finish before the frame is destroyed.
    wx.CallLater(1000, self.Destroy)
def start_mods(self, mod='all', option_str=''):
    """Start one module, or all modules enabled in flagMods.

    mod: 'videoIn', 'feeder_videoIn', 'videoOut', 'audioIn',
         'audioOut', 'session_mngr', 'feeder_mngr' or 'all'.
    option_str: 'chk_cam_view' opens every webcam just for viewing.
    """
    if DEBUG: print('CATOSFrame.start_mods()')
    if mod == 'videoIn' or mod == 'feeder_videoIn':
        if option_str == 'chk_cam_view':
            # open every camera (subject + feeder) just to view them
            chk_cam_view = True
            mod_name = 'videoIn'
            cam_idx = self.cam_idx + self.cam_idx_feeder
            flag_feeder = False
        else:
            chk_cam_view = False
            if mod == 'videoIn':
                mod_name = 'videoIn'
                cam_idx = self.cam_idx
                flag_feeder = False
            elif mod == 'feeder_videoIn':
                mod_name = 'feeder_videoIn'
                # fixed typo: attribute is 'cam_idx_feeder'
                # (was 'cam_idx_feederf' -> AttributeError)
                cam_idx = self.cam_idx_feeder
                flag_feeder = True
        pos = list(self.cam_view_pos)
        # one VideoIn instance + worker thread per camera
        for i in range(len(cam_idx)):
            self.mods[mod_name].append(
                VideoIn(self, cam_idx[i], tuple(pos)))
            self.mods[mod_name][-1].thrd = Thread(
                target=self.mods[mod_name][-1].run,
                args=(
                    chk_cam_view,
                    flag_feeder,
                ))
            self.mods[mod_name][-1].thrd.start()
            pos[0] += 400  # shift next view window to the right
    if mod == 'videoOut' or (mod == 'all' and flagMods["videoOut"]):
        self.mods["videoOut"] = VideoOut(self)
        self.mods["videoOut"].Show(True)
    if mod == 'audioIn' or (mod == 'all' and flagMods["audioIn"]):
        self.mods["audioIn"] = AudioIn(self)
        self.mods["audioIn"].thrd = Thread(target=self.mods["audioIn"].run)
        self.mods["audioIn"].thrd.start()
    if mod == 'audioOut' or (mod == 'all' and flagMods["audioOut"]):
        self.mods["audioOut"] = AudioOut(self, self.audio_files)
    if mod == 'session_mngr' or (mod == 'all' and flagMods["session_mngr"]):
        self.mods["session_mngr"] = ESessionManager(self)
        writeFile(
            self.log_file_path,
            '%s, [CATOS], session manager mod init.\n' % (get_time_stamp()))
    if mod == 'feeder_mngr' or (mod == 'all' and flagMods["feeder_mngr"]):
        self.mods["feeder_mngr"] = FeederManager(self)
        writeFile(
            self.log_file_path,
            '%s, [CATOS], feeder manager mod init.\n' % (get_time_stamp()))
def finish_rec(self, aDataBuff, snd_file, r_cnt, prev_fps):
    """Flush any unwritten audio frames, close the WAV file and log it.

    aDataBuff: list of per-block sample lists.
    snd_file: open wave file object (closed here).
    r_cnt: number of buffered blocks not yet written.
    prev_fps: recent fps readings, recorded in the log line.
    Returns None so the caller can reset its snd_file reference.
    """
    if r_cnt > 0:
        # tobytes() replaces the tostring() alias removed in NumPy 2.0
        snd_file.writeframes(np.array(aDataBuff[-r_cnt:],
                                      dtype=np.int16).tobytes())
    snd_file.close()
    writeFile(
        self.parent.log_file_path,
        "%s, [audioIn], finished writing WAV, recent-fps %s.\n"
        % (get_time_stamp(), str(prev_fps)),
    )
    snd_file = None
    return snd_file
def onEnterInTextCtrl(self, event):
    """Process the Enter key pressed in one of the two text controls."""
    obj_name = event.GetEventObject().GetName().strip()
    if obj_name == 'txt_notes':
        # append the typed note to the log file
        note = self.txt_notes.GetValue()
        log = '%s, [CATOS], %s\n'%(get_time_stamp(), note)
        writeFile(self.log_file_path, log)
        self.show_msg_in_statbar("'%s' is written in the log."%note)
        self.txt_notes.SetValue('')
    else:
        # entered in txt_arduino
        cmd_txt = self.txt_arduino.GetValue()
        self.mods["arduino"].send(cmd_txt.encode())  # send a message to Arduino
        self.txt_arduino.SetValue("")
def send(self, msg='', flag_log=True):
    """Send a message to the Arduino over the serial connection.

    msg: bytes to write (callers pass str.encode() results).
    flag_log: when truthy, the transmission is appended to the log,
    refreshing the log file path first if the file rolled over.
    """
    self.aConn.write(msg)  # send a message to Arduino
    #sleep(0.1)
    self.aConn.flush()  # flush the serial connection
    if flag_log:  # idiomatic truthiness instead of '== True'
        if not path.isfile(self.log_file_path):
            # log file disappeared/rolled over; recompute its path
            self.log_file_path = update_log_file_path(self.output_folder)
        writeFile(
            self.log_file_path,
            "%s, [arduino], '%s' was sent to Arduino\n" %
            (get_time_stamp(), msg))
def start_arduino(self):
    """Instantiate the Arduino module and log the result.

    NOTE(review): when no serial connection is opened, this returns
    early; the warning/close code below the `return` is intentionally
    dead during testing (see the '!!!' comment) and should be
    re-enabled for a real setup.
    """
    if DEBUG: print('CATOSFrame.start_arduino()')
    self.mods["arduino"] = Arduino(self, self.output_folder)
    if self.mods["arduino"].aConn == None:
        return
        # !!! remove for actual setup. This is for testing purpose.
        msg = "Arduino chip is not found.\nPlease connect it and retry."
        show_msg(msg, self.panel)
        self.onClose(None)
    writeFile(self.log_file_path,
              "%s, [CATOS], arduino mod init.\n" % (get_time_stamp()))
def __init__(self, parent, wav_file_paths):
    """Set up the audio output sink/listener and pre-load WAV data.

    parent: owner object providing log_file_path.
    wav_file_paths: list of WAV files to load into self.snd_data.
    """
    self.parent = parent
    # (removed a redundant second assignment of self.wav_file_paths
    # that followed the loading loop)
    self.wav_file_paths = wav_file_paths
    self.snd_src = None  # current SoundSource, created in play()
    self.sink = SoundSink()
    self.sink.activate()
    self.listener = SoundListener()
    self.sink.listener = self.listener
    self.snd_data = []  # decoded audio buffers, parallel to wav_file_paths
    for fp in wav_file_paths:
        self.snd_data.append(load_wav_file(fp))
    writeFile(self.parent.log_file_path,
              '%s, [audioOut], audioOut mod init.\n'%(get_time_stamp()))
def onClose(self, event):
    """Stop timers and modules in order, log shutdown, destroy the frame."""
    if DEBUG: print('CATOSFrame.onClose()')
    self.timer.Stop()
    #for k in self.timers.keys(): self.timers[k].Stop()
    if self.session_start_time != -1:
        self.onStartStopSession(None)  # stop session if it's running
    if self.mods["videoIn"] != []:
        # cams opened via 'Check webcam views' outside a session
        self.stop_mods(mod='videoIn')
    if flagMods["arduino"]: self.stop_arduino()
    writeFile(self.log_file_path,
              '%s, [CATOS], End of the program\n' % (get_time_stamp()))
    # delay Destroy so module threads get time to finish
    wx.CallLater(1000, self.Destroy)
def onEnterInTextCtrl(self, event):
    """Handle the Enter key in either the notes or the Arduino field."""
    if DEBUG: print('CATOSFrame.onEnterInTextCtrl()')
    widget_name = event.GetEventObject().GetName().strip()
    if widget_name == 'txt_notes':
        # write the typed note into the log file
        note = self.txt_notes.GetValue()
        writeFile(self.log_file_path,
                  "%s, [CATOS], %s\n" % (get_time_stamp(), note))
        self.show_msg_in_statbar("'%s' is written in the log." % note)
        self.txt_notes.SetValue('')
    else:  # entered in txt_arduino
        command = self.txt_arduino.GetValue()
        # send a message to Arduino
        self.mods["arduino"].send(command.encode())
        self.txt_arduino.SetValue("")
def init_trial(self):
    """Prepare a new trial for the current session type, then schedule
    init_trial_state; no-op while a trial is already running."""
    if self.state == 'inTrial': return  # a trial is in progress
    s_type = self.session_type
    if s_type == 'feed':
        # random feeding interval in seconds
        self.feed_intv = randint(10000, 30000) / 1000.0
    elif s_type == 'static':
        self.parent.mods['videoOut'].init_static_img()
    elif s_type in ('movements', 'immersion'):
        self.parent.mods["videoOut"].init_floating_obj(1)
        if s_type == 'immersion':
            self.parent.mods["audioOut"].play(0)
    writeFile(self.parent.log_file_path,
              '%s, [session_mngr], Trial init.\n' % (get_time_stamp()))
    wx.CallLater(100, self.init_trial_state)
def stop_mods(self, mod='all', option_str=''):
    """Stop one module or all enabled modules.

    Threaded modules (videoIn, feeder_videoIn, audioIn) are asked to
    quit through their message queues; GUI/object modules are closed
    or quit directly and their slots reset.
    """
    if DEBUG: print('CATOSFrame.stop_mods()')
    if mod == 'videoIn' or (mod == 'all' and flagMods["videoIn"]):
        for i in range(len(self.mods['videoIn'])):
            # ask each cam thread to terminate itself
            self.mods['videoIn'][i].msg_q.put('main/quit/True', True, None)
            #wx.CallLater(1000, self.mods['videoIn'][i].thrd.join)
        self.mods['videoIn'] = []
    if mod == 'feeder_videoIn' or \
       (mod == 'all' and flagMods["feeder_mngr"]):
        for i in range(len(self.mods['feeder_videoIn'])):
            self.mods['feeder_videoIn'][i].msg_q.put(
                'main/quit/True', True, None)
            #wx.CallLater(1000, self.mods['feeder_videoIn'][i].thrd.join)
        self.mods['feeder_videoIn'] = []
    if mod == 'videoOut' or (mod == 'all' and flagMods["videoOut"]):
        if self.mods["videoOut"] != None:
            self.mods["videoOut"].onClose(None)
            self.mods["videoOut"] = None
    if mod == 'audioIn' or (mod == 'all' and flagMods["audioIn"]):
        if self.mods["audioIn"] != None:
            self.mods["audioIn"].msg_q.put('main/quit/True', True, None)
            self.mods["audioIn"] = None
    if mod == 'audioOut' or (mod == 'all' and flagMods["audioOut"]):
        if self.mods["audioOut"] != None:
            self.mods["audioOut"].stop()
            self.mods["audioOut"] = None
    if mod == 'session_mngr' or (mod == 'all' and flagMods["session_mngr"]):
        if self.mods["session_mngr"] != None:
            self.mods["session_mngr"].quit()
            self.mods["session_mngr"] = None
            log = "%s, [CATOS]," % (get_time_stamp())
            log += " session manager mod finished.\n"
            writeFile(self.log_file_path, log)
    if mod == 'feeder_mngr' or (mod == 'all' and flagMods["feeder_mngr"]):
        if self.mods["feeder_mngr"] != None:
            self.mods["feeder_mngr"].quit()
            self.mods["feeder_mngr"] = None
            log = "%s, [CATOS]," % (get_time_stamp())
            log += " feeder manager mod finished.\n"
            writeFile(self.log_file_path, log)
def __init__(self, parent):
    """Set up PyAudio recording parameters and the message queue.

    parent: owner providing log_file_path and output_folder.
    """
    self.parent = parent
    self.input_block_time = 0.1  # seconds of audio per read block
    self.fps = int(1.0 / self.input_block_time)  # blocks per second
    self.buff_sz = self.fps * 3
    # buffer size is equivalent to number of frame for 3 seconds
    self.pa = pyaudio.PyAudio()
    self.dev_keyword = "USB Dongle"  # keyword to find the audio device
    self.dev_idx, self.dev_info = self.find_input_dev()
    self.rp = dict(
        format=pyaudio.paInt16,
        sampWidth=2,  # bytes per sample
        channels=1,
        sampleRate=int(self.dev_info["defaultSampleRate"])
    )  # parameters for recording
    self.rp["input_frames_per_block"] = int(self.rp["sampleRate"] *
                                            self.input_block_time)
    self.rp["freq_res"] = self.rp["sampleRate"] / \
        float(self.rp["input_frames_per_block"])  # frequency resolution
    self.dMax = 2 ** (8 * self.rp["sampWidth"])  # max data value
    self.cutoff_hz = 1000  # frequencies below this are ignored
    self.stop_latency = 1
    # time (seconds) of no valid data to record (to stop recording)
    self.msg_q = Queue.Queue()  # inter-thread message queue (Python 2 module)
    writeFile(self.parent.log_file_path,
              "%s, [audioIn], audioIn mod init.\n" % (get_time_stamp()))
def start_mods(self, mod='all', option_str=''):
    """Start one module, or all modules enabled by the flag_* globals.

    option_str 'chk_cam_view' opens every camera just for viewing.
    """
    if mod == 'videoIn' or mod == 'feeder_videoIn':
        if option_str == 'chk_cam_view':
            # open all cameras (subject + feeder) only to view them
            chk_cam_view = True
            mod_name = 'videoIn'
            cam_idx = self.cam_idx + self.cam_idx_feeder
            flag_feeder = False
        else:
            chk_cam_view = False
            if mod == 'videoIn':
                mod_name = 'videoIn'
                cam_idx = self.cam_idx
                flag_feeder = False
            elif mod == 'feeder_videoIn':
                mod_name = 'feeder_videoIn'
                # fixed typo: attribute is 'cam_idx_feeder'
                # (was 'cam_idx_feederf' -> AttributeError)
                cam_idx = self.cam_idx_feeder
                flag_feeder = True
        pos = list(self.cam_view_pos)
        # one VideoIn instance + worker thread per camera
        for i in xrange(len(cam_idx)):
            self.mods[mod_name].append(
                VideoIn(self, cam_idx[i], tuple(pos)))
            self.mods[mod_name][-1].thrd = Thread(
                target=self.mods[mod_name][-1].run,
                args=(chk_cam_view, flag_feeder,))
            self.mods[mod_name][-1].thrd.start()
            pos[0] += 100; pos[1] += 100  # cascade the next view window
    if mod == 'videoOut' or (mod == 'all' and flag_videoOut == True):
        self.mods["videoOut"] = VideoOut(self)
        self.mods["videoOut"].Show(True)
    if mod == 'audioIn' or (mod == 'all' and flag_audioIn == True):
        self.mods["audioIn"] = AudioIn(self)
        self.mods["audioIn"].thrd = Thread(target=self.mods["audioIn"].run)
        self.mods["audioIn"].thrd.start()
    if mod == 'audioOut' or (mod == 'all' and flag_audioOut == True):
        self.mods["audioOut"] = AudioOut(self)
    if mod == 'session_mngr' or (mod == 'all' and flag_session_mngr == True):
        self.mods["session_mngr"] = ESessionManager(self)
        writeFile(self.log_file_path,
                  '%s, [CATOS], session manager mod init.\n'%(get_time_stamp()))
    if mod == 'feeder_mngr' or (mod == 'all' and flag_feeder_mngr == True):
        self.mods["feeder_mngr"] = FeederManager(self)
        writeFile(self.log_file_path,
                  '%s, [CATOS], feeder manager mod init.\n'%(get_time_stamp()))
def stop_mods(self, mod='all', option_str=''):
    """Stop one module or all modules enabled by the flag_* globals.

    Threaded modules are asked to quit via their message queues;
    object modules are closed/quit directly and their slots reset.
    """
    if mod == 'videoIn' or (mod =='all' and flag_videoIn == True):
        for i in xrange(len(self.mods['videoIn'])):
            # ask each cam thread to terminate itself
            self.mods['videoIn'][i].msg_q.put('main/quit/True', True, None)
            #wx.FutureCall(1000, self.mods['videoIn'][i].thrd.join)
        self.mods['videoIn'] = []
    if mod == 'feeder_videoIn' or (mod == 'all' and flag_feeder_mngr == True):
        for i in xrange(len(self.mods['feeder_videoIn'])):
            self.mods['feeder_videoIn'][i].msg_q.put('main/quit/True',
                                                     True, None)
            #wx.FutureCall(1000, self.mods['feeder_videoIn'][i].thrd.join)
        self.mods['feeder_videoIn'] = []
    if mod == 'videoOut' or (mod == 'all' and flag_videoOut == True):
        if self.mods["videoOut"] != None:
            self.mods["videoOut"].onClose(None)
            self.mods["videoOut"] = None
    if mod == 'audioIn' or (mod == 'all' and flag_audioIn == True):
        if self.mods["audioIn"] != None:
            self.mods["audioIn"].msg_q.put('main/quit/True', True, None)
            self.mods["audioIn"] = None
    if mod == 'audioOut' or (mod == 'all' and flag_audioOut == True):
        if self.mods["audioOut"] != None:
            self.mods["audioOut"].stop()
            self.mods["audioOut"] = None
    if mod == 'session_mngr' or (mod == 'all' and flag_session_mngr == True):
        if self.mods["session_mngr"] != None:
            self.mods["session_mngr"].quit()
            self.mods["session_mngr"] = None
            writeFile(self.log_file_path,
                '%s, [CATOS], session manager mod finished.\n'%(get_time_stamp()))
    if mod == 'feeder_mngr' or (mod == 'all' and flag_feeder_mngr == True):
        if self.mods["feeder_mngr"] != None:
            self.mods["feeder_mngr"].quit()
            self.mods["feeder_mngr"] = None
            writeFile(self.log_file_path,
                '%s, [CATOS], feeder manager mod finished.\n'%(get_time_stamp()))
def onStartStopSession(self, event):
    '''Start/Stop a training or experimental session.

    Toggles on self.session_start_time: -1 means no session is
    running. Starting restarts the webcams (if enabled) and all
    modules; stopping tears everything down and resets the UI.
    '''
    if self.session_start_time == -1:
        # not in session. start a session
        # cams opened via 'Check webcam views' must be closed first
        if flag_videoIn == True and self.mods["videoIn"] != []:
            self.stop_mods(mod="videoIn")
        if flag_videoIn == True:
            self.start_mods(mod='videoIn')
            # start webcam (watching over 3 screens surrounded area)
        self.start_mods( mod='all' )  # start every enabled module
        self.session_start_time = time()
        e_time = time() - self.session_start_time  # elapsed session time
        writeFile(self.log_file_path,
            '%s, [CATOS], %.3f, Beginning of session.\n'%(get_time_stamp(),
                                                          e_time))
        self.btn_cam.Disable()  # cam-check button unusable during session
        self.btn_session.SetLabel('End session')
    else:
        # in session. stop it.
        if flag_videoIn == True:
            self.stop_mods(mod='videoIn')  # stop webcam
        self.stop_mods( mod='all' )
        e_time = time() - self.session_start_time
        writeFile(self.log_file_path,
            '%s, [CATOS], %.3f, End of session.\n'%(get_time_stamp(), e_time))
        self.session_start_time = -1
        self.last_play_time = -1  # time when the last stimulus was play
        self.sTxt_time.SetLabel('0:00:00')
        self.sTxt_s_time.SetLabel('0:00:00')
        self.btn_cam.Enable()
        self.btn_session.SetLabel('Start session')
def __init__(self):
    """Build the CATOS main frame: attributes, widgets, key bindings,
    the 100 ms housekeeping timer, and (optionally) the Arduino module.
    """
    if DEBUG: print('CATOSFrame.__init__()')
    ##### [begin] setting up attributes -----
    ### output folder check
    output_folder = path.join(CWD, 'output')
    if path.isdir(output_folder) == False: mkdir(output_folder)
    self.output_folder = output_folder  # output folder
    # determine log file path
    self.log_file_path = get_log_file_path(output_folder)
    # audio file to load
    self.audio_files = ['input/snd_fly.wav', 'input/pos_fb.wav']
    self.w_size = (400, 300)  # window size
    self.font = wx.Font(12, wx.FONTFAMILY_DEFAULT, wx.NORMAL, wx.NORMAL)
    # init dict to contain necessary modules
    self.mods = dict(
        arduino=None,
        videoIn=[],
        feeder_videoIn=[],  # when feeder uses a cam
        videoOut=None,
        audioOut=None,
        session_mngr=None)
    self.program_start_time = time()
    self.session_start_time = -1  # -1 == no session running
    self.last_play_time = -1  # last stimulus play time
    self.cam_idx = [0]  #[0,1,2] # webcam indices # [[TEMP]] using one
    # webcam for testing
    self.cam_idx_feeder = []  # webcam index for the feeder
    self.cam_view_pos = [
        50 + self.w_size[0],
        wx.GetDisplaySize()[1] - 350
    ]  # position of
    # webcam view checking windows
    self.msg_q = Queue()  # inter-thread message queue
    ##### [end] setting up attributes -----
    # init frame
    wx.Frame.__init__(self, None, -1, 'CATOS', size=self.w_size)
    self.SetPosition((50, wx.GetDisplaySize()[1] - self.w_size[1]))
    self.Show(True)
    self.panel = wx.Panel(self, pos=(0, 0), size=self.w_size)
    self.panel.SetBackgroundColour('#000000')
    ##### [begin] user interface setup -----
    posX = 5
    posY = 10
    btn_width = 150
    b_space = 30  # vertical spacing between widgets
    self.btn_cam = wx.Button(self.panel,
                             -1,
                             label='Check webcam views',
                             pos=(posX, posY),
                             size=(btn_width, -1))
    self.btn_cam.Bind(wx.EVT_LEFT_UP, self.onChkWebcamView)
    posY += b_space
    self.btn_session = wx.Button(self.panel,
                                 -1,
                                 label='Start session',
                                 pos=(posX, posY),
                                 size=(btn_width, -1))
    self.btn_session.Bind(wx.EVT_LEFT_UP, self.onStartStopSession)
    posY += b_space
    self.btn_trial = wx.Button(self.panel,
                               -1,
                               label='Start trial',
                               pos=(posX, posY),
                               size=(btn_width, -1))
    self.btn_trial.Bind(wx.EVT_LEFT_UP, self.onStartTrial)
    posY += b_space + 10
    _stxt = wx.StaticText(self.panel,
                          -1,
                          label="Leave a note in LOG file",
                          pos=(posX + 5, posY))
    _stxt.SetForegroundColour('#CCCCCC')
    posY += 20
    # free-text note box; Enter key handled by onEnterInTextCtrl
    self.txt_notes = wx.TextCtrl(self.panel,
                                 -1,
                                 name='txt_notes',
                                 pos=(posX + 5, posY),
                                 size=(btn_width, -1),
                                 style=wx.TE_PROCESS_ENTER)
    self.txt_notes.Bind(wx.EVT_TEXT_ENTER, self.onEnterInTextCtrl)
    posY += b_space + 10
    _stxt = wx.StaticText(self.panel,
                          -1,
                          label="Send a direct command to Arduino",
                          pos=(posX + 5, posY))
    _stxt.SetForegroundColour('#CCCCCC')
    posY += 20
    # raw command box; also dispatched by onEnterInTextCtrl
    self.txt_arduino = wx.TextCtrl(self.panel,
                                   -1,
                                   name='txt_arduino',
                                   pos=(posX + 5, posY),
                                   size=(btn_width, -1),
                                   style=wx.TE_PROCESS_ENTER)
    self.txt_arduino.Bind(wx.EVT_TEXT_ENTER, self.onEnterInTextCtrl)
    posY += b_space + 10
    self.btn_quit = wx.Button(self.panel,
                              -1,
                              label='QUIT',
                              pos=(posX, posY),
                              size=(btn_width, -1))
    self.btn_quit.Bind(wx.EVT_LEFT_UP, self.onClose)
    posX = 170
    posY = 15
    # time since program-start
    self.sTxt_pr_time = wx.StaticText(self.panel,
                                      -1,
                                      label='0:00:00',
                                      pos=(posX, posY))
    _x = self.sTxt_pr_time.GetPosition()[0] + \
         self.sTxt_pr_time.GetSize()[0] + 15
    _stxt = wx.StaticText(self.panel,
                          -1,
                          label='since program started',
                          pos=(_x, posY))
    _stxt.SetForegroundColour('#CCCCCC')
    self.sTxt_pr_time.SetFont(self.font)
    self.sTxt_pr_time.SetBackgroundColour('#000000')
    self.sTxt_pr_time.SetForegroundColour('#00FF00')
    posY += b_space
    # time since session-start
    self.sTxt_s_time = wx.StaticText(self.panel,
                                     -1,
                                     label='0:00:00',
                                     pos=(posX, posY))
    _x = self.sTxt_s_time.GetPosition()[0] + \
         self.sTxt_s_time.GetSize()[0] + 15
    _stxt = wx.StaticText(self.panel,
                          -1,
                          label='since session started',
                          pos=(_x, posY))
    _stxt.SetForegroundColour('#CCCCCC')
    self.sTxt_s_time.SetFont(self.font)
    self.sTxt_s_time.SetBackgroundColour('#000000')
    self.sTxt_s_time.SetForegroundColour('#CCCCFF')
    posY += b_space
    # time since the last stimulus was played
    self.sTxt_time = wx.StaticText(self.panel,
                                   -1,
                                   label='0:00:00',
                                   pos=(posX, posY))
    _x = self.sTxt_time.GetPosition()[0] + \
         self.sTxt_time.GetSize()[0] + 15
    _stxt = wx.StaticText(self.panel,
                          -1,
                          label='since last stimulus',
                          pos=(_x, posY))
    _stxt.SetForegroundColour('#CCCCCC')
    self.sTxt_time.SetFont(self.font)
    self.sTxt_time.SetBackgroundColour('#000000')
    self.sTxt_time.SetForegroundColour('#FFFF00')
    statbar = wx.StatusBar(self, -1)
    self.SetStatusBar(statbar)
    ##### [end] user interface setup -----
    ### keyboard binding
    quit_btnId = wx.NewIdRef(count=1)
    session_btnId = wx.NewIdRef(count=1)
    note_btnId = wx.NewIdRef(count=1)
    self.Bind(wx.EVT_MENU, self.onClose, id=quit_btnId)
    self.Bind(wx.EVT_MENU, self.onStartStopSession, id=session_btnId)
    self.Bind(wx.EVT_MENU, self.onEnterNote, id=note_btnId)
    accel_tbl = wx.AcceleratorTable([
        (wx.ACCEL_CTRL, ord('S'), session_btnId),
        (wx.ACCEL_CTRL, ord('N'), note_btnId),
        (wx.ACCEL_CTRL, ord('Q'), quit_btnId)
    ])
    self.SetAcceleratorTable(accel_tbl)
    ### set timer for processing message and
    ### updating the current running time
    self.timer = wx.Timer(self)
    self.Bind(wx.EVT_TIMER, self.onTimer, self.timer)
    self.timer.Start(100)
    if flagMods["arduino"]: self.start_arduino()
    self.txt_notes.SetFocus()
    self.Bind(wx.EVT_CLOSE, self.onClose)
    writeFile(
        self.log_file_path,
        "%s, [CATOS], Begining of the program\n" % (get_time_stamp()))
def stop_arduino(self):
    """Release the Arduino module slot and note it in the log."""
    self.mods["arduino"] = None
    log = '%s, [CATOS], arduino mod finished.\n'%(get_time_stamp())
    writeFile(self.log_file_path, log)
def run(self, flag_chk_cam_view=False, flag_feeder=False):
    """Main loop of one webcam thread.

    Calibrates against a red tape line on the side monitors, then
    diffs each frame against a stored background image to find the
    subject and report 'close_to_screen' positions (left/center/
    right) to the session manager. Exits on a 'quit' message.

    flag_chk_cam_view: True == only display frames, no detection.
    flag_feeder: accepted for interface compatibility (unused here).
    """
    fSz = self.fSize  # frame size
    msg = ''
    fps=0; prev_fps=[]; prev_fps_time=time()
    mod_name = 'videoIn-%i'%(self.cam_idx)
    first_run = True
    recent_imgs = []  # buffer to store 60 recent frames
    recent_m = []  # storing whether meaningful movements
    # happened in the recent 60 frames
    recent_m_time = -1  # time when movements were enough
    # to start video recording
    log = "%s, [%s],"%(get_time_stamp(), mod_name)
    log += " webcam %i starts."%(self.cam_idx)
    log += " Frame-size: %s\n"%(str(fSz))
    writeFile(self.parent.log_file_path, log)
    sleep(1)
    for i in range(10):
        ret, frame_arr = self.cap_cam.read()  # retrieve some images
        # giving some time to camera to adjust
    ### find ROI with red color
    ### (red tape is attached on bottom of side monitors)
    r = (0, 0) + fSz  # rect to find the color
    HSV_min = (175,100,90)
    HSV_max = (180,255,255)
    red_col = self.find_color(r, frame_arr, HSV_min, HSV_max, (0,0,0))
    wr, rects = self.chk_contours(red_col, self.contour_threshold)
    if wr == (-1,-1,0,0):
        # calibration failed; redY == -1 disables distance checks
        writeFile(self.parent.log_file_path,
            "%s, [%s], Red color detection failed.\n"%(get_time_stamp(),
                                                       mod_name))
        redY = -1
    else:
        redY = int(wr[1]+wr[3]/2)  # middle y position of red tape
    bgImg = frame_arr.copy()  # store background image
    while True:
        fps, prev_fps, prev_fps_time = chk_fps(mod_name, fps, prev_fps,
                                               prev_fps_time,
                                               self.parent.log_file_path)
        ret, frame_arr = self.cap_cam.read()  # get a new frame
        if ret == False: sleep(0.1); continue
        recent_imgs.append(frame_arr)
        if len(recent_imgs) > 60: recent_imgs.pop(0)
        recent_m.append(False)
        if len(recent_m) > 60: recent_m.pop(0)
        if flag_chk_cam_view == False:
            ### extract subject image by obtaining difference image
            ### between the frame_arr and bgImg
            diff = cv2.absdiff(frame_arr, bgImg)
            diff = cv2.cvtColor(diff, cv2.COLOR_BGR2GRAY)
            __, diff = cv2.threshold(diff, 50, 255, cv2.THRESH_BINARY)
            kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3,3))
            diff = cv2.morphologyEx(diff, cv2.MORPH_OPEN, kernel,
                                    iterations=1)  # decrease noise and
            # minor features
            #M = cv2.moments(diff)
            #print self.cam_idx, M['m00']/255
            diff = cv2.Canny(diff, 150, 150)
            sbr, rects = self.chk_contours(diff.copy(), 20)  # sbr = subject
            # bounding rect
            if sbr != (-1,-1,0,0):
                cv2.rectangle(frame_arr, sbr[:2],
                              (sbr[0]+sbr[2],sbr[1]+sbr[3]), (0,255,0), 2)
                dist_to_s = sbr[1]-redY  # distance from red tape(screen)
                # to the subject
                msg = None
                if self.cam_idx == 1:  # center screen
                    if dist_to_s < 10: msg = 'center'
                    else:
                        # decide left/right from the horizontal position
                        sMid = int(sbr[0] + sbr[2]/2)
                        if sMid < int(fSz[0]/6): msg='left'
                        elif sMid > int(fSz[0]-fSz[0]/6): msg='right'
                else:
                    if dist_to_s < 100:  # close to the screen
                        if self.cam_idx == 0: msg='left'
                        else: msg='right'
                if msg != None and self.parent.mods["session_mngr"] != None:
                    # notify the session manager
                    self.parent.mods["session_mngr"].msg_q.put(
                        "%s/close_to_screen/%s"%(mod_name,msg), True, None)
            # red color bottom line
            cv2.line(frame_arr, (0,redY), (640,redY), (0,255,255), 2)
        else:
            # chk_cam_view
            pass
        if self.flagWindow:
            if flag_chk_cam_view:
                cv2.imshow("CATOS_CAM%.2i"%(self.cam_idx), frame_arr)
            else:
                cv2.imshow("CATOS_CAM%.2i"%(self.cam_idx), frame_arr)
            cv2.waitKey(5)
        # listen to a message
        msg_src, msg_body, msg_details = chk_msg_q(self.msg_q)
        if msg_body == 'quit': break
    self.cap_cam.release()
    if self.flagWindow:
        cv2.destroyWindow("CATOS_CAM%.2i"%(self.cam_idx))
    log = "%s, [%s],"%(get_time_stamp(), mod_name)
    log += " webcam %i stopped.\n"%(self.cam_idx)
    writeFile(self.parent.log_file_path, log)
    if self.video_rec != None: self.video_rec.release()
def start_arduino(self):
    """Create the Arduino module; warn and close the app when no board
    is connected."""
    arduino = Arduino(self, self.output_folder)
    self.mods["arduino"] = arduino
    if arduino.aConn == None:
        # no serial connection could be opened
        show_msg('Arduino chip is not found.\nPlease connect it and retry.',
                 self.panel)
        self.onClose(None)
    writeFile(self.log_file_path,
              '%s, [CATOS], arduino mod init.\n'%(get_time_stamp()))
def run(self):
    """Main loop of the audio-in thread (Python 2 syntax).

    Continuously reads microphone blocks into a ring buffer, applies
    an FFT-based loudness/concentration test, and records to a WAV
    file while the signal stays 'valid'; recording stops after
    self.stop_latency seconds of silence. Exits on a 'main/quit'
    message.
    """
    aDataBuff = []  # buffer for audio data
    r_cnt = 0  # counting how many new frames were appended after last writing to WAV
    last_valid_time = -1  # last time when data was valid to record
    # writing to WAV file occurs once per second
    snd_file = None
    is_recording = False
    fps = 0
    prev_fps = []
    prev_fps_time = time()
    stream = self.open_mic_stream()
    writeFile(self.parent.log_file_path,
              "%s, [audioIn], 'run' starts.\n" % (get_time_stamp()))
    num_of_IOErr = 0  # only the first 10 IOErrors are logged
    while True:
        fps, prev_fps, prev_fps_time = chk_fps("audioIn", fps, prev_fps,
                                               prev_fps_time,
                                               self.parent.log_file_path)
        msg_src, msg_body, msg_details = chk_msg_q(self.msg_q)
        # listen to a message
        if msg_src == "main":
            if msg_body == "quit":
                # flush whatever is pending before leaving
                if is_recording == True and r_cnt > 0:
                    snd_file = self.finish_rec(aDataBuff, snd_file,
                                               r_cnt, prev_fps)
                    is_recording = False
                break
        try:
            ### get audio data
            aDataBuff.append(
                np.fromstring(
                    stream.read(self.rp["input_frames_per_block"],
                                exception_on_overflow=False),
                    dtype=np.short).tolist())
            if len(aDataBuff) > self.buff_sz:
                aDataBuff.pop(0)
            ### record to file
            if is_recording == True:
                r_cnt += 1
                # flush roughly every 2 seconds of buffered blocks
                if r_cnt > (self.fps * 2):
                    snd_file.writeframes(
                        np.array(aDataBuff[-(self.fps * 2) :],
                                 dtype=np.int16).tostring())
                    r_cnt = 0
            ### check data to record
            _fData = np.asarray(abs(np.fft.fft(aDataBuff[-1]))
                                [: self.rp["input_frames_per_block"] / 2])
            _d = _fData / self.dMax * 100  # data range 0~100
            _d = _d[self.cutoff_hz / self.rp["freq_res"] :]
            # cut off low frequency data
            if np.sum(_d) > _d.shape[0] and \
               np.average(_d) > (np.median(_d) * 1.5):
                # Sum of data is bigger than the length of data :
                #   each data is bigger than 1 on average
                # Average is bigger than median*1.5 :
                #   amplitude is more concentrated in some areas
                last_valid_time = time()
                if is_recording == False:  # not recording
                    ### start recording
                    is_recording = True
                    r_cnt = 0
                    n_ = datetime.now()
                    # one output sub-folder per day
                    folder = path.join(
                        self.parent.output_folder,
                        "%.4i_%.2i_%.2i" % (n_.year, n_.month, n_.day))
                    if path.isdir(folder) == False: mkdir(folder)
                    wav_fp = path.join(folder,
                                       "%s.wav" % (get_time_stamp()))
                    snd_file = wave.open(wav_fp, "wb")
                    snd_file.setparams(
                        (
                            self.rp["channels"],
                            self.rp["sampWidth"],
                            self.rp["sampleRate"],
                            0,
                            "NONE",
                            "noncompressed",
                        )
                    )
                    # seed the file with the last ~2 s of buffered audio
                    snd_file.writeframes(
                        np.array(aDataBuff[-(self.fps * 2) :],
                                 dtype=np.int16).tostring())
                    writeFile(
                        self.parent.log_file_path,
                        "%s, [audioIn], start to write WAV, %s.\n"
                        % (get_time_stamp(), wav_fp),
                    )
            else:
                if is_recording == True:  # currently recording
                    if (
                        time() - last_valid_time > self.stop_latency
                    ):  # there was no valid data to record for some time
                        ### stop recording
                        is_recording = False
                        snd_file = self.finish_rec(aDataBuff, snd_file,
                                                   r_cnt, prev_fps)
        except IOError, e:
            if num_of_IOErr < 10:
                msg_ = "%s, [audioIn], IOError : %s\n" % (get_time_stamp(), e)
                writeFile(self.parent.log_file_path, msg_)
            num_of_IOErr += 1
        sleep(self.input_block_time / 2)
def onTimer(self, event):
    ''' Timer for checking message and processing with the current state.

    Drains the message queue into per-tick flags, then — while a trial
    is running — moves the sound source with the floating objects,
    finishes the trial on a touch, triggers feeding in 'feed'
    sessions, and redirects object tracks in 'immersion' sessions
    depending on which screen the subject approaches.
    '''
    ### retrieve messages
    flag_foMove = False
    flag_stim_touched = False
    close_to_rScreen = False
    close_to_cScreen = False
    close_to_lScreen = False
    while not self.msg_q.empty():
        # listen to a message
        msg_src, msg_body, msg_details = chk_msg_q(self.msg_q)
        if msg_body == 'foMove':
            flag_foMove = True
            fo_pos = [
                int(msg_details[0]),
                int(msg_details[1]),
                int(msg_details[2])
            ]
        elif msg_body == 'close_to_screen':
            # movement happened around a screen
            if msg_details[0] == 'left': close_to_lScreen = True
            if msg_details[0] == 'center': close_to_cScreen = True
            if msg_details[0] == 'right': close_to_rScreen = True
        elif msg_body == 'stim_touched':
            flag_stim_touched = True
    ### processing with the current state and received messages
    if self.state == 'inTrial':
        if flag_foMove == True:
            # group FOs is located in a differect section of the screen,
            # sound source should move accordingly
            if self.session_type == 'immersion':
                # move sound source position
                self.parent.mods["audioOut"].move(fo_pos)
        if flag_stim_touched == True:
            # stimulus was touched
            if self.session_type == 'immersion':
                self.parent.mods["audioOut"].stop()
                # play positive feedback sound
                self.parent.mods["audioOut"].play(1, False)
            if self.parent.mods["videoOut"].timer != None:
                self.parent.mods["videoOut"].timer.Stop()
            self.parent.mods["videoOut"].flag_trial = False
            self.parent.mods["videoOut"].panel.Refresh()
            #self.parent.stop_mods(mod='videoIn') # stop webcam
            writeFile(
                self.parent.log_file_path,
                '%s, [session_mngr], Trial finished.\n' % (get_time_stamp()))
            self.parent.mods["arduino"].send("feed".encode())
            writeFile(
                self.parent.log_file_path,
                '%s, [session_mngr], Feed message sent.\n' % (get_time_stamp()))
            self.state = 'pause'  # pause for ITI
            wx.CallLater(self.ITI, self.init_trial)
        if self.session_type == 'feed':
            # periodic feeding at randomized intervals
            if time() - self.last_feed_time >= self.feed_intv:
                self.parent.mods["arduino"].send("feed".encode())
                log = "%s, [session_mngr]," % (get_time_stamp())
                log += "Feed message sent.\n"
                writeFile(self.parent.log_file_path, log)
                self.state = 'pause'  # pause for ITI
                wx.CallLater(self.ITI, self.init_trial)
                self.last_feed_time = time()
        if self.session_type == 'immersion':
            if close_to_lScreen or close_to_cScreen or close_to_rScreen:
                # subject is close to a screen
                voMod = self.parent.mods["videoOut"]
                screenCtr = voMod.wSize[0] / 2
                dest = (randint(voMod.ctr_rect[0], voMod.ctr_rect[2]),
                        randint(voMod.ctr_rect[1], voMod.ctr_rect[3]))
                #print close_to_lScreen, close_to_cScreen, close_to_rScreen
                if (close_to_rScreen and \
                    voMod.gctr[0] > (voMod.s_w[0]+voMod.s_w[1])) or \
                   (close_to_lScreen and \
                    voMod.gctr[0] < voMod.s_w[0]):
                    # objects are on the same side screen as the subject:
                    # send them to the center screen on a straight line
                    steps = randint(30, 40)  # determine steps to travel
                    # (shorter = faster movement)
                    ### set a new destination which should be
                    ### in the center screen / line, not a curve
                    for i in xrange(len(voMod.fo)):
                        orig_ = (voMod.fo[i]['track'][0][0],
                                 voMod.fo[i]['track'][0][1])
                        dest_ = (randint(dest[0] - 50, dest[0] + 50),
                                 randint(dest[1] - 50, dest[1] + 50))
                        tl = []
                        xstep = int(abs(dest_[0] - orig_[0]) / steps)
                        ystep = int(abs(dest_[1] - orig_[1]) / steps)
                        for j in xrange(steps):
                            if orig_[0] < dest_[0]: x = orig_[0] + xstep * j
                            else: x = orig_[0] - xstep * j
                            if orig_[1] < dest_[1]: y = orig_[1] + ystep * j
                            else: y = orig_[1] - ystep * j
                            tl.append((x, y))
                        voMod.fo[i]['track'] = tl
                elif close_to_cScreen and \
                     (voMod.s_w[0] <= voMod.gctr[0] <= \
                      voMod.s_w[0]+voMod.s_w[1]):
                    # subject is close to the center screen and
                    # the stimulus is already on the screen.
                    if self.ctrFOResetTime == -1 or \
                       (time()-self.ctrFOResetTime)>1:
                        # FO track was set before a half second ago.
                        # NOTE(review): comment says half second but the
                        # guard above uses > 1 second — confirm intent.
                        steps = randint(70, 120)  # steps to travel
                        # (shorter = faster movement)
                        # two random control points for a Bezier curve
                        h1 = (randint(voMod.ctr_rect[0] - 50,
                                      voMod.ctr_rect[2] + 50),
                              randint(voMod.ctr_rect[1] - 50,
                                      voMod.ctr_rect[3] + 50))
                        h2 = (randint(voMod.ctr_rect[0] - 50,
                                      voMod.ctr_rect[2] + 50),
                              randint(voMod.ctr_rect[1] - 50,
                                      voMod.ctr_rect[3] + 50))
                        for i in xrange(len(voMod.fo)):
                            orig_ = (voMod.fo[i]['track'][0][0],
                                     voMod.fo[i]['track'][0][1])
                            dest_ = (randint(dest[0] - 50, dest[0] + 50),
                                     randint(dest[1] - 50, dest[1] + 50))
                            h1_ = (randint(h1[0] - 50, h1[0] + 50),
                                   randint(h1[1] - 50, h1[1] + 50))
                            h2_ = (randint(h2[0] - 50, h2[0] + 50),
                                   randint(h2[1] - 50, h2[1] + 50))
                            voMod.fo[i]['track'] = make_b_curve_coord(
                                orig_, h1_, h2_, dest_, steps)
                        self.ctrFOResetTime = time()
def run(self, flag_chk_cam_view=False, flag_feeder=False):
    """Main capture loop of the videoIn module.

    Grabs frames from the webcam, detects movement contours inside the
    region-of-interest surrounded by the three screens, and appends
    aggregated movement statistics to a '<timestamp>_MovLog.txt' file
    every 10 seconds.  Runs until a 'quit' message arrives on
    self.msg_q, then releases the camera.

    Parameters
    ----------
    flag_chk_cam_view : bool
        When True, skip motion processing and the movement log; only
        show the raw camera view with the ROI outline drawn.
    flag_feeder : bool
        Accepted in the signature but not used in this body.
    """
    msg = ''  # NOTE(review): unused in this body
    fps=0; prev_fps=[]; prev_fps_time=time()
    mod_name = 'videoIn'
    first_run = True  # True until the running-average image is initialized
    # Aggregated movement measures, flushed to the log every 10 seconds:
    #   NMC = Number of Movement Contours. (Image moment is not used;
    #         the moment would be much bigger if the monkey is closer
    #         to the webcam.)
    #   Dist_b_MC = average distance between movement contours.
    #   CMC = average center of movement contours:
    #         (int(sum(contours.X)/NMC), int(sum(contours.Y)/NMC)).
    nmc = 0  # Number of movement contours
    dist_b_mc = []  # average Distance between movement contours
    cmcX = []  # X-pos of average Center of movement contours
    cmcY = []  # Y-pos of average Center of movement contours
    movLog_fp = path.join( self.parent.output_folder, '%s_MovLog.txt'%(get_time_stamp()) )  # movement log file
    if flag_chk_cam_view == False:
        # start a fresh movement-log file with a CSV header
        f = open(movLog_fp, 'w')
        f.write('timestamp, sum.NMC, avg.Dist_b_MC, avg.CMC-X, avg.CMC-Y\n')
        f.close()
    writeFile(self.parent.log_file_path, '%s, [%s], webcam %i starts. Frame-size: %s\n'%(get_time_stamp(), mod_name, self.cam_idx, str(self.fSize)))
    # Wait for ~1 second while retrieving webcam images.
    # (When the webcam is initialized, retrieved images change at the
    # beginning, and that change would be recognized as movement.)
    func_init_time = time()
    while time()-func_init_time < 1:
        ret, frame_arr = self.cap_cam.read()  # get a new frame
        cv2.waitKey(100)
    last_mov_log_time = time()
    while True:
        fps, prev_fps, prev_fps_time = chk_fps(mod_name, fps, prev_fps,
                                               prev_fps_time,
                                               self.parent.log_file_path)
        ret, frame_arr = self.cap_cam.read()  # get a new frame
        if ret == False: sleep(0.1); continue  # frame grab failed; retry
        if flag_chk_cam_view == False:
            grey_img = cv2.cvtColor(frame_arr, cv2.COLOR_RGB2GRAY)  # grey image
            grey_img = self.preprocessing(grey_img)  # preprocess the grey image
            ### leave only the area surrounded by three screens
            mask = np.zeros( (grey_img.shape[0], grey_img.shape[1]) , dtype=np.uint8 )
            cv2.fillConvexPoly(mask, np.asarray(self.roi_pts), 255)
            grey_img = cv2.bitwise_and( grey_img, grey_img, mask=mask )
            ### processing of motion around screens
            if first_run == True:
                # initialize the running-average background image
                first_run = False
                grey_avg = cv2.convertScaleAbs(grey_img)
                grey_avg = grey_avg.astype(np.float32)
            else:
                # update the background with the current frame
                cv2.accumulateWeighted(grey_img, grey_avg, 0.8)
            ### contour of movements
            grey_tmp = cv2.convertScaleAbs(grey_avg)
            grey_diff = cv2.absdiff(grey_img, grey_tmp)  # diff from background
            grey_diff = cv2.Canny(grey_diff, 10, 15)  # edges of the diff
            wrect, rects = self.chk_contours(grey_diff,
                                             self.contour_threshold)
            if (self.cam_idx in self.parent.cam_idx) and (rects != []) and (self.m_wrectTh[0] < wrect[2]+wrect[3] < self.m_wrectTh[1]):
                # if this is a cam for watching the subject and
                # there's a meaningful movement
                nmc += len(rects)
                sumX = 0; sumY = 0
                sum_dist_b_mc = 0
                for ri in range(len(rects)):
                    _r = rects[ri]
                    _x = _r[0]+_r[2]/2; _y = _r[1]+_r[3]/2  # contour center
                    cv2.circle(grey_img, (_x,_y), 5, 200, 2)
                    if ri > 0:
                        # draw a line to the previous contour center and
                        # accumulate the distance between the two
                        _pr = rects[ri-1]
                        _x2 = _pr[0]+_pr[2]/2; _y2 = _pr[1]+_pr[3]/2
                        cv2.line(grey_img, (_x,_y), (_x2,_y2), 200, 1)
                        sum_dist_b_mc += np.sqrt( abs(_x-_x2)**2 + abs(_y-_y2)**2 )
                    sumX += _x; sumY += _y
                    #cv2.rectangle(grey_img, (_r[0],_r[1]), (_r[0]+_r[2],_r[1]+_r[3]), 255, 1)
                avgX = sumX/len(rects); avgY = sumY/len(rects)
                cmcX.append(avgX); cmcY.append(avgY)
                dist_b_mc.append(sum_dist_b_mc/len(rects))
            else:
                # there's no meaningful movement
                pass
            if time()-last_mov_log_time > 10:  # every 10 seconds
                ### record the movement data
                f = open(movLog_fp, 'a')
                if nmc > 0:
                    f.write( '%s, %i, %i, %i, %i\n'%(get_time_stamp(), nmc, int(np.average(dist_b_mc)), int(np.average(cmcX)), int(np.average(cmcY))) )
                else:
                    f.write( '%s, 0, 0, 0, 0\n'%(get_time_stamp()) )
                f.close()
                nmc=0; dist_b_mc=[]; cmcX=[]; cmcY=[]  # init accumulators
                last_mov_log_time = time()
        else:
            # chk_cam_view
            ### draw ROI lines
            for i in range(len(self.roi_pts)):
                pt1 = self.roi_pts[(i-1)]
                pt2 = self.roi_pts[i]
                cv2.line(frame_arr, pt1, pt2, (0,0,255), 2)
        if flag_window == True:
            if flag_chk_cam_view == True:
                cv2.imshow("CATOS_CAM%.2i"%(self.cam_idx), frame_arr)
            else:
                cv2.imshow("CATOS_CAM%.2i"%(self.cam_idx), grey_img)
            cv2.waitKey(5)
        msg_src, msg_body, msg_details = chk_msg_q(self.msg_q)  # listen to a message
        if msg_body == 'quit': break
    self.cap_cam.release()
    if flag_window == True:
        cv2.destroyWindow("CATOS_CAM%.2i"%(self.cam_idx))
    log_ = '%s, [%s], webcam %i stopped.\n'%(get_time_stamp(), mod_name, self.cam_idx)
    writeFile(self.parent.log_file_path, log_)
def __init__(self, parent):
    """Build the full-screen videoOut frame spanning all attached displays.

    Sets up the moving-object ('fo') parameters, measures the attached
    screens' geometry, creates a borderless wx.Frame covering them,
    hides the cursor, and binds paint/mouse/close/keyboard handlers.

    Parameters
    ----------
    parent : object
        Owning app object; must provide log_file_path, mods and
        onStartStopSession.
    """
    self.parent = parent
    self.fo = []  # list of moving-object dicts (track/rad/rad_change/col)
    self.refresh_rate = 60
    self.scr_sec = [ 11, 5 ]  # number of sections in screen [horizontal, vertical]
    self.gr = (-1, -1, -1, -1)  # group rect (x,y,w,h)
    self.gctr = (-1, -1)  # center point of group
    self.flag_trial = True  # in trial or NOT
    self.aPos = ( -1, -1)  # animal subject's current position. (-1,-1)=out of sight
    self.bgCol = '#777777'
    self.timer = None
    self.fo_min_rad = 15  # minimum radius of an object
    self.fo_max_rad = 25  # maximum radius
    self.fo_min_spd = 25  # minimum number of pixels to move per frame
    self.fo_max_spd = 30  # maximum number of pixels
    self.flag_rad_change = True  # random radius change
    mode = 'test'
    posX = []
    self.s_w = []  # screen widths of the first 3 screens
    self.s_h = []  # screen heights of the first 3 screens
    screenCnt = wx.Display.GetCount()
    # * Note that this module is meant
    # to work with three screens, surrounding subject
    for i in range(screenCnt):
        g = wx.Display(i).GetGeometry()
        posX.append(g[0])
        self.s_w.append(g[2])
        self.s_h.append(g[3])
    #self.wPos = (min(posX), 0)
    self.wPos = (-1, 0)
    if mode == 'debug':
        # debug: shorter window so the desktop stays reachable
        self.wSize = (sum(self.s_w), max(self.s_h) / 3 * 2)
        dp_size = (sum(self.s_w), max(self.s_h) / 3 * 2)
    else:
        self.wSize = (sum(self.s_w), max(self.s_h) - 40)
        dp_size = (sum(self.s_w), max(self.s_h) - 40)
    self.ctr_rect = [
        self.wSize[0] / 2 - 50, self.wSize[1] / 2 - 50,
        self.wSize[0] / 2 + 50, self.wSize[1] / 2 + 50
    ]  # center rect; x1,y1,x2,y2
    wx.Frame.__init__(self, None, -1, '', pos=self.wPos, size=dp_size,
                      style=wx.NO_FULL_REPAINT_ON_RESIZE)
    self.SetPosition(self.wPos)
    self.SetSize(dp_size)
    self.SetBackgroundColour(self.bgCol)
    self.panel = wx.Panel(self, pos=(0, 0), size=self.wSize)
    self.panel.SetBackgroundColour(self.bgCol)
    cursor = wx.Cursor(wx.CURSOR_BLANK)
    self.panel.SetCursor(cursor)  # hide cursor
    self.panel.Bind(wx.EVT_PAINT, self.onPaint)
    self.panel.Bind(wx.EVT_LEFT_DOWN, self.onMouseDown)
    self.Bind(wx.EVT_CLOSE, self.onClose)
    ### keyboard binding (CTRL+Q ends/starts a session via the parent)
    # NOTE: removed an unused 'playStim_btnId = wx.NewId()' local that
    # was never bound nor added to the accelerator table.
    quit_btnId = wx.NewId()
    self.Bind(wx.EVT_MENU, self.parent.onStartStopSession, id=quit_btnId)
    accel_tbl = wx.AcceleratorTable([(wx.ACCEL_CTRL, ord('Q'), quit_btnId)
                                     ])
    self.SetAcceleratorTable(accel_tbl)
    writeFile(self.parent.log_file_path,
              '%s, [videoOut], videoOut mod init.\n' % (get_time_stamp()))
def stop(self): if self.snd_src != None: self.sink.stop(self.snd_src) self.sink.update() self.snd_src = None writeFile(self.parent.log_file_path, '%s, [audioOut], sound stopped.\n'%(get_time_stamp()))
def stop(self): self.pa.terminate() writeFile(self.parent.log_file_path, "%s, [audioIn], audioIn mod stopped.\n" % (get_time_stamp()))
def stop_arduino(self): if DEBUG: print('CATOSFrame.stop_arduino()') self.mods["arduino"] = None writeFile(self.log_file_path, "%s, [CATOS], arduino mod finished.\n" % (get_time_stamp()))
def __init__(self):
    """Build the main CATOS control frame.

    Creates the output folder and log file, initializes module/session
    state, lays out the control buttons, note/Arduino text fields and
    elapsed-time status texts, binds CTRL+Q to quit, starts the 100 ms
    message/clock timer and (optionally) the Arduino module.
    """
    ### output folder check
    output_folder = path.join(CWD, 'output')
    if path.isdir(output_folder) == False: mkdir(output_folder)
    self.output_folder = output_folder
    ### opening log file
    self.log = ""
    self.log_file_path = get_log_file_path(output_folder)
    writeFile(self.log_file_path,
              '%s, [CATOS], Begining of the program\n'%(get_time_stamp()))
    ### init variables
    self.mods = dict(arduino=None,
                     videoIn=[],
                     feeder_videoIn=[],
                     videoOut=None,
                     audioOut=None,
                     session_mngr=None)
    self.program_start_time = time()
    self.session_start_time = -1  # -1 = not currently in a session
    self.last_play_time = -1  # last stimulus play time
    self.cam_idx = [0]  # webcam indices
    self.cam_idx_feeder = []  # webcam index for the feeder
    self.cam_view_pos = [wx.GetDisplaySize()[0]-1200, 30]  # position of webcam view checking windows
    print self.cam_view_pos  # NOTE(review): leftover debug print (Python 2 statement)
    self.msg_q = Queue.Queue()  # inter-module message queue
    #self.feeder_phase = -1 # 0 : running two conveyors, 1: running the bottom conveyors, 2: sweep of servor motor, 3: ready to dispense
    self.w_size = (400, 300)
    wx.Frame.__init__(self, None, -1, 'CATOS', size=self.w_size)  # init frame
    self.SetPosition( (wx.GetDisplaySize()[0]-self.w_size[0]-1200, 30) )
    self.Show(True)
    self.panel = wx.Panel(self, pos=(0,0), size=self.w_size)
    self.panel.SetBackgroundColour('#000000')
    ### user interface setup (left column: buttons and text inputs)
    posX = 5
    posY = 10
    btn_width = 150
    b_space = 30  # vertical spacing between rows of widgets
    self.btn_cam = wx.Button(self.panel,
                             -1,
                             label='Check webcam views',
                             pos=(posX,posY),
                             size=(btn_width, -1))
    self.btn_cam.Bind(wx.EVT_LEFT_UP, self.onChkWebcamView)
    posY += b_space
    self.btn_session = wx.Button(self.panel,
                                 -1,
                                 label='Start session',
                                 pos=(posX,posY),
                                 size=(btn_width, -1))
    self.btn_session.Bind(wx.EVT_LEFT_UP, self.onStartStopSession)
    posY += b_space
    self.btn_trial = wx.Button(self.panel,
                               -1,
                               label='Start trial',
                               pos=(posX,posY),
                               size=(btn_width, -1))
    self.btn_trial.Bind(wx.EVT_LEFT_UP, self.onStartTrial)
    posY += b_space + 10
    _stxt = wx.StaticText(self.panel,
                          -1,
                          label="Leave a note in LOG file",
                          pos=(posX+5,posY))
    _stxt.SetForegroundColour('#CCCCCC')
    posY += 20
    self.txt_notes = wx.TextCtrl(self.panel,
                                 -1,
                                 name='txt_notes',
                                 pos=(posX+5,posY),
                                 size=(btn_width,-1),
                                 style=wx.TE_PROCESS_ENTER)
    self.txt_notes.Bind(wx.EVT_TEXT_ENTER, self.onEnterInTextCtrl)
    posY += b_space + 10
    _stxt = wx.StaticText(self.panel,
                          -1,
                          label="Send a direct command to Arduino",
                          pos=(posX+5,posY))
    _stxt.SetForegroundColour('#CCCCCC')
    posY += 20
    self.txt_arduino = wx.TextCtrl(self.panel,
                                   -1,
                                   name='txt_arduino',
                                   pos=(posX+5,posY),
                                   size=(btn_width,-1),
                                   style=wx.TE_PROCESS_ENTER)
    self.txt_arduino.Bind(wx.EVT_TEXT_ENTER, self.onEnterInTextCtrl)
    posY += b_space + 10
    self.btn_quit = wx.Button(self.panel,
                              -1,
                              label='QUIT',
                              pos=(posX,posY),
                              size=(btn_width, -1))
    self.btn_quit.Bind(wx.EVT_LEFT_UP, self.onClose)
    ### elapsed-time status texts (right column)
    posX = 170
    posY = 15
    self.sTxt_pr_time = wx.StaticText(self.panel,
                                      -1,
                                      label='0:00:00',
                                      pos=(posX, posY))  # time since program starts
    _x = self.sTxt_pr_time.GetPosition()[0] + self.sTxt_pr_time.GetSize()[0] + 15
    _stxt = wx.StaticText(self.panel,
                          -1,
                          label='since program started',
                          pos=(_x, posY))
    _stxt.SetForegroundColour('#CCCCCC')
    self.font = wx.Font(12, wx.FONTFAMILY_DEFAULT, wx.NORMAL, wx.NORMAL)
    self.sTxt_pr_time.SetFont(self.font)
    self.sTxt_pr_time.SetBackgroundColour('#000000')
    self.sTxt_pr_time.SetForegroundColour('#00FF00')
    posY += b_space
    self.sTxt_s_time = wx.StaticText(self.panel,
                                     -1,
                                     label='0:00:00',
                                     pos=(posX, posY))  # time since session starts
    _x = self.sTxt_s_time.GetPosition()[0] + self.sTxt_s_time.GetSize()[0] + 15
    _stxt = wx.StaticText(self.panel,
                          -1,
                          label='since session started',
                          pos=(_x, posY))
    _stxt.SetForegroundColour('#CCCCCC')
    self.sTxt_s_time.SetFont(self.font)
    self.sTxt_s_time.SetBackgroundColour('#000000')
    self.sTxt_s_time.SetForegroundColour('#CCCCFF')
    posY += b_space
    self.sTxt_time = wx.StaticText(self.panel,
                                   -1,
                                   label='0:00:00',
                                   pos=(posX, posY))  # time since last stimulus
    _x = self.sTxt_time.GetPosition()[0] + self.sTxt_time.GetSize()[0] + 15
    _stxt = wx.StaticText(self.panel,
                          -1,
                          label='since last stimulus',
                          pos=(_x, posY))
    _stxt.SetForegroundColour('#CCCCCC')
    self.sTxt_time.SetFont(self.font)
    self.sTxt_time.SetBackgroundColour('#000000')
    self.sTxt_time.SetForegroundColour('#FFFF00')
    statbar = wx.StatusBar(self, -1)
    self.SetStatusBar(statbar)
    ### keyboard binding
    quit_btnId = wx.NewId()
    playStim_btnId = wx.NewId()  # NOTE(review): unused; no handler bound
    self.Bind(wx.EVT_MENU, self.onClose, id=quit_btnId)
    accel_tbl = wx.AcceleratorTable([ (wx.ACCEL_CTRL, ord('Q'), quit_btnId) ])
    self.SetAcceleratorTable(accel_tbl)
    ### set timer for processing message and updating the current running time
    self.timer = wx.Timer(self)
    self.Bind(wx.EVT_TIMER, self.onTimer, self.timer)
    self.timer.Start(100)
    #wx.FutureCall(100, self.init_timers) # init other timers
    self.Bind( wx.EVT_CLOSE, self.onClose )
    if flag_arduino == True: self.start_arduino()
def onClose(self, event): if self.timer != None: self.timer.Stop() writeFile( self.parent.log_file_path, '%s, [videoOut], videoOut mod finished.\n' % (get_time_stamp())) self.Destroy()