def run(self, flag_chk_cam_view=False, flag_feeder=False):
    ''' Webcam capture loop for one camera.

    Locates the red tape marking the bottom of the side monitors, then on
    every frame extracts the subject via background subtraction and tells
    the session manager when the subject is close to a screen.

    flag_chk_cam_view: when True, skip detection and only show raw frames.
    flag_feeder: accepted for interface compatibility; not used in this block.
    Loop exits when a 'quit' message arrives on self.msg_q.
    '''
    fSz = self.fSize  # frame size (width, height)
    msg = ''
    fps = 0; prev_fps = []; prev_fps_time = time()
    mod_name = 'videoIn-%i'%(self.cam_idx)
    recent_imgs = []  # buffer to store 60 recent frames
    recent_m = []  # whether meaningful movements happened in the recent 60 frames
    # NOTE(review): only False is ever appended to recent_m in this block;
    # presumably updated elsewhere or vestigial - confirm.
    recent_m_time = -1  # time when movements were enough to start video recording
    log = "%s, [%s],"%(get_time_stamp(), mod_name)
    log += " webcam %i starts."%(self.cam_idx)
    log += " Frame-size: %s\n"%(str(fSz))
    writeFile(self.parent.log_file_path, log)
    sleep(1)
    for i in range(10):
        # retrieve some images, giving some time to camera to adjust
        ret, frame_arr = self.cap_cam.read()
    ### find ROI with red color
    ### (red tape is attached on bottom of side monitors)
    r = (0, 0) + fSz  # rect to find the color
    HSV_min = (175,100,90)
    HSV_max = (180,255,255)
    red_col = self.find_color(r, frame_arr, HSV_min, HSV_max, (0,0,0))
    wr, rects = self.chk_contours(red_col, self.contour_threshold)
    if wr == (-1,-1,0,0):
        writeFile(self.parent.log_file_path,
                  "%s, [%s], Red color detection failed.\n"%(get_time_stamp(), mod_name))
        redY = -1
    else:
        redY = int(wr[1]+wr[3]/2)  # middle y position of red tape
    bgImg = frame_arr.copy()  # store background image
    while True:
        fps, prev_fps, prev_fps_time = chk_fps(mod_name, fps, prev_fps,
                                               prev_fps_time,
                                               self.parent.log_file_path)
        ret, frame_arr = self.cap_cam.read()  # get a new frame
        if not ret:
            sleep(0.1); continue
        recent_imgs.append(frame_arr)
        if len(recent_imgs) > 60: recent_imgs.pop(0)
        recent_m.append(False)
        if len(recent_m) > 60: recent_m.pop(0)
        if flag_chk_cam_view == False:
            ### extract subject image by obtaining difference image
            ### between the frame_arr and bgImg
            diff = cv2.absdiff(frame_arr, bgImg)
            diff = cv2.cvtColor(diff, cv2.COLOR_BGR2GRAY)
            __, diff = cv2.threshold(diff, 50, 255, cv2.THRESH_BINARY)
            kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3,3))
            # decrease noise and minor features
            diff = cv2.morphologyEx(diff, cv2.MORPH_OPEN, kernel,
                                    iterations=1)
            diff = cv2.Canny(diff, 150, 150)
            sbr, rects = self.chk_contours(diff.copy(), 20)  # sbr = subject
                                                             # bounding rect
            if sbr != (-1,-1,0,0):
                cv2.rectangle(frame_arr, sbr[:2],
                              (sbr[0]+sbr[2], sbr[1]+sbr[3]), (0,255,0), 2)
                dist_to_s = sbr[1]-redY  # distance from red tape(screen)
                                         # to the subject
                msg = None
                if self.cam_idx == 1:  # center screen
                    if dist_to_s < 10:
                        msg = 'center'
                    else:
                        sMid = int(sbr[0] + sbr[2]/2)
                        if sMid < int(fSz[0]/6): msg = 'left'
                        elif sMid > int(fSz[0]-fSz[0]/6): msg = 'right'
                else:
                    if dist_to_s < 100:  # close to the screen
                        if self.cam_idx == 0: msg = 'left'
                        else: msg = 'right'
                if msg != None and self.parent.mods["session_mngr"] != None:
                    self.parent.mods["session_mngr"].msg_q.put(
                        "%s/close_to_screen/%s"%(mod_name, msg), True, None)
            # red color bottom line
            # (FIX: x end-point was hard-coded to 640; use the frame width)
            cv2.line(frame_arr, (0,redY), (fSz[0],redY), (0,255,255), 2)
        else:  # chk_cam_view
            pass
        if self.flagWindow:
            # (FIX: the former if/else showed the same frame_arr image in
            # both branches; duplication collapsed)
            cv2.imshow("CATOS_CAM%.2i"%(self.cam_idx), frame_arr)
            cv2.waitKey(5)
        # listen to a message
        msg_src, msg_body, msg_details = chk_msg_q(self.msg_q)
        if msg_body == 'quit': break
    self.cap_cam.release()
    if self.flagWindow:
        cv2.destroyWindow("CATOS_CAM%.2i"%(self.cam_idx))
    log = "%s, [%s],"%(get_time_stamp(), mod_name)
    log += " webcam %i stopped.\n"%(self.cam_idx)
    writeFile(self.parent.log_file_path, log)
    if self.video_rec != None:
        self.video_rec.release()
def run(self, flag_chk_cam_view=False, flag_feeder=False):
    ''' Webcam capture loop: detects movement inside the ROI surrounded
    by the three screens and appends aggregated movement statistics to a
    MovLog text file every 10 seconds.

    flag_chk_cam_view: when True, only display frames with the ROI lines
                       drawn, instead of running detection/logging.
    flag_feeder: not used in this block.
    Loop exits when a 'quit' message arrives on self.msg_q.
    '''
    msg = ''
    fps=0; prev_fps=[]; prev_fps_time=time()
    mod_name = 'videoIn'
    first_run = True  # True until the running-average image is initialized
    # Statistics accumulated per 10-second logging window:
    # - sum of NMC (Number of Movement Contours; image moment is not used
    #   because it would be much bigger when a monkey is closer to the webcam)
    # - average distance between movement contours
    # - average CMC (center of movement contours:
    #   (int(sum(contours.X)/NMC), int(sum(contours.Y)/NMC)))
    nmc = 0  # Number of movement contours
    dist_b_mc = []  # average Distance between movement contours
    cmcX = []  # X-pos of average Center of movement contours
    cmcY = []  # Y-pos of average Center of movement contours
    movLog_fp = path.join( self.parent.output_folder, '%s_MovLog.txt'%(get_time_stamp()) ) # movement log file
    if flag_chk_cam_view == False:
        # start a fresh movement-log file with a CSV header
        f = open(movLog_fp, 'w')
        f.write('timestamp, sum.NMC, avg.Dist_b_MC, avg.CMC-X, avg.CMC-Y\n')
        f.close()
    writeFile(self.parent.log_file_path, '%s, [%s], webcam %i starts. Frame-size: %s\n'%(get_time_stamp(), mod_name, self.cam_idx, str(self.fSize)))
    # Wait for a second while retrieving webcam images.
    # (When the webcam is initialized, retrieved images change at the
    # beginning, and that would be recognized as movements.)
    func_init_time = time()
    while time()-func_init_time < 1:
        ret, frame_arr = self.cap_cam.read() # get a new frame
        cv2.waitKey(100)
    last_mov_log_time = time()
    while True:
        fps, prev_fps, prev_fps_time = chk_fps(mod_name, fps, prev_fps, prev_fps_time, self.parent.log_file_path)
        ret, frame_arr = self.cap_cam.read() # get a new frame
        if ret == False:
            sleep(0.1); continue
        if flag_chk_cam_view == False:
            # NOTE(review): converted with COLOR_RGB2GRAY although OpenCV
            # captures are BGR, so the R/B grey weights are swapped -
            # confirm whether this is intended.
            grey_img = cv2.cvtColor(frame_arr, cv2.COLOR_RGB2GRAY) # grey image
            grey_img = self.preprocessing(grey_img) # preprocess the grey image
            ### leave only the area surrounded by three screens
            mask = np.zeros( (grey_img.shape[0], grey_img.shape[1]) , dtype=np.uint8 )
            cv2.fillConvexPoly(mask, np.asarray(self.roi_pts), 255)
            grey_img = cv2.bitwise_and( grey_img, grey_img, mask=mask )
            ### processing of motion around screens
            if first_run == True:
                # initialize the running-average background image
                first_run = False
                grey_avg = cv2.convertScaleAbs(grey_img)
                grey_avg = grey_avg.astype(np.float32)
            else:
                cv2.accumulateWeighted(grey_img, grey_avg, 0.8)
            ### contour of movements: difference between the current grey
            ### image and the running average, edge-detected
            grey_tmp = cv2.convertScaleAbs(grey_avg)
            grey_diff = cv2.absdiff(grey_img, grey_tmp)
            grey_diff = cv2.Canny(grey_diff, 10, 15)
            wrect, rects = self.chk_contours(grey_diff, self.contour_threshold)
            if (self.cam_idx in self.parent.cam_idx) and (rects != []) and (self.m_wrectTh[0] < wrect[2]+wrect[3] < self.m_wrectTh[1]):
                # this is a cam for watching the subject and
                # there's a meaningful movement
                nmc += len(rects)
                sumX = 0; sumY = 0
                sum_dist_b_mc = 0
                for ri in range(len(rects)):
                    _r = rects[ri]
                    # center of this contour rect
                    # (Python-2 integer division keeps coordinates int)
                    _x = _r[0]+_r[2]/2; _y = _r[1]+_r[3]/2
                    cv2.circle(grey_img, (_x,_y), 5, 200, 2)
                    if ri > 0:
                        # draw a line to the previous contour's center and
                        # accumulate the distance between the two centers
                        _pr = rects[ri-1]
                        _x2 = _pr[0]+_pr[2]/2; _y2 = _pr[1]+_pr[3]/2
                        cv2.line(grey_img, (_x,_y), (_x2,_y2), 200, 1)
                        sum_dist_b_mc += np.sqrt( abs(_x-_x2)**2 + abs(_y-_y2)**2 )
                    sumX += _x; sumY += _y
                    #cv2.rectangle(grey_img, (_r[0],_r[1]), (_r[0]+_r[2],_r[1]+_r[3]), 255, 1)
                avgX = sumX/len(rects); avgY = sumY/len(rects)
                cmcX.append(avgX); cmcY.append(avgY)
                dist_b_mc.append(sum_dist_b_mc/len(rects))
            else:
                # there's no meaningful movement
                pass
            if time()-last_mov_log_time > 10: # every 10 seconds
                ### record the movement data, then reset the accumulators
                f = open(movLog_fp, 'a')
                if nmc > 0:
                    f.write( '%s, %i, %i, %i, %i\n'%(get_time_stamp(), nmc, int(np.average(dist_b_mc)), int(np.average(cmcX)), int(np.average(cmcY))) )
                else:
                    f.write( '%s, 0, 0, 0, 0\n'%(get_time_stamp()) )
                f.close()
                nmc=0; dist_b_mc=[]; cmcX=[]; cmcY=[] # init
                last_mov_log_time = time()
        else: # chk_cam_view
            ### draw ROI lines
            for i in range(len(self.roi_pts)):
                pt1 = self.roi_pts[(i-1)]
                pt2 = self.roi_pts[i]
                cv2.line(frame_arr, pt1, pt2, (0,0,255), 2)
        # NOTE(review): 'flag_window' is not defined in this function (the
        # sibling videoIn variant uses self.flagWindow); unless a module
        # level 'flag_window' exists this raises NameError - confirm.
        if flag_window == True:
            if flag_chk_cam_view == True:
                cv2.imshow("CATOS_CAM%.2i"%(self.cam_idx), frame_arr)
            else:
                cv2.imshow("CATOS_CAM%.2i"%(self.cam_idx), grey_img)
            cv2.waitKey(5)
        msg_src, msg_body, msg_details = chk_msg_q(self.msg_q) # listen to a message
        if msg_body == 'quit': break
    self.cap_cam.release()
    if flag_window == True:
        cv2.destroyWindow("CATOS_CAM%.2i"%(self.cam_idx))
    log_ = '%s, [%s], webcam %i stopped.\n'%(get_time_stamp(), mod_name, self.cam_idx)
    writeFile(self.parent.log_file_path, log_)
def onTimer(self, event):
    ''' Refresh the panel to redraw the FO (floating object) group and
    update its movement data.

    Reports the FO group's screen-section index to the session manager
    when it changes, generates a new Bezier destination when the shared
    track runs out, advances each FO along its track, and optionally
    applies a random radius change per FO.
    event: wx timer event (unused directly).
    '''
    self.fps, self.prev_fps, self.prev_fps_time = chk_fps(
        'videoOut', self.fps, self.prev_fps, self.prev_fps_time,
        self.parent.log_file_path)
    self.calc_group_rect()  # calculate center point of the group
    self.panel.Refresh()
    # index of the screen section that the group center currently occupies
    fo_pos_idx_ = [
        int(float(self.gctr[0]) / self.wSize[0] * self.scr_sec[0]) + 1,
        int(float(self.gctr[1]) / self.wSize[1] * self.scr_sec[1])
    ]
    # offset from the middle section (Python-2 integer division)
    idxAO = [
        fo_pos_idx_[0] - (self.scr_sec[0] / 2),
        fo_pos_idx_[1] - (self.scr_sec[1] / 2)
    ]
    idxAO[0] *= -1  # change symbol (left <-> right)
    #if idxAO[0] != 0: idxAO[0] = idxAO[0]/2
    #if idxAO[1] != 0: idxAO[1] = idxAO[1]/2
    idxAO.append(0)
    #idxAO.append( -abs(idxAO[0]) )  # z index depends on
    # x index in our three screen configuration
    if self.fo_pos_idx != fo_pos_idx_:
        # FO group's position area is changed
        self.fo_pos_idx = [fo_pos_idx_[0], fo_pos_idx_[1]]  # update the info
        self.parent.mods["session_mngr"].msg_q.put(
            'videoOut/foMove/%i/%i/%i' % (idxAO[0], idxAO[1], idxAO[2]),
            True, None)
    tBuff = 50  # buffer (pixels) for determining track
    if len(self.fo[0]['track']) == 1:
        ### ran out of track data.
        ### determine a new destination with relevant variables
        if self.parent.mods["session_mngr"].session_type == 'immersion':
            if uniform(0, 1) < 0.33:
                # occasionally allow the whole width
                range_ = (tBuff, self.wSize[0] - tBuff)
            else:
                if self.gctr[0] < self.wSize[0] / 2:
                    # remain in left screen
                    range_ = (tBuff, self.s_w[0] - tBuff)
                elif self.gctr[0] > self.wSize[0] / 2:
                    # remain in right screen
                    range_ = (self.s_w[0] + self.s_w[1] + tBuff,
                              self.wSize[0] - tBuff )
                else:
                    range_ = (tBuff, self.wSize[0] - tBuff)
        else:
            # stimulus moves in the center only
            range_ = (self.ctr_rect[0], self.ctr_rect[2])
        # destination of the group, and two Bezier handle points h1/h2
        # placed roughly at 1/3 and 2/3 of the horizontal travel
        dest = (randint(range_[0], range_[1]),
                randint(tBuff, min(self.s_h) - tBuff))
        if self.gctr[0] < dest[0]:
            h1 = (randint(
                      self.gctr[0],
                      int(self.gctr[0] + (dest[0] - self.gctr[0]) * 0.666)),
                  randint(tBuff, min(self.s_h) - tBuff))
            h2 = (randint(
                      int(self.gctr[0] + (dest[0] - self.gctr[0]) * 0.333),
                      dest[0]),
                  randint(tBuff, min(self.s_h) - tBuff))
        else:
            h1 = (randint(int(dest[0] + (self.gctr[0] - dest[0]) * 0.333),
                          self.gctr[0]),
                  randint(tBuff, min(self.s_h) - tBuff))
            h2 = (randint(dest[0],
                          int(dest[0] + (self.gctr[0] - dest[0]) * 0.666)),
                  randint(tBuff, min(self.s_h) - tBuff))
        ### travel distance: gctr -> h1 -> h2 -> dest
        t_dist = np.sqrt((self.gctr[0] - h1[0])**2 +
                         (self.gctr[1] - h1[1])**2)
        t_dist += np.sqrt((h1[0] - h2[0])**2 + (h1[1] - h2[1])**2)
        t_dist += np.sqrt((h2[0] - dest[0])**2 + (h2[1] - dest[1])**2)
        number_of_track_pts = randint(int(t_dist / self.fo_max_spd),
                                      int(t_dist / self.fo_min_spd))  # this
                                      # determines speed of FO movements
        number_of_track_pts = max(self.refresh_rate,
                                  number_of_track_pts)  # set the minimum
                                                        # track points
    for i in range(len(self.fo)):
        if len(self.fo[i]['track']) > 1:
            self.fo[i]['track'].pop(0)  # pop out the used coordinate
                                        # from the track
        else:
            # ran out of track data; get new Bezier curve points.
            # NOTE(review): dest/h1/h2/number_of_track_pts are defined only
            # when the 'len(self.fo[0]["track"]) == 1' branch above fired -
            # this assumes all FOs run out of track on the same tick.
            ### make bezier curve track points with individual randomness
            orig_ = (self.fo[i]['track'][0][0], self.fo[i]['track'][0][1])
            dest_ = (randint(dest[0] - tBuff, dest[0] + tBuff),
                     randint(dest[1] - tBuff, dest[1] + tBuff))
            h1_ = (randint(h1[0] - tBuff * 4, h1[0] + tBuff * 4),
                   randint(h1[1] - tBuff * 4, h1[1] + tBuff * 4))
            h2_ = (randint(h2[0] - tBuff * 4, h2[0] + tBuff * 4),
                   randint(h2[1] - tBuff * 4, h2[1] + tBuff * 4))
            self.fo[i]['track'] = make_b_curve_coord(
                orig_, h1_, h2_, dest_, number_of_track_pts)
        if self.flag_rad_change == True:
            ### random radius change of each fo
            if self.fo[i]['rad_change'] == 0:
                # not currently changing; maybe start growing or shrinking
                if uniform(0, 1) < self.chance_to_change_rad:
                    if uniform(0, 1) > 0.5:
                        self.fo[i]['rad_change'] = 1
                    else:
                        self.fo[i]['rad_change'] = -1
            else:
                # continue the change, clamping at min/max radius
                self.fo[i]['rad'] += self.fo[i]['rad_change']
                if self.fo[i]['rad'] < self.fo_min_rad:
                    self.fo[i]['rad'] = int(self.fo_min_rad)
                    self.fo[i]['rad_change'] = 0
                elif self.fo[i]['rad'] > self.fo_max_rad:
                    self.fo[i]['rad'] = int(self.fo_max_rad)
                    self.fo[i]['rad_change'] = 0
def run(self):
    ''' Microphone capture loop.

    Continuously reads audio blocks from the mic stream, runs a simple
    FFT-based loudness/concentration test, and records to a WAV file
    while the signal stays "valid"; recording stops after
    self.stop_latency seconds without valid data, or on a 'quit' message
    from the main module.
    '''
    aDataBuff = []  # buffer for audio data (one list of samples per block)
    r_cnt = 0  # counts new blocks appended after last writing to WAV
    last_valid_time = -1  # last time when data was valid to record
    # writing to WAV file occurs roughly once per 2*fps blocks
    snd_file = None
    is_recording = False
    fps = 0
    prev_fps = []
    prev_fps_time = time()
    stream = self.open_mic_stream()
    writeFile(self.parent.log_file_path,
              "%s, [audioIn], 'run' starts.\n" % (get_time_stamp()))
    num_of_IOErr = 0
    while True:
        fps, prev_fps, prev_fps_time = chk_fps("audioIn", fps, prev_fps,
                                               prev_fps_time,
                                               self.parent.log_file_path)
        # listen to a message
        msg_src, msg_body, msg_details = chk_msg_q(self.msg_q)
        if msg_src == "main":
            if msg_body == "quit":
                if is_recording == True and r_cnt > 0:
                    # flush the remaining buffered audio before quitting
                    snd_file = self.finish_rec(aDataBuff, snd_file,
                                               r_cnt, prev_fps)
                    is_recording = False
                break
        try:
            ### get audio data
            # (FIX: np.fromstring is deprecated; np.frombuffer is the
            # drop-in, zero-copy replacement)
            aDataBuff.append(
                np.frombuffer(
                    stream.read(self.rp["input_frames_per_block"],
                                exception_on_overflow=False),
                    dtype=np.short
                ).tolist()
            )
            if len(aDataBuff) > self.buff_sz:
                aDataBuff.pop(0)
            ### record to file
            if is_recording == True:
                r_cnt += 1
                if r_cnt > (self.fps * 2):
                    # (FIX: ndarray.tostring is a deprecated alias of tobytes)
                    snd_file.writeframes(
                        np.array(aDataBuff[-(self.fps * 2):],
                                 dtype=np.int16).tobytes())
                    r_cnt = 0
            ### check data to record
            # (FIX: slice indices wrapped in int(); identical for the
            # integer parameters this code was written for, and robust
            # if freq_res happens to be a float)
            _fData = np.asarray(
                abs(np.fft.fft(aDataBuff[-1]))
                [: int(self.rp["input_frames_per_block"] / 2)])
            _d = _fData / self.dMax * 100  # data range 0~100
            # cut off low frequency data
            _d = _d[int(self.cutoff_hz / self.rp["freq_res"]):]
            if np.sum(_d) > _d.shape[0] and np.average(_d) > (np.median(_d) * 1.5):
                # Sum of data is bigger than the length of data :
                #   each data is bigger than 1 on average.
                # Average is bigger than median*1.5 :
                #   amplitude is more concentrated in some areas.
                last_valid_time = time()
                if is_recording == False:  # not recording
                    ### start recording
                    is_recording = True
                    r_cnt = 0
                    n_ = datetime.now()
                    folder = path.join(self.parent.output_folder,
                                       "%.4i_%.2i_%.2i" % (n_.year, n_.month,
                                                           n_.day))
                    if path.isdir(folder) == False:
                        mkdir(folder)
                    wav_fp = path.join(folder,
                                       "%s.wav" % (get_time_stamp()))
                    snd_file = wave.open(wav_fp, "wb")
                    snd_file.setparams(
                        (
                            self.rp["channels"],
                            self.rp["sampWidth"],
                            self.rp["sampleRate"],
                            0,
                            "NONE",
                            "noncompressed",
                        )
                    )
                    # seed the file with the most recent buffered audio
                    snd_file.writeframes(
                        np.array(aDataBuff[-(self.fps * 2):],
                                 dtype=np.int16).tobytes())
                    writeFile(
                        self.parent.log_file_path,
                        "%s, [audioIn], start to write WAV, %s.\n"
                        % (get_time_stamp(), wav_fp),
                    )
            else:
                if is_recording == True:  # currently recording
                    if (time() - last_valid_time > self.stop_latency):
                        # there was no valid data to record for some time
                        ### stop recording
                        is_recording = False
                        snd_file = self.finish_rec(aDataBuff, snd_file,
                                                   r_cnt, prev_fps)
        # (FIX: 'except IOError, e' is Python-2-only syntax; 'as' works
        # in Python 2.6+ and Python 3)
        except IOError as e:
            if num_of_IOErr < 10:
                # log only the first 10 IO errors to avoid flooding the log
                msg_ = "%s, [audioIn], IOError : %s\n" % (get_time_stamp(), e)
                writeFile(self.parent.log_file_path, msg_)
            num_of_IOErr += 1
            # back off briefly after an IO error; the blocking
            # stream.read() already paces the normal loop
            sleep(self.input_block_time / 2)