Example #1
 def data_recording(self, fps):
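     # Either start a new recording session (first branch), or keep appending frames to
     # the ongoing one; once the maximum frame count is reached, finalize and start over.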
     if self.flag_recording_started == False: # This is the beginning of storing the video data
         curr_time_stamp = get_time_stamp()
         if self.recording_timestamp != curr_time_stamp:
             self.recording_timestamp = copy(curr_time_stamp)
             self.recording_frame_cnt = 0
             if FLAG_DEBUG: print "Video-recording starts @ %s"%self.recording_timestamp
             self.start_new_tmp_img_dir()
             self.start_new_MR()
             writeFile(LOG, '%s, Start a video-recording from CAM#%i'%(self.recording_timestamp, self.cID))
             self.record_past_buffer_imgs()
             self.buffer_images = [] # clear the buffer images
             self.save_tmp_image()
             if fps != -1: self.rec_fps.append(fps)
             self.flag_recording_started = True
     else: # The recording already started
         if self.recording_frame_cnt < self.maximum_frame_cnt:
             self.save_tmp_image()
             if fps != -1: self.rec_fps.append(fps)
             #if fps != -1: self.rec_fps = (self.rec_fps + fps)/2
         else:
             if FLAG_DEBUG: print "Video-recording finishes @ %s due to reaching the maximum frame count."%get_time_stamp()
             writeFile(LOG, '%s, Finish the video-recording from CAM#%i\nThe median FPS during the recording was, %i'%(get_time_stamp(), self.cID, int(np.median(self.rec_fps))))
             self.init_movie_params()
             self.data_recording(fps)
Example #2
    def play_sound(self, snd_key):
        '''
        if snd_key == 'button1' : stream = self.stream[1] #self.stream[1] # usb-output
        elif snd_key == 'button2': stream = self.stream[3] #self.stream[3] switched the position between button2 & 3
        elif snd_key == 'button3': stream = self.stream[2] #self.stream[2] # usb-output
        elif snd_key == 'foil':
            stream_idx = randint(1, 3)
            stream = self.stream[stream_idx] # randomly choose a stream if it's a foil sound
        else: stream = self.stream[0] # built-in output stream

        if self.flag_4th_speaker_test == True:
            stream = self.stream[0]
            self.flag_4th_speaker_test = False
        '''
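        # The multi-speaker stream selection above is disabled; always use the built-in output stream.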
        stream = self.stream[0]

        if self.flag_SWS_test == True:
            snd_key += '_SWS'
            self.flag_SWS_test = False
        elif self.flag_NVS_test == True:
            snd_key += '_NVS'
            self.flag_NVS_test = False

        audio_output_data = self.wf[snd_key].readframes(self.chunk)
        while audio_output_data != '':
            stream.write(audio_output_data)
            audio_output_data = self.wf[snd_key].readframes(self.chunk)
        self.wf[snd_key].rewind()

        # The stream-index logging for 'foil' sounds is disabled along with the
        # multi-speaker stream selection above.
        writeFile(LOG, '%s, Played the <%s> sound'%(get_time_stamp(), snd_key))
Example #3
 def start_new_MR(self):
     # create a new MovementRecord (MR) csv file and write its column header
     time_stamp = os.path.split(self.tmp_img_dir_path)[1].replace("x", "") # obtain the time-stamp without 'x's
     self.behavior_rec_path = os.path.join(self.cwd, self.output_folder, "%s_cID%.2i_MR.csv"%(time_stamp, self.cID))
     txt = "Movement-rect, Movement-center, Foreground-rect, "
     txt += "Foreground-center, Number of blobs, Foreground Blob moment-area, Whitish blob moment-area, "
     txt += "Whitish blob position, Grouped Points for Foreground-Blobs, TimeStamp\n------------------------------------"
     writeFile(self.behavior_rec_path, txt)
Example #4
 def quiet_now(self, fps):
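     # Called when no movement is detected; keeps the current recording going until the
     # quiet period exceeds the maximum duration, then asks the caller to save the movie.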
     if self.flag_recording_started: # if it's not recording, there's no need to check the quiet state
         curr_time = time()
         if self.has_been_quiet_since == -1: self.has_been_quiet_since = curr_time # the quiet period starts now
         else:
             if curr_time - self.has_been_quiet_since > self.maximum_q_duration:
             # quiet moment is longer than maximum-quiet-duration
                 if FLAG_DEBUG: print "Video-recording finishes @ %s"%get_time_stamp()
                 writeFile(LOG, '%s, Finish the video-recording from CAM#%i\nThe median FPS during the recording was, %i'%(get_time_stamp(), self.cID, int(np.median(self.rec_fps))))
                 return "save_the_movie"
             else:
                 self.data_recording(fps)
     return None
Example #5
    def run(self, fps):
        global PLACE_TO_CHECK_MOVEMENTS
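        # Process a single frame: pre-process it, detect motion against a running average,
        # track still foreground blobs, and record movement data while motion continues.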

        if self.flag_take_first_bg_image == True: # First bg-image has to be taken at the beginning
            self.take_bg_img()
            self.flag_take_first_bg_image = False

        self.color_image = cv.QueryFrame(self.cap_cam)

        #########################################################################################
        # Pre-processing
        #########################################################################################
        cv.Resize(self.color_image, self.color_image_resize)

        cv.CvtColor(self.color_image_resize, self.grey_image, cv.CV_RGB2GRAY)
        cv.Smooth(self.grey_image, self.grey_image, cv.CV_GAUSSIAN, 7, 0)       
        cv.Dilate(self.grey_image, self.grey_image, None, 2)
        cv.Erode(self.grey_image, self.grey_image, None, 2)

        #########################################################################################
        # Checking movements
        #########################################################################################
        if self.first_run:
            self.first_run = False
            cv.ConvertScale(self.grey_image, self.grey_avg, 1.0, 0.0)
        else:
            cv.RunningAvg(self.grey_image, self.grey_avg, 0.3, None)

        cv.ConvertScale(self.grey_avg, self.temp_grey, 1.0, 0.0) # Convert the scale of the moving average. (for Motion-detection)
        cv.AbsDiff(self.grey_image, self.temp_grey, self.diff_grey) # Absolute difference between the current frame and the moving average. (for Motion-detection)
        cv.Canny(self.diff_grey, self.diff_grey, 10, 15) # Find Edges for Motion-detection

        m_min_pt1, m_max_pt2, m_center_pt_list = self.get_points(self.diff_grey) # get some useful points from the movement's contours


        #########################################################################################
        # Drop changing blobs from the recognized list so that only 'STILL' blobs remain
        #########################################################################################
        if time() - self.last_blob_chk_time > self.still_blob_chk_interval:
            removal_idx = []
            for i in range(len(self.recognized_blobs)):
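                # Compare the stored snapshot of this blob against the current frame inside
                # its bounding rect; a small difference means the blob is still unchanged.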
                recognized_time = self.recognized_blobs[i][1]
                bound_rect = self.recognized_blobs[i][2]
                cv.Zero(self.mask_img)
                curr_img_in_blob_location = cv.CloneImage(self.mask_img)
                diff_img = cv.CloneImage(self.mask_img)
                cv.Rectangle(self.mask_img, (bound_rect[0],bound_rect[1]), (bound_rect[0]+bound_rect[2],bound_rect[1]+bound_rect[3]), 255, cv.CV_FILLED)
                cv.Copy(self.grey_image, curr_img_in_blob_location, self.mask_img)
                cv.AbsDiff(self.recognized_blobs[i][0], curr_img_in_blob_location, diff_img)
                cv.Threshold(diff_img, diff_img, 30, 255, cv.CV_THRESH_BINARY)
                mat_diff_img = cv.GetMat(diff_img)
                moments = cv.Moments(mat_diff_img)
                overall_changed_area = cv.GetCentralMoment(moments, 0, 0)
                if overall_changed_area < 2000:
                    pass
                    '''
                    x = bound_rect[0]*2
                    y = bound_rect[1]*2
                    w = bound_rect[2]*2
                    h = bound_rect[3]*2
                    cv.Rectangle(self.color_image, (x,y), (x+w,y+h), (255,0,0), 2)
                    '''
                else:
                    removal_idx.append(i)

                if time() - recognized_time > self.max_still_blob_dur: # this blob has stayed still longer than the maximum duration
                    ### take a slightly larger area around the blob, then update the background image
                    cv.Rectangle(self.mask_img, (bound_rect[0]-20,bound_rect[1]-20), (bound_rect[0]+bound_rect[2]+20,bound_rect[1]+bound_rect[3]+20), 255, cv.CV_FILLED)
                    cv.Zero(curr_img_in_blob_location)
                    new_bg_img = cv.CloneImage(curr_img_in_blob_location)
                    cv.Copy(self.grey_image, curr_img_in_blob_location, self.mask_img) # get the changed image part
                    cv.Not(self.mask_img, self.mask_img) # invert the mask image
                    cv.Copy(self.grey_bg_img, new_bg_img, self.mask_img) # get the unchanged bg-image
                    cv.Add(curr_img_in_blob_location, new_bg_img, self.grey_bg_img) # combine the above two into the bg-img
                    timestamp = get_time_stamp()
                    writeFile(LOG, '%s, Background was updated on Cam #%i'%(timestamp, self.cID))
                    '''
                    ### Save the changed bg-img
                    capture_file_name = "_changed_bg_img_cID%.2i_%s.jpg"%(self.cID, timestamp)
                    capture_file_path = os.path.join(self.cwd, self.output_folder, capture_file_name)
                    cv.SaveImage(capture_file_path, self.grey_bg_img)
                    '''
                    if i not in removal_idx:
                        removal_idx.append(i) # this blob info will be removed
            for i in range(len(removal_idx)): self.recognized_blobs.pop(removal_idx[i]-i)
            self.last_blob_chk_time = time()


        #########################################################################################
        # Foreground blob checking
        #########################################################################################
        foreground, fg_mask_img, fg_size = self.bg_subtraction(self.grey_image) # background subtraction

        f_min_pt1, f_max_pt2, f_center_pt_list = self.get_points(foreground) # get some useful points from the foreground contours

        ### checking foreground blobs and register it
        number_of_fBlobs = 0
        grouped_points_for_fBlobs = []
        if len(f_center_pt_list) > 0:
            
            number_of_fBlobs, grouped_points_for_fBlobs = self.clustering(f_center_pt_list) # clustering foreground-blobs
            if number_of_fBlobs < 2:
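                # Fewer than two clusters: treat the whole foreground bounding box as one blob.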
                if f_min_pt1 != [] and f_max_pt2 != []:
                    number_of_fBlobs = 1
                    x = f_min_pt1[0] + (f_max_pt2[0]-f_min_pt1[0])/2
                    y = f_min_pt1[1] + (f_max_pt2[1]-f_min_pt1[1])/2
                    grouped_points_for_fBlobs = [[[x, y]]]
                    if len(self.recognized_blobs) < self.max_blob_number:
                        bound_rect = (f_min_pt1[0], f_min_pt1[1], f_max_pt2[0]-f_min_pt1[0], f_max_pt2[1]-f_min_pt1[1])
                        self.chk_and_register_fBlob(bound_rect)
            else:
                for grouped_points in grouped_points_for_fBlobs:
                    if len(self.recognized_blobs) < self.max_blob_number:
                        bound_rect = cv.BoundingRect(grouped_points)
                        self.chk_and_register_fBlob(bound_rect)

        #########################################################################################
        # Movement Recording
        #########################################################################################
        if len(m_center_pt_list) > 0: # there was a rect indicating the movements
            whole_bounding_rect = (m_min_pt1[0], m_min_pt1[1], m_max_pt2[0]-m_min_pt1[0], m_max_pt2[1]-m_min_pt1[1])
            if self.min_th_for_movement < whole_bounding_rect[2] + whole_bounding_rect[3] < self.max_th_for_movement:
            # bounding rect for all the movement points is within the thresholds
                self.has_been_quiet_since = -1 # it's not quiet
                self.data_recording(fps) # data-recording

                ### Record movement detail with certain interval
                if time() > self.last_time_behavior_recorded + self.MR_interval:

                    ### Find out where the white blob is
                    whitish_area = -1
                    white_x = -1
                    white_y = -1
                    '''
                    ### Temporarily disabled. Not very useful at the moment.
                    if len(f_center_pt_list) > 0:
                        ### make the color image with the area left(= detected blob) after background subtraction
                        cv.Zero(self.color_image_for_masking)
                        cv.Copy(self.color_image_resize, self.color_image_for_masking, fg_mask_img)

                        ### save the masked color image for analysis purpose (Temporary)
                        #capture_file_path = os.path.join(self.cwd, self.output_folder, '_tmp', '%s.jpg'%get_time_stamp())
                        #cv.SaveImage(capture_file_path, self.color_image_for_masking)

                        ### extract whitish color
                        cv.CvtColor(self.color_image_for_masking, self.color_image_for_masking, cv.CV_BGR2HSV)
                        cv.InRangeS(self.color_image_for_masking, self.HSV_min, self.HSV_max, self.grey_image)
                        ### calculate its(whitish blob) size and position
                        mat_result_whitish_blob = cv.GetMat(self.grey_image)
                        moments = cv.Moments(mat_result_whitish_blob)
                        whitish_area = cv.GetCentralMoment(moments,0,0)
                        white_x = -1
                        white_y = -1
                        if whitish_area > 1000:
                            white_x = int(cv.GetSpatialMoment(moments, 1, 0)/whitish_area)
                            white_y = int(cv.GetSpatialMoment(moments, 0, 1)/whitish_area)
                    '''
                    ### make the movement-record
                    movement_rect = (m_min_pt1[0], m_min_pt1[1], m_max_pt2[0]-m_min_pt1[0], m_max_pt2[1]-m_min_pt1[1])
                    if len(f_min_pt1) != 0:
                        foreground_rect = (f_min_pt1[0], f_min_pt1[1], f_max_pt2[0]-f_min_pt1[0], f_max_pt2[1]-f_min_pt1[1])
                        fg_x = foreground_rect[0]+foreground_rect[2]/2
                        fg_y = foreground_rect[1]+foreground_rect[3]/2
                    else:
                        foreground_rect = '(-1/ -1/ -1/ -1)'
                        fg_x = fg_y = -1
                    writeFile(self.behavior_rec_path, "%s, %i/%i, %s, %i/%i, %i, %i, %i, %i/%i, %s, %s\n"%(
                                str(movement_rect).replace(",","/"), 
                                movement_rect[0]+movement_rect[2]/2, movement_rect[1]+movement_rect[3]/2, 
                                str(foreground_rect).replace(",","/"), 
                                fg_x, fg_y, number_of_fBlobs, int(fg_size),
                                int(whitish_area), white_x, white_y, 
                                str(grouped_points_for_fBlobs).replace(",", "/"), get_time_stamp()))
                    self.last_time_behavior_recorded = time()

                    ### Check whether main_module has ordered a movement-check at a specific place
                    if len(PLACE_TO_CHECK_MOVEMENTS) > 0:
                        if PLACE_TO_CHECK_MOVEMENTS == 'feeder':
                            if self.x_range_for_feeder_movements[0] < fg_x < self.x_range_for_feeder_movements[1]:
                                if self.y_range_for_feeder_movements[0] < fg_y < self.y_range_for_feeder_movements[1]:
                                    self.vi_conn.send("motion_at_feeder")
                                    PLACE_TO_CHECK_MOVEMENTS = ""
            else:
                self.flag_save_movie = self.quiet_now(fps)
        else:
            self.flag_save_movie = self.quiet_now(fps)

        if self.flag_recording_started == False: self.save_buffer_img() # if it's not recording, save the buffer image
Example #6
 def send(self, msg=''):
     self.aConn.write(msg) # send a message to Arduino
     sleep(0.2)
     self.aConn.flushOutput() # clear the serial output buffer
     self.log_file_path = update_log_file_path(self.output_folder, self.log_file_path)
     if os.path.isfile(self.log_file_path): writeFile(self.log_file_path, "%s, Message - '%s' was sent to Arduino"%(get_time_stamp(), msg))