Example #1
    def process_frame(self, frame):
        self.frame_count += 1
        frame = cv2.GaussianBlur(frame, (3, 3), -1)
        display = frame.copy()

        fgmask = self.fgbg.apply(frame)
        diff = cv2.absdiff(fgmask, self.prev_background)
        self.prev_background = fgmask

        diff = cv2.morphologyEx(diff, cv2.MORPH_CLOSE, self.se)
        diff = cv2.morphologyEx(diff, cv2.MORPH_OPEN, self.se)

        timestamp = float(self.frame_count) / self.fps
        cv2.updateMotionHistory(diff, self.motion_history, timestamp,
                                MHI_DURATION)
        mseg_mask, mseg_bounds = cv2.segmentMotion(self.motion_history,
                                                   timestamp, MAX_TIME_DELTA)

        mseg_bounds = filter_inside(mseg_bounds, self.screen_area)

        if len(mseg_bounds) > 0:
            self.track_human(frame, mseg_bounds)

        people, visible = self.calculator.get_visible()
        draw_detections(display, visible, 3)
        return display, people
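This method leans on state initialized elsewhere in the class. A minimal sketch of that setup, with assumed values (the background subtractor choice, kernel size, and screen_area are guesses; the tracker and calculator objects are omitted):

import cv2
import numpy as np

MHI_DURATION = 0.5     # assumed, matching OpenCV's motempl sample
MAX_TIME_DELTA = 0.25  # assumed

class PeopleCounter(object):  # hypothetical class name
    def __init__(self, w, h, fps):
        self.frame_count = 0
        self.fps = fps
        self.fgbg = cv2.BackgroundSubtractorMOG2()  # OpenCV 2.4 constructor
        self.prev_background = np.zeros((h, w), np.uint8)
        self.se = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
        self.motion_history = np.zeros((h, w), np.float32)
        self.screen_area = (0, 0, w, h)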
Example #2
    def on_frame(self, frame):
        h, w = frame.shape[:2]
        frame_diff = cv2.absdiff(frame, self.prev_frame)
        gray_diff = cv2.cvtColor(frame_diff, cv2.COLOR_BGR2GRAY)
        ret, motion_mask = cv2.threshold(gray_diff, self._threshold, 1, cv2.THRESH_BINARY)
        timestamp = clock()
        cv2.updateMotionHistory(motion_mask, self.motion_history, timestamp, MHI_DURATION)
        mg_mask, mg_orient = cv2.calcMotionGradient(self.motion_history, MAX_TIME_DELTA, MIN_TIME_DELTA, apertureSize=5 )
        seg_mask, seg_bounds = cv2.segmentMotion(self.motion_history, timestamp, MAX_TIME_DELTA)

        centers = []
        rects = []
        draws = []
        for i, rect in enumerate([(0, 0, w, h)] + list(seg_bounds)):
            x, y, rw, rh = rect
            area = rw*rh
            if area < 64**2:
                continue
            silh_roi = motion_mask[y:y+rh, x:x+rw]
            orient_roi = mg_orient[y:y+rh, x:x+rw]
            mask_roi = mg_mask[y:y+rh, x:x+rw]
            mhi_roi = self.motion_history[y:y+rh, x:x+rw]
            if cv2.norm(silh_roi, cv2.NORM_L1) < area*0.05:
                continue
            angle = cv2.calcGlobalOrientation(orient_roi, mask_roi, mhi_roi, timestamp, MHI_DURATION)
            color = ((255, 0, 0), (0, 0, 255))[i == 0]
            if self._use_cv_gui:
                draws.append(lambda vis, rect=rect, angle=angle, color=color:
                                draw_motion_comp(vis, rect, angle, color))
            centers.append((x + rw // 2, y + rh // 2))
            rects.append(rect)

        self.tracker_group.update_trackers(centers, rects)

        trackers = self.tracker_group.trackers
        cx, cy = None, None
        if len(trackers):
            first_tracker = trackers[0]
            cx, cy = center_after_median_threshold(frame, first_tracker.rect)
            cv2.circle(frame, (cx, cy), 5, (255, 255, 255), 3)
        self._on_cx_cy(cx, cy)  # cx and cy are None when no balloon was identified

        if self._use_cv_gui:
            self.on_frame_cv_gui(frame, draws, (cx, cy))
        else:
            self.frame_vis(frame, draws, (cx, cy))

        self.prev_frame = frame.copy()
        # TODO - print visualization onto image
        return frame
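Most of these examples assume a clock() helper plus a set of MHI constants; OpenCV's motempl.py sample defines them as below, which is a reasonable reading here:

import cv2

MHI_DURATION = 0.5       # seconds a silhouette persists in the history image
DEFAULT_THRESHOLD = 32   # frame-difference binarization threshold
MAX_TIME_DELTA = 0.25    # used by calcMotionGradient / segmentMotion
MIN_TIME_DELTA = 0.05

def clock():
    # wall-clock seconds, as in OpenCV's sample common.py
    return cv2.getTickCount() / cv2.getTickFrequency()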
Example #3
    def process_motions(self, motion_mask, timestamp):
        # motion_mask and timestamp are assumed to be supplied by the
        # caller's frame-differencing step; seg_bounds is recomputed here
        mg_mask, mg_orient = cv2.calcMotionGradient(self.motion_history, MAX_TIME_DELTA, MIN_TIME_DELTA, apertureSize=5)
        seg_mask, seg_bounds = cv2.segmentMotion(self.motion_history, timestamp, MAX_TIME_DELTA)
        for i, rect in enumerate([(0, 0, self.w, self.h)] + list(seg_bounds)):
            x, y, rw, rh = rect
            area = rw*rh
            if area < 64**2:
                continue
            silh_roi = motion_mask[y:y+rh, x:x+rw]
            orient_roi = mg_orient[y:y+rh, x:x+rw]
            mask_roi = mg_mask[y:y+rh, x:x+rw]
            mhi_roi = self.motion_history[y:y+rh, x:x+rw]
            if cv2.norm(silh_roi, cv2.NORM_L1) < area*0.05:
                continue
            angle = cv2.calcGlobalOrientation(orient_roi, mask_roi, mhi_roi, timestamp, MHI_DURATION)
            color = ((255, 0, 0), (0, 0, 255))[i == 0]
            draw_motion_comp(self.vis, rect, angle, color)
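draw_motion_comp is not shown in these excerpts; a sketch consistent with OpenCV's motempl.py sample, which draws the component's bounding box plus an orientation needle:

import numpy as np

def draw_motion_comp(vis, rect, angle, color):
    x, y, w, h = rect
    cv2.rectangle(vis, (x, y), (x + w, y + h), (0, 255, 0))
    r = min(w // 2, h // 2)
    cx, cy = x + w // 2, y + h // 2
    angle = angle * np.pi / 180  # calcGlobalOrientation returns degrees
    cv2.circle(vis, (cx, cy), r, color, 3)
    cv2.line(vis, (cx, cy),
             (int(cx + np.cos(angle) * r), int(cy + np.sin(angle) * r)),
             color, 3)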
Example #4
    def detect(self):
        # print('detect...')
        # visuals = ['input', 'frame_diff', 'motion_hist', 'grad_orient']
        # cv2.createTrackbar('visual', 'motempl', 2, len(visuals)-1, nothing)
        # cv2.createTrackbar('threshold', 'motempl', DEFAULT_THRESHOLD, 255, nothing)
        height, width = self.frame.shape[:2]
        frame_diff = cv2.absdiff(self.frame, self.prev_frame)
        gray_diff = cv2.cvtColor(frame_diff, cv2.COLOR_BGR2GRAY)
        thrs = self.threshold_spin.value()
        # thrs = cv2.getTrackbarPos('threshold', 'motempl')
        ret, motion_mask = cv2.threshold(gray_diff, thrs, 1, cv2.THRESH_BINARY)
        timestamp = clock()
        cv2.updateMotionHistory(motion_mask, self.motion_history, timestamp, self.MHI_DURATION)
        mg_mask, mg_orient = cv2.calcMotionGradient(self.motion_history, self.MAX_TIME_DELTA, self.MIN_TIME_DELTA, apertureSize=5)
        seg_mask, seg_bounds = cv2.segmentMotion(self.motion_history, timestamp, self.MAX_TIME_DELTA)

        visual_name = 'input'  # fallback in case no radio button is checked
        for radio in (self.visual_radio_1, self.visual_radio_2, self.visual_radio_3, self.visual_radio_4):
            if radio.isChecked():
                visual_name = str(radio.text())
                break

        # visual_name = visuals[cv2.getTrackbarPos('visual', 'motempl')]
        # visual_name = 'input'
        if visual_name == 'input':
            vis = self.frame.copy()
        elif visual_name == 'frame_diff':
            vis = frame_diff.copy()
        elif visual_name == 'motion_hist':
            vis = np.uint8(np.clip((self.motion_history-(timestamp-self.MHI_DURATION)) / self.MHI_DURATION, 0, 1)*255)
            vis = cv2.cvtColor(vis, cv2.COLOR_GRAY2BGR)
        elif visual_name == 'grad_orient':
            self.hsv[:, :, 0] = mg_orient/2
            self.hsv[:, :, 2] = mg_mask*255
            vis = cv2.cvtColor(self.hsv, cv2.COLOR_HSV2BGR)

        for i, rect in enumerate([(0, 0, width, height)] + list(seg_bounds)):
            x, y, rw, rh = rect
            area = rw*rh
            if area < 64**2:
                continue
            silh_roi = motion_mask[y:y+rh, x:x+rw]
            orient_roi = mg_orient[y:y+rh, x:x+rw]
            mask_roi = mg_mask[y:y+rh, x:x+rw]
            mhi_roi = self.motion_history[y:y+rh, x:x+rw]
            if cv2.norm(silh_roi, cv2.NORM_L1) < area*0.05:
                continue
            angle = cv2.calcGlobalOrientation(orient_roi, mask_roi, mhi_roi, timestamp, self.MHI_DURATION)
            color = ((255, 0, 0), (0, 0, 255))[i == 0]
            self.draw_motion_comp(vis, rect, angle, color)
            if i == 0:
                # target motion detected
                if self.record_check.isChecked():
                    self.monitor_last_shoot = clock()
                    if not self.is_recording:
                        self.start_record()
                elif self.shoot_check.isChecked():
                    # print(self.monitor_last_shoot)
                    # print(clock())
                    delay = self.shoot_delay_spin.value()
                    if (not self.monitor_last_shoot) or (clock() - self.monitor_last_shoot >= delay):
                        self.shoot()
                        self.monitor_last_shoot = clock()
                if self.sound_check.isChecked():
                    self.play_sound()

        self.draw_str(vis, (20, 20), visual_name)

        self.prev_frame = self.frame.copy()
        return vis
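The draw_str overlay helper used throughout these examples follows OpenCV's sample common.py; a sketch (cv2.CV_AA is the OpenCV 2.4 name; use cv2.LINE_AA on 3.x and later):

def draw_str(dst, target, s):
    x, y = target
    # dark drop shadow first, then white text on top
    cv2.putText(dst, s, (x + 1, y + 1), cv2.FONT_HERSHEY_PLAIN, 1.0,
                (0, 0, 0), thickness=2, lineType=cv2.CV_AA)
    cv2.putText(dst, s, (x, y), cv2.FONT_HERSHEY_PLAIN, 1.0,
                (255, 255, 255), lineType=cv2.CV_AA)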
def video_feature_extraction_save(videoName, featureWriter, case, MIN_TIME_DELTA,
                                  MAX_TIME_DELTA, MHI_DURATION, THRESH_VALUE, DISPLAY=False):
    cv2.namedWindow('rat activity recognition')
    visuals = ['input', 'frame_diff', 'motion_hist', 'grad_orient']
    # use MHI (motion history image) features
    visual_name = visuals[2]
  
    cam = cv2.VideoCapture(videoName)
    video_len = cam.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT)
    ret, frame = cam.read()
    h, w = frame.shape[:2]
    prev_frame = frame.copy()
    motion_history = np.zeros((h, w), np.float32)
    hsv = np.zeros((h, w, 3), np.uint8)
    hsv[:,:,1] = 255
    ii = 0
    while ii < video_len:
        ii += 1
        ret, frame = cam.read()
        frame_diff = cv2.absdiff(frame, prev_frame)
        gray_diff = cv2.cvtColor(frame_diff, cv2.COLOR_BGR2GRAY)
     
        ret, motion_mask = cv2.threshold(gray_diff, THRESH_VALUE, 1, cv2.THRESH_BINARY)
        timestamp = clock()
        cv2.updateMotionHistory(motion_mask, motion_history, timestamp, MHI_DURATION)
        mg_mask, mg_orient = cv2.calcMotionGradient(motion_history, MAX_TIME_DELTA, MIN_TIME_DELTA, apertureSize=5 )
        seg_mask, seg_bounds = cv2.segmentMotion(motion_history, timestamp, MAX_TIME_DELTA)

        if visual_name == 'input':
            vis = frame.copy()
        elif visual_name == 'frame_diff':
            vis = frame_diff.copy()
        elif visual_name == 'motion_hist':
            vis0 = np.uint8(np.clip((motion_history-(timestamp-MHI_DURATION)) / MHI_DURATION, 0, 1)*255)
            junk, mei0 = cv2.threshold(vis0, 1, 255, cv2.THRESH_BINARY)

        elif visual_name == 'grad_orient':
            hsv[:,:,0] = mg_orient/2
            hsv[:,:,2] = mg_mask*255
            vis = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
            
        ## Compute features
        M1 = cv2.moments(mei0)
        M2 = cv2.moments(vis0)    
        Hu1 = cv2.HuMoments(M1)
        Hu2 = cv2.HuMoments(M2)
        
        smallNum = 1e-200  # scalar floor keeps the (7, 1) shape and avoids log10(0)
        Hu1 = Hu1 + smallNum
        Hu2 = Hu2 + smallNum
        
        Hu1 = np.sign(Hu1)*np.log10(np.abs(Hu1))
        Hu2 = np.sign(Hu2)*np.log10(np.abs(Hu2))
        
        if M1['m00'] != 0:
            cx1 = M1['m10']/M1['m00']
            cy1 = M1['m01']/M1['m00']
        else:
            cx1 = 0
            cy1 = 0

        if M2['m00'] != 0:
            cx2 = M2['m10']/M2['m00']
            cy2 = M2['m01']/M2['m00']
        else:
            cx2 = 0
            cy2 = 0
                                       
        meiSize = np.count_nonzero(mei0)
        
        if meiSize == 0:
            corner1 = 0
            corner2 = 0
            corner3 = 0
            corner4 = 0
            height = 0
            width = 0
            extend = 0
        else:
            indices = np.nonzero(mei0)
            corner1 = max(indices[0])
            corner2 = min(indices[0])
            corner3 = max(indices[1])
            corner4 = min(indices[1])
            height = corner1 - corner2+1
            width = corner3 - corner4+1
            extend = meiSize/float(height*width)
        
        features = [Hu1[0][0],Hu1[1][0],Hu1[2][0],Hu1[3][0],Hu1[4][0],Hu1[5][0],Hu1[6][0],
                    Hu2[0][0],Hu2[1][0],Hu2[2][0],Hu2[3][0],Hu2[4][0],Hu2[5][0],Hu2[6][0],
                    cx1, cy1, cx2, cy2, meiSize, corner1, corner2, corner3,corner4, height, width, extend, case]
      
        zeroFeatures = [-200]*14
        if case == 6:  # rest case: always record
            featureWriter.writerow(features)
        else:
            # skip frames whose 14 Hu features all collapsed to -200
            # (log10 of the 1e-200 floor), i.e. frames with no motion
            if features[0:14] != zeroFeatures:
                featureWriter.writerow(features)
       
        prev_frame = frame.copy()
                            
        if DISPLAY:
            #draw_str(vis, (20, 20), visual_name)
            vis = cv2.cvtColor(vis0, cv2.COLOR_GRAY2BGR)
            mei = cv2.cvtColor(mei0, cv2.COLOR_GRAY2BGR)
            cv2.imshow('MHI', vis)
            cv2.imshow('MEI', mei)
            cv2.imshow('Video',frame)
    
            if 0xff & cv2.waitKey(50) == 27:
                break
            
    cam.release()
    cv2.destroyAllWindows()
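The featureWriter argument is presumably a csv.writer. A hypothetical call matching the writerow usage above (the file name and parameter values are made up):

import csv

with open('rat_features.csv', 'wb') as f:  # 'wb' for the Python 2 csv module
    writer = csv.writer(f)
    video_feature_extraction_save('video01.avi', writer, case=6,
                                  MIN_TIME_DELTA=0.05, MAX_TIME_DELTA=0.25,
                                  MHI_DURATION=0.5, THRESH_VALUE=32,
                                  DISPLAY=True)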
Example #6
    ret, frame = cam.read()
    h, w = frame.shape[:2]
    prev_frame = frame.copy()
    motion_history = np.zeros((h, w), np.float32)
    hsv = np.zeros((h, w, 3), np.uint8)
    hsv[:,:,1] = 255
    while True:
        ret, frame = cam.read()
        frame_diff = cv2.absdiff(frame, prev_frame)
        gray_diff = cv2.cvtColor(frame_diff, cv2.COLOR_BGR2GRAY)
        thrs = cv2.getTrackbarPos('threshold', 'motempl')
        ret, motion_mask = cv2.threshold(gray_diff, thrs, 1, cv2.THRESH_BINARY)
        timestamp = clock()
        cv2.updateMotionHistory(motion_mask, motion_history, timestamp, MHI_DURATION)
        mg_mask, mg_orient = cv2.calcMotionGradient( motion_history, MAX_TIME_DELTA, MIN_TIME_DELTA, apertureSize=5 )
        seg_mask, seg_bounds = cv2.segmentMotion(motion_history, timestamp, MAX_TIME_DELTA)

        visual_name = visuals[cv2.getTrackbarPos('visual', 'motempl')]
        if visual_name == 'input':
            vis = frame.copy()
        elif visual_name == 'frame_diff':
            vis = frame_diff.copy()
        elif visual_name == 'motion_hist':
            vis = np.uint8(np.clip((motion_history-(timestamp-MHI_DURATION)) / MHI_DURATION, 0, 1)*255)
            vis = cv2.cvtColor(vis, cv2.COLOR_GRAY2BGR)
        elif visual_name == 'grad_orient':
            hsv[:,:,0] = mg_orient/2
            hsv[:,:,2] = mg_mask*255
            vis = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)

        for i, rect in enumerate([(0, 0, w, h)] + list(seg_bounds)):
Example #7
def main(args):
    if args.in_file:
        try: 
            inf = int(args.in_file)
        except ValueError: 
            inf = args.in_file
    else: 
        inf = 0
        
    cam = cv2.VideoCapture(inf)
    imgHeight = int(cam.get(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT))
    imgWidth = int(cam.get(cv2.cv.CV_CAP_PROP_FRAME_WIDTH))
    comb_vis = np.zeros((imgHeight, 2*imgWidth), np.uint8)
    
    setup(imgWidth, imgHeight)

    motion_history = np.zeros((imgHeight, imgWidth), np.float32)
    
    s = Splatter((imgHeight, imgWidth), 'splat_db')
    
    pause_time = 0
    start_time = time.time()
    ret, t0 = cam.read() # First two frames
    ret, t = cam.read()
    if args.flip:
        t0 = cv2.flip(t0, flipCode=1)
        t = cv2.flip(t, flipCode=1)

    if args.out_file:
        h, w = t.shape[:2]
        if not args.dont_show_capture: w *= 2
        video = cv2.VideoWriter(args.out_file, cv2.cv.CV_FOURCC('X','V','I','D'), 10, (w, h))
        # video = cv2.VideoWriter(args.out_file, -1, 10, (w, h))

    while ret:
        thresh = cv2.getTrackbarPos(ThreshTrackbar, controlTrackbars)
        mhi_duration = cv2.getTrackbarPos(MhiDurationTrackbar, controlTrackbars) / 10.0
        max_time_delta = cv2.getTrackbarPos(MaxTimeDeltaTrackbar, controlTrackbars) / 10.0
        sqrt_rect_area = cv2.getTrackbarPos(MinSqrtRectAreaTrackbar, controlTrackbars)
        
        diff = cv2.absdiff(t, t0)
        gray_diff = cv2.cvtColor(diff, cv2.COLOR_BGR2GRAY)
        gray_diff = cv2.medianBlur(gray_diff, 5)
        ret, mask = cv2.threshold(gray_diff, thresh, 255, cv2.THRESH_BINARY)
        vis = s.splat_mask(mask) | s.splat_mask(~mask)
        
        timestamp = time.clock()
        cv2.updateMotionHistory(mask, motion_history, timestamp, mhi_duration)
        seg_mask, seg_bounds = cv2.segmentMotion(motion_history, timestamp, max_time_delta)
        
        rects = [rect for rect in seg_bounds if rect[2] * rect[3] > sqrt_rect_area ** 2]
        for rect in rects:
            x, y, w, h = rect
            cv2.rectangle(t, (x, y), (x+w, y+h), (0, 255, 0))
        vis |= perturber.c_p_p(mask, rects)
        
        if not args.dont_show_capture:
            vis = utils.combine_images(vis, t)
        
        cv2.imshow(winName, vis)
        if args.out_file:
            video.write(vis if len(vis.shape) == 3 else cv2.cvtColor(vis, cv2.COLOR_GRAY2BGR))
        t0 = t
        ret, t = cam.read()
        if args.flip:
            t = cv2.flip(t, flipCode=1)
        key = cv2.waitKey(40)
        if key == ord('q'):
            cv2.destroyAllWindows()
            break
        elif key == ord('s'):
            if cv2.waitKey() == ord('p'):
                cv2.imwrite('res.png', mask)
    
    cam.release()

    if args.out_file:
        video.release()
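setup and the trackbar constants are not shown; below is a minimal sketch of what main assumes. The trackbar names come from the calls above; the window names, defaults, and ranges are guesses:

winName = 'splatter'           # hypothetical
controlTrackbars = 'controls'  # hypothetical
ThreshTrackbar = 'threshold'
MhiDurationTrackbar = 'mhi duration x10'
MaxTimeDeltaTrackbar = 'max time delta x10'
MinSqrtRectAreaTrackbar = 'min sqrt rect area'

def setup(width, height):
    cv2.namedWindow(winName)
    cv2.namedWindow(controlTrackbars)
    nothing = lambda x: None
    cv2.createTrackbar(ThreshTrackbar, controlTrackbars, 32, 255, nothing)
    cv2.createTrackbar(MhiDurationTrackbar, controlTrackbars, 5, 50, nothing)   # /10 -> 0.5 s
    cv2.createTrackbar(MaxTimeDeltaTrackbar, controlTrackbars, 3, 50, nothing)  # /10 -> 0.3 s
    cv2.createTrackbar(MinSqrtRectAreaTrackbar, controlTrackbars, 32, 256, nothing)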
Example #8
    if not successFlag:
        break

    frameDiff = cv2.absdiff(lastFrame, frame)
    greyDiff = cv2.cvtColor(frameDiff, code=cv2.COLOR_BGR2GRAY)

    retval, motionMask = cv2.threshold(greyDiff, 20, 1, cv2.THRESH_BINARY)

    timestamp = time.clock()
    cv2.updateMotionHistory(motionMask, motionHistory, timestamp, 0.5)
    mg_mask, mg_orient = cv2.calcMotionGradient(motionHistory,
                                                0.25,
                                                0.05,
                                                apertureSize=5)
    seg_mask, seg_bounds = cv2.segmentMotion(motionHistory, timestamp, 0.25)

    surf = cv2.SURF()
    grey = cv2.cvtColor(frame, code=cv2.COLOR_BGR2GRAY)
    mask = numpy.uint8(numpy.ones(grey.shape))
    points = surf.detect(grey, mask)

    vis = cv2.cvtColor(grey, cv2.COLOR_GRAY2BGR)

    for p in points[::10]:
        cv2.circle(vis, (int(p.pt[0]), int(p.pt[1])), 2, (0, 255, 0), -1)
        cv2.circle(vis, (int(p.pt[0]), int(p.pt[1])), int(p.size), (0, 255, 0),
                   2)

    total = numpy.sum(motionHistory) / 8 * 255
    print "movement: ", total
def doTestRealTime():
    global saveTrainFeaturesDes, visuals, gSkipFrames, testData,testLabels

    print("Testing on real time video")
    cv2.namedWindow('Output')
    cv2.createTrackbar('visual', 'Output', 2, len(visuals)-1, nothing)
    cv2.createTrackbar('threshold', 'Output', DEFAULT_THRESHOLD, 255, nothing)

    cv2.namedWindow('Output7')
    cv2.createTrackbar('visual', 'Output7', 2, len(visuals)-1, nothing)
    cv2.createTrackbar('threshold', 'Output7', DEFAULT_THRESHOLD, 255, nothing)

    cv2.namedWindow('Output14')
    cv2.createTrackbar('visual', 'Output14', 2, len(visuals)-1, nothing)
    cv2.createTrackbar('threshold', 'Output14', DEFAULT_THRESHOLD, 255, nothing)
    testData = []
    testLabels = []

    printPageBreak()
    print("\n\nProcessing real time video..")

    frameCounter = 0
    cam = cv2.VideoCapture(0)

    ret, frame = cam.read()
    h, w = frame.shape[:2]
    prev_frame = frame.copy()
    motion_history = np.zeros((h, w), np.float32)
    hsv = np.zeros((h, w, 3), np.uint8)
    hsv[:,:,1] = 255
    prev_frame7 = frame.copy()
    motion_history7 = np.zeros((h, w), np.float32)
    hsv7 = np.zeros((h, w, 3), np.uint8)
    hsv7[:,:,1] = 255

    prev_frame14 = frame.copy()
    motion_history14 = np.zeros((h, w), np.float32)
    hsv14 = np.zeros((h, w, 3), np.uint8)
    hsv14[:,:,1] = 255

    frameCounter = 1
    subFrameCount = 0
    skipFrames = 1

    subFrameCount7 = 0
    skipFrames7 = 7

    subFrameCount14 = 0
    skipFrames14 = 14

    featureDesSAHMIS = None
    bowDescriptorSAHMIS = None

    timestamp = clock()
    timestamp7 = clock()
    timestamp14 = clock()


    label = ""

    ##Read all frames
    while(True):
        ret, frame = cam.read()
        frame7 = frame.copy()
        frame14 = frame.copy()

        if(subFrameCount == 0):
            frame_diff = cv2.absdiff(frame, prev_frame)
            prev_frame = frame.copy()
            gray_diff = cv2.cvtColor(frame_diff, cv2.COLOR_BGR2GRAY)
            thrs = cv2.getTrackbarPos('threshold', 'Output')
            ret, motion_mask = cv2.threshold(gray_diff, thrs, 1, cv2.THRESH_BINARY)
            timestamp = clock()
            cv2.updateMotionHistory(motion_mask, motion_history, timestamp, MHI_DURATION)

            mg_mask, mg_orient = cv2.calcMotionGradient( motion_history, MAX_TIME_DELTA, MIN_TIME_DELTA, apertureSize=5 )
            seg_mask, seg_bounds = cv2.segmentMotion(motion_history, timestamp, MAX_TIME_DELTA)

            visual_name = visuals[cv2.getTrackbarPos('visual', 'Output')]
            if visual_name == 'input':
                vis = frame.copy()
            elif visual_name == 'frame_diff':
                vis = frame_diff.copy()
            elif visual_name == 'motion_hist':
                vis = np.uint8(np.clip((motion_history-(timestamp-MHI_DURATION)) / MHI_DURATION, 0, 1)*255)
                vis = cv2.cvtColor(vis, cv2.COLOR_GRAY2BGR)
            elif visual_name == 'grad_orient':
                hsv[:,:,0] = mg_orient/2
                hsv[:,:,2] = mg_mask*255
                vis = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)

            for i, rect in enumerate([(0, 0, w, h)] + list(seg_bounds)):
                x, y, rw, rh = rect
                area = rw*rh
                if area < 64**2:
                    continue
                silh_roi   = motion_mask   [y:y+rh,x:x+rw]
                orient_roi = mg_orient     [y:y+rh,x:x+rw]
                mask_roi   = mg_mask       [y:y+rh,x:x+rw]
                mhi_roi    = motion_history[y:y+rh,x:x+rw]
                if cv2.norm(silh_roi, cv2.NORM_L1) < area*0.05:
                    continue
                angle = cv2.calcGlobalOrientation(orient_roi, mask_roi, mhi_roi, timestamp, MHI_DURATION)
                color = ((255, 0, 0), (0, 0, 255))[i == 0]
                draw_motion_comp(vis, rect, angle, color)

            visCopy = vis.copy()
            draw_str(visCopy, (20, 20), visual_name)
            cv2.imshow('Output', visCopy)

        if(subFrameCount7 == 0):
            frame_diff7 = cv2.absdiff(frame7, prev_frame7)
            prev_frame7 = frame7.copy()
            gray_diff7 = cv2.cvtColor(frame_diff7, cv2.COLOR_BGR2GRAY)
            thrs = DEFAULT_THRESHOLD
            ret7, motion_mask7 = cv2.threshold(gray_diff7, thrs, 1, cv2.THRESH_BINARY)
            timestamp7 = clock()
            cv2.updateMotionHistory(motion_mask7, motion_history7, timestamp7, MHI_DURATION)

            mg_mask7, mg_orient7 = cv2.calcMotionGradient( motion_history7, MAX_TIME_DELTA, MIN_TIME_DELTA, apertureSize=5 )
            seg_mask7, seg_bounds7 = cv2.segmentMotion(motion_history7, timestamp7, MAX_TIME_DELTA)

            visual_name = 'motion_hist' #visuals[cv2.getTrackbarPos('visual', 'Output')]
            if visual_name == 'input':
                vis7 = frame7.copy()
            elif visual_name == 'frame_diff':
                vis7 = frame_diff7.copy()
            elif visual_name == 'motion_hist':
                vis7 = np.uint8(np.clip((motion_history7-(timestamp7-MHI_DURATION)) / MHI_DURATION, 0, 1)*255)
                vis7 = cv2.cvtColor(vis7, cv2.COLOR_GRAY2BGR)
            elif visual_name == 'grad_orient':
                hsv7[:,:,0] = mg_orient7/2
                hsv7[:,:,2] = mg_mask7*255
                vis7 = cv2.cvtColor(hsv7, cv2.COLOR_HSV2BGR)

            visCopy7 = vis7.copy()
            draw_str(visCopy7, (20, 20), visual_name)
            cv2.imshow('Output7', visCopy7)


        if(subFrameCount14 == 0):
            frame_diff14 = cv2.absdiff(frame14, prev_frame14)
            prev_frame14 = frame14.copy()
            gray_diff14 = cv2.cvtColor(frame_diff14, cv2.COLOR_BGR2GRAY)
            thrs = DEFAULT_THRESHOLD
            ret14, motion_mask14 = cv2.threshold(gray_diff14, thrs, 1, cv2.THRESH_BINARY)
            timestamp14 = clock()
            cv2.updateMotionHistory(motion_mask14, motion_history14, timestamp14, MHI_DURATION)

            mg_mask14, mg_orient14 = cv2.calcMotionGradient( motion_history14, MAX_TIME_DELTA, MIN_TIME_DELTA, apertureSize=5 )
            seg_mask14, seg_bounds14 = cv2.segmentMotion(motion_history14, timestamp14, MAX_TIME_DELTA)

            visual_name = 'motion_hist' #visuals[cv2.getTrackbarPos('visual', 'Output')]
            if visual_name == 'input':
                vis14 = frame14.copy()
            elif visual_name == 'frame_diff':
                vis14 = frame_diff14.copy()
            elif visual_name == 'motion_hist':
                vis14 = np.uint8(np.clip((motion_history14-(timestamp14-MHI_DURATION)) / MHI_DURATION, 0, 1)*255)
                vis14 = cv2.cvtColor(vis14, cv2.COLOR_GRAY2BGR)
            elif visual_name == 'grad_orient':
                hsv14[:,:,0] = mg_orient14/2
                hsv14[:,:,2] = mg_mask14*255
                vis14 = cv2.cvtColor(hsv14, cv2.COLOR_HSV2BGR)

            visCopy14 = vis14.copy()
            draw_str(visCopy14, (20, 20), visual_name)
            cv2.imshow('Output14', visCopy14)

        # cv2.waitKey(0)
        hasAtLeastOneKeyPoint = False

        if(subFrameCount == 0):
            mhi_vis = np.uint8(np.clip((motion_history-(timestamp-MHI_DURATION)) / MHI_DURATION, 0, 1)*255)
            mhi_vis = cv2.cvtColor(mhi_vis, cv2.COLOR_GRAY2BGR)
            kp = detector.detect(mhi_vis)
            print('KeyPoints Length:: ',len(kp))
            if len(kp) > 0:
                features = extractor.compute(mhi_vis,kp)
                featuresDes = features[1]
                hasAtLeastOneKeyPoint = True
                bowDescriptor = bowDE.compute(mhi_vis, kp)

                if(bowDescriptorSAHMIS is None):
                    bowDescriptorSAHMIS = bowDescriptor
                else:
                    bowDescriptorSAHMIS = np.append(bowDescriptorSAHMIS,bowDescriptor)#,axis=0)

                ##Check if the operation on training data or test data
                # testData.append(bowDescriptor)
                # testLabels.append(label)
            else:
                print("No Key points were detectected for this image..")

        if(subFrameCount7 == 0):
            mhi_vis7 = np.uint8(np.clip((motion_history7-(timestamp7-MHI_DURATION)) / MHI_DURATION, 0, 1)*255)
            mhi_vis7 = cv2.cvtColor(mhi_vis7, cv2.COLOR_GRAY2BGR)
            kp7 = detector.detect(mhi_vis7)
            print('KeyPoints7 Length:: ',len(kp7))
            if len(kp7) > 0:
                features7 = extractor.compute(mhi_vis7,kp7)
                featuresDes7 = features7[1]
                hasAtLeastOneKeyPoint = True
                bowDescriptor7 = bowDE.compute(mhi_vis7, kp7)
                if(bowDescriptorSAHMIS is None):
                    bowDescriptorSAHMIS = bowDescriptor7
                else:
                    bowDescriptorSAHMIS = np.append(bowDescriptorSAHMIS,bowDescriptor7)#,axis=0)

                ##Check if the operation on training data or test data
                # testData.append(bowDescriptor7)
            else:
                print("No Key points were detectected for this image7.")


        if(subFrameCount14 == 0):
            mhi_vis14 = np.uint8(np.clip((motion_history14-(timestamp14-MHI_DURATION)) / MHI_DURATION, 0, 1)*255)
            mhi_vis14 = cv2.cvtColor(mhi_vis14, cv2.COLOR_GRAY2BGR)
            kp14 = detector.detect(mhi_vis14)
            print('KeyPoints14 Length:: ',len(kp14))
            if len(kp14) > 0:
                features14 = extractor.compute(mhi_vis14,kp14)
                featuresDes14 = features14[1]
                hasAtLeastOneKeyPoint = True
                bowDescriptor14 = bowDE.compute(mhi_vis14, kp14)
                if(bowDescriptorSAHMIS is None):
                    bowDescriptorSAHMIS = bowDescriptor14
                else:
                    bowDescriptorSAHMIS = np.append(bowDescriptorSAHMIS,bowDescriptor14)#,axis=0)

                ##Check if the operation on training data or test data
                # testData.append(bowDescriptor14)
                # testLabels.append(label)
            else:
                print("No Key points were detectected for this image14..")


        ##Start extracting features
        sift = cv2.SIFT()
        denseDetector = cv2.FeatureDetector_create(sDetector) ##using Dense Feature Detector

        ##Check if there are any detected keypoints before processing.
        if subFrameCount14 == 0 and hasAtLeastOneKeyPoint:  # or frameCounter > 30
            testData.append(bowDescriptorSAHMIS)
            testData = np.float32(testData).reshape(-1,3*dictSize)
            # testLabels = np.float32(testLabels).reshape(-1,1)
            print("testData Shape: ", testData.shape)
            # print("testLabels Shape: ", testLabels.shape)
            print("testData : ", testData)
            # print("testLabels : ", testLabels)

            result = classifier.predict_all(testData)

            #######   Check Accuracy   ########################
            printPageBreak()
            printPageBreak()

            # print("TestLabels: ", testLabels.reshape(1,-1))
            print("Results: ", result.reshape(1,-1));
            frameCounter = 0
            testData = []
            testLabels = []
            featureDesSAHMIS = None
            bowDescriptorSAHMIS = None

        draw_str(frame, (20, 20), label)
        cv2.imshow('Output', visCopy)
        cv2.imshow('Input', frame)
        # cv2.imwrite('keyPoints\\'+file.split("\\")[2]+'_keypoints.jpg',img)
        cv2.waitKey(25)

        ##Update counters
        subFrameCount = subFrameCount + 1
        subFrameCount7 = subFrameCount7 + 1
        subFrameCount14 = subFrameCount14 + 1

        if(subFrameCount14 > skipFrames14):
            subFrameCount = 0
            subFrameCount7 = 0
            subFrameCount14 = 0
            frameCounter = 0
        frameCounter = frameCounter + 1

    cv2.destroyAllWindows()
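detector, extractor, and bowDE are module-level globals in this script. A sketch of how such a bag-of-visual-words pipeline is typically wired with the OpenCV 2.4 API (dictSize and the SIFT/FLANN choices are assumptions):

dictSize = 100
detector = cv2.FeatureDetector_create('SIFT')
extractor = cv2.DescriptorExtractor_create('SIFT')
matcher = cv2.DescriptorMatcher_create('FlannBased')

bowTrainer = cv2.BOWKMeansTrainer(dictSize)
bowDE = cv2.BOWImgDescriptorExtractor(extractor, matcher)

# after feeding training descriptors to bowTrainer.add(...):
#   vocabulary = bowTrainer.cluster()
#   bowDE.setVocabulary(vocabulary)
# then bowDE.compute(image, keypoints) yields the fixed-length histogram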
def feature_extraction_fullVideo(
    videoName,
    startFrame,
    maskRegion,
    featureWriter,
    videoOutput,
    MIN_TIME_DELTA,
    MAX_TIME_DELTA,
    MHI_DURATION,
    THRESH_VALUE,
    DISPLAY=False,
):
    cv2.namedWindow("rat activity recognition")
    visuals = ["input", "frame_diff", "motion_hist", "grad_orient"]
    # use MHI (motion history image) features
    visual_name = visuals[2]

    cam = cv2.VideoCapture(videoName)
    video_len = cam.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT)
    cam.set(cv2.cv.CV_CAP_PROP_POS_FRAMES, startFrame)

    ret, frame = cam.read()
    h, w = frame.shape[:2]
    prev_frame = frame.copy()
    motion_history = np.zeros((h, w), np.float32)
    hsv = np.zeros((h, w, 3), np.uint8)
    hsv[:, :, 1] = 255

    # fps = 15.0
    # fourcc = cv2.cv.CV_FOURCC(*'XVID')

    # outputVideoName = "activityRecognitionResults.avi";
    # VideoOutput = cv2.VideoWriter(outputVideoName,fourcc, fps, (w,h))

    frame_base = extract_background(videoName, startFrame)
    frame_base = cv2.GaussianBlur(frame_base, (3, 3), 0)
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
    # cam.set(cv2.cv.CV_CAP_PROP_POS_FRAMES,ii)

    # for ii in range(int(startFrame)+1,int(video_len)-10):
    for ii in range(1, 3000):
        ret, frame = cam.read()
        ## Mouse segmentation
        mouse, mFeat = segmentation_frame(frame, frame_base, maskRegion, kernel)

        frame_results = cv2.add(frame, mouse)

        frame_diff = cv2.absdiff(frame, prev_frame)
        gray_diff = cv2.cvtColor(frame_diff, cv2.COLOR_BGR2GRAY)

        ret, motion_mask = cv2.threshold(gray_diff, THRESH_VALUE, 1, cv2.THRESH_BINARY)
        timestamp = clock()
        cv2.updateMotionHistory(motion_mask, motion_history, timestamp, MHI_DURATION)
        mg_mask, mg_orient = cv2.calcMotionGradient(motion_history, MAX_TIME_DELTA, MIN_TIME_DELTA, apertureSize=5)
        seg_mask, seg_bounds = cv2.segmentMotion(motion_history, timestamp, MAX_TIME_DELTA)

        if visual_name == "input":
            vis = frame.copy()
        elif visual_name == "frame_diff":
            vis = frame_diff.copy()
        elif visual_name == "motion_hist":
            vis0 = np.uint8(np.clip((motion_history - (timestamp - MHI_DURATION)) / MHI_DURATION, 0, 1) * 255)
            junk, mei0 = cv2.threshold(vis0, 1, 255, cv2.THRESH_BINARY)

            vis0 = vis0 * maskRegion
            mei0 = mei0 * np.sign(maskRegion)

        elif visual_name == "grad_orient":
            hsv[:, :, 0] = mg_orient / 2
            hsv[:, :, 2] = mg_mask * 255
            vis = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)

        ## Compute features
        M1 = cv2.moments(mei0)
        M2 = cv2.moments(vis0)
        Hu1 = cv2.HuMoments(M1)
        Hu2 = cv2.HuMoments(M2)

        smallNum = 1e-200  # scalar floor keeps the (7, 1) shape and avoids log10(0)
        Hu1 = Hu1 + smallNum
        Hu2 = Hu2 + smallNum

        Hu1 = np.sign(Hu1) * np.log10(np.abs(Hu1))
        Hu2 = np.sign(Hu2) * np.log10(np.abs(Hu2))

        if M1["m00"] != 0:
            cx1 = M1["m10"] / M1["m00"]
            cy1 = M1["m01"] / M1["m00"]
        else:
            cx1 = 0
            cy1 = 0

        if M2["m00"] != 0:
            cx2 = M2["m10"] / M2["m00"]
            cy2 = M2["m01"] / M2["m00"]
        else:
            cx2 = 0
            cy2 = 0

        meiSize = np.count_nonzero(mei0)

        if meiSize == 0:
            corner1 = 0
            corner2 = 0
            corner3 = 0
            corner4 = 0
            height = 0
            width = 0
            extend = 0
        else:
            indices = np.nonzero(mei0)
            corner1 = max(indices[0])
            corner2 = min(indices[0])
            corner3 = max(indices[1])
            corner4 = min(indices[1])
            height = corner1 - corner2 + 1
            width = corner3 - corner4 + 1
            extend = meiSize / float(height * width)

        features = [
            ii,
            Hu1[0][0],
            Hu1[1][0],
            Hu1[2][0],
            Hu1[3][0],
            Hu1[4][0],
            Hu1[5][0],
            Hu1[6][0],
            Hu2[0][0],
            Hu2[1][0],
            Hu2[2][0],
            Hu2[3][0],
            Hu2[4][0],
            Hu2[5][0],
            Hu2[6][0],
            cx1,
            cy1,
            cx2,
            cy2,
            meiSize,
            corner1,
            corner2,
            corner3,
            corner4,
            height,
            width,
            extend,
            mFeat[0],
            mFeat[1],
            mFeat[2],
            mFeat[3],
            mFeat[4],
            mFeat[5],
            mFeat[6],
            mFeat[7],
            mFeat[8],
            mFeat[9],
            mFeat[10],
        ]

        featureWriter.writerow(features)

        prev_frame = frame.copy()

        vis = cv2.cvtColor(vis0, cv2.COLOR_GRAY2BGR)
        mei = cv2.cvtColor(mei0, cv2.COLOR_GRAY2BGR)

        frameDist0 = np.concatenate((frame_results, vis), axis=0)
        frameDist = frameDist0[::2, ::2, :]
        videoOutput.write(frameDist)

        if DISPLAY:
            cv2.imshow("Video", frameDist)
            if 0xFF & cv2.waitKey(1) == 27:
                break

    cam.release()
    cv2.destroyAllWindows()
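extract_background is not shown; a common approach, offered here only as a hypothetical sketch, is a pixel-wise median over frames sampled past startFrame:

def extract_background(videoName, startFrame, n_samples=25, step=20):
    # hypothetical helper; the repository's actual version may differ
    cap = cv2.VideoCapture(videoName)
    frames = []
    for k in range(n_samples):
        cap.set(cv2.cv.CV_CAP_PROP_POS_FRAMES, startFrame + k * step)
        ret, frame = cap.read()
        if not ret:
            break
        frames.append(frame)
    cap.release()
    return np.median(np.array(frames), axis=0).astype(np.uint8)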
while 1:
    successFlag, frame = video.read()

    if not successFlag:
        break

    frameDiff = cv2.absdiff(lastFrame, frame)
    greyDiff = cv2.cvtColor(frameDiff, code=cv2.COLOR_BGR2GRAY)

    retval, motionMask = cv2.threshold(greyDiff,10,1,cv2.THRESH_BINARY)

    timestamp = time.clock()
    cv2.updateMotionHistory(motionMask, motionHistory, timestamp, 0.5) #Updates the motion history image by a moving silhouette.
    mg_mask, mg_orient = cv2.calcMotionGradient( motionHistory, 0.25, 0.05, apertureSize=5 ) #to calculate gradient orientation of a motion history image at each pixel.
    seg_mask, seg_bounds = cv2.segmentMotion(motionHistory, timestamp, 0.25) #Splits a motion history image into a few parts corresponding to separate independent motions

    total = motionHistory.sum() / 8
    movement.append(total)

    cv2.imshow("My Window", motionHistory)
    c = cv2.waitKey(7) % 0x100
    if c == 27:
        break

    lastFrame = frame.copy()

xindex = range(len(movement))
scatterplot(xindex, movement)
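scatterplot is presumably a thin matplotlib wrapper around the per-frame motion-energy series; a hypothetical equivalent:

import matplotlib.pyplot as plt

def scatterplot(x, y):
    plt.scatter(x, y, s=4)
    plt.xlabel('frame index')
    plt.ylabel('motion energy')
    plt.show()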
    def process_image(self, cv_image):
        if self.motion_history is None:
            self.h, self.w = cv_image.shape[:2]
            self.prev_frame = cv_image.copy()
            self.motion_history = np.zeros((self.h, self.w), np.float32)
            self.hsv = np.zeros((self.h, self.w, 3), np.uint8)
            self.hsv[:,:,1] = 255
            self.erode_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))  # rectangular 3x3 kernel
        
        color_frame = cv_image.copy()
        frame_diff = cv2.absdiff(color_frame, self.prev_frame)
        gray_diff = cv2.cvtColor(frame_diff, cv2.COLOR_BGR2GRAY)
        
        thresh = cv2.getTrackbarPos('threshold', self.node_name)
        
        ret, motion_mask = cv2.threshold(gray_diff, thresh, 1, cv2.THRESH_BINARY)
        
        motion_mask = cv2.erode(motion_mask, self.erode_kernel, iterations=2)
        motion_mask = cv2.dilate(motion_mask, self.erode_kernel, iterations=2)
                
        timestamp = clock()
        
        cv2.updateMotionHistory(motion_mask, self.motion_history, timestamp, MHI_DURATION)

        mg_mask, mg_orient = cv2.calcMotionGradient(self.motion_history, MAX_TIME_DELTA, MIN_TIME_DELTA, apertureSize=5 )
        seg_mask, seg_bounds = cv2.segmentMotion(self.motion_history, timestamp, MAX_TIME_DELTA)

        visual_name = self.visuals[cv2.getTrackbarPos('visual', self.node_name)]
        if visual_name == 'input':
            vis = cv_image.copy()
        elif visual_name == 'frame_diff':
            vis = frame_diff.copy()
        elif visual_name == 'motion_hist':
            vis = np.uint8(np.clip((self.motion_history-(timestamp-MHI_DURATION)) / MHI_DURATION, 0, 1)*255)
        elif visual_name == 'motion_hist_color':
            vis = np.uint8(np.clip((self.motion_history-(timestamp-MHI_DURATION)) / MHI_DURATION, 0, 1)*255)
            vis = cv2.cvtColor(vis, cv2.COLOR_GRAY2BGR)
        elif visual_name == 'grad_orient':
            self.hsv[:,:,0] = mg_orient/2
            self.hsv[:,:,2] = mg_mask*255
            vis = cv2.cvtColor(self.hsv, cv2.COLOR_HSV2BGR)

        max_rect_area = 0
        
        for i, rect in enumerate([(0, 0, self.w, self.h)] + list(seg_bounds)):
            x, y, rw, rh = rect
            area = rw*rh
            if area < 64**2:
                continue
            if area < 640*480 and area > max_rect_area:
                max_rect_area = area
                max_rect = rect
            silh_roi = motion_mask[y:y+rh, x:x+rw]
            orient_roi = mg_orient[y:y+rh, x:x+rw]
            mask_roi = mg_mask[y:y+rh, x:x+rw]
            mhi_roi = self.motion_history[y:y+rh, x:x+rw]
            if cv2.norm(silh_roi, cv2.NORM_L1) < area*0.05:
                continue
            angle = cv2.calcGlobalOrientation(orient_roi, mask_roi, mhi_roi, timestamp, MHI_DURATION)
            color = ((255, 0, 0), (0, 0, 255))[i == 0]
            #draw_motion_comp(vis, rect, angle, color)

        #draw_str(vis, (20, 20), visual_name)

        display_image = cv_image.copy()


        if max_rect_area != 0:
            x, y, w, h = max_rect
            display = color_frame[y:y+h, x:x+w]
            if visual_name == 'motion_hist':
                display = vis.copy()
            else:
                display = cv2.bitwise_and(color_frame, vis, vis)

            draw_str(vis, (20, 20), visual_name)
            
            contour_image = vis.copy()
            
            contours, hierarchy = cv2.findContours(contour_image, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
            
            contour_points = list()
            
            if len(contours) != 0:
                for cnt in contours:
                    contour_points.append(cnt)
                
                vstack_points = np.vstack(contour_points)
                if len(vstack_points) > 5:
                    z_ellipse = cv2.fitEllipse(vstack_points)
                    cv2.ellipse(display_image, z_ellipse, (0,255,0), 2)
        
                cv2.drawContours(display_image, contours, -1, (0,255,0), 3)
            
            cv2.imshow("Contours", display_image)

        self.prev_frame = color_frame
        
        #cv2.waitKey(5)
                
        return cv_image
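One portability note on process_image: cv2.findContours returns (contours, hierarchy) in OpenCV 2.4 and 4.x, but (image, contours, hierarchy) in 3.x. A version-tolerant unpacking, if this code needs to run across versions:

res = cv2.findContours(contour_image, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
contours, hierarchy = res if len(res) == 2 else res[1:]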
Example #13
successFlag, frame = video.read()

lastFrame = frame.copy()

h, w = frame.shape[:2]
motionHistory = numpy.zeros((h, w), numpy.float32)

while 1:
    successFlag, frame = video.read()

    if not successFlag:
        break

    frameDiff = cv2.absdiff(lastFrame, frame)
    greyDiff = cv2.cvtColor(frameDiff, code=cv2.COLOR_BGR2GRAY)

    retval, motionMask = cv2.threshold(greyDiff,10,1,cv2.THRESH_BINARY)

    timestamp = time.clock()
    cv2.updateMotionHistory(motionMask, motionHistory, timestamp, 0.5)
    mg_mask, mg_orient = cv2.calcMotionGradient( motionHistory, 0.25, 0.05, apertureSize=5 )
    seg_mask, seg_bounds = cv2.segmentMotion(motionHistory, timestamp, 0.25)

    total = numpy.sum(motionHistory) / 8
    print "movement: ", total

    cv2.imshow("My Window", motionHistory)
    cv2.waitKey(1)

    lastFrame = frame.copy()
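cv2.imshow maps a float32 image through [0, 1], while the MHI stores raw timestamps, so the window above saturates to white almost immediately. Normalizing as the earlier examples do gives a readable display (0.5 is the MHI_DURATION used in this snippet):

vis = numpy.uint8(numpy.clip((motionHistory - (timestamp - 0.5)) / 0.5, 0, 1) * 255)
cv2.imshow("My Window", vis)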
Example #14
    def process_image(self, cv_image):
        if self.motion_history is None:
            self.h, self.w = cv_image.shape[:2]
            self.prev_frame = cv_image.copy()
            self.motion_history = np.zeros((self.h, self.w), np.float32)
            self.hsv = np.zeros((self.h, self.w, 3), np.uint8)
            self.hsv[:, :, 1] = 255
            self.erode_kernel = cv2.getStructuringElement(
                cv2.MORPH_RECT, (3, 3))  # rectangular 3x3 kernel

        color_frame = cv_image.copy()
        frame_diff = cv2.absdiff(color_frame, self.prev_frame)
        gray_diff = cv2.cvtColor(frame_diff, cv2.COLOR_BGR2GRAY)

        thresh = cv2.getTrackbarPos('threshold', self.node_name)

        ret, motion_mask = cv2.threshold(gray_diff, thresh, 1,
                                         cv2.THRESH_BINARY)

        motion_mask = cv2.erode(motion_mask, self.erode_kernel, iterations=2)
        motion_mask = cv2.dilate(motion_mask, self.erode_kernel, iterations=2)

        timestamp = clock()

        cv2.updateMotionHistory(motion_mask, self.motion_history, timestamp,
                                MHI_DURATION)

        mg_mask, mg_orient = cv2.calcMotionGradient(self.motion_history,
                                                    MAX_TIME_DELTA,
                                                    MIN_TIME_DELTA,
                                                    apertureSize=5)
        seg_mask, seg_bounds = cv2.segmentMotion(self.motion_history,
                                                 timestamp, MAX_TIME_DELTA)

        visual_name = self.visuals[cv2.getTrackbarPos('visual',
                                                      self.node_name)]
        if visual_name == 'input':
            vis = cv_image.copy()
        elif visual_name == 'frame_diff':
            vis = frame_diff.copy()
        elif visual_name == 'motion_hist':
            vis = np.uint8(
                np.clip(
                    (self.motion_history -
                     (timestamp - MHI_DURATION)) / MHI_DURATION, 0, 1) * 255)
        elif visual_name == 'motion_hist_color':
            vis = np.uint8(
                np.clip(
                    (self.motion_history -
                     (timestamp - MHI_DURATION)) / MHI_DURATION, 0, 1) * 255)
            vis = cv2.cvtColor(vis, cv2.COLOR_GRAY2BGR)
        elif visual_name == 'grad_orient':
            self.hsv[:, :, 0] = mg_orient / 2
            self.hsv[:, :, 2] = mg_mask * 255
            vis = cv2.cvtColor(self.hsv, cv2.COLOR_HSV2BGR)

        max_rect_area = 0

        for i, rect in enumerate([(0, 0, self.w, self.h)] + list(seg_bounds)):
            x, y, rw, rh = rect
            area = rw * rh
            if area < 64**2:
                continue
            if area < 640 * 480 and area > max_rect_area:
                max_rect_area = area
                max_rect = rect
            silh_roi = motion_mask[y:y + rh, x:x + rw]
            orient_roi = mg_orient[y:y + rh, x:x + rw]
            mask_roi = mg_mask[y:y + rh, x:x + rw]
            mhi_roi = self.motion_history[y:y + rh, x:x + rw]
            if cv2.norm(silh_roi, cv2.NORM_L1) < area * 0.05:
                continue
            angle = cv2.calcGlobalOrientation(orient_roi, mask_roi, mhi_roi,
                                              timestamp, MHI_DURATION)
            color = ((255, 0, 0), (0, 0, 255))[i == 0]
            #draw_motion_comp(vis, rect, angle, color)

        #draw_str(vis, (20, 20), visual_name)

        display_image = cv_image.copy()

        if max_rect_area != 0:
            x, y, w, h = max_rect
            display = color_frame[y:y + h, x:x + w]
            if visual_name == 'motion_hist':
                display = vis.copy()
            else:
                display = cv2.bitwise_and(color_frame, vis, vis)

            draw_str(vis, (20, 20), visual_name)

            contour_image = vis.copy()

            contours, hierarchy = cv2.findContours(contour_image,
                                                   cv2.RETR_TREE,
                                                   cv2.CHAIN_APPROX_SIMPLE)

            contour_points = list()

            if len(contours) != 0:
                for cnt in contours:
                    contour_points.append(cnt)

                vstack_points = np.vstack(contour_points)
                if len(vstack_points) > 5:
                    z_ellipse = cv2.fitEllipse(vstack_points)
                    cv2.ellipse(display_image, z_ellipse, (0, 255, 0), 2)

                cv2.drawContours(display_image, contours, -1, (0, 255, 0), 3)

            cv2.imshow("Contours", display_image)

        self.prev_frame = color_frame

        #cv2.waitKey(5)

        return cv_image
def processTrainingFiles(oper="extractTrainingVocabulary",fileType="train"):
    global saveTrainFeaturesDes, visuals, gSkipFrames

    cv2.namedWindow('Output')
    cv2.createTrackbar('visual', 'Output', 2, len(visuals)-1, nothing)
    cv2.createTrackbar('threshold', 'Output', DEFAULT_THRESHOLD, 255, nothing)

    printPageBreak()
    print("\n\nProcessing "+fileType.capitalize()+"ing Files")
    if fileType == "train":
        trainTestFiles = groupedTrainFiles
    else:
        trainTestFiles = groupedTestFiles

    for label, fileList in trainTestFiles.iteritems():
        global trainData, testData, trainLabels, testLabels
        for file in fileList:
            frameCounter = 0
            print(label,file)
            cam = cv2.VideoCapture(file)

            ret, frame = cam.read()
            h, w = frame.shape[:2]
            prev_frame = frame.copy()
            motion_history = np.zeros((h, w), np.float32)
            hsv = np.zeros((h, w, 3), np.uint8)
            hsv[:,:,1] = 255
            totalFrames = int(cam.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT))
            print(totalFrames)

            prev_frame7 = frame.copy()
            motion_history7 = np.zeros((h, w), np.float32)
            hsv7 = np.zeros((h, w, 3), np.uint8)
            hsv7[:,:,1] = 255

            prev_frame14 = frame.copy()
            motion_history14 = np.zeros((h, w), np.float32)
            hsv14 = np.zeros((h, w, 3), np.uint8)
            hsv14[:,:,1] = 255

            frameCounter = 1
            subFrameCount = 0
            skipFrames = 1

            subFrameCount7 = 0
            skipFrames7 = 7

            subFrameCount14 = 0
            skipFrames14 = 14

            featureDesSAHMIS = None
            bowDescriptorSAHMIS = None

            timestamp = clock()
            timestamp7 = clock()
            timestamp14 = clock()


            ##Read all frames
            while(frameCounter < totalFrames):
                ret, frame = cam.read()
                frame7 = frame.copy()
                frame14 = frame.copy()

                if(subFrameCount == 0):
                    frame_diff = cv2.absdiff(frame, prev_frame)
                    prev_frame = frame.copy()
                    gray_diff = cv2.cvtColor(frame_diff, cv2.COLOR_BGR2GRAY)
                    thrs = DEFAULT_THRESHOLD
                    ret, motion_mask = cv2.threshold(gray_diff, thrs, 1, cv2.THRESH_BINARY)
                    timestamp = clock()
                    cv2.updateMotionHistory(motion_mask, motion_history, timestamp, MHI_DURATION)

                    mg_mask, mg_orient = cv2.calcMotionGradient( motion_history, MAX_TIME_DELTA, MIN_TIME_DELTA, apertureSize=5 )
                    seg_mask, seg_bounds = cv2.segmentMotion(motion_history, timestamp, MAX_TIME_DELTA)

                    visual_name = 'motion_hist' #visuals[cv2.getTrackbarPos('visual', 'Output')]
                    if visual_name == 'input':
                        vis = frame.copy()
                    elif visual_name == 'frame_diff':
                        vis = frame_diff.copy()
                    elif visual_name == 'motion_hist':
                        vis = np.uint8(np.clip((motion_history-(timestamp-MHI_DURATION)) / MHI_DURATION, 0, 1)*255)
                        vis = cv2.cvtColor(vis, cv2.COLOR_GRAY2BGR)
                    elif visual_name == 'grad_orient':
                        hsv[:,:,0] = mg_orient/2
                        hsv[:,:,2] = mg_mask*255
                        vis = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)

                    for i, rect in enumerate([(0, 0, w, h)] + list(seg_bounds)):
                        x, y, rw, rh = rect
                        area = rw*rh
                        if area < 64**2:
                            continue
                        silh_roi   = motion_mask   [y:y+rh,x:x+rw]
                        orient_roi = mg_orient     [y:y+rh,x:x+rw]
                        mask_roi   = mg_mask       [y:y+rh,x:x+rw]
                        mhi_roi    = motion_history[y:y+rh,x:x+rw]
                        if cv2.norm(silh_roi, cv2.NORM_L1) < area*0.05:
                            continue
                        angle = cv2.calcGlobalOrientation(orient_roi, mask_roi, mhi_roi, timestamp, MHI_DURATION)
                        color = ((255, 0, 0), (0, 0, 255))[i == 0]
                        draw_motion_comp(vis, rect, angle, color)

                    visCopy = vis.copy()
                    draw_str(visCopy, (20, 20), visual_name)
                    cv2.imshow('Output', visCopy)

                if(subFrameCount7 == 0):
                    frame_diff7 = cv2.absdiff(frame7, prev_frame7)
                    prev_frame7 = frame7.copy()
                    gray_diff7 = cv2.cvtColor(frame_diff7, cv2.COLOR_BGR2GRAY)
                    thrs = DEFAULT_THRESHOLD
                    ret7, motion_mask7 = cv2.threshold(gray_diff7, thrs, 1, cv2.THRESH_BINARY)
                    timestamp7 = clock()
                    cv2.updateMotionHistory(motion_mask7, motion_history7, timestamp7, MHI_DURATION)

                    mg_mask7, mg_orient7 = cv2.calcMotionGradient( motion_history7, MAX_TIME_DELTA, MIN_TIME_DELTA, apertureSize=5 )
                    seg_mask7, seg_bounds7 = cv2.segmentMotion(motion_history7, timestamp7, MAX_TIME_DELTA)

                    visual_name = 'motion_hist' #visuals[cv2.getTrackbarPos('visual', 'Output')]
                    if visual_name == 'input':
                        vis7 = frame7.copy()
                    elif visual_name == 'frame_diff':
                        vis7 = frame_diff7.copy()
                    elif visual_name == 'motion_hist':
                        vis7 = np.uint8(np.clip((motion_history7-(timestamp7-MHI_DURATION)) / MHI_DURATION, 0, 1)*255)
                        vis7 = cv2.cvtColor(vis7, cv2.COLOR_GRAY2BGR)
                    elif visual_name == 'grad_orient':
                        hsv7[:,:,0] = mg_orient7/2
                        hsv7[:,:,2] = mg_mask7*255
                        vis7 = cv2.cvtColor(hsv7, cv2.COLOR_HSV2BGR)

                    visCopy7 = vis7.copy()
                    draw_str(visCopy7, (20, 20), visual_name)
                    cv2.imshow('Output7', visCopy7)

                if(subFrameCount14 == 0):
                    frame_diff14 = cv2.absdiff(frame14, prev_frame14)
                    prev_frame14 = frame14.copy()
                    gray_diff14 = cv2.cvtColor(frame_diff14, cv2.COLOR_BGR2GRAY)
                    thrs = DEFAULT_THRESHOLD
                    ret14, motion_mask14 = cv2.threshold(gray_diff14, thrs, 1, cv2.THRESH_BINARY)
                    timestamp14 = clock()
                    cv2.updateMotionHistory(motion_mask14, motion_history14, timestamp14, MHI_DURATION)

                    mg_mask14, mg_orient14 = cv2.calcMotionGradient( motion_history14, MAX_TIME_DELTA, MIN_TIME_DELTA, apertureSize=5 )
                    seg_mask14, seg_bounds14 = cv2.segmentMotion(motion_history14, timestamp14, MAX_TIME_DELTA)

                    visual_name = 'motion_hist' #visuals[cv2.getTrackbarPos('visual', 'Output')]
                    if visual_name == 'input':
                        vis14 = frame14.copy()
                    elif visual_name == 'frame_diff':
                        vis14 = frame_diff14.copy()
                    elif visual_name == 'motion_hist':
                        vis14 = np.uint8(np.clip((motion_history14-(timestamp14-MHI_DURATION)) / MHI_DURATION, 0, 1)*255)
                        vis14 = cv2.cvtColor(vis14, cv2.COLOR_GRAY2BGR)
                    elif visual_name == 'grad_orient':
                        hsv14[:,:,0] = mg_orient14/2
                        hsv14[:,:,2] = mg_mask14*255
                        vis14 = cv2.cvtColor(hsv14, cv2.COLOR_HSV2BGR)

                    visCopy14 = vis14.copy()
                    draw_str(visCopy14, (20, 20), visual_name)
                    cv2.imshow('Output14', visCopy14)


                if(visual_name == 'motion_hist'):
                    mhi_vis = np.uint8(np.clip((motion_history-(timestamp-MHI_DURATION)) / MHI_DURATION, 0, 1)*255)
                    mhi_vis = cv2.cvtColor(mhi_vis, cv2.COLOR_GRAY2BGR)
                    mhi_vis7 = np.uint8(np.clip((motion_history7-(timestamp7-MHI_DURATION)) / MHI_DURATION, 0, 1)*255)
                    mhi_vis7 = cv2.cvtColor(mhi_vis7, cv2.COLOR_GRAY2BGR)
                    mhi_vis14 = np.uint8(np.clip((motion_history14-(timestamp14-MHI_DURATION)) / MHI_DURATION, 0, 1)*255)
                    mhi_vis14 = cv2.cvtColor(mhi_vis14, cv2.COLOR_GRAY2BGR)

                ##Start extracting features
                sift = cv2.SIFT()
                denseDetector = cv2.FeatureDetector_create(sDetector) ##using Dense Feature Detector

                kp = detector.detect(mhi_vis)
                print('KeyPoints Length:: ',len(kp))
                kp7 = detector.detect(mhi_vis7)
                print('KeyPoints7 Length:: ',len(kp7))
                kp14 = detector.detect(mhi_vis14)
                print('KeyPoints14 Length:: ',len(kp14))

                hasAtleastOneKP = False

                ##Check if there are any detected keypoints before processing.
                if len(kp) > 0:
                    hasAtleastOneKP = True
                    features = extractor.compute(mhi_vis,kp)
                    featuresDes = features[1]

                    if(featureDesSAHMIS is None):
                        featureDesSAHMIS = featuresDes
                    else:
                        featureDesSAHMIS = np.append(featureDesSAHMIS,featuresDes,axis=0)

                    print('Descriptors:: ',featuresDes)
                    print('Descriptors Length:: ',len(featuresDes))
                    print('Descriptors Shape:: ',featuresDes.shape)

                    # print('KeyPoints:: ',kp)
                    # print('Descriptors:: ',des)
                    #desFlattened = features.flatten()
                    # print('desFlattened Length:: ',len(desFlattened)
                    if(oper == "extractTrainingVocabulary"):
                        # bowTrainer.add(featuresDes)

                        saveTrainFeaturesDes.append(featuresDes)
                        cv2.imwrite('mhiImages\\'+file.split("\\")[2]+'_mhi.jpg',mhi_vis)
                        # saveTrainFeaturesDes1 = np.array(saveTrainFeaturesDes)
                        # saveTrainFeaturesDes1.dump(open('trainFeatures.dat', 'wb'))
                    elif(oper=="extractBOWDescriptor"):
                        bowDescriptor = bowDE.compute(mhi_vis, kp)
                        # descriptors.push_back(bowDescriptor);
                        # print('bowDescriptor:: ',bowDescriptor)
                        print('bowDescriptor Length:: ',len(bowDescriptor))
                        print('bowDescriptor Shape:: ',bowDescriptor.shape)

                        ##Check if the operation on training data or test data
                        if(fileType=="train"):
                            if(bowDescriptorSAHMIS is None):
                                bowDescriptorSAHMIS = bowDescriptor
                            else:
                                bowDescriptorSAHMIS = np.append(bowDescriptorSAHMIS,bowDescriptor)#,axis=0)
                            img=cv2.drawKeypoints(mhi_vis,kp)
                            cv2.imwrite('keyPoints\\'+file.split("\\")[2]+'_keypoints.jpg',img)
                        else:
                            testData.append(bowDescriptor)
                            testLabels.append(label)
                else:
                    print("No Key points were detectected for this image..")


                ##Check if there are any detected keypoints before processing.
                if len(kp7) > 0:

                    hasAtleastOneKP = True
                    features7 = extractor.compute(mhi_vis7,kp7)
                    featuresDes7 = features7[1]

                    if(featureDesSAHMIS is None):
                        featureDesSAHMIS = featuresDes7
                    else:
                        featureDesSAHMIS = np.append(featureDesSAHMIS,featuresDes7,axis=0)
                    print('Descriptors7:: ',featuresDes7)
                    print('featuresDes7 Length:: ',len(featuresDes7))
                    print('featuresDes7 Shape:: ',featuresDes7.shape)

                    if(oper == "extractTrainingVocabulary"):
                        # bowTrainer.add(featuresDes7)
                        saveTrainFeaturesDes.append(featuresDes7)
                        cv2.imwrite('mhiImages\\'+file.split("\\")[2]+'_mhi7.jpg',mhi_vis7)
                    elif(oper=="extractBOWDescriptor"):
                        bowDescriptor7 = bowDE.compute(mhi_vis7, kp7)
                        print('bowDescriptor7 Length:: ',len(bowDescriptor7))
                        print('bowDescriptor7 Shape:: ',bowDescriptor7.shape)

                        ##Check if the operation on training data or test data
                        if(fileType=="train"):
                            if(bowDescriptorSAHMIS is None):
                                bowDescriptorSAHMIS = bowDescriptor7
                            else:
                                bowDescriptorSAHMIS = np.append(bowDescriptorSAHMIS,bowDescriptor7)#,axis=0)
                            img7=cv2.drawKeypoints(mhi_vis7,kp7)
                            cv2.imwrite('keyPoints\\'+file.split("\\")[2]+'_keypoints7.jpg',img7)
                        else:
                            testData.append(bowDescriptor7)
                            testLabels.append(label)
                else:
                    print("No Key points were detectected for this image7..")


                ##Check if there are any detected keypoints before processing.
                if len(kp14) > 0:
                    hasAtleastOneKP = True
                    features14 = extractor.compute(mhi_vis14,kp14)
                    featuresDes14 = features14[1]
                    if(featureDesSAHMIS is None):
                        featureDesSAHMIS = featuresDes14
                    else:
                        featureDesSAHMIS = np.append(featureDesSAHMIS,featuresDes14,axis=0)
                    print('featuresDes14 Length:: ',len(featuresDes14))
                    print('featuresDes14 Shape:: ',featuresDes14.shape)

                    if(oper == "extractTrainingVocabulary"):
                        # bowTrainer.add(featuresDes14)
                        saveTrainFeaturesDes.append(featuresDes14)
                        cv2.imwrite('mhiImages\\'+file.split("\\")[2]+'_mhi14.jpg',mhi_vis14)
                        # saveTrainFeaturesDes1 = np.array(saveTrainFeaturesDes)
                        # saveTrainFeaturesDes1.dump(open('trainFeatures.dat', 'wb'))
                    elif(oper=="extractBOWDescriptor"):
                        bowDescriptor14 = bowDE.compute(mhi_vis14, kp14)
                        # descriptors.push_back(bowDescriptor);
                        # print('bowDescriptor:: ',bowDescriptor)
                        print('bowDescriptor14 Length:: ',len(bowDescriptor14))
                        print('bowDescriptor14 Shape:: ',bowDescriptor14.shape)

                        ##Check if the operation on training data or test data
                        if(fileType=="train"):
                            if(bowDescriptorSAHMIS is None):
                                bowDescriptorSAHMIS = bowDescriptor14
                            else:
                                bowDescriptorSAHMIS = np.append(bowDescriptorSAHMIS,bowDescriptor14)#,axis=0)
                            img14=cv2.drawKeypoints(mhi_vis14,kp14)
                            cv2.imwrite('keyPoints\\'+file.split("\\")[2]+'_keypoints14.jpg',img14)
                        else:
                            testData.append(bowDescriptor14)
                            testLabels.append(label)
                else:
                    print("No Key points were detectected for this image14..")

                if(hasAtleastOneKP):
                    if(oper == "extractTrainingVocabulary"):
                        # print('featureDesSAHMIS:: ',featureDesSAHMIS)
                        print('featureDesSAHMIS Length:: ',len(featureDesSAHMIS))
                        print('featureDesSAHMIS Shape:: ',featureDesSAHMIS.shape)
                        bowTrainer.add(featureDesSAHMIS)
                        saveTrainFeaturesDes.append(featureDesSAHMIS)
                    else:
                        print('bowDescriptorSAHMIS:: ',bowDescriptorSAHMIS)
                        print('bowDescriptorSAHMIS Length:: ',len(bowDescriptorSAHMIS))
                        print('bowDescriptorSAHMIS Shape:: ',bowDescriptorSAHMIS.shape)
                        ##Check if the operation on training data or test data
                        if(fileType=="train"):
                            trainData.append(bowDescriptorSAHMIS)
                            trainLabels.append(label)
                            # trainLabels.append(label)
                            # trainLabels.append(label)
                        else:
                            testData.append(bowDescriptorSAHMIS)
                            testLabels.append(label)
                            # testLabels.append(label)
                            # testLabels.append(label)
                    featureDesSAHMIS = None
                    bowDescriptorSAHMIS = None


                print(file.split("\\")[2])

                subFrameCount = subFrameCount + 1
                if(subFrameCount > skipFrames):
                    subFrameCount = 0

                subFrameCount7 = subFrameCount7 + 1
                if(subFrameCount7 > skipFrames7):
                    subFrameCount7 = 0

                subFrameCount14 = subFrameCount14 + 1
                if(subFrameCount14 > skipFrames14):
                    subFrameCount14 = 0

                frameCounter = frameCounter + 1


                # cv2.imwrite('keyPoints\\'+file.split("\\")[2]+'_keypoints.jpg',img)
                cv2.waitKey(25)

    cv2.destroyAllWindows()
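
# The expression np.uint8(np.clip((motion_history - (timestamp - MHI_DURATION)) / MHI_DURATION, 0, 1)*255)
# recurs in every branch above. A minimal standalone sketch of that normalization; the helper name
# mhi_to_gray8 is illustrative, not from the original code:
def mhi_to_gray8(motion_history, timestamp, duration):
    # Pixels that moved within the last `duration` seconds fade linearly from
    # 255 (moved just now) down to 0 (moved `duration` seconds ago, or never).
    scaled = (motion_history - (timestamp - duration)) / duration
    return np.uint8(np.clip(scaled, 0, 1) * 255)

# usage, mirroring the listings above:
#   mhi_vis = cv2.cvtColor(mhi_to_gray8(motion_history, timestamp, MHI_DURATION), cv2.COLOR_GRAY2BGR)
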
def feature_extraction_fullVideo(videoName, scoreTxtName, startFrame, maskRegion, featureWriter, videoOutput, MIN_TIME_DELTA, MAX_TIME_DELTA, MHI_DURATION, THRESH_VALUE, DISPLAY=False):
    cv2.namedWindow('rat activity recognition')
    visuals = ['input', 'frame_diff', 'motion_hist', 'grad_orient']
    # use MHI features (motion history intensity)
    visual_name = visuals[2]
  
    cam = cv2.VideoCapture(videoName)
    video_len = cam.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT)
    cam.set(cv2.cv.CV_CAP_PROP_POS_FRAMES,startFrame)
    
    ret, frame = cam.read()
    h, w = frame.shape[:2]
    prev_frame = frame.copy()
    motion_history = np.zeros((h, w), np.float32)
    hsv = np.zeros((h, w, 3), np.uint8)
    hsv[:,:,1] = 255
    
    
    fps = 15.0
    fourcc = cv2.cv.CV_FOURCC(*'XVID')

    #outputVideoName = "activityRecognitionResults.avi";
    #VideoOutput = cv2.VideoWriter(outputVideoName,fourcc, fps, (w,h))   
    
    
    
    manualScores = read_manual_scoring(scoreTxtName,video_len)
    
    frame_base = extract_background(videoName, startFrame)
    frame_base = cv2.GaussianBlur(frame_base,(3,3),0) 
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3,3))
    #cam.set(cv2.cv.CV_CAP_PROP_POS_FRAMES,ii)
     
    for ii in range(int(startFrame)+1,int(video_len)-10):
    #while (ii<1000):
        ret, frame = cam.read()
        ## Mouse segmentation
        mouse, junk = segmentation_frame(frame, frame_base, maskRegion, kernel)

        frame_results = cv2.add(frame, mouse)
        cv2.putText(frame_results, 'stage: ' + str(manualScores[ii]), (200, 30),
                    cv2.FONT_HERSHEY_SIMPLEX, 1.0, (255,255,255))
        
        frame_diff = cv2.absdiff(frame, prev_frame)
        gray_diff = cv2.cvtColor(frame_diff, cv2.COLOR_BGR2GRAY)
        
        ret, motion_mask = cv2.threshold(gray_diff, THRESH_VALUE, 1, cv2.THRESH_BINARY)
        timestamp = clock()
        cv2.updateMotionHistory(motion_mask, motion_history, timestamp, MHI_DURATION)
        mg_mask, mg_orient = cv2.calcMotionGradient(motion_history, MAX_TIME_DELTA, MIN_TIME_DELTA, apertureSize=5 )
        seg_mask, seg_bounds = cv2.segmentMotion(motion_history, timestamp, MAX_TIME_DELTA)

        if visual_name == 'input':
            vis = frame.copy()
        elif visual_name == 'frame_diff':
            vis = frame_diff.copy()
        elif visual_name == 'motion_hist':
            vis0 = np.uint8(np.clip((motion_history-(timestamp-MHI_DURATION)) / MHI_DURATION, 0, 1)*255)
            junk,mei0 = cv2.threshold(vis0,1,255,cv2.THRESH_BINARY)
            
            vis0 = vis0*maskRegion
            mei0 = mei0*np.sign(maskRegion)  # np.sign replaces the removed scipy.sign alias

        elif visual_name == 'grad_orient':
            hsv[:,:,0] = mg_orient/2
            hsv[:,:,2] = mg_mask*255
            vis = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
            
        ## Compute features
        M1 = cv2.moments(mei0)
        M2 = cv2.moments(vis0)    
        Hu1 = cv2.HuMoments(M1)
        Hu2 = cv2.HuMoments(M2)
        
        smallNum = 1e-200  # scalar epsilon keeps the (7,1) Hu arrays their shape and makes log10 well-defined
        Hu1 = Hu1 + smallNum
        Hu2 = Hu2 + smallNum
        
        Hu1 = np.sign(Hu1)*np.log10(np.abs(Hu1))
        Hu2 = np.sign(Hu2)*np.log10(np.abs(Hu2))
      
        if M1['m00'] != 0:
            cx1 = M1['m10']/M1['m00']
            cy1 = M1['m01']/M1['m00']
        else:
            cx1 = 0
            cy1 = 0

        if M2['m00'] != 0:
            cx2 = M2['m10']/M2['m00']
            cy2 = M2['m01']/M2['m00']
        else:
            cx2 = 0
            cy2 = 0
                                       
        meiSize = np.count_nonzero(mei0)
        
        if meiSize == 0:
            corner1 = 0
            corner2 = 0
            corner3 = 0
            corner4 = 0
            height = 0
            width = 0
            extent = 0
        else:
            indices = np.nonzero(mei0)
            corner1 = max(indices[0])
            corner2 = min(indices[0])
            corner3 = max(indices[1])
            corner4 = min(indices[1])
            height = corner1 - corner2 + 1
            width = corner3 - corner4 + 1
            extent = meiSize/float(height*width)

        features = [ii, Hu1[0][0], Hu1[1][0], Hu1[2][0], Hu1[3][0], Hu1[4][0], Hu1[5][0], Hu1[6][0],
                    Hu2[0][0], Hu2[1][0], Hu2[2][0], Hu2[3][0], Hu2[4][0], Hu2[5][0], Hu2[6][0],
                    cx1, cy1, cx2, cy2, meiSize, corner1, corner2, corner3, corner4, height, width, extent, manualScores[ii]]
       
        featureWriter.writerow(features)
         
        prev_frame = frame.copy()      
     
        vis = cv2.cvtColor(vis0, cv2.COLOR_GRAY2BGR)
        mei = cv2.cvtColor(mei0, cv2.COLOR_GRAY2BGR)
            
                            
        if DISPLAY:

            cv2.imshow('MHI', vis)
            cv2.imshow('MEI', mei)
            cv2.imshow('Video',frame_results)
            videoOutput.write(frame_results)   
            
            if 0xff & cv2.waitKey(1) == 27:
                break
    
         
    cam.release()
    cv2.destroyAllWindows()
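
# The Hu-moment block above (cv2.moments -> cv2.HuMoments -> sign * log10|.|) is the core of the
# per-frame feature vector. A compact sketch of the same transform as a helper; log_hu is an
# illustrative name, not part of the original code:
def log_hu(image):
    # 7 Hu invariants, log-scaled to compress their huge dynamic range; the sign
    # is kept so that mirrored shapes remain distinguishable.
    hu = cv2.HuMoments(cv2.moments(image)) + 1e-200  # epsilon: log10(0) is undefined
    return (np.sign(hu) * np.log10(np.abs(hu))).flatten()
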
def doTestArchive():
    global saveTrainFeaturesDes, visuals, gSkipFrames, testData,testLabels

    print("Testing on real time video")
    cv2.namedWindow('Output')
    cv2.createTrackbar('visual', 'Output', 2, len(visuals)-1, nothing)
    cv2.createTrackbar('threshold', 'Output', DEFAULT_THRESHOLD, 255, nothing)
    testData = []
    testLabels = []

    printPageBreak()
    print("\n\nProcessing real time video..")

    frameCounter = 0
    cam = cv2.VideoCapture(0)

    ret, frame = cam.read()
    h, w = frame.shape[:2]
    prev_frame = frame.copy()
    motion_history = np.zeros((h, w), np.float32)
    hsv = np.zeros((h, w, 3), np.uint8)
    hsv[:,:,1] = 255
    frameCounter = frameCounter +1
    subFrameCount = 0
    skipFrames = gSkipFrames

    label = ""

    ##Read all frames
    while(True):
        ret, frame = cam.read()
        if(subFrameCount == 0):
            frame_diff = cv2.absdiff(frame, prev_frame)
            prev_frame = frame.copy()
            gray_diff = cv2.cvtColor(frame_diff, cv2.COLOR_BGR2GRAY)
            thrs = cv2.getTrackbarPos('threshold', 'Output')
            ret, motion_mask = cv2.threshold(gray_diff, thrs, 1, cv2.THRESH_BINARY)
            timestamp = clock()
            cv2.updateMotionHistory(motion_mask, motion_history, timestamp, MHI_DURATION)

            mg_mask, mg_orient = cv2.calcMotionGradient( motion_history, MAX_TIME_DELTA, MIN_TIME_DELTA, apertureSize=5 )
            seg_mask, seg_bounds = cv2.segmentMotion(motion_history, timestamp, MAX_TIME_DELTA)

            visual_name = visuals[cv2.getTrackbarPos('visual', 'Output')]
            if visual_name == 'input':
                vis = frame.copy()
            elif visual_name == 'frame_diff':
                vis = frame_diff.copy()
            elif visual_name == 'motion_hist':
                vis = np.uint8(np.clip((motion_history-(timestamp-MHI_DURATION)) / MHI_DURATION, 0, 1)*255)
                vis = cv2.cvtColor(vis, cv2.COLOR_GRAY2BGR)
            elif visual_name == 'grad_orient':
                hsv[:,:,0] = mg_orient/2
                hsv[:,:,2] = mg_mask*255
                vis = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)

            for i, rect in enumerate([(0, 0, w, h)] + list(seg_bounds)):
                x, y, rw, rh = rect
                area = rw*rh
                if area < 64**2:
                    continue
                silh_roi   = motion_mask   [y:y+rh,x:x+rw]
                orient_roi = mg_orient     [y:y+rh,x:x+rw]
                mask_roi   = mg_mask       [y:y+rh,x:x+rw]
                mhi_roi    = motion_history[y:y+rh,x:x+rw]
                if cv2.norm(silh_roi, cv2.NORM_L1) < area*0.05:
                    continue
                angle = cv2.calcGlobalOrientation(orient_roi, mask_roi, mhi_roi, timestamp, MHI_DURATION)
                color = ((255, 0, 0), (0, 0, 255))[i == 0]
                draw_motion_comp(vis, rect, angle, color)

            visCopy = vis.copy()
            draw_str(visCopy, (20, 20), visual_name)
            cv2.imshow('Output', visCopy)

        subFrameCount = subFrameCount + 1
        if(subFrameCount > skipFrames):
            subFrameCount = 0
        frameCounter = frameCounter + 1

        if(visual_name == 'motion_hist'):
            mhi_vis = np.uint8(np.clip((motion_history-(timestamp-MHI_DURATION)) / MHI_DURATION, 0, 1)*255)
            mhi_vis = cv2.cvtColor(mhi_vis, cv2.COLOR_GRAY2BGR)
        else:
            hsv[:,:,0] = mg_orient/2
            hsv[:,:,2] = mg_mask*255
            mhi_vis = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
        ##Start extracting features
        sift = cv2.SIFT()
        denseDetector = cv2.FeatureDetector_create(sDetector) ##using Dense Feature Detector

        kp = detector.detect(mhi_vis)
        print('KeyPoints Length:: ',len(kp))

        ##Check if there are any detected keypoints before processing.
        if len(kp) > 0:
            features = extractor.compute(mhi_vis,kp)
            featuresDes = features[1]
            # print('Descriptors:: ',featuresDes)
            # print('featuresDes Length:: ',len(featuresDes))
            # print('featuresDes Shape:: ',featuresDes.shape)

            bowDescriptor = bowDE.compute(mhi_vis, kp)
            # descriptors.push_back(bowDescriptor);
            # print('bowDescriptor:: ',bowDescriptor)
            # print('bowDescriptor Length:: ',len(bowDescriptor))
            # print('bowDescriptor Shape:: ',bowDescriptor.shape)

            ##Check if the operation on training data or test data
            testData.append(bowDescriptor)
            testLabels.append(label)
        else:
            print("No Key points were detectected for this image..")

        if(subFrameCount == 0 or frameCounter > 30):
            testData = np.float32(testData).reshape(-1,dictSize)
            # testLabels = np.float32(testLabels).reshape(-1,1)
            print("testData Shape: ", testData.shape)
            # print("testLabels Shape: ", testLabels.shape)
            print("testData : ", testData)
            # print("testLabels : ", testLabels)

            result = classifier.predict_all(testData)

            #######   Check Accuracy   ########################
            printPageBreak()
            printPageBreak()

            # print("TestLabels: ", testLabels.reshape(1,-1))
            print("Results: ", result.reshape(1,-1));
            frameCounter = 0
            testData = []
            testLabels = []

        draw_str(frame, (20, 20), label)
        cv2.imshow('Output', visCopy)
        cv2.imshow('Input', frame)

        # cv2.imwrite('keyPoints\\'+file.split("\\")[2]+'_keypoints.jpg',img)
        cv2.waitKey(25)

    cv2.destroyAllWindows()
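
# doTestArchive above accumulates one BoW histogram per sampled frame and classifies them in
# batches. A minimal sketch of just that batching step, assuming the same globals used above
# (a dictSize-word vocabulary and a trained classifier exposing predict_all):
def classify_bow_batch(bow_rows, classifier, dictSize):
    # stack the per-frame BoW histograms into one float32 matrix, one row per frame
    batch = np.float32(bow_rows).reshape(-1, dictSize)
    return classifier.predict_all(batch)
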
def real_time_evaluation(videoName, featureWriter,classifier, activities, MIN_TIME_DELTA,MAX_TIME_DELTA,MHI_DURATION,THRESH_VALUE,DISPLAY=False): 
    cv2.namedWindow('rat activity recognition')
    visuals = ['input', 'frame_diff', 'motion_hist', 'grad_orient']
    # use MHI features (motion history intensity)
    visual_name = visuals[2]
  
    cam = cv2.VideoCapture(videoName)
    video_len = cam.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT)
    ret, frame = cam.read()
    h, w = frame.shape[:2]
    prev_frame = frame.copy()
    motion_history = np.zeros((h, w), np.float32)
    hsv = np.zeros((h, w, 3), np.uint8)
    hsv[:,:,1] = 255
    
    
    fps = 15.0
    fourcc = cv2.cv.CV_FOURCC(*'XVID')

    outputVideoName = "activityRecognitionResults.avi"
    VideoOutput = cv2.VideoWriter(outputVideoName,fourcc, fps, (w,h))   
    
    
    ii = 0        
    
    #cam.set(cv2.cv.CV_CAP_PROP_POS_FRAMES,ii)
     
    while (ii < video_len):
    #while (ii<1000):
        ii += 1
        ret, frame = cam.read()
        frame_diff = cv2.absdiff(frame, prev_frame)
        gray_diff = cv2.cvtColor(frame_diff, cv2.COLOR_BGR2GRAY)
     
        ret, motion_mask = cv2.threshold(gray_diff, THRESH_VALUE, 1, cv2.THRESH_BINARY)
        timestamp = clock()
        cv2.updateMotionHistory(motion_mask, motion_history, timestamp, MHI_DURATION)
        mg_mask, mg_orient = cv2.calcMotionGradient(motion_history, MAX_TIME_DELTA, MIN_TIME_DELTA, apertureSize=5 )
        seg_mask, seg_bounds = cv2.segmentMotion(motion_history, timestamp, MAX_TIME_DELTA)

        if visual_name == 'input':
            vis = frame.copy()
        elif visual_name == 'frame_diff':
            vis = frame_diff.copy()
        elif visual_name == 'motion_hist':
            vis0 = np.uint8(np.clip((motion_history-(timestamp-MHI_DURATION)) / MHI_DURATION, 0, 1)*255)
            junk,mei0 = cv2.threshold(vis0,1,255,cv2.THRESH_BINARY)

        elif visual_name == 'grad_orient':
            hsv[:,:,0] = mg_orient/2
            hsv[:,:,2] = mg_mask*255
            vis = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
            
        ## Compute features
        M1 = cv2.moments(mei0)
        M2 = cv2.moments(vis0)    
        Hu1 = cv2.HuMoments(M1)
        Hu2 = cv2.HuMoments(M2)
        
        smallNum = 1e-200  # scalar epsilon keeps the (7,1) Hu arrays their shape and makes log10 well-defined
        Hu1 = Hu1 + smallNum
        Hu2 = Hu2 + smallNum
        
        Hu1 = np.sign(Hu1)*np.log10(np.abs(Hu1))
        Hu2 = np.sign(Hu2)*np.log10(np.abs(Hu2))
      
        if M1['m00'] != 0:
            cx1 = M1['m10']/M1['m00']
            cy1 = M1['m01']/M1['m00']
        else:
            cx1 = 0
            cy1 = 0

        if M2['m00'] != 0:
            cx2 = M2['m10']/M2['m00']
            cy2 = M2['m01']/M2['m00']
        else:
            cx2 = 0
            cy2 = 0
                                       
        meiSize = np.count_nonzero(mei0)
        
        if meiSize == 0:
            corner1 = 0
            corner2 = 0
            corner3 = 0
            corner4 = 0
            height = 0
            width = 0
            extent = 0
        else:
            indices = np.nonzero(mei0)
            corner1 = max(indices[0])
            corner2 = min(indices[0])
            corner3 = max(indices[1])
            corner4 = min(indices[1])
            height = corner1 - corner2 + 1
            width = corner3 - corner4 + 1
            extent = meiSize/float(height*width)

        features = [Hu1[0][0], Hu1[1][0], Hu1[2][0], Hu1[3][0], Hu1[4][0], Hu1[5][0], Hu1[6][0],
                    Hu2[0][0], Hu2[1][0], Hu2[2][0], Hu2[3][0], Hu2[4][0], Hu2[5][0], Hu2[6][0],
                    cx1, cy1, cx2, cy2, meiSize, corner1, corner2, corner3, corner4, height, width, extent]
       
        featureWriter.writerow(features)
         
        tag = classifier.predict(features[0:26])
        activity = activities[tag]  
            
        prev_frame = frame.copy()      
     
                            
        if DISPLAY:
            cv2.putText(frame, activity, (5, 25),
                cv2.FONT_HERSHEY_SIMPLEX, 1.0, (255,255,255))
            vis = cv2.cvtColor(vis0, cv2.COLOR_GRAY2BGR)
            mei = cv2.cvtColor(mei0, cv2.COLOR_GRAY2BGR)
            cv2.imshow('MHI', vis)
            cv2.imshow('MEI', mei)
            cv2.imshow('Video',frame)
            VideoOutput.write(frame)
            
            if 0xff & cv2.waitKey(50) == 27:
                break
            
    cam.release()
    cv2.destroyAllWindows()
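
# Both rat-activity functions derive the same box statistics from the binary MEI. A standalone
# sketch of that step; mei_box_features is an illustrative name, not from the original code:
def mei_box_features(mei):
    # size, bounding-box extrema, box height/width, and fill ratio ("extent") of the nonzero pixels
    size = np.count_nonzero(mei)
    if size == 0:
        return [0, 0, 0, 0, 0, 0, 0, 0]
    rows, cols = np.nonzero(mei)
    height = int(rows.max() - rows.min() + 1)
    width = int(cols.max() - cols.min() + 1)
    return [size, int(rows.max()), int(rows.min()), int(cols.max()), int(cols.min()),
            height, width, size / float(height * width)]
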
Example No. 19
def processTrainingFiles(oper="extractTrainingVocabulary", fileType="train"):
    global saveTrainFeaturesDes
    cv2.namedWindow("Output")
    visuals = ["input", "frame_diff", "motion_hist", "grad_orient"]
    cv2.createTrackbar("visual", "Output", 2, len(visuals) - 1, nothing)
    cv2.createTrackbar("threshold", "Output", DEFAULT_THRESHOLD, 255, nothing)

    cv2.namedWindow("Output7")
    # visuals = ['input', 'frame_diff', 'motion_hist', 'grad_orient']
    cv2.createTrackbar("visual", "Output7", 2, len(visuals) - 1, nothing)
    cv2.createTrackbar("threshold", "Output7", DEFAULT_THRESHOLD, 255, nothing)

    cv2.namedWindow("Output14")
    # visuals = ['input', 'frame_diff', 'motion_hist', 'grad_orient']
    cv2.createTrackbar("visual", "Output14", 2, len(visuals) - 1, nothing)
    cv2.createTrackbar("threshold", "Output14", DEFAULT_THRESHOLD, 255, nothing)

    printPageBreak()
    print ("\n\nProcessing " + fileType.capitalize() + "ing Files")
    trainTestFiles = groupedTrainFiles
    if fileType == "train":
        trainTestFiles = groupedTrainFiles
    else:
        trainTestFiles = groupedTestFiles

    for label, fileList in trainTestFiles.iteritems():
        global trainData, testData, trainLabels, testLabels
        for file in fileList:
            frameCounter = 0
            print (label, file)
            cam = cv2.VideoCapture(file)
            # cam = video.create_capture(file,fallback='synth:class=chess:bg=lena.jpg:noise=0.01')
            # cam = video.create_capture(file, fallback='synth:class=chess:bg=lena.jpg:noise=0.01')
            ret, frame = cam.read()
            h, w = frame.shape[:2]
            prev_frame = frame.copy()
            motion_history = np.zeros((h, w), np.float32)
            # print(motion_history)
            hsv = np.zeros((h, w, 3), np.uint8)
            hsv[:, :, 1] = 255

            prev_frame7 = frame.copy()
            motion_history7 = np.zeros((h, w), np.float32)
            # print(motion_history7)
            hsv7 = np.zeros((h, w, 3), np.uint8)
            hsv7[:, :, 1] = 255

            prev_frame14 = frame.copy()
            motion_history14 = np.zeros((h, w), np.float32)
            # print(motion_history)
            hsv14 = np.zeros((h, w, 3), np.uint8)
            hsv14[:, :, 1] = 255

            totalFrames = int(cam.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT))
            frameCounter = frameCounter + 1
            print (totalFrames)

            subFrameCount = 0
            skipFrames = 1

            subFrameCount7 = 0
            skipFrames7 = 7

            subFrameCount14 = 0
            skipFrames14 = 14

            featureDesSAHMIS = None
            bowDescriptorSAHMIS = None

            while frameCounter < totalFrames:
                ret, frame = cam.read()

                """
                #################################################################################SAMHI-1
                """
                if subFrameCount == 0:
                    frame_diff = cv2.absdiff(frame, prev_frame)
                    gray_diff = cv2.cvtColor(frame_diff, cv2.COLOR_BGR2GRAY)
                    # (optional) morphological smoothing / open-close filtering of gray_diff
                    # could be applied here to suppress frame-difference noise before thresholding
                    thrs = cv2.getTrackbarPos("threshold", "Output")
                    ret, motion_mask = cv2.threshold(gray_diff, thrs, 1, cv2.THRESH_BINARY)
                    timestamp = clock()
                    cv2.updateMotionHistory(motion_mask, motion_history, timestamp, MHI_DURATION)

                    mg_mask, mg_orient = cv2.calcMotionGradient(
                        motion_history, MAX_TIME_DELTA, MIN_TIME_DELTA, apertureSize=5
                    )
                    seg_mask, seg_bounds = cv2.segmentMotion(motion_history, timestamp, MAX_TIME_DELTA)

                    visual_name = visuals[cv2.getTrackbarPos("visual", "Output")]
                    if visual_name == "input":
                        vis = frame.copy()
                    elif visual_name == "frame_diff":
                        vis = frame_diff.copy()
                    elif visual_name == "motion_hist":
                        vis = np.uint8(
                            np.clip((motion_history - (timestamp - MHI_DURATION)) / MHI_DURATION, 0, 1) * 255
                        )
                        vis = cv2.cvtColor(vis, cv2.COLOR_GRAY2BGR)
                    elif visual_name == "grad_orient":
                        hsv[:, :, 0] = mg_orient / 2
                        hsv[:, :, 2] = mg_mask * 255
                        vis = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)

                    for i, rect in enumerate([(0, 0, w, h)] + list(seg_bounds)):
                        x, y, rw, rh = rect
                        area = rw * rh
                        if area < 64 ** 2:
                            continue
                        silh_roi = motion_mask[y : y + rh, x : x + rw]
                        orient_roi = mg_orient[y : y + rh, x : x + rw]
                        mask_roi = mg_mask[y : y + rh, x : x + rw]
                        mhi_roi = motion_history[y : y + rh, x : x + rw]
                        if cv2.norm(silh_roi, cv2.NORM_L1) < area * 0.05:
                            continue
                        angle = cv2.calcGlobalOrientation(orient_roi, mask_roi, mhi_roi, timestamp, MHI_DURATION)
                        color = ((255, 0, 0), (0, 0, 255))[i == 0]
                        draw_motion_comp(vis, rect, angle, color)

                    visCopy = vis.copy()
                    draw_str(visCopy, (20, 20), visual_name)
                    cv2.imshow("Output", visCopy)

                    prev_frame = frame.copy()
                    if 0xFF & cv2.waitKey(5) == 27:
                        break
                    cv2.waitKey(25)
                """
                #################################################################################SAMHI-7
                """
                if subFrameCount7 == 0:
                    frame_diff7 = cv2.absdiff(frame, prev_frame7)
                    gray_diff7 = cv2.cvtColor(frame_diff7, cv2.COLOR_BGR2GRAY)
                    # (optional) noise filtering, as noted in the SAMHI-1 branch above
                    thrs = cv2.getTrackbarPos("threshold", "Output")
                    ret7, motion_mask7 = cv2.threshold(gray_diff7, thrs, 1, cv2.THRESH_BINARY)
                    timestamp7 = clock()
                    cv2.updateMotionHistory(motion_mask7, motion_history7, timestamp7, MHI_DURATION)

                    mg_mask7, mg_orient7 = cv2.calcMotionGradient(
                        motion_history7, MAX_TIME_DELTA, MIN_TIME_DELTA, apertureSize=5
                    )
                    seg_mask7, seg_bounds7 = cv2.segmentMotion(motion_history7, timestamp7, MAX_TIME_DELTA)

                    visual_name7 = visuals[cv2.getTrackbarPos("visual", "Output7")]
                    if visual_name7 == "input":
                        vis7 = frame.copy()
                    elif visual_name == "frame_diff":
                        vis7 = frame_diff7.copy()
                    elif visual_name == "motion_hist":
                        vis7 = np.uint8(
                            np.clip((motion_history7 - (timestamp7 - MHI_DURATION)) / MHI_DURATION, 0, 1) * 255
                        )
                        vis7 = cv2.cvtColor(vis7, cv2.COLOR_GRAY2BGR)
                    elif visual_name == "grad_orient":
                        hsv7[:, :, 0] = mg_orient7 / 2
                        hsv7[:, :, 2] = mg_mask7 * 255
                        vis7 = cv2.cvtColor(hsv7, cv2.COLOR_HSV2BGR)

                    for i, rect in enumerate([(0, 0, w, h)] + list(seg_bounds7)):
                        x, y, rw, rh = rect
                        area = rw * rh
                        if area < 64 ** 2:
                            continue
                        silh_roi7 = motion_mask7[y : y + rh, x : x + rw]
                        orient_roi7 = mg_orient7[y : y + rh, x : x + rw]
                        mask_roi7 = mg_mask7[y : y + rh, x : x + rw]
                        mhi_roi7 = motion_history7[y : y + rh, x : x + rw]
                        if cv2.norm(silh_roi7, cv2.NORM_L1) < area * 0.05:
                            continue
                        angle7 = cv2.calcGlobalOrientation(orient_roi7, mask_roi7, mhi_roi7, timestamp7, MHI_DURATION)
                        color7 = ((255, 0, 0), (0, 0, 255))[i == 0]
                        draw_motion_comp(vis7, rect, angle7, color7)

                    visCopy7 = vis7.copy()
                    draw_str(visCopy7, (20, 20), visual_name7)
                    cv2.imshow("Output7", visCopy7)

                    prev_frame7 = frame.copy()
                    if 0xFF & cv2.waitKey(5) == 27:
                        break
                    cv2.waitKey(25)

                """
                #################################################################################SAMHI-14
                """
                if subFrameCount14 == 0:
                    frame_diff14 = cv2.absdiff(frame, prev_frame14)
                    gray_diff14 = cv2.cvtColor(frame_diff14, cv2.COLOR_BGR2GRAY)
                    # (optional) noise filtering, as noted in the SAMHI-1 branch above
                    thrs = cv2.getTrackbarPos("threshold", "Output14")
                    ret14, motion_mask14 = cv2.threshold(gray_diff14, thrs, 1, cv2.THRESH_BINARY)
                    timestamp14 = clock()
                    cv2.updateMotionHistory(motion_mask14, motion_history14, timestamp14, MHI_DURATION)

                    mg_mask14, mg_orient14 = cv2.calcMotionGradient(
                        motion_history14, MAX_TIME_DELTA, MIN_TIME_DELTA, apertureSize=5
                    )
                    seg_mask14, seg_bounds14 = cv2.segmentMotion(motion_history14, timestamp14, MAX_TIME_DELTA)

                    visual_name = visuals[cv2.getTrackbarPos("visual", "Output14")]
                    if visual_name == "input":
                        vis14 = frame.copy()
                    elif visual_name == "frame_diff":
                        vis14 = frame_diff14.copy()
                    elif visual_name == "motion_hist":
                        vis14 = np.uint8(
                            np.clip((motion_history14 - (timestamp14 - MHI_DURATION)) / MHI_DURATION, 0, 1) * 255
                        )
                        vis14 = cv2.cvtColor(vis14, cv2.COLOR_GRAY2BGR)
                    elif visual_name == "grad_orient":
                        hsv14[:, :, 0] = mg_orient14 / 2
                        hsv14[:, :, 2] = mg_mask14 * 255
                        vis14 = cv2.cvtColor(hsv14, cv2.COLOR_HSV2BGR)

                    for i, rect in enumerate([(0, 0, w, h)] + list(seg_bounds14)):
                        x, y, rw, rh = rect
                        area = rw * rh
                        if area < 64 ** 2:
                            continue
                        silh_roi14 = motion_mask14[y : y + rh, x : x + rw]
                        orient_roi14 = mg_orient14[y : y + rh, x : x + rw]
                        mask_roi14 = mg_mask14[y : y + rh, x : x + rw]
                        mhi_roi14 = motion_history14[y : y + rh, x : x + rw]
                        if cv2.norm(silh_roi14, cv2.NORM_L1) < area * 0.05:
                            continue
                        angle14 = cv2.calcGlobalOrientation(
                            orient_roi14, mask_roi14, mhi_roi14, timestamp14, MHI_DURATION
                        )
                        color14 = ((255, 0, 0), (0, 0, 255))[i == 0]
                        draw_motion_comp(vis14, rect, angle14, color14)

                    visCopy14 = vis14.copy()
                    draw_str(visCopy14, (20, 20), visual_name)
                    cv2.imshow("Output14", visCopy14)

                    prev_frame14 = frame.copy()
                    if 0xFF & cv2.waitKey(5) == 27:
                        break
                    cv2.waitKey(25)

                subFrameCount = subFrameCount + 1
                subFrameCount7 = subFrameCount7 + 1
                subFrameCount14 = subFrameCount14 + 1

                if subFrameCount > skipFrames:
                    subFrameCount = 0

                if subFrameCount7 > skipFrames7:
                    subFrameCount7 = 0

                if subFrameCount14 > skipFrames14:
                    subFrameCount14 = 0

                frameCounter = frameCounter + 1
            with open("mhiInfo", "a+") as mhiFile:
                mhiFile.write(
                    "\n======================================================================================================\n"
                )
                for row in motion_history:
                    # print(row)
                    mhiFile.write(" ".join(str(x) for x in row) + "\n")

            if visual_name == "motion_hist":
                mhi_vis = np.uint8(np.clip((motion_history - (timestamp - MHI_DURATION)) / MHI_DURATION, 0, 1) * 255)
                mhi_vis = cv2.cvtColor(mhi_vis, cv2.COLOR_GRAY2BGR)

                mhi_vis7 = np.uint8(np.clip((motion_history7 - (timestamp7 - MHI_DURATION)) / MHI_DURATION, 0, 1) * 255)
                mhi_vis7 = cv2.cvtColor(mhi_vis7, cv2.COLOR_GRAY2BGR)

                mhi_vis14 = np.uint8(
                    np.clip((motion_history14 - (timestamp14 - MHI_DURATION)) / MHI_DURATION, 0, 1) * 255
                )
                mhi_vis14 = cv2.cvtColor(mhi_vis14, cv2.COLOR_GRAY2BGR)
            else:
                hsv[:, :, 0] = mg_orient / 2
                hsv[:, :, 2] = mg_mask * 255
                mhi_vis = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)

                hsv7[:, :, 0] = mg_orient7 / 2
                hsv7[:, :, 2] = mg_mask7 * 255
                mhi_vis7 = cv2.cvtColor(hsv7, cv2.COLOR_HSV2BGR)

                hsv14[:, :, 0] = mg_orient14 / 2
                hsv14[:, :, 2] = mg_mask14 * 255
                mhi_vis14 = cv2.cvtColor(hsv14, cv2.COLOR_HSV2BGR)

            # Remove the noise and do the threshold
            # cv2.cv.Smooth(mhi_vis, mhi_vis, cv2.cv.CV_BLUR, 5,5)
            # cv2.cv.MorphologyEx(mhi_vis, mhi_vis, None, None, cv2.cv.CV_MOP_OPEN)
            # cv2.cv.MorphologyEx(mhi_vis, mhi_vis, None, None, cv2.cv.CV_MOP_CLOSE)
            # cv2.cv.Threshold(mhi_vis, mhi_vis, 10, 255, cv2.cv.CV_THRESH_BINARY_INV)
            # #
            # mhi_vis = cv2.(mhi_vis, kernel, iterations=2)
            # mhi_vis = cv2.erode(mhi_vis, kernel, iterations=2)
            # mhi_vis = cv2.morphologyEx(mhi_vis, cv2.MORPH_OPEN, kernel)
            # mhi_vis = cv2.morphologyEx(mhi_vis, cv2.MORPH_CLOSE, kernel)
            # mhi_vis = cv2.erode(mhi_vis, kernel, iterations=2)
            # mhi_vis = cv2.dilate(mhi_vis, kernel, iterations=2)

            ##Start extracting features
            sift = cv2.SIFT()
            denseDetector = cv2.FeatureDetector_create(sDetector)  ##using Dense Feature Detector

            kp = detector.detect(mhi_vis)

            kp7 = detector.detect(mhi_vis7)

            kp14 = detector.detect(mhi_vis14)

            # kp2, des2 = sift.compute(mhi_vis,kp)
            # img=cv2.drawKeypoints(mhi_vis,kp2)

            print ("KeyPoints Length:: ", len(kp))

            hasAtleastOneKP = False

            ##Check if there are any detected keypoints before processing.
            if len(kp) > 0:
                hasAtleastOneKP = True
                features = extractor.compute(mhi_vis, kp)
                featuresDes = features[1]
                # print('Descriptors:: ',featuresDes)
                print ("Descriptors Length:: ", len(featuresDes))
                print ("Descriptors Shape:: ", featuresDes.shape)

                if featureDesSAHMIS is None:
                    featureDesSAHMIS = featuresDes
                else:
                    featureDesSAHMIS = np.append(featureDesSAHMIS, featuresDes, axis=0)

                if oper == "extractTrainingVocabulary":
                    bowTrainer.add(featureDesSAHMIS)
                    saveTrainFeaturesDes.append(featureDesSAHMIS)
                    cv2.imwrite("mhiImages\\" + file.split("\\")[2] + "_mhi.jpg", mhi_vis)
                elif oper == "extractBOWDescriptor":
                    bowDescriptor = bowDE.compute(mhi_vis, kp)
                    # descriptors.push_back(bowDescriptor);
                    # print('bowDescriptor:: ',bowDescriptor)
                    print ("bowDescriptor Length:: ", len(bowDescriptor))
                    print ("bowDescriptor Shape:: ", bowDescriptor.shape)
                    if fileType == "train":
                        img = cv2.drawKeypoints(mhi_vis, kp)
                        cv2.imwrite("keyPoints\\" + file.split("\\")[2] + "_keypoints.jpg", img)
                    if bowDescriptorSAHMIS is None:
                        bowDescriptorSAHMIS = bowDescriptor
                    else:
                        # bowDescriptorSAHMIS = np.append(bowDescriptorSAHMIS,bowDescriptor)
                        bowDescriptorSAHMIS = np.sum([bowDescriptorSAHMIS, bowDescriptor], axis=0)
            else:
                featuresDes = np.zeros((1, featureSize), np.float32)
                if featureDesSAHMIS is None:
                    featureDesSAHMIS = featuresDes
                else:
                    featureDesSAHMIS = np.append(featureDesSAHMIS, featuresDes, axis=0)
                bowDescriptor = np.zeros((1, 1000), np.float32)
                if bowDescriptorSAHMIS is None:
                    bowDescriptorSAHMIS = bowDescriptor
                else:
                    # bowDescriptorSAHMIS = np.append(bowDescriptorSAHMIS,bowDescriptor)
                    bowDescriptorSAHMIS = np.sum([bowDescriptorSAHMIS, bowDescriptor], axis=0)
                print ("No SAMHI-1 Key points were detectected for this image..")

            ##Check if there are any detected keypoints before processing.
            if len(kp7) > 0:
                hasAtleastOneKP = True
                features7 = extractor.compute(mhi_vis7, kp7)
                featuresDes7 = features7[1]
                # print('Descriptors7:: ',featuresDes7)
                print ("Descriptors7 Length:: ", len(featuresDes7))
                print ("Descriptors7 Shape:: ", featuresDes7.shape)

                if featureDesSAHMIS is None:
                    featureDesSAHMIS = featuresDes7
                else:
                    featureDesSAHMIS = np.append(featureDesSAHMIS, featuresDes7, axis=0)

                if oper == "extractTrainingVocabulary":
                    cv2.imwrite("mhiImages\\" + file.split("\\")[2] + "_mhi7.jpg", mhi_vis7)
                elif oper == "extractBOWDescriptor":
                    bowDescriptor7 = bowDE.compute(mhi_vis7, kp7)
                    # descriptors.push_back(bowDescriptor);
                    # print('bowDescriptor7:: ',bowDescriptor7)
                    print ("bowDescriptor7 Length:: ", len(bowDescriptor7))
                    print ("bowDescriptor7 Shape:: ", bowDescriptor7.shape)
                    if fileType == "train":
                        img7 = cv2.drawKeypoints(mhi_vis7, kp7)
                        cv2.imwrite("keyPoints\\" + file.split("\\")[2] + "_keypoints_7.jpg", img7)
                    if bowDescriptorSAHMIS is None:
                        bowDescriptorSAHMIS = bowDescriptor7
                    else:
                        # print("bowDescriptor7=> ",bowDescriptor7)
                        # bowDescriptorSAHMIS = np.append(bowDescriptorSAHMIS,bowDescriptor7)
                        bowDescriptorSAHMIS = np.sum([bowDescriptorSAHMIS, bowDescriptor7], axis=0)
            else:
                featuresDes7 = np.zeros((1, featureSize), np.float32)
                if featureDesSAHMIS is None:
                    featureDesSAHMIS = featuresDes7
                else:
                    featureDesSAHMIS = np.append(featureDesSAHMIS, featuresDes7, axis=0)
                bowDescriptor7 = np.zeros((1, 1000), np.float32)
                if bowDescriptorSAHMIS is None:
                    bowDescriptorSAHMIS = bowDescriptor7
                else:
                    # bowDescriptorSAHMIS = np.append(bowDescriptorSAHMIS,bowDescriptor7)
                    bowDescriptorSAHMIS = np.sum([bowDescriptorSAHMIS, bowDescriptor7], axis=0)
                print ("No SAMHI-7 Key points were detectected for this image..")

            ##Check if there are any detected keypoints before processing.
            if len(kp14) > 0:
                hasAtleastOneKP = True
                features14 = extractor.compute(mhi_vis14, kp14)
                featuresDes14 = features14[1]
                # print('Descriptor14:: ',featuresDes14)
                print ("Descriptors14 Length:: ", len(featuresDes14))
                print ("Descriptors14 Shape:: ", featuresDes14.shape)

                if featureDesSAHMIS is None:
                    featureDesSAHMIS = featuresDes14
                else:
                    featureDesSAHMIS = np.append(featureDesSAHMIS, featuresDes14, axis=0)

                if oper == "extractTrainingVocabulary":
                    cv2.imwrite("mhiImages\\" + file.split("\\")[2] + "_mhi14.jpg", mhi_vis14)
                elif oper == "extractBOWDescriptor":
                    bowDescriptor14 = bowDE.compute(mhi_vis14, kp14)
                    # descriptors.push_back(bowDescriptor);
                    # print('bowDescriptor14:: ',bowDescriptor14)
                    print ("bowDescriptor14 Length:: ", len(bowDescriptor14))
                    print ("bowDescriptor14 Shape:: ", bowDescriptor7.shape)
                    if fileType == "train":
                        img14 = cv2.drawKeypoints(mhi_vis14, kp14)
                        cv2.imwrite("keyPoints\\" + file.split("\\")[2] + "_keypoints_14.jpg", img14)

                    if bowDescriptorSAHMIS is None:
                        bowDescriptorSAHMIS = bowDescriptor14
                    else:
                        # bowDescriptorSAHMIS = np.append(bowDescriptorSAHMIS,bowDescriptor14)
                        bowDescriptorSAHMIS = np.sum([bowDescriptorSAHMIS, bowDescriptor14], axis=0)
            else:
                featuresDes14 = np.zeros((1, featureSize), np.float32)
                if featureDesSAHMIS is None:
                    featureDesSAHMIS = featuresDes14
                else:
                    featureDesSAHMIS = np.append(featureDesSAHMIS, featuresDes14, axis=0)
                bowDescriptor14 = np.zeros((1, 1000), np.float32)
                if bowDescriptorSAHMIS is None:
                    bowDescriptorSAHMIS = bowDescriptor14
                else:
                    # bowDescriptorSAHMIS = np.append(bowDescriptorSAHMIS,bowDescriptor14)
                    bowDescriptorSAHMIS = np.sum([bowDescriptorSAHMIS, bowDescriptor14], axis=0)
                print ("No SAMHI-14 Key points were detectected for this image..")

            if hasAtleastOneKP:
                if oper == "extractTrainingVocabulary":
                    # print('featureDesSAHMIS:: ',featureDesSAHMIS)
                    print ("featureDesSAHMIS Length:: ", len(featureDesSAHMIS))
                    print ("featureDesSAHMIS Shape:: ", featureDesSAHMIS.shape)
                    bowTrainer.add(featureDesSAHMIS)
                    saveTrainFeaturesDes.append(featureDesSAHMIS)
                else:
                    print ("bowDescriptorSAHMIS:: ", bowDescriptorSAHMIS)
                    print ("bowDescriptorSAHMIS Length:: ", len(bowDescriptorSAHMIS))
                    print ("bowDescriptorSAHMIS Shape:: ", bowDescriptorSAHMIS.shape)
                    ##Check if the operation on training data or test data
                    if fileType == "train":
                        trainData.append(bowDescriptorSAHMIS)
                        trainLabels.append(label)
                        # trainLabels.append(label)
                        # trainLabels.append(label)
                    else:
                        testData.append(bowDescriptorSAHMIS)
                        testLabels.append(label)
                        # testLabels.append(label)
                        # testLabels.append(label)

            print (file.split("\\")[2])

            # cv2.imwrite('keyPoints\\'+file.split("\\")[2]+'_keypoints.jpg',img)
            cv2.waitKey(25)

    cv2.destroyAllWindows()
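
# The three counters above implement the SAMHI idea: the same MHI pipeline run at frame strides
# of 1, 7 and 14, so fast and slow motions each leave a readable history. A minimal sketch of just
# the sampling logic (the names and dict layout are illustrative, not from the source):
strides = {"samhi1": 1, "samhi7": 7, "samhi14": 14}
counters = dict.fromkeys(strides, 0)

def due_scales(counters, strides):
    # return the temporal scales whose MHI should be updated on this frame;
    # a scale with stride s fires once every s + 1 frames, matching the
    # subFrameCount/skipFrames bookkeeping in processTrainingFiles above
    due = [name for name in counters if counters[name] == 0]
    for name in counters:
        counters[name] = (counters[name] + 1) % (strides[name] + 1)
    return due
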
Example No. 20
def main(argv=None):
    if argv is None:
        argv = sys.argv[1:]
    
    parser = argparse.ArgumentParser(description="Track moving objects in a video stream")
    
    parser.add_argument("-f", "--file", 
                        help="use given file")
    parser.add_argument("-p", "--play-only", action="store_true",
                        help="playback only. Don't do any recognition. Useful for sanity checking files or installation")
    parser.add_argument("--motion-threshold", type=int, default=32,
                        help="threshold for motion. (difference in grey values between frames)")
    parser.add_argument("--max-track-time", type=float, default=0.5,
                        help="maximum time for a motion track")
    
    args = parser.parse_args(argv)
    
    source = args.file
    
    if source is None:
        print "No video source given!"
        return
        
    video = cv2.VideoCapture()
    video.open(source)
    
    if not video.isOpened():
        print "Video not open"
        return
        
    width = int(video.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(video.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fps = video.get(cv2.CAP_PROP_FPS)
    
    print "opened {w}x{h} video @ {f}fps".format(w=width,h=height,f=fps)
    
    HISTORY_NAME = "motion history"
    MASK_NAME = "motion mask"
    ORIENTATION_NAME = "orientation"
    
    make_nth_named_window(WIN_NAME, height)
    
    if not args.play_only:
        make_nth_named_window(HISTORY_NAME, height, 1)
        # make_nth_named_window(MASK_NAME, height, 2)
        # make_nth_named_window(ORIENTATION_NAME, height, 3)
        
        motion_history = np.zeros((height, width), np.float32)
        
    prev_frame = None
    
    frame_count = 0
    frame_interval_normal = int(1000.0/fps)
    frame_interval = frame_interval_normal
    while video.grab():
        got_frame, frame = video.retrieve()
        
        if not got_frame:
            print "frame miss"
            continue
        
        frame_count += 1
        print "frame: {c}   \r".format(c=frame_count), 
        sys.stdout.flush()
        
        display = frame.copy()
        
        timestamp = float(frame_count) / fps
        
        if not args.play_only:
            if prev_frame is None:
                prev_frame = frame.copy()
                
            frame_diff = cv2.absdiff(frame, prev_frame)
            gray_diff = cv2.cvtColor(frame_diff, cv2.COLOR_BGR2GRAY)
            ret, motion_mask = cv2.threshold(gray_diff, args.motion_threshold, 1, cv2.THRESH_BINARY)
            # cv2.imshow(MASK_NAME, motion_mask)
            
            cv2.updateMotionHistory(motion_mask, motion_history, timestamp, args.max_track_time)
            # note: imshow maps float32 pixels to [0, 1], so the raw MHI (timestamps) renders mostly saturated
            cv2.imshow(HISTORY_NAME, motion_history)
            
            mgrad_mask, mgrad_orient = cv2.calcMotionGradient( motion_history, MAX_TIME_DELTA, MIN_TIME_DELTA, apertureSize=5 )
            mseg_mask, mseg_bounds = cv2.segmentMotion(motion_history, timestamp, MAX_TIME_DELTA)
            
            # cv2.imshow(ORIENTATION_NAME, mgrad_orient)
            # if frame_interval == 0:
            #     import pdb; pdb.set_trace()
            
            for i, rect in enumerate([(0, 0, width, height)] + list(mseg_bounds)):
                x, y, rw, rh = rect
                area = rw * rh
                # minimum-area gate (a 64x64-pixel region), as in OpenCV's motempl.py sample
                if area < 64**2:
                    continue
                motion_roi = motion_mask[y:y+rh, x:x+rw]
                if cv2.norm(motion_roi, cv2.NORM_L1) < 0.05 * area:
                    # eliminate small things
                    continue
                mgrad_orient_roi = mgrad_orient[y:y+rh, x:x+rw]
                mgrad_mask_roi = mgrad_mask[y:y+rh, x:x+rw]
                motion_hist_roi = motion_history[y:y+rh, x:x+rw]
                angle = cv2.calcGlobalOrientation(mgrad_orient_roi, mgrad_mask_roi, motion_hist_roi, timestamp, args.max_track_time)
                
                cv2.rectangle(display, (x, y), (x+rw, y+rh), GREEN)
                cv2.putText(display, "{:.1f}".format(angle), (x, y+rh), FONT, 1, GREEN)
                            
            cv2.imshow(WIN_NAME, display)
    
            prev_frame = frame
        
        key = cv2.waitKey(frame_interval)
        if key == 27:
            return
        elif key == 32:
            # toggle pause on space
            frame_interval = 0 if frame_interval !=0 else frame_interval_normal
        elif key >= 0:
            print "\nkey: {k}\n".format(k=key)
        
    print
    video.release()
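
# These listings target the OpenCV 2.x API. In OpenCV 3 and later the motion-template functions
# moved into the contrib "motempl" namespace, so -- assuming an opencv-contrib-python install and
# illustrative frame sizes/constants -- the same pipeline would look roughly like:
import numpy as np
import cv2

mhi = np.zeros((480, 640), np.float32)       # motion history image
silhouette = np.zeros((480, 640), np.uint8)  # fill from a thresholded frame difference in practice
timestamp, MHI_DURATION = 1.0, 0.5

mhi = cv2.motempl.updateMotionHistory(silhouette, mhi, timestamp, MHI_DURATION)
mg_mask, mg_orient = cv2.motempl.calcMotionGradient(mhi, 0.25, 0.05, apertureSize=5)
seg_mask, seg_bounds = cv2.motempl.segmentMotion(mhi, timestamp, 0.25)
angle = cv2.motempl.calcGlobalOrientation(mg_orient, mg_mask, mhi, timestamp, MHI_DURATION)
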
Example No. 21
    ret, frame = cam.read()
    h, w = frame.shape[:2]
    prev_frame = frame.copy()
    motion_history = np.zeros((h, w), np.float32)
    hsv = np.zeros((h, w, 3), np.uint8)
    hsv[:, :, 1] = 255
    while True:
        ret, frame = cam.read()
        frame_diff = cv2.absdiff(frame, prev_frame)
        gray_diff = cv2.cvtColor(frame_diff, cv2.COLOR_BGR2GRAY)
        thrs = 30  # cv2.getTrackbarPos('threshold', 'motempl')
        ret, motion_mask = cv2.threshold(gray_diff, thrs, 1, cv2.THRESH_BINARY)
        timestamp = clock()
        cv2.updateMotionHistory(motion_mask, motion_history, timestamp, MHI_DURATION)
        mg_mask, mg_orient = cv2.calcMotionGradient(motion_history, MAX_TIME_DELTA, MIN_TIME_DELTA, apertureSize=5)
        seg_mask, seg_bounds = cv2.segmentMotion(motion_history, timestamp, MAX_TIME_DELTA)

        visual_name = visuals[cv2.getTrackbarPos('visual', 'motempl')]
        if visual_name == 'input':
            vis = frame.copy()
        elif visual_name == 'frame_diff':
            vis = frame_diff.copy()
        elif visual_name == 'motion_hist':
            # linearly map the most recent MHI_DURATION seconds of the MHI to 0-255
            vis = np.uint8(np.clip((motion_history - (timestamp - MHI_DURATION)) / MHI_DURATION, 0, 1) * 255)
            vis = cv2.cvtColor(vis, cv2.COLOR_GRAY2BGR)
        elif visual_name == 'grad_orient':
            # motion direction (0-360 deg) mapped into OpenCV's 0-180 hue range
            hsv[:, :, 0] = mg_orient / 2
            hsv[:, :, 2] = mg_mask * 255
            vis = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)

        for i, rect in enumerate([(0, 0, w, h)] + list(seg_bounds)):
            x, y, rw, rh = rect
            area = rw * rh
            if area < 64**2:
                continue
            silh_roi = motion_mask[y:y+rh, x:x+rw]
            orient_roi = mg_orient[y:y+rh, x:x+rw]
            mask_roi = mg_mask[y:y+rh, x:x+rw]
            mhi_roi = motion_history[y:y+rh, x:x+rw]
            if cv2.norm(silh_roi, cv2.NORM_L1) < area * 0.05:
                continue
            angle = cv2.calcGlobalOrientation(orient_roi, mask_roi, mhi_roi, timestamp, MHI_DURATION)
            color = ((255, 0, 0), (0, 0, 255))[i == 0]
            draw_motion_comp(vis, rect, angle, color)

        cv2.imshow('motempl', vis)

        prev_frame = frame.copy()
        if 0xFF & cv2.waitKey(5) == 27:
            break
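The loops above timestamp the motion history with clock(). In the OpenCV Python samples this helper lives in common.py; it is simply the tick counter converted to seconds:

import cv2

def clock():
    # wall-clock seconds from OpenCV's high-resolution tick counter
    # (the common.clock() helper shipped with the OpenCV Python samples)
    return cv2.getTickCount() / cv2.getTickFrequency()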
def video_feature_extraction_save(
    videoName,
    featureWriter,
    maskRegion,
    case,
    MIN_TIME_DELTA,
    MAX_TIME_DELTA,
    MHI_DURATION,
    THRESH_VALUE,
    DISPLAY=False,
):
    cv2.namedWindow("rat activity recognition")
    visuals = ["input", "frame_diff", "motion_hist", "grad_orient"]
    # use MHI features (motion history image)
    visual_name = visuals[2]

    cam = cv2.VideoCapture(videoName)
    video_len = cam.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT)

    ret, frame = cam.read()

    h, w = frame.shape[:2]
    prev_frame = frame.copy()
    motion_history = np.zeros((h, w), np.float32)
    hsv = np.zeros((h, w, 3), np.uint8)
    hsv[:, :, 1] = 255
    ii = 0
    while ii < video_len - 1:
        ii += 1
        ret, frame = cam.read()

        frame_diff = cv2.absdiff(frame, prev_frame)

        gray_diff = cv2.cvtColor(frame_diff, cv2.COLOR_BGR2GRAY) * maskRegion

        ret, motion_mask = cv2.threshold(gray_diff, THRESH_VALUE, 1, cv2.THRESH_BINARY)
        timestamp = clock()
        cv2.updateMotionHistory(motion_mask, motion_history, timestamp, MHI_DURATION)
        mg_mask, mg_orient = cv2.calcMotionGradient(motion_history, MAX_TIME_DELTA, MIN_TIME_DELTA, apertureSize=5)
        seg_mask, seg_bounds = cv2.segmentMotion(motion_history, timestamp, MAX_TIME_DELTA)

        if visual_name == "input":
            vis = frame.copy()
        elif visual_name == "frame_diff":
            vis = frame_diff.copy()
        elif visual_name == "motion_hist":
            vis0 = np.uint8(np.clip((motion_history - (timestamp - MHI_DURATION)) / MHI_DURATION, 0, 1) * 255)
            junk, mei0 = cv2.threshold(vis0, 1, 255, cv2.THRESH_BINARY)

        elif visual_name == "grad_orient":
            hsv[:, :, 0] = mg_orient / 2
            hsv[:, :, 2] = mg_mask * 255
            vis = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)

        ## Compute features
        # Hu moments of the binary MEI (pose shape) and of the graded MHI (recency-weighted shape)
        M1 = cv2.moments(mei0)
        M2 = cv2.moments(vis0)
        Hu1 = cv2.HuMoments(M1)
        Hu2 = cv2.HuMoments(M2)

        # a tiny epsilon keeps the log10 below finite when a moment is exactly zero
        Hu1 = Hu1 + 1e-200
        Hu2 = Hu2 + 1e-200

        # log-scale the Hu moments, preserving sign, to compress their huge dynamic range
        Hu1 = np.sign(Hu1) * np.log10(np.abs(Hu1))
        Hu2 = np.sign(Hu2) * np.log10(np.abs(Hu2))

        if M1["m00"] != 0:
            cx1 = M1["m10"] / M1["m00"]
            cy1 = M1["m01"] / M1["m00"]
        else:
            cx1 = 0
            cy1 = 0

        if M2["m00"] != 0:
            cx2 = M2["m10"] / M2["m00"]
            cy2 = M2["m01"] / M2["m00"]
        else:
            cx2 = 0
            cy2 = 0

        meiSize = np.count_nonzero(mei0)

        if meiSize == 0:
            corner1 = 0
            corner2 = 0
            corner3 = 0
            corner4 = 0
            height = 0
            width = 0
            extend = 0
        else:
            maskInd = np.nonzero(maskRegion)
            # note: nonzero()[0] holds row (y) indices, nonzero()[1] column (x) indices
            maskCx = np.mean(maskInd[0])
            maskCy = np.mean(maskInd[1])

            indices = np.nonzero(mei0)
            corner1 = max(indices[0]) - maskCx
            corner2 = min(indices[0]) - maskCx
            corner3 = max(indices[1]) - maskCy
            corner4 = min(indices[1]) - maskCy
            height = corner1 - corner2 + 1
            width = corner3 - corner4 + 1
            extend = meiSize / float(height * width)


        features = [
            Hu1[0][0],
            Hu1[1][0],
            Hu1[2][0],
            Hu1[3][0],
            Hu1[4][0],
            Hu1[5][0],
            Hu1[6][0],
            Hu2[0][0],
            Hu2[1][0],
            Hu2[2][0],
            Hu2[3][0],
            Hu2[4][0],
            Hu2[5][0],
            Hu2[6][0],
            cx1,
            cy1,
            cx2,
            cy2,
            corner1,
            corner2,
            corner3,
            corner4,
            meiSize,
            height / (width + 0.000001),  # aspect ratio; epsilon avoids divide-by-zero
            height,
            width,
            extend,
            case,
        ]
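        # 28 values per row: 7 log-Hu moments of the MEI, 7 of the MHI,
        # both centroids (cx1, cy1, cx2, cy2), 4 corner offsets, MEI size,
        # aspect ratio, height, width, extend, and the class label `case`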
        # earlier variants skipped all-zero feature rows for non-rest cases;
        # here every frame's feature vector is written unconditionally
        featureWriter.writerow(features)

        prev_frame = frame.copy()

        if DISPLAY:
            # draw_str(vis, (20, 20), visual_name)
            vis = cv2.cvtColor(vis0, cv2.COLOR_GRAY2BGR)
            mei = cv2.cvtColor(mei0, cv2.COLOR_GRAY2BGR)
            cv2.imshow("MHI", vis)
            # cv2.imshow('MEI', mei)
            cv2.imshow("Video", frame)

            if 0xFF & cv2.waitKey(1) == 27:
                break

    cam.release()
    cv2.destroyAllWindows()
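video_feature_extraction_save is never called in the excerpt above; a hypothetical driver might look like the sketch below. The video name, the full-frame mask, and all parameter values are illustrative assumptions, not taken from the source.

import csv
import cv2
import numpy as np

# Hypothetical driver; file names, the full-frame mask, and the parameter
# values below are illustrative assumptions.
cam = cv2.VideoCapture("rat.avi")
ret, first = cam.read()
cam.release()
maskRegion = np.ones(first.shape[:2], np.uint8)  # region of interest: whole frame

with open("features.csv", "wb") as f:  # Python 2: open csv files in binary mode
    featureWriter = csv.writer(f)
    video_feature_extraction_save(
        "rat.avi", featureWriter, maskRegion, case=1,
        MIN_TIME_DELTA=0.05, MAX_TIME_DELTA=0.25,
        MHI_DURATION=0.5, THRESH_VALUE=32, DISPLAY=False)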