Example #1
def getHistoryDiff(image):
    # TODO: Implement the history gradient
    global lastImage
    global lastlastImage
    global imageCounter
    global motionHistory
    global historyMaxCount
    global imgROI
    image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    image = cv2.bitwise_and(image, imgROI)
    dimensions = image.shape
    height, width = dimensions
    ROI = [(0, 0), (width, height)]
    diffImg = motion.blur3Diff(lastlastImage, lastImage, image, "fakestamp", ROI, 24)
    imageCounter += imageCountIncrement
    # Update the motion history with the new diff
    cv2.updateMotionHistory(diffImg, motionHistory, imageCounter, historyMaxCount)
    # Save the current image as last image
    lastlastImage = lastImage
    lastImage = image
    # Scale the motion history image
    motionDiff = np.zeros((height, width), dtype=np.uint8)
    # Scale the image according to the age of the motions in the image.
    # motionHistory[x,y] = timestamp   if diff[x,y] != 0
    #                    = 0           if motionHistory[x,y] < timestamp - duration
    #                    = unchanged   otherwise
    subValue = (float(imageCounter) - float(historyMaxCount))
    clippedDiff = np.clip((motionHistory - subValue) / float(historyMaxCount),
                          0, 1)
    scaledDiff = np.uint8(clippedDiff*255)
    return scaledDiff
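
For reference, the final scaling maps history timestamps in [imageCounter - historyMaxCount, imageCounter] onto gray values 0..255, and cv2.updateMotionHistory itself reduces to a few lines of NumPy. A minimal sketch (hypothetical helper name, float32 MHI as in the example):

import numpy as np

def update_motion_history(silhouette, mhi, timestamp, duration):
    # pixels moving now are stamped with the current time
    mhi[silhouette != 0] = timestamp
    # pixels that stopped moving more than `duration` ago are cleared
    mhi[(silhouette == 0) & (mhi < timestamp - duration)] = 0
    return mhi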
Example #2
File: main.py Project: noir-neo/grusonii
    def get_motion_angle(diff):
        times_s = time.clock()
        cv2.updateMotionHistory(diff, hist_32, times_s, DURATION)
        hist_8, direction = cv2.calcMotionGradient(hist_32, 0.25, 0.05, apertureSize=5)
        angle = cv2.calcGlobalOrientation(direction, hist_8, hist_32, times_s, DURATION)

        return angle
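
Two things worth noting here: cv2.calcMotionGradient returns the 8-bit validity mask first and the orientation image (in degrees) second, so hist_8 is really a mask rather than a history, and the (direction, hist_8) argument order passed to cv2.calcGlobalOrientation is correct. Also, time.clock() was removed in Python 3.8; a drop-in replacement for these examples:

import time

# use the legacy clock() where it still exists, else a monotonic timer
clock = getattr(time, "clock", time.perf_counter)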
Example #3
    def process_frame(self, frame):
        self.frame_count += 1
        frame = cv2.GaussianBlur(frame, (3, 3), -1)
        display = frame.copy()

        fgmask = self.fgbg.apply(frame)
        diff = cv2.absdiff(fgmask, self.prev_background)
        self.prev_background = fgmask

        diff = cv2.morphologyEx(diff, cv2.MORPH_CLOSE, self.se)
        diff = cv2.morphologyEx(diff, cv2.MORPH_OPEN, self.se)

        timestamp = float(self.frame_count) / self.fps
        cv2.updateMotionHistory(diff, self.motion_history, timestamp,
                                MHI_DURATION)
        mseg_mask, mseg_bounds = cv2.segmentMotion(self.motion_history,
                                                   timestamp, MAX_TIME_DELTA)

        mseg_bounds = filter_inside(mseg_bounds, self.screen_area)

        if len(mseg_bounds) > 0:
            self.track_human(frame, mseg_bounds)

        people, visible = self.calculator.get_visible()
        draw_detections(display, visible, 3)
        return display, people
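
filter_inside is project-specific and not shown on this page; a plausible sketch, under the assumption that it keeps only motion segments covering a sane fraction of the screen (the thresholds are illustrative):

def filter_inside(bounds, screen_area, min_frac=0.001, max_frac=0.5):
    # keep rectangles whose area is a plausible fraction of the screen
    kept = []
    for x, y, w, h in bounds:
        frac = float(w * h) / screen_area
        if min_frac <= frac <= max_frac:
            kept.append((x, y, w, h))
    return kept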
Example #4
    def on_frame(self, frame):
        h, w = frame.shape[:2]
        qi = 0
        #print "on_frame %d x %d" % (h, w)
        frame_diff = cv2.absdiff(frame, self.prev_frame)
        gray_diff = cv2.cvtColor(frame_diff, cv2.COLOR_BGR2GRAY)
        ret, motion_mask = cv2.threshold(gray_diff, self._threshold, 1, cv2.THRESH_BINARY)
        timestamp = clock()
        cv2.updateMotionHistory(motion_mask, self.motion_history, timestamp, MHI_DURATION)
        mg_mask, mg_orient = cv2.calcMotionGradient(self.motion_history, MAX_TIME_DELTA, MIN_TIME_DELTA, apertureSize=5 )
        seg_mask, seg_bounds = cv2.segmentMotion(self.motion_history, timestamp, MAX_TIME_DELTA)

        centers = []
        rects = []
        draws = []
        for i, rect in enumerate([(0, 0, w, h)] + list(seg_bounds)):
            x, y, rw, rh = rect
            area = rw*rh
            if area < 64**2:
                continue
            silh_roi   = motion_mask        [y:y+rh,x:x+rw]
            orient_roi = mg_orient          [y:y+rh,x:x+rw]
            mask_roi   = mg_mask            [y:y+rh,x:x+rw]
            mhi_roi    = self.motion_history[y:y+rh,x:x+rw]
            if cv2.norm(silh_roi, cv2.NORM_L1) < area*0.05:
                continue
            angle = cv2.calcGlobalOrientation(orient_roi, mask_roi, mhi_roi, timestamp, MHI_DURATION)
            color = ((255, 0, 0), (0, 0, 255))[i == 0]
            if self._use_cv_gui:
                draws.append(lambda vis, rect=rect, angle=angle, color=color:
                                draw_motion_comp(vis, rect, angle, color))
            centers.append( (x+rw/2, y+rh/2) )
            rects.append(rect)

        self.tracker_group.update_trackers(centers, rects)

        #print 'Active trackers: %d' % len(trackers)
        #print 'Tracker score: %s' % ','.join(['%2d'%len(tracker.hits) for tracker in trackers])
        trackers = self.tracker_group.trackers
        cx, cy = None, None
        #print "#trackers = %d" % len(trackers)
        if len(trackers):
            first_tracker = trackers[0]
            cx, cy = center_after_median_threshold(frame, first_tracker.rect)
            cv2.circle(frame, (cx, cy), 5, (255, 255, 255), 3)
        print str(qi)*5; qi += 1
        print self._on_cx_cy
        self._on_cx_cy(cx, cy) # gives None's for no identified balloon
        print str(qi)*5; qi += 1

        if self._use_cv_gui:
            self.on_frame_cv_gui(frame, draws, (cx, cy))
        else:
            self.frame_vis(frame, draws, (cx, cy))

        #time.sleep(0.5)
        self.prev_frame = frame.copy()
        # TODO - print visualization onto image
        return frame
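
The lambda in the loop above freezes rect, angle and color as default arguments on purpose: Python closures bind names late, so without the defaults every deferred draw call would see only the values from the final loop iteration. A minimal illustration:

draws = []
for i in range(3):
    draws.append(lambda i=i: i)   # default argument captures i now
print([d() for d in draws])       # -> [0, 1, 2]; without i=i it would be [2, 2, 2]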
Example #5
    def update(self, frame):
        frame_diff = cv2.absdiff(frame, self.prev_frame)
        real_diff = frame - self.prev_frame
        self.real_diff = cv2.cvtColor(real_diff,  cv2.COLOR_BGR2GRAY)
        gray_diff = cv2.cvtColor(frame_diff, cv2.COLOR_BGR2GRAY)
        thrs = 40 #cv2.getTrackbarPos('threshold', 'motempl')
        ret, motion_mask = cv2.threshold(gray_diff, thrs, 1, cv2.THRESH_BINARY)
        timestamp = clock()
        cv2.updateMotionHistory(motion_mask, self.motion_history, timestamp, MHI_DURATION)

        self.vis = np.uint8(np.clip((self.motion_history-(timestamp-MHI_DURATION)) / MHI_DURATION, 0, 1)*255)
        self.vis = cv2.cvtColor(self.vis, cv2.COLOR_GRAY2BGR)
        #self.process_motions()
        self.prev_frame = frame.copy()
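
The normalization on the self.vis line recurs in almost every example below; a small helper (hypothetical name) makes the intent explicit: map MHI timestamps in [timestamp - MHI_DURATION, timestamp] onto grayscale 0..255:

import numpy as np

def mhi_to_gray(mhi, timestamp, duration):
    # recent motion -> bright, motion about to expire -> dark
    return np.uint8(np.clip((mhi - (timestamp - duration)) / duration, 0, 1) * 255)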
Example #6
def main():
    import sys
    try:
        video_src = sys.argv[1]
    except IndexError:
        video_src = './images/example-%03d.jpg'

    cv2.namedWindow('motion-history')
    cv2.namedWindow('raw')
    cv2.moveWindow('raw', 200, 0)
    while True:
        cam = cv2.VideoCapture(video_src)
        ret, frame = cam.read()
        h, w = frame.shape[:2]
        prev_frame = frame.copy()
        motion_history = np.zeros((h, w), np.float32)
        timestamp = 0
        while True:
            ret, frame = cam.read()
            if not ret:
                break
            frame_diff = cv2.absdiff(frame, prev_frame)
            gray_diff = cv2.cvtColor(frame_diff, cv2.COLOR_BGR2GRAY)
            ret, fgmask = cv2.threshold(gray_diff, DEFAULT_THRESHOLD, 1,
                                        cv2.THRESH_BINARY)
            timestamp += 1

            # update motion history
            cv2.updateMotionHistory(fgmask, motion_history, timestamp,
                                    MHI_DURATION)

            # normalize motion history
            mh = np.uint8(
                np.clip(
                    (motion_history -
                     (timestamp - MHI_DURATION)) / MHI_DURATION, 0, 1) * 255)
            cv2.imshow('motion-history', mh)
            cv2.imshow('raw', frame)

            prev_frame = frame.copy()
            if 0xFF & cv2.waitKey(5) == 27:
                break
    cv2.destroyAllWindows()
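
In this example the timestamp is a frame index (timestamp += 1), so MHI_DURATION is measured in frames rather than seconds. To work in seconds instead, divide the frame index by the stream's frame rate (a sketch, using the OpenCV 3+ property name):

fps = cam.get(cv2.CAP_PROP_FPS) or 30.0   # fall back when the container reports no fps
timestamp = frame_index / fps             # frame_index is the running frame count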

Example #7
def computeMHI(directoryName):
    depthfiles = glob.glob(directoryName + '/' + '*.pgm')
    depthfiles = np.sort(depthfiles)
    frame = cv2.imread(depthfiles[0])
    h, w = frame.shape[:2]
    prev_frame = frame.copy()
    motion_history = np.zeros((h, w), np.float32)
    hsv = np.zeros((h, w, 3), np.uint8)
    hsv[:,:,1] = 255
    for i in range(len(depthfiles)-1):
        frame = cv2.imread(depthfiles[i+1])
        frame_diff = cv2.absdiff(frame, prev_frame)
        gray_diff = cv2.cvtColor(frame_diff, cv2.COLOR_BGR2GRAY)
        ret, motion_mask = cv2.threshold(gray_diff, 70, 1, cv2.THRESH_BINARY)
        timestamp = clock()
        cv2.updateMotionHistory(motion_mask, motion_history, timestamp, duration=0.5)
        prev_frame = frame.copy()
    return motion_history
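
computeMHI stamps frames with wall-clock time, so the resulting MHI depends on how quickly the files load from disk. A deterministic variant (a sketch, using the frame index as the timestamp and a duration expressed in frames):

def computeMHI_by_index(depthfiles, duration_frames=15):
    prev = cv2.imread(depthfiles[0])
    mhi = np.zeros(prev.shape[:2], np.float32)
    for i, path in enumerate(depthfiles[1:], start=1):
        frame = cv2.imread(path)
        gray = cv2.cvtColor(cv2.absdiff(frame, prev), cv2.COLOR_BGR2GRAY)
        ret, mask = cv2.threshold(gray, 70, 1, cv2.THRESH_BINARY)
        cv2.updateMotionHistory(mask, mhi, float(i), float(duration_frames))
        prev = frame
    return mhi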
Example #9
File: motempl.py Project: vfn/opencv
    cam = video.create_capture(video_src, fallback='synth:class=chess:bg=../cpp/lena.jpg:noise=0.01')
    ret, frame = cam.read()
    h, w = frame.shape[:2]
    prev_frame = frame.copy()
    motion_history = np.zeros((h, w), np.float32)
    hsv = np.zeros((h, w, 3), np.uint8)
    hsv[:,:,1] = 255
    while True:
        ret, frame = cam.read()
        frame_diff = cv2.absdiff(frame, prev_frame)
        gray_diff = cv2.cvtColor(frame_diff, cv2.COLOR_BGR2GRAY)
        thrs = cv2.getTrackbarPos('threshold', 'motempl')
        ret, motion_mask = cv2.threshold(gray_diff, thrs, 1, cv2.THRESH_BINARY)
        timestamp = clock()
        cv2.updateMotionHistory(motion_mask, motion_history, timestamp, MHI_DURATION)
        mg_mask, mg_orient = cv2.calcMotionGradient( motion_history, MAX_TIME_DELTA, MIN_TIME_DELTA, apertureSize=5 )
        seg_mask, seg_bounds = cv2.segmentMotion(motion_history, timestamp, MAX_TIME_DELTA)

        visual_name = visuals[cv2.getTrackbarPos('visual', 'motempl')]
        if visual_name == 'input':
            vis = frame.copy()
        elif visual_name == 'frame_diff':
            vis = frame_diff.copy()
        elif visual_name == 'motion_hist':
            vis = np.uint8(np.clip((motion_history-(timestamp-MHI_DURATION)) / MHI_DURATION, 0, 1)*255)
            vis = cv2.cvtColor(vis, cv2.COLOR_GRAY2BGR)
        elif visual_name == 'grad_orient':
            hsv[:,:,0] = mg_orient/2
            hsv[:,:,2] = mg_mask*255
            vis = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
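
All of the examples on this page use the OpenCV 2.4 module layout. In OpenCV 3 and later the motion-template functions moved to the optflow module of opencv_contrib; with opencv-contrib-python installed the same calls become:

# OpenCV 3+/opencv-contrib-python equivalents of the legacy calls above
# cv2.updateMotionHistory   -> cv2.motempl.updateMotionHistory
# cv2.calcMotionGradient    -> cv2.motempl.calcMotionGradient
# cv2.segmentMotion         -> cv2.motempl.segmentMotion
# cv2.calcGlobalOrientation -> cv2.motempl.calcGlobalOrientation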
Example #10
def main(args):
    if args.in_file:
        try: 
            inf = int(args.in_file)
        except ValueError: 
            inf = args.in_file
    else: 
        inf = 0
        
    cam = cv2.VideoCapture(inf)
    imgHeight = int(cam.get(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT))
    imgWidth = int(cam.get(cv2.cv.CV_CAP_PROP_FRAME_WIDTH))
    comb_vis = np.zeros((imgHeight, 2*imgWidth), np.uint8)
    
    setup(imgWidth, imgHeight)

    motion_history = np.zeros((imgHeight, imgWidth), np.float32)
    
    s = Splatter((imgHeight, imgWidth), 'splat_db')
    
    pause_time = 0
    start_time = time.time()
    ret, t0 = cam.read() # First two frames
    ret, t = cam.read()
    if args.flip:
        t0 = cv2.flip(t0, flipCode=1)
        t = cv2.flip(t, flipCode=1)

    if args.out_file:
        h, w = t.shape[:2]
        if not args.dont_show_capture: w *= 2
        video = cv2.VideoWriter(args.out_file, cv2.cv.CV_FOURCC('X','V','I','D'), 10, (w, h))
        # video = cv2.VideoWriter(args.out_file, -1, 10, (w, h))

    while ret:
        thresh = cv2.getTrackbarPos(ThreshTrackbar, controlTrackbars)
        mhi_duration = cv2.getTrackbarPos(MhiDurationTrackbar, controlTrackbars) / 10.0
        max_time_delta = cv2.getTrackbarPos(MaxTimeDeltaTrackbar, controlTrackbars) / 10.0
        sqrt_rect_area = cv2.getTrackbarPos(MinSqrtRectAreaTrackbar, controlTrackbars)
        
        diff = cv2.absdiff(t, t0)
        gray_diff = cv2.cvtColor(diff, cv2.COLOR_BGR2GRAY)
        gray_diff = cv2.medianBlur(gray_diff, 5)
        ret, mask = cv2.threshold(gray_diff, thresh, 255, cv2.THRESH_BINARY)
        vis = s.splat_mask(mask) | s.splat_mask(~mask)
        
        timestamp = time.clock()
        cv2.updateMotionHistory(mask, motion_history, timestamp, mhi_duration)
        seg_mask, seg_bounds = cv2.segmentMotion(motion_history, timestamp, max_time_delta)
        
        rects = [rect for rect in seg_bounds if rect[2] * rect[3] > sqrt_rect_area ** 2]
        for rect in rects:
            x, y, w, h = rect
            cv2.rectangle(t, (x, y), (x+w, y+h), (0, 255, 0))
        vis |= perturber.c_p_p(mask, rects)
        
        if not args.dont_show_capture:
            vis = utils.combine_images(vis, t)
        
        cv2.imshow(winName, vis)
        if args.out_file:
            video.write(vis if len(vis.shape) == 3 else cv2.cvtColor(vis, cv2.COLOR_GRAY2BGR))
        t0 = t
        ret, t = cam.read()
        if args.flip:
            t = cv2.flip(t, flipCode=1)
        key = cv2.waitKey(40)
        if key == ord('q'):
            cv2.destroyAllWindows()
            break
        elif key == ord('s'):
            if cv2.waitKey() == ord('p'):
                cv2.imwrite('res.png', mask)
    
    cam.release()

    if args.out_file:
        video.release()
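
This example still relies on the cv2.cv.* constants from OpenCV 2.4. If it is run under OpenCV 3+, the equivalents are (sketch):

imgHeight = int(cam.get(cv2.CAP_PROP_FRAME_HEIGHT))
imgWidth = int(cam.get(cv2.CAP_PROP_FRAME_WIDTH))
fourcc = cv2.VideoWriter_fourcc(*"XVID")   # replaces cv2.cv.CV_FOURCC('X','V','I','D')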
Example #11
def main(argv=None):
    if argv is None:
        argv = sys.argv[1:]
    
    parser = argparse.ArgumentParser(description="Track moving objects in a video stream")
    
    parser.add_argument("-f", "--file", 
                        help="use given file")
    parser.add_argument("-p", "--play-only", action="store_true",
                        help="playback only. Don't do any recognition. Useful for sanity checking files or installation")
    parser.add_argument("--motion-threshold", type=int, default=32,
                        help="threshold for motion. (difference in grey values between frames)")
    parser.add_argument("--max-track-time", type=float, default=0.5,
                        help="maximum time for a motion track")
    
    args = parser.parse_args(argv)
    
    source = args.file
    
    if source is None:
        print "No video source given!"
        return
        
    video = cv2.VideoCapture()
    video.open(source)
    
    if not video.isOpened():
        print "Video not open"
        return
        
    width = int(video.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(video.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fps = video.get(cv2.CAP_PROP_FPS)
    
    print "opened {w}x{h} video @ {f}fps".format(w=width,h=height,f=fps)
    
    HISTORY_NAME = "motion history"
    MASK_NAME = "motion mask"
    ORIENTATION_NAME = "orientation"
    
    make_nth_named_window(WIN_NAME, height)
    
    if not args.play_only:
        make_nth_named_window(HISTORY_NAME, height, 1)
        # make_nth_named_window(MASK_NAME, height, 2)
        # make_nth_named_window(ORIENTATION_NAME, height, 3)
        
        motion_history = np.zeros((height, width), np.float32)
        
    prev_frame = None
    
    frame_count = 0
    frame_interval_normal = int(1000.0/fps)
    frame_interval = frame_interval_normal
    while video.grab():
        got_frame, frame = video.retrieve()
        
        if not got_frame:
            print "frame miss"
            continue
        
        frame_count += 1
        print "frame: {c}   \r".format(c=frame_count), 
        sys.stdout.flush()
        
        display = frame.copy()
        
        timestamp = float(frame_count) / fps
        
        if not args.play_only:
            if prev_frame is None:
                prev_frame = frame.copy()
                
            frame_diff = cv2.absdiff(frame, prev_frame)
            gray_diff = cv2.cvtColor(frame_diff, cv2.COLOR_BGR2GRAY)
            ret, motion_mask = cv2.threshold(gray_diff, args.motion_threshold, 1, cv2.THRESH_BINARY)
            # cv2.imshow(MASK_NAME, motion_mask)
            
            cv2.updateMotionHistory(motion_mask, motion_history, timestamp, args.max_track_time)
            cv2.imshow(HISTORY_NAME, motion_history)
            
            mgrad_mask, mgrad_orient = cv2.calcMotionGradient( motion_history, MAX_TIME_DELTA, MIN_TIME_DELTA, apertureSize=5 )
            mseg_mask, mseg_bounds = cv2.segmentMotion(motion_history, timestamp, MAX_TIME_DELTA)
            
            # cv2.imshow(ORIENTATION_NAME, mgrad_orient)
            # if frame_interval == 0:
            #     import pdb; pdb.set_trace()
            
            for i, rect in enumerate([(0, 0, width, height)] + list(mseg_bounds)):
                x, y, rw, rh = rect
                area = rw * rh
                # TODO: where does 64**2 come from?
                if area < 64**2:
                    continue
                motion_roi = motion_mask[y:y+rh, x:x+rw]
                if cv2.norm(motion_roi, cv2.NORM_L1) < 0.05 * area:
                    # eliminate small things
                    continue
                mgrad_orient_roi = mgrad_orient[y:y+rh, x:x+rw]
                mgrad_mask_roi = mgrad_mask[y:y+rh, x:x+rw]
                motion_hist_roi = motion_history[y:y+rh, x:x+rw]
                angle = cv2.calcGlobalOrientation(mgrad_orient_roi, mgrad_mask_roi, motion_hist_roi, timestamp, args.max_track_time)
                
                cv2.rectangle(display, (x, y), (x+rw, y+rh), GREEN)
                cv2.putText(display, "{:.1f}".format(angle), (x, y+rh), FONT, 1, GREEN)
                            
            cv2.imshow(WIN_NAME, display)
    
            prev_frame = frame
        
        key = cv2.waitKey(frame_interval)
        if key == 27:
            return
        elif key == 32:
            # toggle pause on space
            frame_interval = 0 if frame_interval !=0 else frame_interval_normal
        elif key >= 0:
            print "\nkey: {k}\n".format(k=key)
        
    print
    video.release()
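
The area and L1-norm gates inside the loop above discard motion components smaller than 64x64 pixels (answering the TODO: 64**2 is simply a minimum side length of 64 pixels, squared) or whose silhouette fills under 5% of the rectangle. Stated as a single predicate (sketch):

def is_significant(motion_roi, rw, rh, min_side=64, min_fill=0.05):
    # reject tiny components and near-empty rectangles
    area = rw * rh
    return area >= min_side ** 2 and cv2.norm(motion_roi, cv2.NORM_L1) >= min_fill * area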
Example #12
    def process_image(self, cv_image):
        if self.motion_history is None:
            self.h, self.w = cv_image.shape[:2]
            self.prev_frame = cv_image.copy()
            self.motion_history = np.zeros((self.h, self.w), np.float32)
            self.hsv = np.zeros((self.h, self.w, 3), np.uint8)
            self.hsv[:,:,1] = 255
            self.erode_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
        
        color_frame = cv_image.copy()
        frame_diff = cv2.absdiff(color_frame, self.prev_frame)
        gray_diff = cv2.cvtColor(frame_diff, cv2.COLOR_BGR2GRAY)
        
        thresh = cv2.getTrackbarPos('threshold', self.node_name)
        
        ret, motion_mask = cv2.threshold(gray_diff, thresh, 1, cv2.THRESH_BINARY)
        
        motion_mask = cv2.erode(motion_mask, self.erode_kernel, iterations=2)
        motion_mask = cv2.dilate(motion_mask, self.erode_kernel, iterations=2)
                
        timestamp = clock()
        
        cv2.updateMotionHistory(motion_mask, self.motion_history, timestamp, MHI_DURATION)

        mg_mask, mg_orient = cv2.calcMotionGradient(self.motion_history, MAX_TIME_DELTA, MIN_TIME_DELTA, apertureSize=5 )
        seg_mask, seg_bounds = cv2.segmentMotion(self.motion_history, timestamp, MAX_TIME_DELTA)

        visual_name = self.visuals[cv2.getTrackbarPos('visual', self.node_name)]
        if visual_name == 'input':
            vis = cv_image.copy()
        elif visual_name == 'frame_diff':
            vis = frame_diff.copy()
        elif visual_name == 'motion_hist':
            vis = np.uint8(np.clip((self.motion_history-(timestamp-MHI_DURATION)) / MHI_DURATION, 0, 1)*255)
        elif visual_name == 'motion_hist_color':
            vis = np.uint8(np.clip((self.motion_history-(timestamp-MHI_DURATION)) / MHI_DURATION, 0, 1)*255)
            vis = cv2.cvtColor(vis, cv2.COLOR_GRAY2BGR)
        elif visual_name == 'grad_orient':
            self.hsv[:,:,0] = mg_orient/2
            self.hsv[:,:,2] = mg_mask*255
            vis = cv2.cvtColor(self.hsv, cv2.COLOR_HSV2BGR)

        max_rect_area = 0
        
        for i, rect in enumerate([(0, 0, self.w, self.h)] + list(seg_bounds)):
            x, y, rw, rh = rect
            area = rw*rh
            if area < 64**2:
                continue
            if area < 640*480 and area > max_rect_area:
                max_rect_area = area
                max_rect = rect
            silh_roi   = motion_mask   [y:y+rh,x:x+rw]
            orient_roi = mg_orient     [y:y+rh,x:x+rw]
            mask_roi   = mg_mask       [y:y+rh,x:x+rw]
            mhi_roi    = self.motion_history[y:y+rh,x:x+rw]
            if cv2.norm(silh_roi, cv2.NORM_L1) < area*0.05:
                continue
            angle = cv2.calcGlobalOrientation(orient_roi, mask_roi, mhi_roi, timestamp, MHI_DURATION)
            color = ((255, 0, 0), (0, 0, 255))[i == 0]
            #draw_motion_comp(vis, rect, angle, color)

        #draw_str(vis, (20, 20), visual_name)

        display_image = cv_image.copy()


        if max_rect_area != 0:
            x, y, w, h = max_rect
            display = color_frame[y:y+h,x:x+w] 
#            #bounding_box = cv2.boundingRect(vis)
#            #print bounding_box
#        
            if visual_name == 'motion_hist':
                display = vis.copy()
            else:
                display = cv2.bitwise_and(color_frame, vis, vis)

            draw_str(vis, (20, 20), visual_name)
            
            contour_image = vis.copy()
            
            contours, hierarchy = cv2.findContours(contour_image, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
            
            contour_points = list()
            
            if len(contours) != 0:
                for cnt in contours:
                    contour_points.append(cnt)
                
                vstack_points = np.vstack(contour_points)
                if len(vstack_points) > 5:
                    z_ellipse = cv2.fitEllipse(vstack_points)
                    cv2.ellipse(display_image, z_ellipse, (0,255,0), 2)
        
                cv2.drawContours(display_image, contours, -1, (0,255,0), 3)
            
            cv2.imshow("Contours", display_image)

        self.prev_frame = color_frame
        
        #cv2.waitKey(5)
                
        return cv_image
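
One portability note on the contour step above: cv2.findContours returns (contours, hierarchy) in OpenCV 2.4 and 4.x but (image, contours, hierarchy) in 3.x. A version-proof unpacking (sketch):

res = cv2.findContours(contour_image, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
contours, hierarchy = res[-2], res[-1]   # works across 2.4, 3.x and 4.x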
Example #13
def processTrainingFiles(oper="extractTrainingVocabulary", fileType="train"):
    global saveTrainFeaturesDes
    cv2.namedWindow("Output")
    visuals = ["input", "frame_diff", "motion_hist", "grad_orient"]
    cv2.createTrackbar("visual", "Output", 2, len(visuals) - 1, nothing)
    cv2.createTrackbar("threshold", "Output", DEFAULT_THRESHOLD, 255, nothing)

    cv2.namedWindow("Output7")
    # visuals = ['input', 'frame_diff', 'motion_hist', 'grad_orient']
    cv2.createTrackbar("visual", "Output7", 2, len(visuals) - 1, nothing)
    cv2.createTrackbar("threshold", "Output7", DEFAULT_THRESHOLD, 255, nothing)

    cv2.namedWindow("Output14")
    # visuals = ['input', 'frame_diff', 'motion_hist', 'grad_orient']
    cv2.createTrackbar("visual", "Output14", 2, len(visuals) - 1, nothing)
    cv2.createTrackbar("threshold", "Output14", DEFAULT_THRESHOLD, 255, nothing)

    printPageBreak()
    print ("\n\nProcessing " + fileType.capitalize() + "ing Files")
    trainTestFiles = groupedTrainFiles
    if fileType == "train":
        trainTestFiles = groupedTrainFiles
    else:
        trainTestFiles = groupedTestFiles

    for label, fileList in trainTestFiles.iteritems():
        global trainData, testData, trainLabels, testLabels
        for file in fileList:
            frameCounter = 0
            print (label, file)
            cam = cv2.VideoCapture(file)
            # cam = video.create_capture(file,fallback='synth:class=chess:bg=lena.jpg:noise=0.01')
            # cam = video.create_capture(file, fallback='synth:class=chess:bg=lena.jpg:noise=0.01')
            ret, frame = cam.read()
            h, w = frame.shape[:2]
            prev_frame = frame.copy()
            motion_history = np.zeros((h, w), np.float32)
            # print(motion_history)
            hsv = np.zeros((h, w, 3), np.uint8)
            hsv[:, :, 1] = 255

            prev_frame7 = frame.copy()
            motion_history7 = np.zeros((h, w), np.float32)
            # print(motion_history7)
            hsv7 = np.zeros((h, w, 3), np.uint8)
            hsv7[:, :, 1] = 255

            prev_frame14 = frame.copy()
            motion_history14 = np.zeros((h, w), np.float32)
            # print(motion_history)
            hsv14 = np.zeros((h, w, 3), np.uint8)
            hsv14[:, :, 1] = 255

            totalFrames = int(cam.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT))
            frameCounter = frameCounter + 1
            print (totalFrames)

            subFrameCount = 0
            skipFrames = 1

            subFrameCount7 = 0
            skipFrames7 = 7

            subFrameCount14 = 0
            skipFrames14 = 14

            featureDesSAHMIS = None
            bowDescriptorSAHMIS = None

            while frameCounter < totalFrames:
                ret, frame = cam.read()

                """
                #################################################################################SAMHI-1
                """
                if subFrameCount == 0:
                    frame_diff = cv2.absdiff(frame, prev_frame)
                    gray_diff = cv2.cvtColor(frame_diff, cv2.COLOR_BGR2GRAY)
                    # Remove the noise and do the threshold
                    # gray_diff = cv2.morphologyEx(gray_diff, cv2.KERNEL_SMOOTH, kernel)
                    # gray_diff = cv2.dilate(gray_diff, kernel, iterations=1)
                    # gray_diff = cv2.erode(gray_diff, kernel, iterations=1)
                    # # gray_diff = cv2.morphologyEx(gray_diff, cv2.MORPH_OPEN, kernel)
                    # # gray_diff = cv2.morphologyEx(gray_diff, cv2.MORPH_CLOSE, kernel)
                    # gray_diff = cv2.erode(gray_diff, kernel, iterations=1)
                    # gray_diff = cv2.dilate(gray_diff, kernel, iterations=1)
                    # cv2.cv.Smooth(gray_diff, gray_diff, cv2.cv.CV_BLUR, 5,5)
                    # cv2.cv.MorphologyEx(gray_diff, gray_diff, None, None, cv2.cv.CV_MOP_OPEN)
                    # cv2.cv.MorphologyEx(gray_diff, gray_diff, None, None, cv2.cv.CV_MOP_CLOSE)
                    # cv2.cv.Threshold(gray_diff, gray_diff, 10, 255, cv2.cv.CV_THRESH_BINARY_INV)
                    #
                    # mhi_vis = cv2.dilate(mhi_vis, kernel, iterations=2)
                    # mhi_vis = cv2.erode(mhi_vis, kernel, iterations=2)
                    # mhi_vis = cv2.morphologyEx(mhi_vis, cv2.MORPH_OPEN, kernel)
                    # mhi_vis = cv2.morphologyEx(mhi_vis, cv2.MORPH_CLOSE, kernel)
                    # mhi_vis = cv2.erode(mhi_vis, kernel, iterations=2)
                    # mhi_vis = cv2.dilate(mhi_vis, kernel, iterations=2)
                    thrs = cv2.getTrackbarPos("threshold", "Output")
                    ret, motion_mask = cv2.threshold(gray_diff, thrs, 1, cv2.THRESH_BINARY)
                    timestamp = clock()
                    cv2.updateMotionHistory(motion_mask, motion_history, timestamp, MHI_DURATION)

                    mg_mask, mg_orient = cv2.calcMotionGradient(
                        motion_history, MAX_TIME_DELTA, MIN_TIME_DELTA, apertureSize=5
                    )
                    seg_mask, seg_bounds = cv2.segmentMotion(motion_history, timestamp, MAX_TIME_DELTA)

                    visual_name = visuals[cv2.getTrackbarPos("visual", "Output")]
                    if visual_name == "input":
                        vis = frame.copy()
                    elif visual_name == "frame_diff":
                        vis = frame_diff.copy()
                    elif visual_name == "motion_hist":
                        vis = np.uint8(
                            np.clip((motion_history - (timestamp - MHI_DURATION)) / MHI_DURATION, 0, 1) * 255
                        )
                        vis = cv2.cvtColor(vis, cv2.COLOR_GRAY2BGR)
                    elif visual_name == "grad_orient":
                        hsv[:, :, 0] = mg_orient / 2
                        hsv[:, :, 2] = mg_mask * 255
                        vis = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)

                    for i, rect in enumerate([(0, 0, w, h)] + list(seg_bounds)):
                        x, y, rw, rh = rect
                        area = rw * rh
                        if area < 64 ** 2:
                            continue
                        silh_roi = motion_mask[y : y + rh, x : x + rw]
                        orient_roi = mg_orient[y : y + rh, x : x + rw]
                        mask_roi = mg_mask[y : y + rh, x : x + rw]
                        mhi_roi = motion_history[y : y + rh, x : x + rw]
                        if cv2.norm(silh_roi, cv2.NORM_L1) < area * 0.05:
                            continue
                        angle = cv2.calcGlobalOrientation(orient_roi, mask_roi, mhi_roi, timestamp, MHI_DURATION)
                        color = ((255, 0, 0), (0, 0, 255))[i == 0]
                        draw_motion_comp(vis, rect, angle, color)

                    visCopy = vis.copy()
                    draw_str(visCopy, (20, 20), visual_name)
                    cv2.imshow("Output", visCopy)

                    prev_frame = frame.copy()
                    if 0xFF & cv2.waitKey(5) == 27:
                        break
                    cv2.waitKey(25)
                """
                #################################################################################SAMHI-7
                """
                if subFrameCount7 == 0:
                    frame_diff7 = cv2.absdiff(frame, prev_frame7)
                    gray_diff7 = cv2.cvtColor(frame_diff7, cv2.COLOR_BGR2GRAY)
                    # Remove the noise and do the threshold
                    # (same commented-out smoothing/morphology experiments as in the SAMHI-1 branch)
                    thrs = cv2.getTrackbarPos("threshold", "Output")
                    ret7, motion_mask7 = cv2.threshold(gray_diff7, thrs, 1, cv2.THRESH_BINARY)
                    timestamp7 = clock()
                    cv2.updateMotionHistory(motion_mask7, motion_history7, timestamp7, MHI_DURATION)

                    mg_mask7, mg_orient7 = cv2.calcMotionGradient(
                        motion_history7, MAX_TIME_DELTA, MIN_TIME_DELTA, apertureSize=5
                    )
                    seg_mask7, seg_bounds7 = cv2.segmentMotion(motion_history7, timestamp7, MAX_TIME_DELTA)

                    visual_name7 = visuals[cv2.getTrackbarPos("visual", "Output7")]
                    if visual_name7 == "input":
                        vis7 = frame.copy()
                    elif visual_name7 == "frame_diff":
                        vis7 = frame_diff7.copy()
                    elif visual_name7 == "motion_hist":
                        vis7 = np.uint8(
                            np.clip((motion_history7 - (timestamp7 - MHI_DURATION)) / MHI_DURATION, 0, 1) * 255
                        )
                        vis7 = cv2.cvtColor(vis7, cv2.COLOR_GRAY2BGR)
                    elif visual_name7 == "grad_orient":
                        hsv7[:, :, 0] = mg_orient7 / 2
                        hsv7[:, :, 2] = mg_mask7 * 255
                        vis7 = cv2.cvtColor(hsv7, cv2.COLOR_HSV2BGR)

                    for i, rect in enumerate([(0, 0, w, h)] + list(seg_bounds7)):
                        x, y, rw, rh = rect
                        area = rw * rh
                        if area < 64 ** 2:
                            continue
                        silh_roi7 = motion_mask7[y : y + rh, x : x + rw]
                        orient_roi7 = mg_orient7[y : y + rh, x : x + rw]
                        mask_roi7 = mg_mask7[y : y + rh, x : x + rw]
                        mhi_roi7 = motion_history7[y : y + rh, x : x + rw]
                        if cv2.norm(silh_roi7, cv2.NORM_L1) < area * 0.05:
                            continue
                        angle7 = cv2.calcGlobalOrientation(orient_roi7, mask_roi7, mhi_roi7, timestamp7, MHI_DURATION)
                        color7 = ((255, 0, 0), (0, 0, 255))[i == 0]
                        draw_motion_comp(vis7, rect, angle7, color7)

                    visCopy7 = vis7.copy()
                    draw_str(visCopy7, (20, 20), visual_name7)
                    cv2.imshow("Output7", visCopy7)

                    prev_frame7 = frame.copy()
                    if 0xFF & cv2.waitKey(5) == 27:
                        break
                    cv2.waitKey(25)

                """
                #################################################################################SAMHI-14
                """
                if subFrameCount14 == 0:
                    frame_diff14 = cv2.absdiff(frame, prev_frame14)
                    gray_diff14 = cv2.cvtColor(frame_diff14, cv2.COLOR_BGR2GRAY)
                    # Remove the noise and do the threshold
                    # (same commented-out smoothing/morphology experiments as in the SAMHI-1 branch)
                    thrs = cv2.getTrackbarPos("threshold", "Output14")
                    ret14, motion_mask14 = cv2.threshold(gray_diff14, thrs, 1, cv2.THRESH_BINARY)
                    timestamp14 = clock()
                    cv2.updateMotionHistory(motion_mask14, motion_history14, timestamp14, MHI_DURATION)

                    mg_mask14, mg_orient14 = cv2.calcMotionGradient(
                        motion_history14, MAX_TIME_DELTA, MIN_TIME_DELTA, apertureSize=5
                    )
                    seg_mask14, seg_bounds14 = cv2.segmentMotion(motion_history14, timestamp14, MAX_TIME_DELTA)

                    visual_name = visuals[cv2.getTrackbarPos("visual", "Output14")]
                    if visual_name == "input":
                        vis14 = frame.copy()
                    elif visual_name == "frame_diff":
                        vis14 = frame_diff14.copy()
                    elif visual_name == "motion_hist":
                        vis14 = np.uint8(
                            np.clip((motion_history14 - (timestamp14 - MHI_DURATION)) / MHI_DURATION, 0, 1) * 255
                        )
                        vis14 = cv2.cvtColor(vis14, cv2.COLOR_GRAY2BGR)
                    elif visual_name == "grad_orient":
                        hsv14[:, :, 0] = mg_orient14 / 2
                        hsv14[:, :, 2] = mg_mask14 * 255
                        vis14 = cv2.cvtColor(hsv14, cv2.COLOR_HSV2BGR)

                    for i, rect in enumerate([(0, 0, w, h)] + list(seg_bounds14)):
                        x, y, rw, rh = rect
                        area = rw * rh
                        if area < 64 ** 2:
                            continue
                        silh_roi14 = motion_mask14[y : y + rh, x : x + rw]
                        orient_roi14 = mg_orient14[y : y + rh, x : x + rw]
                        mask_roi14 = mg_mask14[y : y + rh, x : x + rw]
                        mhi_roi14 = motion_history14[y : y + rh, x : x + rw]
                        if cv2.norm(silh_roi14, cv2.NORM_L1) < area * 0.05:
                            continue
                        angle14 = cv2.calcGlobalOrientation(
                            orient_roi14, mask_roi14, mhi_roi14, timestamp14, MHI_DURATION
                        )
                        color14 = ((255, 0, 0), (0, 0, 255))[i == 0]
                        draw_motion_comp(vis14, rect, angle14, color14)

                    visCopy14 = vis14.copy()
                    draw_str(visCopy14, (20, 20), visual_name)
                    cv2.imshow("Output14", visCopy14)

                    prev_frame14 = frame.copy()
                    if 0xFF & cv2.waitKey(5) == 27:
                        break
                    cv2.waitKey(25)

                subFrameCount = subFrameCount + 1
                subFrameCount7 = subFrameCount7 + 1
                subFrameCount14 = subFrameCount14 + 1

                if subFrameCount > skipFrames:
                    subFrameCount = 0

                if subFrameCount7 > skipFrames7:
                    subFrameCount7 = 0

                if subFrameCount14 > skipFrames14:
                    subFrameCount14 = 0

                frameCounter = frameCounter + 1
            with open("mhiInfo", "a+") as mhiFile:
                mhiFile.write(
                    "\n======================================================================================================\n"
                )
                for row in motion_history:
                    # print(row)
                    mhiFile.write(" ".join(str(x) for x in row) + "\n")

            if visual_name == "motion_hist":
                mhi_vis = np.uint8(np.clip((motion_history - (timestamp - MHI_DURATION)) / MHI_DURATION, 0, 1) * 255)
                mhi_vis = cv2.cvtColor(mhi_vis, cv2.COLOR_GRAY2BGR)

                mhi_vis7 = np.uint8(np.clip((motion_history7 - (timestamp7 - MHI_DURATION)) / MHI_DURATION, 0, 1) * 255)
                mhi_vis7 = cv2.cvtColor(mhi_vis7, cv2.COLOR_GRAY2BGR)

                mhi_vis14 = np.uint8(
                    np.clip((motion_history14 - (timestamp14 - MHI_DURATION)) / MHI_DURATION, 0, 1) * 255
                )
                mhi_vis14 = cv2.cvtColor(mhi_vis14, cv2.COLOR_GRAY2BGR)
            else:
                hsv[:, :, 0] = mg_orient / 2
                hsv[:, :, 2] = mg_mask * 255
                mhi_vis = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)

                hsv7[:, :, 0] = mg_orient7 / 2
                hsv7[:, :, 2] = mg_mask7 * 255
                mhi_vis7 = cv2.cvtColor(hsv7, cv2.COLOR_HSV2BGR)

                hsv14[:, :, 0] = mg_orient14 / 2
                hsv14[:, :, 2] = mg_mask14 * 255
                mhi_vis14 = cv2.cvtColor(hsv14, cv2.COLOR_HSV2BGR)

            # Remove the noise and do the threshold
            # cv2.cv.Smooth(mhi_vis, mhi_vis, cv2.cv.CV_BLUR, 5,5)
            # cv2.cv.MorphologyEx(mhi_vis, mhi_vis, None, None, cv2.cv.CV_MOP_OPEN)
            # cv2.cv.MorphologyEx(mhi_vis, mhi_vis, None, None, cv2.cv.CV_MOP_CLOSE)
            # cv2.cv.Threshold(mhi_vis, mhi_vis, 10, 255, cv2.cv.CV_THRESH_BINARY_INV)
            # #
            # mhi_vis = cv2.dilate(mhi_vis, kernel, iterations=2)
            # mhi_vis = cv2.erode(mhi_vis, kernel, iterations=2)
            # mhi_vis = cv2.morphologyEx(mhi_vis, cv2.MORPH_OPEN, kernel)
            # mhi_vis = cv2.morphologyEx(mhi_vis, cv2.MORPH_CLOSE, kernel)
            # mhi_vis = cv2.erode(mhi_vis, kernel, iterations=2)
            # mhi_vis = cv2.dilate(mhi_vis, kernel, iterations=2)

            ##Start extracting features
            sift = cv2.SIFT()
            denseDetector = cv2.FeatureDetector_create(sDetector)  ##using Dense Feature Detector

            kp = detector.detect(mhi_vis)

            kp7 = detector.detect(mhi_vis7)

            kp14 = detector.detect(mhi_vis14)

            # kp2, des2 = sift.compute(mhi_vis,kp)
            # img=cv2.drawKeypoints(mhi_vis,kp2)

            print ("KeyPoints Length:: ", len(kp))

            hasAtleastOneKP = False

            ##Check if there are any detected keypoints before processing.
            if len(kp) > 0:
                hasAtleastOneKP = True
                features = extractor.compute(mhi_vis, kp)
                featuresDes = features[1]
                # print('Descriptors:: ',featuresDes)
                print ("Descriptors Length:: ", len(featuresDes))
                print ("Descriptors Shape:: ", featuresDes.shape)

                if featureDesSAHMIS is None:
                    featureDesSAHMIS = featuresDes
                else:
                    featureDesSAHMIS = np.append(featureDesSAHMIS, featuresDes, axis=0)

                if oper == "extractTrainingVocabulary":
                    bowTrainer.add(featureDesSAHMIS)
                    saveTrainFeaturesDes.append(featureDesSAHMIS)
                    cv2.imwrite("mhiImages\\" + file.split("\\")[2] + "_mhi.jpg", mhi_vis)
                elif oper == "extractBOWDescriptor":
                    bowDescriptor = bowDE.compute(mhi_vis, kp)
                    # descriptors.push_back(bowDescriptor);
                    # print('bowDescriptor:: ',bowDescriptor)
                    print ("bowDescriptor Length:: ", len(bowDescriptor))
                    print ("bowDescriptor Shape:: ", bowDescriptor.shape)
                    if fileType == "train":
                        img = cv2.drawKeypoints(mhi_vis, kp)
                        cv2.imwrite("keyPoints\\" + file.split("\\")[2] + "_keypoints.jpg", img)
                    if bowDescriptorSAHMIS is None:
                        bowDescriptorSAHMIS = bowDescriptor
                    else:
                        # bowDescriptorSAHMIS = np.append(bowDescriptorSAHMIS,bowDescriptor)
                        bowDescriptorSAHMIS = np.sum([bowDescriptorSAHMIS, bowDescriptor], axis=0)
            else:
                featuresDes = np.zeros((1, featureSize), np.float32)
                if featureDesSAHMIS is None:
                    featureDesSAHMIS = featuresDes
                else:
                    featureDesSAHMIS = np.append(featureDesSAHMIS, featuresDes, axis=0)
                bowDescriptor = np.zeros((1, 1000), np.float32)
                if bowDescriptorSAHMIS is None:
                    bowDescriptorSAHMIS = bowDescriptor
                else:
                    # bowDescriptorSAHMIS = np.append(bowDescriptorSAHMIS,bowDescriptor)
                    bowDescriptorSAHMIS = np.sum([bowDescriptorSAHMIS, bowDescriptor], axis=0)
                print ("No SAMHI-1 Key points were detectected for this image..")

            ##Check if there are any detected keypoints before processing.
            if len(kp7) > 0:
                hasAtleastOneKP = True
                features7 = extractor.compute(mhi_vis7, kp7)
                featuresDes7 = features7[1]
                # print('Descriptors7:: ',featuresDes7)
                print ("Descriptors7 Length:: ", len(featuresDes7))
                print ("Descriptors7 Shape:: ", featuresDes7.shape)

                if featureDesSAHMIS is None:
                    featureDesSAHMIS = featuresDes7
                else:
                    featureDesSAHMIS = np.append(featureDesSAHMIS, featuresDes7, axis=0)

                if oper == "extractTrainingVocabulary":
                    cv2.imwrite("mhiImages\\" + file.split("\\")[2] + "_mhi7.jpg", mhi_vis7)
                elif oper == "extractBOWDescriptor":
                    bowDescriptor7 = bowDE.compute(mhi_vis7, kp7)
                    # descriptors.push_back(bowDescriptor);
                    # print('bowDescriptor7:: ',bowDescriptor7)
                    print ("bowDescriptor7 Length:: ", len(bowDescriptor7))
                    print ("bowDescriptor7 Shape:: ", bowDescriptor7.shape)
                    if fileType == "train":
                        img7 = cv2.drawKeypoints(mhi_vis7, kp7)
                        cv2.imwrite("keyPoints\\" + file.split("\\")[2] + "_keypoints_7.jpg", img7)
                    if bowDescriptorSAHMIS is None:
                        bowDescriptorSAHMIS = bowDescriptor7
                    else:
                        # print("bowDescriptor7=> ",bowDescriptor7)
                        # bowDescriptorSAHMIS = np.append(bowDescriptorSAHMIS,bowDescriptor7)
                        bowDescriptorSAHMIS = np.sum([bowDescriptorSAHMIS, bowDescriptor7], axis=0)
            else:
                featuresDes7 = np.zeros((1, featureSize), np.float32)
                if featureDesSAHMIS is None:
                    featureDesSAHMIS = featuresDes7
                else:
                    featureDesSAHMIS = np.append(featureDesSAHMIS, featuresDes7, axis=0)
                bowDescriptor7 = np.zeros((1, 1000), np.float32)
                if bowDescriptorSAHMIS is None:
                    bowDescriptorSAHMIS = bowDescriptor7
                else:
                    # bowDescriptorSAHMIS = np.append(bowDescriptorSAHMIS,bowDescriptor7)
                    bowDescriptorSAHMIS = np.sum([bowDescriptorSAHMIS, bowDescriptor7], axis=0)
                print ("No SAMHI-7 Key points were detectected for this image..")

            ##Check if there are any detected keypoints before processing.
            if len(kp14) > 0:
                hasAtleastOneKP = True
                features14 = extractor.compute(mhi_vis14, kp14)
                featuresDes14 = features14[1]
                # print('Descriptor14:: ',featuresDes14)
                print ("Descriptors14 Length:: ", len(featuresDes14))
                print ("Descriptors14 Shape:: ", featuresDes14.shape)

                if featureDesSAHMIS is None:
                    featureDesSAHMIS = featuresDes14
                else:
                    featureDesSAHMIS = np.append(featureDesSAHMIS, featuresDes14, axis=0)

                if oper == "extractTrainingVocabulary":
                    cv2.imwrite("mhiImages\\" + file.split("\\")[2] + "_mhi14.jpg", mhi_vis14)
                elif oper == "extractBOWDescriptor":
                    bowDescriptor14 = bowDE.compute(mhi_vis14, kp14)
                    # descriptors.push_back(bowDescriptor);
                    # print('bowDescriptor14:: ',bowDescriptor14)
                    print ("bowDescriptor14 Length:: ", len(bowDescriptor14))
                    print ("bowDescriptor14 Shape:: ", bowDescriptor7.shape)
                    if fileType == "train":
                        img14 = cv2.drawKeypoints(mhi_vis14, kp14)
                        cv2.imwrite("keyPoints\\" + file.split("\\")[2] + "_keypoints_14.jpg", img14)

                    if bowDescriptorSAHMIS is None:
                        bowDescriptorSAHMIS = bowDescriptor14
                    else:
                        # bowDescriptorSAHMIS = np.append(bowDescriptorSAHMIS,bowDescriptor14)
                        bowDescriptorSAHMIS = np.sum([bowDescriptorSAHMIS, bowDescriptor14], axis=0)
            else:
                featuresDes14 = np.zeros((1, featureSize), np.float32)
                if featureDesSAHMIS is None:
                    featureDesSAHMIS = featuresDes14
                else:
                    featureDesSAHMIS = np.append(featureDesSAHMIS, featuresDes14, axis=0)
                bowDescriptor14 = np.zeros((1, 1000), np.float32)
                if bowDescriptorSAHMIS is None:
                    bowDescriptorSAHMIS = bowDescriptor14
                else:
                    # bowDescriptorSAHMIS = np.append(bowDescriptorSAHMIS,bowDescriptor14)
                    bowDescriptorSAHMIS = np.sum([bowDescriptorSAHMIS, bowDescriptor14], axis=0)
                print ("No SAMHI-14 Key points were detectected for this image..")

            if hasAtleastOneKP:
                if oper == "extractTrainingVocabulary":
                    # print('featureDesSAHMIS:: ',featureDesSAHMIS)
                    print ("featureDesSAHMIS Length:: ", len(featureDesSAHMIS))
                    print ("featureDesSAHMIS Shape:: ", featureDesSAHMIS.shape)
                    bowTrainer.add(featureDesSAHMIS)
                    saveTrainFeaturesDes.append(featureDesSAHMIS)
                else:
                    print ("bowDescriptorSAHMIS:: ", bowDescriptorSAHMIS)
                    print ("bowDescriptorSAHMIS Length:: ", len(bowDescriptorSAHMIS))
                    print ("bowDescriptorSAHMIS Shape:: ", bowDescriptorSAHMIS.shape)
                    ##Check if the operation on training data or test data
                    if fileType == "train":
                        trainData.append(bowDescriptorSAHMIS)
                        trainLabels.append(label)
                        # trainLabels.append(label)
                        # trainLabels.append(label)
                    else:
                        testData.append(bowDescriptorSAHMIS)
                        testLabels.append(label)
                        # testLabels.append(label)
                        # testLabels.append(label)

            print (file.split("\\")[2])

            # cv2.imwrite('keyPoints\\'+file.split("\\")[2]+'_keypoints.jpg',img)
            cv2.waitKey(25)

    cv2.destroyAllWindows()
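
detector, extractor, bowTrainer and bowDE are module-level globals in the source project and are never shown on this page. Under the OpenCV 2.4 API they would have been built roughly like this (a sketch; the "Dense" detector and the vocabulary size of 1000 are assumptions, the latter matching the zero-filled descriptors above):

detector = cv2.FeatureDetector_create("Dense")            # dense keypoint grid
extractor = cv2.DescriptorExtractor_create("SIFT")        # SIFT descriptors
matcher = cv2.DescriptorMatcher_create("FlannBased")
bowTrainer = cv2.BOWKMeansTrainer(1000)                   # k-means vocabulary
bowDE = cv2.BOWImgDescriptorExtractor(extractor, matcher)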
Example #14
def doTestRealTime():
    global saveTrainFeaturesDes, visuals, gSkipFrames, testData,testLabels

    print("Testing on real time video")
    cv2.namedWindow('Output')
    cv2.createTrackbar('visual', 'Output', 2, len(visuals)-1, nothing)
    cv2.createTrackbar('threshold', 'Output', DEFAULT_THRESHOLD, 255, nothing)

    cv2.namedWindow('Output7')
    cv2.createTrackbar('visual', 'Output7', 2, len(visuals)-1, nothing)
    cv2.createTrackbar('threshold', 'Output7', DEFAULT_THRESHOLD, 255, nothing)

    cv2.namedWindow('Output14')
    cv2.createTrackbar('visual', 'Output14', 2, len(visuals)-1, nothing)
    cv2.createTrackbar('threshold', 'Output14', DEFAULT_THRESHOLD, 255, nothing)
    testData = []
    testLabels = []

    printPageBreak()
    print("\n\nProcessing real time video..")

    frameCounter = 0
    cam = cv2.VideoCapture(0)

    ret, frame = cam.read()
    h, w = frame.shape[:2]
    prev_frame = frame.copy()
    motion_history = np.zeros((h, w), np.float32)
    hsv = np.zeros((h, w, 3), np.uint8)
    hsv[:,:,1] = 255
    frameCounter = frameCounter +1
    subFrameCount = 0
    skipFrames = 1

    prev_frame7 = frame.copy()
    motion_history7 = np.zeros((h, w), np.float32)
    # print(motion_history7)
    hsv7 = np.zeros((h, w, 3), np.uint8)
    hsv7[:,:,1] = 255

    prev_frame14 = frame.copy()
    motion_history14 = np.zeros((h, w), np.float32)
    # print(motion_history)
    hsv14 = np.zeros((h, w, 3), np.uint8)
    hsv14[:,:,1] = 255
    frameCounter = 0

    # totalFrames = int(cam.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT))
    frameCounter = frameCounter +1
    # print(totalFrames)

    subFrameCount = 0
    skipFrames = 1

    subFrameCount7 = 0
    skipFrames7 = 7

    subFrameCount14 = 0
    skipFrames14 = 14

    featureDesSAHMIS = None
    bowDescriptorSAHMIS = None

    timestamp = clock()
    timestamp7 = clock()
    timestamp14 = clock()


    label = ""

    ##Read all frames
    while(True):
        ret, frame = cam.read()
        frame7 = frame.copy()
        frame14 = frame.copy()

        if(subFrameCount == 0):
            frame_diff = cv2.absdiff(frame, prev_frame)
            prev_frame = frame.copy()
            gray_diff = cv2.cvtColor(frame_diff, cv2.COLOR_BGR2GRAY)
            thrs = cv2.getTrackbarPos('threshold', 'Output')
            ret, motion_mask = cv2.threshold(gray_diff, thrs, 1, cv2.THRESH_BINARY)
            timestamp = clock()
            cv2.updateMotionHistory(motion_mask, motion_history, timestamp, MHI_DURATION)

            mg_mask, mg_orient = cv2.calcMotionGradient( motion_history, MAX_TIME_DELTA, MIN_TIME_DELTA, apertureSize=5 )
            seg_mask, seg_bounds = cv2.segmentMotion(motion_history, timestamp, MAX_TIME_DELTA)

            visual_name = visuals[cv2.getTrackbarPos('visual', 'Output')]
            if visual_name == 'input':
                vis = frame.copy()
            elif visual_name == 'frame_diff':
                vis = frame_diff.copy()
            elif visual_name == 'motion_hist':
                vis = np.uint8(np.clip((motion_history-(timestamp-MHI_DURATION)) / MHI_DURATION, 0, 1)*255)
                vis = cv2.cvtColor(vis, cv2.COLOR_GRAY2BGR)
            elif visual_name == 'grad_orient':
                hsv[:,:,0] = mg_orient/2
                hsv[:,:,2] = mg_mask*255
                vis = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)

            for i, rect in enumerate([(0, 0, w, h)] + list(seg_bounds)):
                x, y, rw, rh = rect
                area = rw*rh
                if area < 64**2:
                    continue
                silh_roi   = motion_mask   [y:y+rh,x:x+rw]
                orient_roi = mg_orient     [y:y+rh,x:x+rw]
                mask_roi   = mg_mask       [y:y+rh,x:x+rw]
                mhi_roi    = motion_history[y:y+rh,x:x+rw]
                if cv2.norm(silh_roi, cv2.NORM_L1) < area*0.05:
                    continue
                angle = cv2.calcGlobalOrientation(orient_roi, mask_roi, mhi_roi, timestamp, MHI_DURATION)
                color = ((255, 0, 0), (0, 0, 255))[i == 0]
                draw_motion_comp(vis, rect, angle, color)

            visCopy = vis.copy()
            draw_str(visCopy, (20, 20), visual_name)
            cv2.imshow('Output', visCopy)

        if(subFrameCount7 == 0):
            frame_diff7 = cv2.absdiff(frame7, prev_frame7)
            prev_frame7 = frame7.copy()
            gray_diff7 = cv2.cvtColor(frame_diff7, cv2.COLOR_BGR2GRAY)
            thrs = DEFAULT_THRESHOLD
            ret7, motion_mask7 = cv2.threshold(gray_diff7, thrs, 1, cv2.THRESH_BINARY)
            timestamp7 = clock()
            cv2.updateMotionHistory(motion_mask7, motion_history7, timestamp7, MHI_DURATION)

            mg_mask7, mg_orient7 = cv2.calcMotionGradient( motion_history7, MAX_TIME_DELTA, MIN_TIME_DELTA, apertureSize=5 )
            seg_mask7, seg_bounds7 = cv2.segmentMotion(motion_history7, timestamp7, MAX_TIME_DELTA)

            visual_name = 'motion_hist' #visuals[cv2.getTrackbarPos('visual', 'Output')]
            if visual_name == 'input':
                vis7 = frame7.copy()
            elif visual_name == 'frame_diff':
                vis7 = frame_diff7.copy()
            elif visual_name == 'motion_hist':
                vis7 = np.uint8(np.clip((motion_history7-(timestamp7-MHI_DURATION)) / MHI_DURATION, 0, 1)*255)
                vis7 = cv2.cvtColor(vis7, cv2.COLOR_GRAY2BGR)
            elif visual_name == 'grad_orient':
                hsv7[:,:,0] = mg_orient7/2
                hsv7[:,:,2] = mg_mask7*255
                vis7 = cv2.cvtColor(hsv7, cv2.COLOR_HSV2BGR)

            visCopy7 = vis7.copy()
            draw_str(visCopy7, (20, 20), visual_name)
            cv2.imshow('Output7', visCopy7)


        if(subFrameCount14 == 0):
            frame_diff14 = cv2.absdiff(frame14, prev_frame14)
            prev_frame14 = frame14.copy()
            gray_diff14 = cv2.cvtColor(frame_diff14, cv2.COLOR_BGR2GRAY)
            thrs = DEFAULT_THRESHOLD
            ret14, motion_mask14 = cv2.threshold(gray_diff14, thrs, 1, cv2.THRESH_BINARY)
            timestamp14 = clock()
            cv2.updateMotionHistory(motion_mask14, motion_history14, timestamp14, MHI_DURATION)

            mg_mask14, mg_orient14 = cv2.calcMotionGradient( motion_history14, MAX_TIME_DELTA, MIN_TIME_DELTA, apertureSize=5 )
            seg_mask14, seg_bounds14 = cv2.segmentMotion(motion_history14, timestamp14, MAX_TIME_DELTA)

            visual_name = 'motion_hist' #visuals[cv2.getTrackbarPos('visual', 'Output')]
            if visual_name == 'input':
                vis14 = frame14.copy()
            elif visual_name == 'frame_diff':
                vis14 = frame_diff14.copy()
            elif visual_name == 'motion_hist':
                vis14 = np.uint8(np.clip((motion_history14-(timestamp14-MHI_DURATION)) / MHI_DURATION, 0, 1)*255)
                vis14 = cv2.cvtColor(vis14, cv2.COLOR_GRAY2BGR)
            elif visual_name == 'grad_orient':
                hsv14[:,:,0] = mg_orient14/2
                hsv14[:,:,2] = mg_mask14*255
                vis14 = cv2.cvtColor(hsv14, cv2.COLOR_HSV2BGR)

            visCopy14 = vis14.copy()
            draw_str(visCopy14, (20, 20), visual_name)
            cv2.imshow('Output14', visCopy14)

        # cv2.waitKey(0)
        hasAtLeastOneKeyPoint = False

        if(subFrameCount == 0):
            mhi_vis = np.uint8(np.clip((motion_history-(timestamp-MHI_DURATION)) / MHI_DURATION, 0, 1)*255)
            mhi_vis = cv2.cvtColor(mhi_vis, cv2.COLOR_GRAY2BGR)
            kp = detector.detect(mhi_vis)
            print('KeyPoints Length:: ',len(kp))
            if len(kp) > 0:
                features = extractor.compute(mhi_vis,kp)
                featuresDes = features[1]
                hasAtLeastOneKeyPoint = True
                bowDescriptor = bowDE.compute(mhi_vis, kp)

                if(bowDescriptorSAHMIS is None):
                    bowDescriptorSAHMIS = bowDescriptor
                else:
                    bowDescriptorSAHMIS = np.append(bowDescriptorSAHMIS,bowDescriptor)#,axis=0)
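                # BoW descriptors from the three temporal scales (the 1-, 7- and
                # 14-frame histories) are concatenated into a single vector, matching
                # the reshape(-1, 3*dictSize) used before classification below.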

                ##Check if the operation on training data or test data
                # testData.append(bowDescriptor)
                # testLabels.append(label)
            else:
                print("No Key points were detectected for this image..")

        if(subFrameCount7 == 0):
            mhi_vis7 = np.uint8(np.clip((motion_history7-(timestamp7-MHI_DURATION)) / MHI_DURATION, 0, 1)*255)
            mhi_vis7 = cv2.cvtColor(mhi_vis7, cv2.COLOR_GRAY2BGR)
            kp7 = detector.detect(mhi_vis7)
            print('KeyPoints7 Length:: ',len(kp7))
            if len(kp7) > 0:
                features7 = extractor.compute(mhi_vis7,kp7)
                featuresDes7 = features7[1]
                hasAtLeastOneKeyPoint = True
                bowDescriptor7 = bowDE.compute(mhi_vis7, kp7)
                if(bowDescriptorSAHMIS is None):
                    bowDescriptorSAHMIS = bowDescriptor7
                else:
                    bowDescriptorSAHMIS = np.append(bowDescriptorSAHMIS,bowDescriptor7)#,axis=0)

                ##Check if the operation on training data or test data
                # testData.append(bowDescriptor7)
            else:
                print("No Key points were detectected for this image7.")


        if(subFrameCount14 == 0):
            mhi_vis14 = np.uint8(np.clip((motion_history14-(timestamp14-MHI_DURATION)) / MHI_DURATION, 0, 1)*255)
            mhi_vis14 = cv2.cvtColor(mhi_vis14, cv2.COLOR_GRAY2BGR)
            kp14 = detector.detect(mhi_vis14)
            print('KeyPoints14 Length:: ',len(kp14))
            if len(kp14) > 0:
                features14 = extractor.compute(mhi_vis14,kp14)
                featuresDes14 = features14[1]
                hasAtLeastOneKeyPoint = True
                bowDescriptor14 = bowDE.compute(mhi_vis14, kp14)
                if(bowDescriptorSAHMIS is None):
                    bowDescriptorSAHMIS = bowDescriptor14
                else:
                    bowDescriptorSAHMIS = np.append(bowDescriptorSAHMIS,bowDescriptor14)#,axis=0)

                ##Check if the operation on training data or test data
                # testData.append(bowDescriptor14)
                # testLabels.append(label)
            else:
                print("No Key points were detectected for this image14..")


        ##Start extracting features
        sift = cv2.SIFT()
        denseDetector = cv2.FeatureDetector_create(sDetector) ##using Dense Feature Detector

        ##Check if there are any detected keypoints before processing.
        if(subFrameCount14 == 0 and hasAtLeastOneKeyPoint):  # or frameCounter > 30
            testData.append(bowDescriptorSAHMIS)
            testData = np.float32(testData).reshape(-1,3*dictSize)
            # testLabels = np.float32(testLabels).reshape(-1,1)
            print("testData Shape: ", testData.shape)
            # print("testLabels Shape: ", testLabels.shape)
            print("testData : ", testData)
            # print("testLabels : ", testLabels)

            result = classifier.predict_all(testData)

            #######   Check Accuracy   ########################
            printPageBreak()
            printPageBreak()

            # print("TestLabels: ", testLabels.reshape(1,-1))
            print("Results: ", result.reshape(1,-1));
            frameCounter = 0
            testData = []
            testLabels = []
            featureDesSAHMIS = None
            bowDescriptorSAHMIS = None

        draw_str(frame, (20, 20), label)
        cv2.imshow('Output', visCopy)
        cv2.imshow('Input', frame)
        # cv2.imwrite('keyPoints\\'+file.split("\\")[2]+'_keypoints.jpg',img)
        cv2.waitKey(25)

        ##Update counters
        subFrameCount = subFrameCount + 1
        subFrameCount7 = subFrameCount7 + 1
        subFrameCount14 = subFrameCount14 + 1

        if(subFrameCount14 > skipFrames14):
            subFrameCount = 0
            subFrameCount7 = 0
            subFrameCount14 = 0
            frameCounter = 0
        frameCounter = frameCounter + 1

    cv2.destroyAllWindows()
def processTrainingFiles(oper="extractTrainingVocabulary",fileType="train"):
    global saveTrainFeaturesDes, visuals, gSkipFrames

    cv2.namedWindow('Output')
    cv2.createTrackbar('visual', 'Output', 2, len(visuals)-1, nothing)
    cv2.createTrackbar('threshold', 'Output', DEFAULT_THRESHOLD, 255, nothing)

    printPageBreak()
    print("\n\nProcessing "+fileType.capitalize()+"ing Files")
    trainTestFiles = groupedTrainFiles
    if fileType == "train":
        trainTestFiles = groupedTrainFiles
    else:
        trainTestFiles = groupedTestFiles

    for label, fileList in trainTestFiles.iteritems():
        global trainData, testData, trainLabels, testLabels
        for file in fileList:
            frameCounter = 0
            print(label,file)
            cam = cv2.VideoCapture(file)

            ret, frame = cam.read()
            h, w = frame.shape[:2]
            prev_frame = frame.copy()
            motion_history = np.zeros((h, w), np.float32)
            hsv = np.zeros((h, w, 3), np.uint8)
            hsv[:,:,1] = 255

            prev_frame7 = frame.copy()
            motion_history7 = np.zeros((h, w), np.float32)
            # print(motion_history7)
            hsv7 = np.zeros((h, w, 3), np.uint8)
            hsv7[:,:,1] = 255

            prev_frame14 = frame.copy()
            motion_history14 = np.zeros((h, w), np.float32)
            # print(motion_history)
            hsv14 = np.zeros((h, w, 3), np.uint8)
            hsv14[:,:,1] = 255
            frameCounter = 0

            totalFrames = int(cam.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT))
            frameCounter = frameCounter +1
            print(totalFrames)

            subFrameCount = 0
            skipFrames = 1

            subFrameCount7 = 0
            skipFrames7 = 7

            subFrameCount14 = 0
            skipFrames14 = 14
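            # Three motion histories are kept at different temporal scales; each is
            # recomputed only when its sub-frame counter wraps past its skip value.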

            featureDesSAHMIS = None
            bowDescriptorSAHMIS = None

            timestamp = clock()
            timestamp7 = clock()
            timestamp14 = clock()


            ##Read all frames
            while(frameCounter < totalFrames):
                ret, frame = cam.read()
                frame7 = frame.copy()
                frame14 = frame.copy()

                if(subFrameCount == 0):
                    frame_diff = cv2.absdiff(frame, prev_frame)
                    prev_frame = frame.copy()
                    gray_diff = cv2.cvtColor(frame_diff, cv2.COLOR_BGR2GRAY)
                    thrs = DEFAULT_THRESHOLD
                    ret, motion_mask = cv2.threshold(gray_diff, thrs, 1, cv2.THRESH_BINARY)
                    timestamp = clock()
                    cv2.updateMotionHistory(motion_mask, motion_history, timestamp, MHI_DURATION)

                    mg_mask, mg_orient = cv2.calcMotionGradient( motion_history, MAX_TIME_DELTA, MIN_TIME_DELTA, apertureSize=5 )
                    seg_mask, seg_bounds = cv2.segmentMotion(motion_history, timestamp, MAX_TIME_DELTA)

                    visual_name = 'motion_hist' #visuals[cv2.getTrackbarPos('visual', 'Output')]
                    if visual_name == 'input':
                        vis = frame.copy()
                    elif visual_name == 'frame_diff':
                        vis = frame_diff.copy()
                    elif visual_name == 'motion_hist':
                        vis = np.uint8(np.clip((motion_history-(timestamp-MHI_DURATION)) / MHI_DURATION, 0, 1)*255)
                        vis = cv2.cvtColor(vis, cv2.COLOR_GRAY2BGR)
                    elif visual_name == 'grad_orient':
                        hsv[:,:,0] = mg_orient/2
                        hsv[:,:,2] = mg_mask*255
                        vis = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)

                    for i, rect in enumerate([(0, 0, w, h)] + list(seg_bounds)):
                        x, y, rw, rh = rect
                        area = rw*rh
                        if area < 64**2:
                            continue
                        silh_roi   = motion_mask   [y:y+rh,x:x+rw]
                        orient_roi = mg_orient     [y:y+rh,x:x+rw]
                        mask_roi   = mg_mask       [y:y+rh,x:x+rw]
                        mhi_roi    = motion_history[y:y+rh,x:x+rw]
                        if cv2.norm(silh_roi, cv2.NORM_L1) < area*0.05:
                            continue
                        angle = cv2.calcGlobalOrientation(orient_roi, mask_roi, mhi_roi, timestamp, MHI_DURATION)
                        color = ((255, 0, 0), (0, 0, 255))[i == 0]
                        draw_motion_comp(vis, rect, angle, color)

                    visCopy = vis.copy()
                    draw_str(visCopy, (20, 20), visual_name)
                    cv2.imshow('Output', visCopy)

                if(subFrameCount7 == 0):
                    frame_diff7 = cv2.absdiff(frame7, prev_frame7)
                    prev_frame7 = frame7.copy()
                    gray_diff7 = cv2.cvtColor(frame_diff7, cv2.COLOR_BGR2GRAY)
                    thrs = DEFAULT_THRESHOLD
                    ret7, motion_mask7 = cv2.threshold(gray_diff7, thrs, 1, cv2.THRESH_BINARY)
                    timestamp7 = clock()
                    cv2.updateMotionHistory(motion_mask7, motion_history7, timestamp7, MHI_DURATION)

                    mg_mask7, mg_orient7 = cv2.calcMotionGradient( motion_history7, MAX_TIME_DELTA, MIN_TIME_DELTA, apertureSize=5 )
                    seg_mask7, seg_bounds7 = cv2.segmentMotion(motion_history7, timestamp7, MAX_TIME_DELTA)

                    visual_name = 'motion_hist' #visuals[cv2.getTrackbarPos('visual', 'Output')]
                    if visual_name == 'input':
                        vis7 = frame7.copy()
                    elif visual_name == 'frame_diff':
                        vis7 = frame_diff7.copy()
                    elif visual_name == 'motion_hist':
                        vis7 = np.uint8(np.clip((motion_history7-(timestamp7-MHI_DURATION)) / MHI_DURATION, 0, 1)*255)
                        vis7 = cv2.cvtColor(vis7, cv2.COLOR_GRAY2BGR)
                    elif visual_name == 'grad_orient':
                        hsv7[:,:,0] = mg_orient7/2
                        hsv7[:,:,2] = mg_mask7*255
                        vis7 = cv2.cvtColor(hsv7, cv2.COLOR_HSV2BGR)

                    visCopy7 = vis7.copy()
                    draw_str(visCopy7, (20, 20), visual_name)
                    cv2.imshow('Output7', visCopy7)

                if(subFrameCount14 == 0):
                    frame_diff14 = cv2.absdiff(frame14, prev_frame14)
                    prev_frame14 = frame14.copy()
                    gray_diff14 = cv2.cvtColor(frame_diff14, cv2.COLOR_BGR2GRAY)
                    thrs = DEFAULT_THRESHOLD
                    ret14, motion_mask14 = cv2.threshold(gray_diff14, thrs, 1, cv2.THRESH_BINARY)
                    timestamp14 = clock()
                    cv2.updateMotionHistory(motion_mask14, motion_history14, timestamp14, MHI_DURATION)

                    mg_mask14, mg_orient14 = cv2.calcMotionGradient( motion_history14, MAX_TIME_DELTA, MIN_TIME_DELTA, apertureSize=5 )
                    seg_mask14, seg_bounds14 = cv2.segmentMotion(motion_history14, timestamp14, MAX_TIME_DELTA)

                    visual_name = 'motion_hist' #visuals[cv2.getTrackbarPos('visual', 'Output')]
                    if visual_name == 'input':
                        vis14 = frame14.copy()
                    elif visual_name == 'frame_diff':
                        vis14 = frame_diff14.copy()
                    elif visual_name == 'motion_hist':
                        vis14 = np.uint8(np.clip((motion_history14-(timestamp14-MHI_DURATION)) / MHI_DURATION, 0, 1)*255)
                        vis14 = cv2.cvtColor(vis14, cv2.COLOR_GRAY2BGR)
                    elif visual_name == 'grad_orient':
                        hsv14[:,:,0] = mg_orient14/2
                        hsv14[:,:,2] = mg_mask14*255
                        vis14 = cv2.cvtColor(hsv14, cv2.COLOR_HSV2BGR)

                    visCopy14 = vis14.copy()
                    draw_str(visCopy14, (20, 20), visual_name)
                    cv2.imshow('Output14', visCopy14)


                if(visual_name == 'motion_hist'):
                    mhi_vis = np.uint8(np.clip((motion_history-(timestamp-MHI_DURATION)) / MHI_DURATION, 0, 1)*255)
                    mhi_vis = cv2.cvtColor(mhi_vis, cv2.COLOR_GRAY2BGR)
                    mhi_vis7 = np.uint8(np.clip((motion_history7-(timestamp7-MHI_DURATION)) / MHI_DURATION, 0, 1)*255)
                    mhi_vis7 = cv2.cvtColor(mhi_vis7, cv2.COLOR_GRAY2BGR)
                    mhi_vis14 = np.uint8(np.clip((motion_history14-(timestamp14-MHI_DURATION)) / MHI_DURATION, 0, 1)*255)
                    mhi_vis14 = cv2.cvtColor(mhi_vis14, cv2.COLOR_GRAY2BGR)

                ##Start extracting features
                sift = cv2.SIFT()
                denseDetector = cv2.FeatureDetector_create(sDetector) ##using Dense Feature Detector

                kp = detector.detect(mhi_vis)
                print('KeyPoints Length:: ',len(kp))
                kp7 = detector.detect(mhi_vis7)
                print('KeyPoints7 Length:: ',len(kp7))
                kp14 = detector.detect(mhi_vis14)
                print('KeyPoints14 Length:: ',len(kp14))

                hasAtLeastOneKP = False

                ##Check if there are any detected keypoints before processing.
                if len(kp) > 0:
                    hasAtLeastOneKP = True
                    features = extractor.compute(mhi_vis,kp)
                    featuresDes = features[1]

                    if(featureDesSAHMIS is None):
                        featureDesSAHMIS = featuresDes
                    else:
                        featureDesSAHMIS = np.append(featureDesSAHMIS,featuresDes,axis=0)

                    print('Descriptors:: ',featuresDes)
                    print('Descriptors Length:: ',len(featuresDes))
                    print('Descriptors Shape:: ',featuresDes.shape)

                    # print('KeyPoints:: ',kp)
                    # print('Descriptors:: ',des)
                    #desFlattened = features.flatten()
                    # print('desFlattened Length:: ',len(desFlattened)
                    if(oper == "extractTrainingVocabulary"):
                        # bowTrainer.add(featuresDes)

                        saveTrainFeaturesDes.append(featuresDes)
                        cv2.imwrite('mhiImages\\'+file.split("\\")[2]+'_mhi.jpg',mhi_vis)
                        # saveTrainFeaturesDes1 = np.array(saveTrainFeaturesDes)
                        # saveTrainFeaturesDes1.dump(open('trainFeatures.dat', 'wb'))
                    elif(oper=="extractBOWDescriptor"):
                        bowDescriptor = bowDE.compute(mhi_vis, kp)
                        # descriptors.push_back(bowDescriptor);
                        # print('bowDescriptor:: ',bowDescriptor)
                        print('bowDescriptor Length:: ',len(bowDescriptor))
                        print('bowDescriptor Shape:: ',bowDescriptor.shape)

                        ##Check if the operation on training data or test data
                        if(fileType=="train"):
                            if(bowDescriptorSAHMIS is None):
                                bowDescriptorSAHMIS = bowDescriptor
                            else:
                                bowDescriptorSAHMIS = np.append(bowDescriptorSAHMIS,bowDescriptor)#,axis=0)
                            img=cv2.drawKeypoints(mhi_vis,kp)
                            cv2.imwrite('keyPoints\\'+file.split("\\")[2]+'_keypoints.jpg',img)
                        else:
                            testData.append(bowDescriptor)
                            testLabels.append(label)
                else:
                    print("No Key points were detectected for this image..")


                ##Check if there are any detected keypoints before processing.
                if len(kp7) > 0:

                    hasAtLeastOneKP = True
                    features7 = extractor.compute(mhi_vis7,kp7)
                    featuresDes7 = features7[1]

                    if(featureDesSAHMIS is None):
                        featureDesSAHMIS = featuresDes7
                    else:
                        featureDesSAHMIS = np.append(featureDesSAHMIS,featuresDes7,axis=0)
                    print('featuresDes7:: ', featuresDes7)
                    print('featuresDes7 Length:: ',len(featuresDes7))
                    print('featuresDes7 Shape:: ',featuresDes7.shape)

                    if(oper == "extractTrainingVocabulary"):
                        # bowTrainer.add(featuresDes7)
                        saveTrainFeaturesDes.append(featuresDes7)
                        cv2.imwrite('mhiImages\\'+file.split("\\")[2]+'_mhi7.jpg',mhi_vis7)
                    elif(oper=="extractBOWDescriptor"):
                        bowDescriptor7 = bowDE.compute(mhi_vis7, kp7)
                        print('bowDescriptor7 Length:: ',len(bowDescriptor7))
                        print('bowDescriptor7 Shape:: ',bowDescriptor7.shape)

                        ##Check if the operation on training data or test data
                        if(fileType=="train"):
                            if(bowDescriptorSAHMIS is None):
                                bowDescriptorSAHMIS = bowDescriptor7
                            else:
                                bowDescriptorSAHMIS = np.append(bowDescriptorSAHMIS,bowDescriptor7)#,axis=0)
                            img7=cv2.drawKeypoints(mhi_vis7,kp7)
                            cv2.imwrite('keyPoints\\'+file.split("\\")[2]+'_keypoints7.jpg',img7)
                        else:
                            testData.append(bowDescriptor7)
                            testLabels.append(label)
                else:
                    print("No Key points were detectected for this image7..")


                ##Check if there are any detected keypoints before processing.
                if len(kp14) > 0:
                    hasAtLeastOneKP = True
                    features14 = extractor.compute(mhi_vis14,kp14)
                    featuresDes14 = features14[1]
                    if(featureDesSAHMIS is None):
                        featureDesSAHMIS = featuresDes14
                    else:
                        featureDesSAHMIS = np.append(featureDesSAHMIS,featuresDes14,axis=0)
                    print('featuresDes14 Length:: ',len(featuresDes14))
                    print('featuresDes14 Shape:: ',featuresDes14.shape)

                    if(oper == "extractTrainingVocabulary"):
                        # bowTrainer.add(featuresDes14)
                        saveTrainFeaturesDes.append(featuresDes14)
                        cv2.imwrite('mhiImages\\'+file.split("\\")[2]+'_mhi14.jpg',mhi_vis14)
                        # saveTrainFeaturesDes1 = np.array(saveTrainFeaturesDes)
                        # saveTrainFeaturesDes1.dump(open('trainFeatures.dat', 'wb'))
                    elif(oper=="extractBOWDescriptor"):
                        bowDescriptor14 = bowDE.compute(mhi_vis14, kp14)
                        # descriptors.push_back(bowDescriptor);
                        # print('bowDescriptor:: ',bowDescriptor)
                        print('bowDescriptor14 Length:: ',len(bowDescriptor14))
                        print('bowDescriptor14 Shape:: ',bowDescriptor14.shape)

                        ##Check if the operation on training data or test data
                        if(fileType=="train"):
                            if(bowDescriptorSAHMIS is None):
                                bowDescriptorSAHMIS = bowDescriptor14
                            else:
                                bowDescriptorSAHMIS = np.append(bowDescriptorSAHMIS,bowDescriptor14)#,axis=0)
                            img14=cv2.drawKeypoints(mhi_vis14,kp14)
                            cv2.imwrite('keyPoints\\'+file.split("\\")[2]+'_keypoints14.jpg',img14)
                        else:
                            testData.append(bowDescriptor14)
                            testLabels.append(label)
                else:
                    print("No Key points were detectected for this image14..")

                if(hasAtLeastOneKP):
                    if(oper == "extractTrainingVocabulary"):
                        # print('featureDesSAHMIS:: ',featureDesSAHMIS)
                        print('featureDesSAHMIS Length:: ',len(featureDesSAHMIS))
                        print('featureDesSAHMIS Shape:: ',featureDesSAHMIS.shape)
                        bowTrainer.add(featureDesSAHMIS)
                        saveTrainFeaturesDes.append(featureDesSAHMIS)
                    else:
                        print('bowDescriptorSAHMIS:: ',bowDescriptorSAHMIS)
                        print('bowDescriptorSAHMIS Length:: ',len(bowDescriptorSAHMIS))
                        print('bowDescriptorSAHMIS Shape:: ',bowDescriptorSAHMIS.shape)
                        ##Check if the operation on training data or test data
                        if(fileType=="train"):
                            trainData.append(bowDescriptorSAHMIS)
                            trainLabels.append(label)
                            # trainLabels.append(label)
                            # trainLabels.append(label)
                        else:
                            testData.append(bowDescriptorSAHMIS)
                            testLabels.append(label)
                            # testLabels.append(label)
                            # testLabels.append(label)
                    featureDesSAHMIS = None
                    bowDescriptorSAHMIS = None


                print(file.split("\\")[2])

                subFrameCount = subFrameCount + 1
                if(subFrameCount > skipFrames):
                    subFrameCount = 0

                subFrameCount7 = subFrameCount7 + 1
                if(subFrameCount7 > skipFrames7):
                    subFrameCount7 = 0

                subFrameCount14 = subFrameCount14 + 1
                if(subFrameCount14 > skipFrames14):
                    subFrameCount14 = 0

                frameCounter = frameCounter + 1


                # cv2.imwrite('keyPoints\\'+file.split("\\")[2]+'_keypoints.jpg',img)
                cv2.waitKey(25)

    cv2.destroyAllWindows()
Example #16
    def process_image(self, cv_image):
        if self.motion_history is None:
            self.h, self.w = cv_image.shape[:2]
            self.prev_frame = cv_image.copy()
            self.motion_history = np.zeros((self.h, self.w), np.float32)
            self.hsv = np.zeros((self.h, self.w, 3), np.uint8)
            self.hsv[:, :, 1] = 255
            self.erode_kernel = cv2.getStructuringElement(
                cv2.MORPH_RECT, (3, 3))  # MORPH_RECT is a kernel shape; MORPH_ERODE is an op flag

        color_frame = cv_image.copy()
        frame_diff = cv2.absdiff(color_frame, self.prev_frame)
        gray_diff = cv2.cvtColor(frame_diff, cv2.COLOR_BGR2GRAY)

        thresh = cv2.getTrackbarPos('threshold', self.node_name)

        ret, motion_mask = cv2.threshold(gray_diff, thresh, 1,
                                         cv2.THRESH_BINARY)

        motion_mask = cv2.erode(motion_mask, self.erode_kernel, iterations=2)
        motion_mask = cv2.dilate(motion_mask, self.erode_kernel, iterations=2)

        timestamp = clock()

        cv2.updateMotionHistory(motion_mask, self.motion_history, timestamp,
                                MHI_DURATION)

        mg_mask, mg_orient = cv2.calcMotionGradient(self.motion_history,
                                                    MAX_TIME_DELTA,
                                                    MIN_TIME_DELTA,
                                                    apertureSize=5)
        seg_mask, seg_bounds = cv2.segmentMotion(self.motion_history,
                                                 timestamp, MAX_TIME_DELTA)

        visual_name = self.visuals[cv2.getTrackbarPos('visual',
                                                      self.node_name)]
        if visual_name == 'input':
            vis = cv_image.copy()
        elif visual_name == 'frame_diff':
            vis = frame_diff.copy()
        elif visual_name == 'motion_hist':
            vis = np.uint8(
                np.clip(
                    (self.motion_history -
                     (timestamp - MHI_DURATION)) / MHI_DURATION, 0, 1) * 255)
        elif visual_name == 'motion_hist_color':
            vis = np.uint8(
                np.clip(
                    (self.motion_history -
                     (timestamp - MHI_DURATION)) / MHI_DURATION, 0, 1) * 255)
            vis = cv2.cvtColor(vis, cv2.COLOR_GRAY2BGR)
        elif visual_name == 'grad_orient':
            self.hsv[:, :, 0] = mg_orient / 2
            self.hsv[:, :, 2] = mg_mask * 255
            vis = cv2.cvtColor(self.hsv, cv2.COLOR_HSV2BGR)

        max_rect_area = 0

        for i, rect in enumerate([(0, 0, self.w, self.h)] + list(seg_bounds)):
            x, y, rw, rh = rect
            area = rw * rh
            if area < 64**2:
                continue
            if area < 640 * 480 and area > max_rect_area:
                max_rect_area = area
                max_rect = rect
            silh_roi = motion_mask[y:y + rh, x:x + rw]
            orient_roi = mg_orient[y:y + rh, x:x + rw]
            mask_roi = mg_mask[y:y + rh, x:x + rw]
            mhi_roi = self.motion_history[y:y + rh, x:x + rw]
            if cv2.norm(silh_roi, cv2.NORM_L1) < area * 0.05:
                continue
            angle = cv2.calcGlobalOrientation(orient_roi, mask_roi, mhi_roi,
                                              timestamp, MHI_DURATION)
            color = ((255, 0, 0), (0, 0, 255))[i == 0]
            #draw_motion_comp(vis, rect, angle, color)

        #draw_str(vis, (20, 20), visual_name)

        display_image = cv_image.copy()

        if max_rect_area != 0:
            x, y, w, h = max_rect
            display = color_frame[y:y + h, x:x + w]
            #            #bounding_box = cv2.boundingRect(vis)
            #            #print bounding_box
            #
            if visual_name == 'motion_hist':
                display = vis.copy()
            else:
                display = cv2.bitwise_and(color_frame, vis, vis)

            draw_str(vis, (20, 20), visual_name)

            contour_image = vis.copy()

            contours, hierarchy = cv2.findContours(contour_image,
                                                   cv2.RETR_TREE,
                                                   cv2.CHAIN_APPROX_SIMPLE)

            contour_points = list()

            if len(contours) != 0:
                for cnt in contours:
                    contour_points.append(cnt)

                vstack_points = np.vstack(contour_points)
                if len(vstack_points) > 5:
                    z_ellipse = cv2.fitEllipse(vstack_points)
                    cv2.ellipse(display_image, z_ellipse, (0, 255, 0), 2)

                cv2.drawContours(display_image, contours, -1, (0, 255, 0), 3)

            cv2.imshow("Contours", display_image)

        self.prev_frame = color_frame

        #cv2.waitKey(5)

        return cv_image
def feature_extraction_fullVideo(videoName, scoreTxtName,startFrame,maskRegion,featureWriter, videoOutput, MIN_TIME_DELTA,MAX_TIME_DELTA,MHI_DURATION,THRESH_VALUE,DISPLAY=False): 
    cv2.namedWindow('rat activity recognition')
    visuals = ['input', 'frame_diff', 'motion_hist', 'grad_orient']
    # use MHI features (motion history intensity)
    visual_name = visuals[2]
  
    cam = cv2.VideoCapture(videoName)
    video_len = cam.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT)
    cam.set(cv2.cv.CV_CAP_PROP_POS_FRAMES,startFrame)
    
    ret, frame = cam.read()
    h, w = frame.shape[:2]
    prev_frame = frame.copy()
    motion_history = np.zeros((h, w), np.float32)
    hsv = np.zeros((h, w, 3), np.uint8)
    hsv[:,:,1] = 255
    
    
    fps = 15.0
    fourcc = cv2.cv.CV_FOURCC(*'XVID')

    #outputVideoName = "activityRecognitionResults.avi";
    #VideoOutput = cv2.VideoWriter(outputVideoName,fourcc, fps, (w,h))   
    
    
    
    manualScores = read_manual_scoring(scoreTxtName,video_len)
    
    frame_base = extract_background(videoName, startFrame)
    frame_base = cv2.GaussianBlur(frame_base,(3,3),0) 
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3,3))
    #cam.set(cv2.cv.CV_CAP_PROP_POS_FRAMES,ii)
     
    for ii in range(int(startFrame)+1,int(video_len)-10):
    #while (ii<1000):
        ret, frame = cam.read()
        ## Mouse segmentation
        mouse, junk = segmentation_frame(frame,frame_base,mask,kernel)

        frame_results = cv2.add(frame,mouse)  
        cv2.putText(frame_results, 'stage: ' + str(manualScores[ii]), (200, 30),
                    cv2.FONT_HERSHEY_SIMPLEX, 1.0, (255, 255, 255))
        
        frame_diff = cv2.absdiff(frame, prev_frame)
        gray_diff = cv2.cvtColor(frame_diff, cv2.COLOR_BGR2GRAY)
        
        ret, motion_mask = cv2.threshold(gray_diff, THRESH_VALUE, 1, cv2.THRESH_BINARY)
        timestamp = clock()
        cv2.updateMotionHistory(motion_mask, motion_history, timestamp, MHI_DURATION)
        mg_mask, mg_orient = cv2.calcMotionGradient(motion_history, MAX_TIME_DELTA, MIN_TIME_DELTA, apertureSize=5 )
        seg_mask, seg_bounds = cv2.segmentMotion(motion_history, timestamp, MAX_TIME_DELTA)

        if visual_name == 'input':
            vis = frame.copy()
        elif visual_name == 'frame_diff':
            vis = frame_diff.copy()
        elif visual_name == 'motion_hist':
            vis0 = np.uint8(np.clip((motion_history-(timestamp-MHI_DURATION)) / MHI_DURATION, 0, 1)*255)
            junk,mei0 = cv2.threshold(vis0,1,255,cv2.THRESH_BINARY)
            
            vis0 = vis0*maskRegion
            mei0 = mei0*scipy.sign(maskRegion)

        elif visual_name == 'grad_orient':
            hsv[:,:,0] = mg_orient/2
            hsv[:,:,2] = mg_mask*255
            vis = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
            
        ## Compute features
        M1 = cv2.moments(mei0)
        M2 = cv2.moments(vis0)    
        Hu1 = cv2.HuMoments(M1)
        Hu2 = cv2.HuMoments(M2)
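        # Hu moments are translation-, scale- and rotation-invariant shape
        # descriptors; the signed log10 transform below compresses their very
        # large dynamic range while preserving sign.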
        
        # Add a tiny scalar offset so log10 below is defined when a moment is zero
        # (the original list-valued offset broadcast (7,1)+(7,) to a (7,7) array).
        Hu1 = Hu1 + 1e-200
        Hu2 = Hu2 + 1e-200
        
        Hu1 = np.sign(Hu1)*np.log10(np.abs(Hu1))
        Hu2 = np.sign(Hu2)*np.log10(np.abs(Hu2))
      
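        # Centroids of the MEI and MHI: (m10/m00, m01/m00), guarded against
        # frames with no motion pixels (m00 == 0).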
        if M1['m00'] != 0:
            cx1 = M1['m10']/M1['m00']
            cy1 = M1['m01']/M1['m00']
        else:
            cx1 = 0
            cy1 = 0

        if M2['m00'] != 0:
            cx2 = M2['m10']/M2['m00']
            cy2 = M2['m01']/M2['m00']
        else:
            cx2 = 0
            cy2 = 0

        meiSize = np.count_nonzero(mei0)
        
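        # Bounding box of the active MEI pixels; 'extend' is the fill ratio of
        # motion pixels inside that box.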
        if meiSize == 0:
            corner1 = 0
            corner2 = 0
            corner3 = 0
            corner4 = 0
            height = 0
            width = 0
            extend = 0
        else:
            indices = np.nonzero(mei0)
            corner1 = max(indices[0])
            corner2 = min(indices[0])
            corner3 = max(indices[1])
            corner4 = min(indices[1])
            height = corner1 - corner2+1
            width = corner3 - corner4+1
            extend = meiSize/float(height*width)
            
        features = [ii,Hu1[0][0],Hu1[1][0],Hu1[2][0],Hu1[3][0],Hu1[4][0],Hu1[5][0],Hu1[6][0],
                    Hu2[0][0],Hu2[1][0],Hu2[2][0],Hu2[3][0],Hu2[4][0],Hu2[5][0],Hu2[6][0],
                    cx1, cy1, cx2, cy2, meiSize, corner1, corner2, corner3, corner4,height, width, extend,manualScores[ii]]
       
        featureWriter.writerow(features)
         
        prev_frame = frame.copy()      
     
        vis = cv2.cvtColor(vis0, cv2.COLOR_GRAY2BGR)
        mei = cv2.cvtColor(mei0, cv2.COLOR_GRAY2BGR)
            
                            
        if DISPLAY:

            cv2.imshow('MHI', vis)
            cv2.imshow('MEI', mei)
            cv2.imshow('Video',frame_results)
            videoOutput.write(frame_results)   
            
            if 0xff & cv2.waitKey(1) == 27:
                break
    
         
    cam.release()
    cv2.destroyAllWindows()
Example #18
def mhtest():
    # Build a motion history image over 20 synthetic updates. processframe(i)
    # is assumed to return a binary (0/1) uint8 silhouette for frame i; the
    # frame size, timestamps and duration below are arbitrary assumptions.
    mhist = np.zeros((480, 640), np.float32)
    for i in range(20):
        cv2.updateMotionHistory(processframe(i), mhist, float(i), 20.0)
    return mhist
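A minimal, self-contained sketch of the same call pattern (an illustration, not taken from any of the projects above): synthetic frames replace a camera, the sizes and timestamps are arbitrary assumptions, and the OpenCV 2.4-era cv2.updateMotionHistory signature used throughout these examples is assumed (OpenCV 3+ moved it to the opencv-contrib module as cv2.motempl.updateMotionHistory).

import numpy as np
import cv2

MHI_DURATION = 0.5  # seconds of motion to retain (assumed)

def mhi_demo():
    h, w = 120, 160                     # assumed frame size
    mhi = np.zeros((h, w), np.float32)  # the motion history image
    prev = np.zeros((h, w), np.uint8)
    for i in range(20):
        frame = np.zeros((h, w), np.uint8)
        # a block sliding right stands in for real foreground motion
        cv2.rectangle(frame, (i * 5, 40), (i * 5 + 20, 80), 255, -1)
        diff = cv2.absdiff(frame, prev)
        _, silhouette = cv2.threshold(diff, 30, 1, cv2.THRESH_BINARY)
        cv2.updateMotionHistory(silhouette, mhi, i * 0.05, MHI_DURATION)
        prev = frame
    return mhi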
def feature_extraction_fullVideo(
    videoName,
    startFrame,
    maskRegion,
    featureWriter,
    videoOutput,
    MIN_TIME_DELTA,
    MAX_TIME_DELTA,
    MHI_DURATION,
    THRESH_VALUE,
    DISPLAY=False,
):
    cv2.namedWindow("rat activity recognition")
    visuals = ["input", "frame_diff", "motion_hist", "grad_orient"]
    # use MHI features (motion history intensity)
    visual_name = visuals[2]

    cam = cv2.VideoCapture(videoName)
    video_len = cam.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT)
    cam.set(cv2.cv.CV_CAP_PROP_POS_FRAMES, startFrame)

    ret, frame = cam.read()
    h, w = frame.shape[:2]
    prev_frame = frame.copy()
    motion_history = np.zeros((h, w), np.float32)
    hsv = np.zeros((h, w, 3), np.uint8)
    hsv[:, :, 1] = 255

    # fps = 15.0
    # fourcc = cv2.cv.CV_FOURCC(*'XVID')

    # outputVideoName = "activityRecognitionResults.avi";
    # VideoOutput = cv2.VideoWriter(outputVideoName,fourcc, fps, (w,h))

    frame_base = extract_background(videoName, startFrame)
    frame_base = cv2.GaussianBlur(frame_base, (3, 3), 0)
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
    # cam.set(cv2.cv.CV_CAP_PROP_POS_FRAMES,ii)

    # for ii in range(int(startFrame)+1,int(video_len)-10):
    for ii in range(1, 3000):
        ret, frame = cam.read()
        ## Mouse segmentation
        mouse, mFeat = segmentation_frame(frame, frame_base, mask, kernel)

        frame_results = cv2.add(frame, mouse)

        frame_diff = cv2.absdiff(frame, prev_frame)
        gray_diff = cv2.cvtColor(frame_diff, cv2.COLOR_BGR2GRAY)

        ret, motion_mask = cv2.threshold(gray_diff, THRESH_VALUE, 1, cv2.THRESH_BINARY)
        timestamp = clock()
        cv2.updateMotionHistory(motion_mask, motion_history, timestamp, MHI_DURATION)
        mg_mask, mg_orient = cv2.calcMotionGradient(motion_history, MAX_TIME_DELTA, MIN_TIME_DELTA, apertureSize=5)
        seg_mask, seg_bounds = cv2.segmentMotion(motion_history, timestamp, MAX_TIME_DELTA)

        if visual_name == "input":
            vis = frame.copy()
        elif visual_name == "frame_diff":
            vis = frame_diff.copy()
        elif visual_name == "motion_hist":
            vis0 = np.uint8(np.clip((motion_history - (timestamp - MHI_DURATION)) / MHI_DURATION, 0, 1) * 255)
            junk, mei0 = cv2.threshold(vis0, 1, 255, cv2.THRESH_BINARY)
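            # vis0 is the time-weighted MHI; mei0 is its binarized counterpart,
            # the motion energy image (anywhere motion occurred in the window).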

            vis0 = vis0 * maskRegion
            mei0 = mei0 * scipy.sign(maskRegion)

        elif visual_name == "grad_orient":
            hsv[:, :, 0] = mg_orient / 2
            hsv[:, :, 2] = mg_mask * 255
            vis = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)

        ## Compute features
        M1 = cv2.moments(mei0)
        M2 = cv2.moments(vis0)
        Hu1 = cv2.HuMoments(M1)
        Hu2 = cv2.HuMoments(M2)

        # Add a tiny scalar offset so log10 below is defined when a moment is zero.
        Hu1 = Hu1 + 1e-200
        Hu2 = Hu2 + 1e-200

        Hu1 = np.sign(Hu1) * np.log10(np.abs(Hu1))
        Hu2 = np.sign(Hu2) * np.log10(np.abs(Hu2))

        if M1["m00"] != 0:
            cx1 = M1["m10"] / M1["m00"]
            cy1 = M1["m01"] / M1["m00"]
        else:
            cx1 = 0
            cy1 = 0

        if M2["m00"] != 0:
            cx2 = M2["m10"] / M2["m00"]
            cy2 = M2["m01"] / M2["m00"]
        else:
            cx2 = 0
            cy2 = 0

        meiSize = np.count_nonzero(mei0)

        if meiSize == 0:
            corner1 = 0
            corner2 = 0
            corner3 = 0
            corner4 = 0
            height = 0
            width = 0
            extend = 0
        else:
            indices = np.nonzero(mei0)
            corner1 = max(indices[0])
            corner2 = min(indices[0])
            corner3 = max(indices[1])
            corner4 = min(indices[1])
            height = corner1 - corner2 + 1
            width = corner3 - corner4 + 1
            extend = meiSize / float(height * width)

        features = [
            ii,
            Hu1[0][0],
            Hu1[1][0],
            Hu1[2][0],
            Hu1[3][0],
            Hu1[4][0],
            Hu1[5][0],
            Hu1[6][0],
            Hu2[0][0],
            Hu2[1][0],
            Hu2[2][0],
            Hu2[3][0],
            Hu2[4][0],
            Hu2[5][0],
            Hu2[6][0],
            cx1,
            cy1,
            cx2,
            cy2,
            meiSize,
            corner1,
            corner2,
            corner3,
            corner4,
            height,
            width,
            extend,
            mFeat[0],
            mFeat[1],
            mFeat[2],
            mFeat[3],
            mFeat[4],
            mFeat[5],
            mFeat[6],
            mFeat[7],
            mFeat[8],
            mFeat[9],
            mFeat[10],
        ]

        featureWriter.writerow(features)

        prev_frame = frame.copy()

        vis = cv2.cvtColor(vis0, cv2.COLOR_GRAY2BGR)
        mei = cv2.cvtColor(mei0, cv2.COLOR_GRAY2BGR)

        frameDist0 = np.concatenate((frame_results, vis), axis=0)
        frameDist = frameDist0[::2, ::2, :]
        videoOutput.write(frameDist)

        if DISPLAY:
            cv2.imshow("Video", frameDist)
            if 0xFF & cv2.waitKey(1) == 27:
                break

    cam.release()
    cv2.destroyAllWindows()
Example #20
File: main.py  Project: jennan/pystarters
        continue

    # Convert image to gray. Keep a colour copy for displaying.
    ctr = image.copy()
    image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

    # Subtract background from image.
    imgsub = cv2.absdiff(image, bg)

    # Threshold image.
    ret, silh = cv2.threshold(imgsub, 15, \
                              255, cv2.THRESH_BINARY)

    # Update motion history.
    clear_border(silh)
    cv2.updateMotionHistory(silh, mhi, time.clock(), MHI_DURATION)

    # Find contours.
    mhi_int = mhi.astype(np.uint8)
    contours, hierarchy = cv2.findContours(mhi_int,\
            mode=cv2.RETR_LIST,
            method=cv2.CHAIN_APPROX_NONE)
    if (len(contours) > 0):
        cnt = contours[np.argmax([
            cv2.contourArea(c) for c in contours
        ])]  # Find index of largest contour, which should be the mouse.
        # Fit an ellipse around the contour.
        cv2.drawContours(ctr, [cnt], 0, (255, 255, 255), 2)
        m = cv2.moments(cnt.astype(np.float32))
        try:
            c = [(m['m10'] / m['m00']), (m['m01'] / m['m00'])]  # contour centroid
        except ZeroDivisionError:
            c = None  # zero-area contour
h, w = frame.shape[:2]
motionHistory = numpy.zeros((h, w), numpy.float32)

while 1:
    successFlag, frame = video.read()

    if not successFlag:
        break

    frameDiff = cv2.absdiff(lastFrame, frame)
    greyDiff = cv2.cvtColor(frameDiff, code=cv2.COLOR_BGR2GRAY)

    retval, motionMask = cv2.threshold(greyDiff, 10, 1, cv2.THRESH_BINARY)

    timestamp = time.clock()
    cv2.updateMotionHistory(motionMask, motionHistory, timestamp, 0.5) #Updates the motion history image by a moving silhouette.
    mg_mask, mg_orient = cv2.calcMotionGradient( motionHistory, 0.25, 0.05, apertureSize=5 ) #to calculate gradient orientation of a motion history image at each pixel.
    seg_mask, seg_bounds = cv2.segmentMotion(motionHistory, timestamp, 0.25) #Splits a motion history image into a few parts corresponding to separate independent motions

    #print motionHistory

    total = motionHistory.sum() / 8
    movement.append(total)

    cv2.imshow("My Window", motionHistory)
    c = cv2.waitKey(7) % 0x100
    if c == 27:
        break

    lastFrame = frame.copy()
def video_feature_extraction_save(videoName, featureWriter, case, MIN_TIME_DELTA,MAX_TIME_DELTA,MHI_DURATION,THRESH_VALUE,DISPLAY=False): 
    cv2.namedWindow('rat activity recognition')
    visuals = ['input', 'frame_diff', 'motion_hist', 'grad_orient']
    # use MHI features (motion history intensity)
    visual_name = visuals[2]
  
    cam = cv2.VideoCapture(videoName)
    video_len = cam.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT)
    ret, frame = cam.read()
    h, w = frame.shape[:2]
    prev_frame = frame.copy()
    motion_history = np.zeros((h, w), np.float32)
    hsv = np.zeros((h, w, 3), np.uint8)
    hsv[:,:,1] = 255
    ii = 0         
    while (ii<video_len):
        ii += 1
        ret, frame = cam.read()
        frame_diff = cv2.absdiff(frame, prev_frame)
        gray_diff = cv2.cvtColor(frame_diff, cv2.COLOR_BGR2GRAY)
     
        ret, motion_mask = cv2.threshold(gray_diff, THRESH_VALUE, 1, cv2.THRESH_BINARY)
        timestamp = clock()
        cv2.updateMotionHistory(motion_mask, motion_history, timestamp, MHI_DURATION)
        mg_mask, mg_orient = cv2.calcMotionGradient(motion_history, MAX_TIME_DELTA, MIN_TIME_DELTA, apertureSize=5 )
        seg_mask, seg_bounds = cv2.segmentMotion(motion_history, timestamp, MAX_TIME_DELTA)

        if visual_name == 'input':
            vis = frame.copy()
        elif visual_name == 'frame_diff':
            vis = frame_diff.copy()
        elif visual_name == 'motion_hist':
            vis0 = np.uint8(np.clip((motion_history-(timestamp-MHI_DURATION)) / MHI_DURATION, 0, 1)*255)
            junk,mei0 = cv2.threshold(vis0,1,255,cv2.THRESH_BINARY)

        elif visual_name == 'grad_orient':
            hsv[:,:,0] = mg_orient/2
            hsv[:,:,2] = mg_mask*255
            vis = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
            
        ## Compute features
        M1 = cv2.moments(mei0)
        M2 = cv2.moments(vis0)    
        Hu1 = cv2.HuMoments(M1)
        Hu2 = cv2.HuMoments(M2)
        
        # Add a tiny scalar offset so log10 below is defined when a moment is zero.
        Hu1 = Hu1 + 1e-200
        Hu2 = Hu2 + 1e-200
        
        Hu1 = np.sign(Hu1)*np.log10(np.abs(Hu1))
        Hu2 = np.sign(Hu2)*np.log10(np.abs(Hu2))
        
        if M1['m00'] != 0:
            cx1 = M1['m10']/M1['m00']
            cy1 = M1['m01']/M1['m00']
        else:
            cx1 = 0
            cy1 = 0

        if M2['m00'] != 0:
            cx2 = M2['m10']/M2['m00']
            cy2 = M2['m01']/M2['m00']
        else:
            cx2 = 0
            cy2 = 0

        meiSize = np.count_nonzero(mei0)
        
        if meiSize == 0:
            corner1 = 0
            corner2 = 0
            corner3 = 0
            corner4 = 0
            height = 0
            width = 0
            extend = 0
        else:
            indices = np.nonzero(mei0)
            corner1 = max(indices[0])
            corner2 = min(indices[0])
            corner3 = max(indices[1])
            corner4 = min(indices[1])
            height = corner1 - corner2+1
            width = corner3 - corner4+1
            extend = meiSize/float(height*width)
        
        features = [Hu1[0][0],Hu1[1][0],Hu1[2][0],Hu1[3][0],Hu1[4][0],Hu1[5][0],Hu1[6][0],
                    Hu2[0][0],Hu2[1][0],Hu2[2][0],Hu2[3][0],Hu2[4][0],Hu2[5][0],Hu2[6][0],
                    cx1, cy1, cx2, cy2, meiSize, corner1, corner2, corner3,corner4, height, width, extend, case]
      
        zeroFeatures = [-200]*14
        #zeroFeatures = [0]*14
        if case == 6:  # Rest case
            featureWriter.writerow(features)
        else:
            if features[0:len(features)-6] != zeroFeatures:
                featureWriter.writerow(features)
       
        prev_frame = frame.copy()
                            
        if DISPLAY:
            #draw_str(vis, (20, 20), visual_name)
            vis = cv2.cvtColor(vis0, cv2.COLOR_GRAY2BGR)
            mei = cv2.cvtColor(mei0, cv2.COLOR_GRAY2BGR)
            cv2.imshow('MHI', vis)
            cv2.imshow('MEI', mei)
            cv2.imshow('Video',frame)
    
            if 0xff & cv2.waitKey(50) == 27:
                break
            
    cam.release()
    cv2.destroyAllWindows()
Example #23
h, w = frame.shape[:2]
motionHistory = numpy.zeros((h, w), numpy.float32)

while 1:
    successFlag, frame = video.read()

    if not successFlag:
        break

    frameDiff = cv2.absdiff(lastFrame, frame)
    greyDiff = cv2.cvtColor(frameDiff, code=cv2.COLOR_BGR2GRAY)

    retval, motionMask = cv2.threshold(greyDiff, 20, 1, cv2.THRESH_BINARY)

    timestamp = time.clock()
    cv2.updateMotionHistory(motionMask, motionHistory, timestamp, 0.5)
    mg_mask, mg_orient = cv2.calcMotionGradient(motionHistory,
                                                0.25,
                                                0.05,
                                                apertureSize=5)
    seg_mask, seg_bounds = cv2.segmentMotion(motionHistory, timestamp, 0.25)

    surf = cv2.SURF()
    grey = cv2.cvtColor(frame, code=cv2.COLOR_BGR2GRAY)
    mask = numpy.uint8(numpy.ones(grey.shape))
    points = surf.detect(grey, mask)

    vis = cv2.cvtColor(grey, cv2.COLOR_GRAY2BGR)

    for p in points[::10]:
        cv2.circle(vis, (int(p.pt[0]), int(p.pt[1])), 2, (0, 255, 0), -1)
Example #24
    def detect(self):
        # print('detect...')
        # visuals = ['input', 'frame_diff', 'motion_hist', 'grad_orient']
        # cv2.createTrackbar('visual', 'motempl', 2, len(visuals)-1, nothing)
        # cv2.createTrackbar('threshold', 'motempl', DEFAULT_THRESHOLD, 255, nothing)
        height, width = self.frame.shape[:2]
        frame_diff = cv2.absdiff(self.frame, self.prev_frame)
        gray_diff = cv2.cvtColor(frame_diff, cv2.COLOR_BGR2GRAY)
        thrs = self.threshold_spin.value()
        # thrs = cv2.getTrackbarPos('threshold', 'motempl')
        ret, motion_mask = cv2.threshold(gray_diff, thrs, 1, cv2.THRESH_BINARY)
        timestamp = clock()
        cv2.updateMotionHistory(motion_mask, self.motion_history, timestamp, self.MHI_DURATION)
        mg_mask, mg_orient = cv2.calcMotionGradient(self.motion_history, self.MAX_TIME_DELTA, self.MIN_TIME_DELTA, apertureSize=5)
        seg_mask, seg_bounds = cv2.segmentMotion(self.motion_history, timestamp, self.MAX_TIME_DELTA)

        for radio in (self.visual_radio_1, self.visual_radio_2, self.visual_radio_3, self.visual_radio_4):
            if radio.isChecked():
                visual_name = str(radio.text())
                break

        # visual_name = visuals[cv2.getTrackbarPos('visual', 'motempl')]
        # visual_name = 'input'
        if visual_name == 'input':
            vis = self.frame.copy()
        elif visual_name == 'frame_diff':
            vis = frame_diff.copy()
        elif visual_name == 'motion_hist':
            vis = np.uint8(np.clip((self.motion_history-(timestamp-self.MHI_DURATION)) / self.MHI_DURATION, 0, 1)*255)
            vis = cv2.cvtColor(vis, cv2.COLOR_GRAY2BGR)
        elif visual_name == 'grad_orient':
            self.hsv[:, :, 0] = mg_orient/2
            self.hsv[:, :, 2] = mg_mask*255
            vis = cv2.cvtColor(self.hsv, cv2.COLOR_HSV2BGR)

        for i, rect in enumerate([(0, 0, width, height)] + list(seg_bounds)):
            x, y, rw, rh = rect
            area = rw*rh
            if area < 64**2:
                continue
            silh_roi = motion_mask[y:y+rh, x:x+rw]
            orient_roi = mg_orient[y:y+rh, x:x+rw]
            mask_roi = mg_mask[y:y+rh, x:x+rw]
            mhi_roi = self.motion_history[y:y+rh, x:x+rw]
            if cv2.norm(silh_roi, cv2.NORM_L1) < area*0.05:
                continue
            angle = cv2.calcGlobalOrientation(orient_roi, mask_roi, mhi_roi, timestamp, self.MHI_DURATION)
            color = ((255, 0, 0), (0, 0, 255))[i == 0]
            self.draw_motion_comp(vis, rect, angle, color)
            if i == 0:
                # motion detected (i == 0 is the whole-frame rectangle)
                if self.record_check.isChecked():
                    self.monitor_last_shoot = clock()
                    if not self.is_recording:
                        self.start_record()
                elif self.shoot_check.isChecked():
                    # print(self.monitor_last_shoot)
                    # print(clock())
                    delay = self.shoot_delay_spin.value()
                    if (not self.monitor_last_shoot) or (clock() - self.monitor_last_shoot >= delay):
                        self.shoot()
                        self.monitor_last_shoot = clock()
                if self.sound_check.isChecked():
                    self.play_sound()

        self.draw_str(vis, (20, 20), visual_name)

        self.prev_frame = self.frame.copy()
        return vis
Example #25
	img = camera.read()[1]

	#create the motion history (mhi) of the same size as the image
	motionHist = np.zeros((img.shape[0], img.shape[1]), np.float32)

	firstTime = False

	while True:
		img = camera.read()[1]
		img = cv2.cvtColor(img,cv2.cv.CV_RGB2GRAY)
		#create the silhouette with adaptative threshold
		silhouette = cv2.adaptiveThreshold(img,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
			cv2.THRESH_BINARY,7,10)
		
		timestamp = clock()
		cv2.updateMotionHistory(silhouette, motionHist, timestamp, MHI_DURATION)
		
		mask,orientation = cv2.calcMotionGradient(motionHist,MAX_TIME_DELTA,MIN_TIME_DELTA)
	
		if not firstTime:
			print mask.shape
			print orientation.shape
			firstTime = True
			print mask
			
		cv2.imshow("motionHist",motionHist)
		cv2.imshow("mask",mask)
		cv2.imshow("orientation",orientation)

		if (cv2.waitKey(5)!=-1):
			break
def doTestArchive():
    global saveTrainFeaturesDes, visuals, gSkipFrames, testData,testLabels

    print("Testing on real time video")
    cv2.namedWindow('Output')
    cv2.createTrackbar('visual', 'Output', 2, len(visuals)-1, nothing)
    cv2.createTrackbar('threshold', 'Output', DEFAULT_THRESHOLD, 255, nothing)
    testData = []
    testLabels = []

    printPageBreak()
    print("\n\nProcessing real time video..")

    frameCounter = 0
    cam = cv2.VideoCapture(0)

    ret, frame = cam.read()
    h, w = frame.shape[:2]
    prev_frame = frame.copy()
    motion_history = np.zeros((h, w), np.float32)
    hsv = np.zeros((h, w, 3), np.uint8)
    hsv[:,:,1] = 255
    frameCounter = frameCounter +1
    subFrameCount = 0
    skipFrames = gSkipFrames

    label = ""

    ##Read all frames
    while(True):
        ret, frame = cam.read()
        if(subFrameCount == 0):
            frame_diff = cv2.absdiff(frame, prev_frame)
            prev_frame = frame.copy()
            gray_diff = cv2.cvtColor(frame_diff, cv2.COLOR_BGR2GRAY)
            thrs = cv2.getTrackbarPos('threshold', 'Output')
            ret, motion_mask = cv2.threshold(gray_diff, thrs, 1, cv2.THRESH_BINARY)
            timestamp = clock()
            cv2.updateMotionHistory(motion_mask, motion_history, timestamp, MHI_DURATION)

            mg_mask, mg_orient = cv2.calcMotionGradient( motion_history, MAX_TIME_DELTA, MIN_TIME_DELTA, apertureSize=5 )
            seg_mask, seg_bounds = cv2.segmentMotion(motion_history, timestamp, MAX_TIME_DELTA)

            visual_name = visuals[cv2.getTrackbarPos('visual', 'Output')]
            if visual_name == 'input':
                vis = frame.copy()
            elif visual_name == 'frame_diff':
                vis = frame_diff.copy()
            elif visual_name == 'motion_hist':
                vis = np.uint8(np.clip((motion_history-(timestamp-MHI_DURATION)) / MHI_DURATION, 0, 1)*255)
                vis = cv2.cvtColor(vis, cv2.COLOR_GRAY2BGR)
            elif visual_name == 'grad_orient':
                hsv[:,:,0] = mg_orient/2
                hsv[:,:,2] = mg_mask*255
                vis = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)

            for i, rect in enumerate([(0, 0, w, h)] + list(seg_bounds)):
                x, y, rw, rh = rect
                area = rw*rh
                if area < 64**2:
                    continue
                silh_roi   = motion_mask   [y:y+rh,x:x+rw]
                orient_roi = mg_orient     [y:y+rh,x:x+rw]
                mask_roi   = mg_mask       [y:y+rh,x:x+rw]
                mhi_roi    = motion_history[y:y+rh,x:x+rw]
                if cv2.norm(silh_roi, cv2.NORM_L1) < area*0.05:
                    continue
                angle = cv2.calcGlobalOrientation(orient_roi, mask_roi, mhi_roi, timestamp, MHI_DURATION)
                color = ((255, 0, 0), (0, 0, 255))[i == 0]
                draw_motion_comp(vis, rect, angle, color)

            visCopy = vis.copy()
            draw_str(visCopy, (20, 20), visual_name)
            cv2.imshow('Output', visCopy)

        subFrameCount = subFrameCount + 1
        if(subFrameCount > skipFrames):
            subFrameCount = 0
        frameCounter = frameCounter + 1

        if(visual_name == 'motion_hist'):
            mhi_vis = np.uint8(np.clip((motion_history-(timestamp-MHI_DURATION)) / MHI_DURATION, 0, 1)*255)
            mhi_vis = cv2.cvtColor(mhi_vis, cv2.COLOR_GRAY2BGR)
        else:
            hsv[:,:,0] = mg_orient/2
            hsv[:,:,2] = mg_mask*255
            mhi_vis = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
        ##Start extracting features
        sift = cv2.SIFT()
        denseDetector = cv2.FeatureDetector_create(sDetector) ##using Dense Feature Detector

        kp = detector.detect(mhi_vis)
        print('KeyPoints Length:: ',len(kp))

        ##Check if there are any detected keypoints before processing.
        if len(kp) > 0:
            features = extractor.compute(mhi_vis,kp)
            featuresDes = features[1]
            # print('Descriptors:: ',featuresDes)
            # print('featuresDes Length:: ',len(featuresDes))
            # print('featuresDes Shape:: ',featuresDes.shape)

            bowDescriptor = bowDE.compute(mhi_vis, kp)
            # descriptors.push_back(bowDescriptor);
            # print('bowDescriptor:: ',bowDescriptor)
            # print('bowDescriptor Length:: ',len(bowDescriptor))
            # print('bowDescriptor Shape:: ',bowDescriptor.shape)

            ##Check if the operation on training data or test data
            testData.append(bowDescriptor)
            testLabels.append(label)
        else:
            print("No Key points were detectected for this image..")

        if(subFrameCount == 0 or frameCounter > 30):
            testData = np.float32(testData).reshape(-1,dictSize)
            # testLabels = np.float32(testLabels).reshape(-1,1)
            print("testData Shape: ", testData.shape)
            # print("testLabels Shape: ", testLabels.shape)
            print("testData : ", testData)
            # print("testLabels : ", testLabels)

            result = classifier.predict_all(testData)

            #######   Check Accuracy   ########################
            printPageBreak()
            printPageBreak()

            # print("TestLabels: ", testLabels.reshape(1,-1))
            print("Results: ", result.reshape(1,-1));
            frameCounter = 0
            testData = []
            testLabels = []

        draw_str(frame, (20, 20), label)
        cv2.imshow('Output', visCopy)
        cv2.imshow('Input', frame)

        # cv2.imwrite('keyPoints\\'+file.split("\\")[2]+'_keypoints.jpg',img)
        cv2.waitKey(25)

    cv2.destroyAllWindows()
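
# The motion_hist branches above repeat the same inline normalization. A small
# helper (the name mhi_to_gray is ours, not from the snippet) makes the mapping
# explicit: the newest motion maps to 255, anything older than `duration` to 0.
import numpy as np

def mhi_to_gray(motion_history, timestamp, duration):
    # Rescale the float32 timestamps so [timestamp - duration, timestamp] -> [0, 255].
    scaled = (motion_history - (timestamp - duration)) / duration
    return np.uint8(np.clip(scaled, 0, 1) * 255)
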
Example #27
    frame = cv2.GaussianBlur(frame, (3, 3), -1)
    display = frame.copy()
    fgmask = fgbg.apply(frame)

    diff = cv2.absdiff(fgmask, prev_bg)

    diff = cv2.morphologyEx(diff, cv2.MORPH_CLOSE, se)
    diff = cv2.morphologyEx(diff, cv2.MORPH_OPEN, se)

    prev_bg = fgmask

    frame_count += 1
    timestamp = float(frame_count) / fps

    cv2.updateMotionHistory(diff, motion_history, timestamp, MHI_DURATION)
    #mgrad_mask, mgrad_orient = cv2.calcMotionGradient(motion_history, MAX_TIME_DELTA, MIN_TIME_DELTA, apertureSize=5)
    mseg_mask, mseg_bounds = cv2.segmentMotion(motion_history, timestamp,
                                               MAX_TIME_DELTA)

    mseg_bounds = filter_inside(mseg_bounds)

    draw_detections(display, mseg_bounds, 3)

    cv2.imshow('frame', display)

    k = cv2.waitKey(30) & 0xff

    if k == ord('q'):
        break
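
# The legacy motion-template calls used throughout these examples were removed
# from the main cv2 namespace after OpenCV 2.4; in opencv-contrib builds they
# live under cv2.motempl. A sketch of the same update/segment step against that
# namespace, assuming a contrib build (verify against your OpenCV version):
import cv2
import numpy as np

MHI_DURATION = 0.5
MAX_TIME_DELTA = 0.25

def update_and_segment(diff, motion_history, timestamp):
    cv2.motempl.updateMotionHistory(diff, motion_history, timestamp, MHI_DURATION)
    seg_mask, seg_bounds = cv2.motempl.segmentMotion(
        motion_history, timestamp, MAX_TIME_DELTA)
    return seg_bounds
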
Example #28
from os.path import isfile
import random as rand
import numpy as np
import cv2 as cv
from sklearn.svm import SVC
from sklearn.multiclass import OneVsRestClassifier

MHI_DURATION = 0.5       # assumed constant, not shown in the excerpt
DEFAULT_THRESHOLD = 32   # assumed constant, not shown in the excerpt


def main():
    """
    Read train/test video lists, build MHI-based Hu-moment features, and train a classifier.
    """
    def clock():
        return cv.getTickCount() / cv.getTickFrequency()
    # text files listing "<label> <video_path>" pairs for train and test
    train_folder_path = "/train.txt"
    test_folder_path = "/test.txt"
    if (not isfile(train_folder_path)) or (not isfile(test_folder_path)):
        print "please enter correct folder path!!"
        exit(1)

    """
    get all the file folders names in folder which specify the video category and video path
    """
    label_train = []
    label_test = []
    train_videos = []
    test_videos =[]
    print "Reading Text Files!!"
    with open(train_folder_path) as train_file:
        for line in train_file:
            if line.strip():
                lbl,video_path = line.split()
                train_videos.append(video_path)
                label_train.append(int(lbl))
    with open(test_folder_path) as test_file:
        for line in test_file:
            if len(line) > 1:
                lbl,video_path = line.split()
                test_videos.append("D:/videoanalytics/"+video_path)
                label_test.append(int(lbl))


    training_data=[]
    testing_data = []
    train_count = len(label_train)
    count_nbr = 0
    """
    in each category retrieve all the video files and extract features from each video
    """

    print "CONSTRUCTING FEATURES FOR TRAINING AND TESTING DATA!!"
    for each_video_path in train_videos+test_videos:
        if not isfile(each_video_path):
            print "Video path doesn't exist!!",each_video_path
            exit(1)
        count_nbr = count_nbr + 1
        video = cv.VideoCapture(each_video_path)
        if not video.isOpened():
            print "video cannot be opened"
            exit(1)

        # read the first frame (t = 1)
        ret, frame = video.read()
        h, w = frame.shape[:2]
        prev_frame = frame.copy()
        motion_history = np.zeros((h, w), np.float32)
        timestamp = clock()
        vis = np.uint8(np.clip((motion_history-(timestamp-MHI_DURATION)) / MHI_DURATION, 0, 1)*255)
        fram_count = 1
    while video.isOpened():
            ret, frame = video.read()
            if not ret:
                #to see the mhi image uncomment following line
                #cv.imshow('Motion History Image',vis)
                vis = cv.cvtColor(vis,cv.COLOR_BGR2GRAY)

                features = cv.HuMoments(cv.moments(vis)).flatten()
                features = -np.sign(features) * np.log10(np.abs(features))
                if count_nbr <= train_count:
                    training_data.append(features)
                else:
                    testing_data.append(features)

                #to see the mhi image uncomment following lines
                #if cv.waitKey(30) & 0xFF == ord('q'):
                #    break
                break

            if fram_count % rand.randint(1, 7) == 0:
                frame_diff = cv.absdiff(frame, prev_frame)
                gray_diff = cv.cvtColor(frame_diff, cv.COLOR_BGR2GRAY)
                ret, motion_mask = cv.threshold(gray_diff, DEFAULT_THRESHOLD, 1, cv.THRESH_BINARY)

                cv.updateMotionHistory(motion_mask, motion_history, timestamp, MHI_DURATION)
                vis = np.uint8(np.clip((motion_history-(timestamp-MHI_DURATION)) / MHI_DURATION, 0, 1)*255)
                vis = cv.cvtColor(vis, cv.COLOR_GRAY2BGR)
            fram_count = fram_count+1
            prev_frame = frame.copy()

        video.release()
        cv.destroyAllWindows()

    """
    using rbf kernel with degree 3 will
    """

    print "TRAINING CLASSIFER"
    clf = OneVsRestClassifier(SVC(random_state=0,kernel="sigmoid"))
    clf.fit(np.array(training_data), np.array(label_train))
    print "TESTING ON TRAINING DATA"
    predict_train = clf.predict(training_data)

    cap = cv.VideoCapture(0)
    ret, frame = cap.read()
    fram_count = 0
    h, w = frame.shape[:2]
    prev_frame = frame.copy()
    motion_history = np.zeros((h, w), np.float32)
    timestamp = clock()
    vis = np.uint8(np.clip((motion_history-(timestamp-MHI_DURATION)) / MHI_DURATION, 0, 1)*255)
    while True:
        # Capture frame-by-frame
        ret, frame = cap.read()

        # Our operations on the frame come here
        if fram_count % rand.randint(1, 7) == 0:
            frame_diff = cv.absdiff(frame, prev_frame)
            gray_diff = cv.cvtColor(frame_diff, cv.COLOR_BGR2GRAY)
            ret, motion_mask = cv.threshold(gray_diff, DEFAULT_THRESHOLD, 1, cv.THRESH_BINARY)

            cv.updateMotionHistory(motion_mask, motion_history, timestamp, MHI_DURATION)
            vis = np.uint8(np.clip((motion_history-(timestamp-MHI_DURATION)) / MHI_DURATION, 0, 1)*255)
        fram_count = fram_count+1
        if fram_count % 300 == 0:
            # classify the accumulated motion history image
            features = cv.HuMoments(cv.moments(vis)).flatten()
            features = -np.sign(features) * np.log10(np.abs(features))
            predict_test = clf.predict(features.reshape(1, -1))  # one sample, 2-D as sklearn expects
            print "motion is :", predict_test
            #reset all back
            h, w = frame.shape[:2]
            prev_frame = frame.copy()
            motion_history = np.zeros((h, w), np.float32)
            timestamp = clock()
            vis = np.uint8(np.clip((motion_history-(timestamp-MHI_DURATION)) / MHI_DURATION, 0, 1)*255)


        prev_frame = frame.copy()
        if cv.waitKey(1) & 0xFF == ord('q'):
            break

    cap.release()
    cv.destroyAllWindows()
    print "TESTING ON TESTING DATA"
    predict_test = clf.predict(testing_data)

    correct_train = 0.0
    false_train = 0.0
    for i in range(len(label_train)):
        print label_train[i],predict_train[i]
        if label_train[i] == predict_train[i]:
            correct_train = correct_train+1
        else:
            false_train = false_train+1
    print "Totally Classified training data"
    print correct_train,false_train,correct_train/(float)(correct_train+false_train) * 100
    correct_cat = {}
    false_cat = {}
    for i in range(1, 11):  # keys match the per-label report loop below
        correct_cat[i] = 0.0
        false_cat[i] = 0.0

    correct_test = 0.0
    false_test = 0.0

    for i in range(len(label_test)):
        print label_test[i],predict_test[i]
        if label_test[i] == predict_test[i]:
            correct_test = correct_test+1
            if correct_cat.has_key(label_test[i]):
                correct_cat[label_test[i]] = correct_cat[label_test[i]] + 1

        else:
            false_test = false_test+1
            if false_cat.has_key(label_test[i]):
                false_cat[label_test[i]] = false_cat[label_test[i]] + 1


    print "label    correct   wrong     percentage"
    for i in range(1,11):
        if correct_cat[i] + false_cat[i] > 0:
            print i,"       ",correct_cat[i],"       ",false_cat[i],"           ",correct_cat[i]/(correct_cat[i]+false_cat[i])*100


    print "Totally Classified testing data"
    print "Correct ",correct_test,"False ",false_test,"Percentage ",correct_test/(correct_test+false_test) * 100


    return
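
# The Hu-moment pipeline above flattens and log-scales in several steps. A
# compact equivalent is sketched below; the helper name and the eps guard
# (keeping log10 finite when a moment is exactly zero) are ours, not from
# the source.
def hu_log_features(gray, eps=1e-200):
    hu = cv.HuMoments(cv.moments(gray)).flatten() + eps
    return -np.sign(hu) * np.log10(np.abs(hu))
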
def real_time_evaluation(videoName, featureWriter, classifier, activities, MIN_TIME_DELTA, MAX_TIME_DELTA, MHI_DURATION, THRESH_VALUE, DISPLAY=False):
    cv2.namedWindow('rat activity recognition')
    visuals = ['input', 'frame_diff', 'motion_hist', 'grad_orient']
    # use MHI features (motion history image)
    visual_name = visuals[2]
  
    cam = cv2.VideoCapture(videoName)
    video_len = cam.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT)
    ret, frame = cam.read()
    h, w = frame.shape[:2]
    prev_frame = frame.copy()
    motion_history = np.zeros((h, w), np.float32)
    hsv = np.zeros((h, w, 3), np.uint8)
    hsv[:,:,1] = 255
    
    
    fps = 15.0
    fourcc = cv2.cv.CV_FOURCC(*'XVID')

    outputVideoName = "activityRecognitionResults.avi"
    VideoOutput = cv2.VideoWriter(outputVideoName,fourcc, fps, (w,h))   
    
    
    ii = 0        
    
    #cam.set(cv2.cv.CV_CAP_PROP_POS_FRAMES,ii)
     
    while ii < video_len:
    #while (ii<1000):
        ii += 1
        ret, frame = cam.read()
        frame_diff = cv2.absdiff(frame, prev_frame)
        gray_diff = cv2.cvtColor(frame_diff, cv2.COLOR_BGR2GRAY)
     
        ret, motion_mask = cv2.threshold(gray_diff, THRESH_VALUE, 1, cv2.THRESH_BINARY)
        timestamp = clock()
        cv2.updateMotionHistory(motion_mask, motion_history, timestamp, MHI_DURATION)
        mg_mask, mg_orient = cv2.calcMotionGradient(motion_history, MAX_TIME_DELTA, MIN_TIME_DELTA, apertureSize=5 )
        seg_mask, seg_bounds = cv2.segmentMotion(motion_history, timestamp, MAX_TIME_DELTA)

        if visual_name == 'input':
            vis = frame.copy()
        elif visual_name == 'frame_diff':
            vis = frame_diff.copy()
        elif visual_name == 'motion_hist':
            vis0 = np.uint8(np.clip((motion_history-(timestamp-MHI_DURATION)) / MHI_DURATION, 0, 1)*255)
            junk,mei0 = cv2.threshold(vis0,1,255,cv2.THRESH_BINARY)

        elif visual_name == 'grad_orient':
            hsv[:,:,0] = mg_orient/2
            hsv[:,:,2] = mg_mask*255
            vis = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
            
        ## Compute features
        M1 = cv2.moments(mei0)
        M2 = cv2.moments(vis0)    
        Hu1 = cv2.HuMoments(M1)
        Hu2 = cv2.HuMoments(M2)
        
        smallNum = 1e-200  # scalar offset keeps log10 finite without reshaping the (7, 1) Hu arrays
        Hu1 = Hu1 + smallNum
        Hu2 = Hu2 + smallNum
        
        Hu1 = np.sign(Hu1)*np.log10(np.abs(Hu1))
        Hu2 = np.sign(Hu2)*np.log10(np.abs(Hu2))
      
        if M1['m00']!=0:
            cx1 = M1['m10']/M1['m00']
            cy1 = M1['m01']/M1['m00']
        else:
            cx1 = 0
            cy1 = 0
            
        if M2['m00']!=0:
            cx2 = M2['m10']/M2['m00']
            cy2 = M2['m01']/M2['m00']
        else:
            cx2 = 0
            cy2 = 0
                                       
        meiSize = np.count_nonzero(mei0)
        
        if meiSize == 0:
            corner1 = 0
            corner2 = 0
            corner3 = 0
            corner4 = 0
            height = 0
            width = 0
            extend = 0
        else:
            indices = np.nonzero(mei0)
            corner1 = max(indices[0])
            corner2 = min(indices[0])
            corner3 = max(indices[1])
            corner4 = min(indices[1])
            height = corner1 - corner2+1
            width = corner3 - corner4+1
            extend = meiSize/float(height*width)
            
        features = [Hu1[0][0],Hu1[1][0],Hu1[2][0],Hu1[3][0],Hu1[4][0],Hu1[5][0],Hu1[6][0],
                    Hu2[0][0],Hu2[1][0],Hu2[2][0],Hu2[3][0],Hu2[4][0],Hu2[5][0],Hu2[6][0],
                    cx1, cy1, cx2, cy2, meiSize, corner1, corner2, corner3, corner4,height, width, extend]
       
        featureWriter.writerow(features)
         
        tag = classifier.predict(features[0:26])
        activity = activities[tag]  
            
        prev_frame = frame.copy()      
     
                            
        if DISPLAY:
            cv2.putText(frame, activity, (5, 25),
                cv2.FONT_HERSHEY_SIMPLEX, 1.0, (255,255,255))
            vis = cv2.cvtColor(vis0, cv2.COLOR_GRAY2BGR)
            mei = cv2.cvtColor(mei0, cv2.COLOR_GRAY2BGR)
            cv2.imshow('MHI', vis)
            cv2.imshow('MEI', mei)
            cv2.imshow('Video',frame)
            VideoOutput.write(frame)
            
            if 0xff & cv2.waitKey(50) == 27:
                break
            
    cam.release()
    cv2.destroyAllWindows()
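
# The geometry features above come from the MEI (the thresholded MHI): bounding
# box, size, and fill ratio. A self-contained restatement of that step, with
# names of our choosing; `mei0` is the binary uint8 image from cv2.threshold:
import numpy as np

def mei_bbox_features(mei0):
    size = np.count_nonzero(mei0)
    if size == 0:
        return 0, 0, 0, 0.0
    rows, cols = np.nonzero(mei0)
    height = rows.max() - rows.min() + 1
    width = cols.max() - cols.min() + 1
    extend = size / float(height * width)  # fraction of the box that moved
    return size, height, width, extend
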
Example #30
        angle = 180 * angle / np.pi
        v = 2. * np.sqrt(2.) * np.sqrt(v)

        a_s[k, i] = v[0]
        b_s[k, i] = v[1]
        ab_s[k, i] = v[0] / v[1]
        theta_s[k, i] = angle

        # Draw ellipse on video
        cv2.ellipse(frame, pt1, (int(v[1]), int(v[0])), -angle, 0, 360, (64, k*255, 64), 3)

        # Compute silhouette and update mhi
        mask=np.zeros_like(fgmask)
        cv2.ellipse(mask, pt1, (int(v[1]), int(v[0])), -angle, 0, 360, (255, 255, 255), -1)
        silhouette = 255 - np.bitwise_and(fgmask,mask)
        cv2.updateMotionHistory(silhouette, raw_mhi[k], i, MHI_DURATION)

        # Scale and store mhi for processing
        out = cv2.convertScaleAbs(raw_mhi[k], None, 255.0/MHI_DURATION, (MHI_DURATION - i)*255.0/MHI_DURATION)
        mhi[k,i,:,:] = 255 - out

        #cv2.imshow(str(k), mhi[k,i,:,:])
        cv2.imshow('fgmask', fgmask)
        cv2.imshow('frame', frame)

    k = cv2.waitKey(30) & 0xff
    if k == 27:
        break
    i += 1
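
# Note this snippet feeds updateMotionHistory with the frame index i rather than
# wall-clock seconds as the other examples do; the API only requires timestamp
# and MHI_DURATION to share units. A minimal sketch of both conventions, using
# the 2.4-era cv2 API assumed throughout this page (all values below are ours):
import cv2
import numpy as np

silhouette = np.ones((4, 4), np.uint8)
mhi_frames = np.zeros((4, 4), np.float32)   # timestamps in frame units
mhi_seconds = np.zeros((4, 4), np.float32)  # timestamps in seconds
frame_idx, fps = 35, 30.0
cv2.updateMotionHistory(silhouette, mhi_frames, float(frame_idx), 15.0)  # duration: 15 frames
cv2.updateMotionHistory(silhouette, mhi_seconds, frame_idx / fps, 0.5)   # duration: 0.5 s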

Example #31
import cv2
import numpy
import time

# assumes `video` is an already-opened cv2.VideoCapture
successFlag, frame = video.read()

lastFrame = frame.copy()

h, w = frame.shape[:2]
motionHistory = numpy.zeros((h, w), numpy.float32)

while 1:
    successFlag, frame = video.read()

    if not successFlag:
        break

    frameDiff = cv2.absdiff(lastFrame, frame)
    greyDiff = cv2.cvtColor(frameDiff, code=cv2.COLOR_BGR2GRAY)

    retval, motionMask = cv2.threshold(greyDiff,10,1,cv2.THRESH_BINARY)

    timestamp = time.clock()
    cv2.updateMotionHistory(motionMask, motionHistory, timestamp, 0.5)
    mg_mask, mg_orient = cv2.calcMotionGradient( motionHistory, 0.25, 0.05, apertureSize=5 )
    seg_mask, seg_bounds = cv2.segmentMotion(motionHistory, timestamp, 0.25)

    total = motionHistory.sum() / 8  # crude overall-motion score built from raw timestamps
    print "movement: ", total

    cv2.imshow("My Window", motionHistory)
    cv2.waitKey(1)

    lastFrame = frame.copy()
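
# Summing the raw MHI mixes timestamps into the score, so "movement" drifts
# upward as time.clock() grows. A steadier measure, already used for per-region
# gating in the snippets above, is the L1 norm of the binary motion mask:
motion_amount = cv2.norm(motionMask, cv2.NORM_L1)  # count of changed pixels
print "movement:", motion_amount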
Example #32
    cam = video.create_capture(video_src, fallback='synth:class=chess:bg=../cpp/lena.jpg:noise=0.01')
    ret, frame = cam.read()
    h, w = frame.shape[:2]
    prev_frame = frame.copy()
    motion_history = np.zeros((h, w), np.float32)
    hsv = np.zeros((h, w, 3), np.uint8)
    hsv[:, :, 1] = 255
    while True:
        ret, frame = cam.read()
        frame_diff = cv2.absdiff(frame, prev_frame)
        gray_diff = cv2.cvtColor(frame_diff, cv2.COLOR_BGR2GRAY)
        thrs = 30  # cv2.getTrackbarPos('threshold', 'motempl')
        ret, motion_mask = cv2.threshold(gray_diff, thrs, 1, cv2.THRESH_BINARY)
        timestamp = clock()
        cv2.updateMotionHistory(motion_mask, motion_history, timestamp, MHI_DURATION)
        mg_mask, mg_orient = cv2.calcMotionGradient(motion_history, MAX_TIME_DELTA, MIN_TIME_DELTA, apertureSize=5)
        seg_mask, seg_bounds = cv2.segmentMotion(motion_history, timestamp, MAX_TIME_DELTA)

        visual_name = visuals[cv2.getTrackbarPos('visual', 'motempl')]
        if visual_name == 'input':
            vis = frame.copy()
        elif visual_name == 'frame_diff':
            vis = frame_diff.copy()
        elif visual_name == 'motion_hist':
            vis = np.uint8(np.clip((motion_history - (timestamp - MHI_DURATION)) / MHI_DURATION, 0, 1) * 255)
            vis = cv2.cvtColor(vis, cv2.COLOR_GRAY2BGR)
        elif visual_name == 'grad_orient':
            hsv[:, :, 0] = mg_orient / 2
            hsv[:, :, 2] = mg_mask * 255
            vis = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
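
# The 'visual' trackbar read above comes from OpenCV's motempl sample; a minimal
# setup for it (window name assumed to match the getTrackbarPos call):
cv2.namedWindow('motempl')
visuals = ['input', 'frame_diff', 'motion_hist', 'grad_orient']
cv2.createTrackbar('visual', 'motempl', 2, len(visuals) - 1, lambda v: None)
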
def video_feature_extraction_save(
    videoName,
    featureWriter,
    maskRegion,
    case,
    MIN_TIME_DELTA,
    MAX_TIME_DELTA,
    MHI_DURATION,
    THRESH_VALUE,
    DISPLAY=False,
):
    cv2.namedWindow("rat activity recognition")
    visuals = ["input", "frame_diff", "motion_hist", "grad_orient"]
    # use MHI features (motion history image)
    visual_name = visuals[2]

    cam = cv2.VideoCapture(videoName)
    video_len = cam.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT)

    ret, frame = cam.read()

    h, w = frame.shape[:2]
    prev_frame = frame.copy()
    motion_history = np.zeros((h, w), np.float32)
    hsv = np.zeros((h, w, 3), np.uint8)
    hsv[:, :, 1] = 255
    ii = 0
    while ii < video_len - 1:
        ii += 1
        ret, frame = cam.read()

        frame_diff = cv2.absdiff(frame, prev_frame)

        gray_diff = cv2.cvtColor(frame_diff, cv2.COLOR_BGR2GRAY) * maskRegion

        ret, motion_mask = cv2.threshold(gray_diff, THRESH_VALUE, 1, cv2.THRESH_BINARY)
        timestamp = clock()
        cv2.updateMotionHistory(motion_mask, motion_history, timestamp, MHI_DURATION)
        mg_mask, mg_orient = cv2.calcMotionGradient(motion_history, MAX_TIME_DELTA, MIN_TIME_DELTA, apertureSize=5)
        seg_mask, seg_bounds = cv2.segmentMotion(motion_history, timestamp, MAX_TIME_DELTA)

        if visual_name == "input":
            vis = frame.copy()
        elif visual_name == "frame_diff":
            vis = frame_diff.copy()
        elif visual_name == "motion_hist":
            vis0 = np.uint8(np.clip((motion_history - (timestamp - MHI_DURATION)) / MHI_DURATION, 0, 1) * 255)
            junk, mei0 = cv2.threshold(vis0, 1, 255, cv2.THRESH_BINARY)

        elif visual_name == "grad_orient":
            hsv[:, :, 0] = mg_orient / 2
            hsv[:, :, 2] = mg_mask * 255
            vis = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)

        ## Compute features
        M1 = cv2.moments(mei0)
        M2 = cv2.moments(vis0)
        Hu1 = cv2.HuMoments(M1)
        Hu2 = cv2.HuMoments(M2)

        smallNum = 1e-200  # scalar offset keeps log10 finite without reshaping the (7, 1) Hu arrays
        Hu1 = Hu1 + smallNum
        Hu2 = Hu2 + smallNum

        Hu1 = np.sign(Hu1) * np.log10(np.abs(Hu1))
        Hu2 = np.sign(Hu2) * np.log10(np.abs(Hu2))

        if M1["m00"] != 0:
            cx1 = M1["m10"] / M1["m00"]
            cy1 = M1["m01"] / M1["m00"]
        else:
            cx1 = 0
            cy1 = 0

        if M2["m00"] != 0:
            cx2 = M2["m10"] / M2["m00"]
            cy2 = M2["m01"] / M2["m00"]
        else:
            cx2 = 0
            cy2 = 0

        meiSize = np.count_nonzero(mei0)

        if meiSize == 0:
            corner1 = 0
            corner2 = 0
            corner3 = 0
            corner4 = 0
            height = 0
            width = 0
            extend = 0
        else:
            maskInd = np.nonzero(maskRegion)
            maskCx = np.mean(maskInd[0])
            maskCy = np.mean(maskInd[1])

            indices = np.nonzero(mei0)
            corner1 = max(indices[0]) - maskCx
            corner2 = min(indices[0]) - maskCx
            corner3 = max(indices[1]) - maskCy
            corner4 = min(indices[1]) - maskCy
            height = corner1 - corner2 + 1
            width = corner3 - corner4 + 1
            extend = meiSize / float(height * width)

        # features = [Hu1[0][0],Hu1[1][0],Hu1[2][0],Hu1[3][0],Hu1[4][0],Hu1[5][0],Hu1[6][0],
        #            Hu2[0][0],Hu2[1][0],Hu2[2][0],Hu2[3][0],Hu2[4][0],Hu2[5][0],Hu2[6][0],
        #            cx1, cy1, cx2, cy2, meiSize, corner1, corner2, corner3,corner4, height, width, extend, case]

        features = [
            Hu1[0][0],
            Hu1[1][0],
            Hu1[2][0],
            Hu1[3][0],
            Hu1[4][0],
            Hu1[5][0],
            Hu1[6][0],
            Hu2[0][0],
            Hu2[1][0],
            Hu2[2][0],
            Hu2[3][0],
            Hu2[4][0],
            Hu2[5][0],
            Hu2[6][0],
            cx1,
            cy1,
            cx2,
            cy2,
            corner1,
            corner2,
            corner3,
            corner4,
            meiSize,
            height / (width + 0.000001),
            height,
            width,
            extend,
            case,
        ]
        # zeroFeatures = [-200]*14
        # zeroFeatures = [0]*14
        # if case == 1:# Rest case
        #    featureWriter.writerow(features)
        # else:
        #    if features[0:len(features)-6] != zeroFeatures:
        #       featureWriter.writerow(features)
        featureWriter.writerow(features)

        prev_frame = frame.copy()

        if DISPLAY:
            # draw_str(vis, (20, 20), visual_name)
            vis = cv2.cvtColor(vis0, cv2.COLOR_GRAY2BGR)
            mei = cv2.cvtColor(mei0, cv2.COLOR_GRAY2BGR)
            cv2.imshow("MHI", vis)
            # cv2.imshow('MEI', mei)
            cv2.imshow("Video", frame)

            if 0xFF & cv2.waitKey(1) == 27:
                break

    cam.release()
    cv2.destroyAllWindows()
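
# A hypothetical driver for video_feature_extraction_save; every name and value
# below is an assumption, not from the source: a CSV writer for the feature
# rows, an all-ones ROI mask, and plausible timing constants.
import csv
import numpy as np

if __name__ == "__main__":
    with open("features.csv", "wb") as f:  # "wb" suits the py2-era csv module used here
        writer = csv.writer(f)
        mask = np.ones((480, 640), np.uint8)  # full-frame region of interest
        video_feature_extraction_save(
            "rat_video.avi", writer, mask, 1,
            MIN_TIME_DELTA=0.05, MAX_TIME_DELTA=0.25,
            MHI_DURATION=0.5, THRESH_VALUE=32, DISPLAY=False)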