Example #1
    def get_motion_angle(diff):
        # `diff` is a binary motion silhouette; `hist_32` is the float32 MHI, updated in place
        times_s = time.clock()
        cv2.updateMotionHistory(diff, hist_32, times_s, DURATION)
        # calcMotionGradient returns (valid-gradient mask, per-pixel orientation in degrees)
        hist_8, direction = cv2.calcMotionGradient(hist_32, 0.25, 0.05, apertureSize=5)
        angle = cv2.calcGlobalOrientation(direction, hist_8, hist_32, times_s, DURATION)

        return angle
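
The snippet above leans on module-level state that is not shown. A minimal sketch of the assumed setup, targeting the legacy OpenCV 2.4 API where cv2.updateMotionHistory lives (the names come from the snippet; the sizes and duration are illustrative assumptions):

    import time
    import cv2
    import numpy as np

    DURATION = 0.5                          # MHI duration in seconds (assumed value)
    h, w = 480, 640                         # frame size (illustrative)
    hist_32 = np.zeros((h, w), np.float32)  # float32 motion-history image

    # `diff` is expected to be a binary (0/1) silhouette of the same size, e.g.:
    #     ret, diff = cv2.threshold(gray_diff, 32, 1, cv2.THRESH_BINARY)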
Example #2
    def on_frame(self, frame):
        h, w = frame.shape[:2]
        qi = 0
        #print "on_frame %d x %d" % (h, w)
        frame_diff = cv2.absdiff(frame, self.prev_frame)
        gray_diff = cv2.cvtColor(frame_diff, cv2.COLOR_BGR2GRAY)
        ret, motion_mask = cv2.threshold(gray_diff, self._threshold, 1, cv2.THRESH_BINARY)
        timestamp = clock()
        cv2.updateMotionHistory(motion_mask, self.motion_history, timestamp, MHI_DURATION)
        mg_mask, mg_orient = cv2.calcMotionGradient(self.motion_history, MAX_TIME_DELTA, MIN_TIME_DELTA, apertureSize=5 )
        seg_mask, seg_bounds = cv2.segmentMotion(self.motion_history, timestamp, MAX_TIME_DELTA)

        centers = []
        rects = []
        draws = []
        for i, rect in enumerate([(0, 0, w, h)] + list(seg_bounds)):
            x, y, rw, rh = rect
            area = rw*rh
            if area < 64**2:
                continue
            silh_roi   = motion_mask        [y:y+rh,x:x+rw]
            orient_roi = mg_orient          [y:y+rh,x:x+rw]
            mask_roi   = mg_mask            [y:y+rh,x:x+rw]
            mhi_roi    = self.motion_history[y:y+rh,x:x+rw]
            if cv2.norm(silh_roi, cv2.NORM_L1) < area*0.05:
                continue
            angle = cv2.calcGlobalOrientation(orient_roi, mask_roi, mhi_roi, timestamp, MHI_DURATION)
            color = ((255, 0, 0), (0, 0, 255))[i == 0]
            if self._use_cv_gui:
                draws.append(lambda vis, rect=rect, angle=angle, color=color:
                                draw_motion_comp(vis, rect, angle, color))
            centers.append( (x+rw/2, y+rh/2) )
            rects.append(rect)

        self.tracker_group.update_trackers(centers, rects)

        #print 'Active trackers: %d' % len(trackers)
        #print 'Tracker score: %s' % ','.join(['%2d'%len(tracker.hits) for tracker in trackers])
        trackers = self.tracker_group.trackers
        cx, cy = None, None
        #print "#trackers = %d" % len(trackers)
        if len(trackers):
            first_tracker = trackers[0]
            cx, cy = center_after_median_threshold(frame, first_tracker.rect)
            cv2.circle(frame, (cx, cy), 5, (255, 255, 255), 3)
        #print str(qi)*5; qi += 1
        #print self._on_cx_cy
        self._on_cx_cy(cx, cy)  # passes (None, None) when no balloon was identified
        #print str(qi)*5; qi += 1

        if self._use_cv_gui:
            self.on_frame_cv_gui(frame, draws, (cx, cy))
        else:
            self.frame_vis(frame, draws, (cx, cy))

        #time.sleep(0.5)
        self.prev_frame = frame.copy()
        # TODO - print visualization onto image
        return frame
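
The constants above are not defined in the snippet. This code closely follows OpenCV's motempl.py sample, whose usual values are below; treat them, and the clock() helper from the samples' common.py, as assumptions:

    MHI_DURATION = 0.5     # seconds a silhouette pixel survives in the motion-history image
    MAX_TIME_DELTA = 0.25  # max time delta for cv2.calcMotionGradient
    MIN_TIME_DELTA = 0.05  # min time delta for cv2.calcMotionGradient

    def clock():
        return cv2.getTickCount() / cv2.getTickFrequency()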
Example #3
    def process_motions(self, timestamp, motion_mask):
        # Note: the original snippet took `seg_bounds` as a parameter but immediately
        # recomputed it, and used `timestamp` and `motion_mask` without defining them;
        # both are taken as parameters here instead.
        mg_mask, mg_orient = cv2.calcMotionGradient(self.motion_history, MAX_TIME_DELTA, MIN_TIME_DELTA, apertureSize=5)
        seg_mask, seg_bounds = cv2.segmentMotion(self.motion_history, timestamp, MAX_TIME_DELTA)
        for i, rect in enumerate([(0, 0, self.w, self.h)] + list(seg_bounds)):
            x, y, rw, rh = rect
            area = rw*rh
            if area < 64**2:
                continue
            silh_roi   = motion_mask        [y:y+rh,x:x+rw]
            orient_roi = mg_orient          [y:y+rh,x:x+rw]
            mask_roi   = mg_mask            [y:y+rh,x:x+rw]
            mhi_roi    = self.motion_history[y:y+rh,x:x+rw]
            if cv2.norm(silh_roi, cv2.NORM_L1) < area*0.05:
                continue
            angle = cv2.calcGlobalOrientation(orient_roi, mask_roi, mhi_roi, timestamp, MHI_DURATION)
            color = ((255, 0, 0), (0, 0, 255))[i == 0]
            draw_motion_comp(self.vis, rect, angle, color)
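
draw_motion_comp is a helper these snippets take from OpenCV's motempl.py sample; a sketch of its usual definition, for reference (an assumption, not part of the original example):

    def draw_motion_comp(vis, rect, angle, color):
        # outline the motion component, then draw a ray showing its global orientation
        x, y, w, h = rect
        cv2.rectangle(vis, (x, y), (x + w, y + h), (0, 255, 0))
        r = min(w / 2, h / 2)
        cx, cy = x + w / 2, y + h / 2
        angle = angle * np.pi / 180
        cv2.line(vis, (cx, cy), (int(cx + np.cos(angle) * r), int(cy + np.sin(angle) * r)), color, 3)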
Example #4
        elif visual_name == 'motion_hist':
            vis = np.uint8(np.clip((motion_history - (timestamp - MHI_DURATION)) / MHI_DURATION, 0, 1) * 255)
            vis = cv2.cvtColor(vis, cv2.COLOR_GRAY2BGR)
        elif visual_name == 'grad_orient':
            hsv[:, :, 0] = mg_orient / 2
            hsv[:, :, 2] = mg_mask * 255
            vis = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)

        for i, rect in enumerate([(0, 0, w, h)] + list(seg_bounds)):
            x, y, rw, rh = rect
            area = rw * rh
            if area < 64 ** 2:
                continue
            silh_roi = motion_mask   [y:y + rh, x:x + rw]
            orient_roi = mg_orient     [y:y + rh, x:x + rw]
            mask_roi = mg_mask       [y:y + rh, x:x + rw]
            mhi_roi = motion_history[y:y + rh, x:x + rw]
            if cv2.norm(silh_roi, cv2.NORM_L1) < area * 0.05:
                continue
            angle = cv2.calcGlobalOrientation(orient_roi, mask_roi, mhi_roi, timestamp, MHI_DURATION)
            color = ((255, 0, 0), (0, 0, 255))[i == 0]
            draw_motion_comp(vis, rect, angle, color)

        draw_str(vis, (20, 20), visual_name)
        cv2.imshow('motempl', vis)

        prev_frame = frame.copy()
        if 0xFF & cv2.waitKey(5) == 27:
            break
    cv2.destroyAllWindows()
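
The motion_hist visualization maps each pixel's last-update timestamp from the window [timestamp - MHI_DURATION, timestamp] linearly onto 0..255, so the newest motion is white and anything older than MHI_DURATION clips to black. A worked example of the arithmetic (values illustrative):

    # vis = np.uint8(np.clip((motion_history - (timestamp - MHI_DURATION)) / MHI_DURATION, 0, 1) * 255)
    # with MHI_DURATION = 0.5 and timestamp = 10.0:
    #   pixel last updated at t = 10.0 -> (10.0 - 9.5) / 0.5 = 1.0 -> 255  (newest, white)
    #   pixel last updated at t =  9.8 -> ( 9.8 - 9.5) / 0.5 = 0.6 -> 153
    #   pixel last updated at t <= 9.5 -> clipped to 0.0           -> 0    (expired, black)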
def processTrainingFiles(oper="extractTrainingVocabulary", fileType="train"):
    global saveTrainFeaturesDes
    cv2.namedWindow("Output")
    visuals = ["input", "frame_diff", "motion_hist", "grad_orient"]
    cv2.createTrackbar("visual", "Output", 2, len(visuals) - 1, nothing)
    cv2.createTrackbar("threshold", "Output", DEFAULT_THRESHOLD, 255, nothing)

    cv2.namedWindow("Output7")
    # visuals = ['input', 'frame_diff', 'motion_hist', 'grad_orient']
    cv2.createTrackbar("visual", "Output7", 2, len(visuals) - 1, nothing)
    cv2.createTrackbar("threshold", "Output7", DEFAULT_THRESHOLD, 255, nothing)

    cv2.namedWindow("Output14")
    # visuals = ['input', 'frame_diff', 'motion_hist', 'grad_orient']
    cv2.createTrackbar("visual", "Output14", 2, len(visuals) - 1, nothing)
    cv2.createTrackbar("threshold", "Output14", DEFAULT_THRESHOLD, 255, nothing)

    printPageBreak()
    print ("\n\nProcessing " + fileType.capitalize() + "ing Files")
    if fileType == "train":
        trainTestFiles = groupedTrainFiles
    else:
        trainTestFiles = groupedTestFiles

    for label, fileList in trainTestFiles.iteritems():
        global trainData, testData, trainLabels, testLabels
        for file in fileList:
            frameCounter = 0
            print (label, file)
            cam = cv2.VideoCapture(file)
            # cam = video.create_capture(file,fallback='synth:class=chess:bg=lena.jpg:noise=0.01')
            ret, frame = cam.read()
            h, w = frame.shape[:2]
            prev_frame = frame.copy()
            motion_history = np.zeros((h, w), np.float32)
            # print(motion_history)
            hsv = np.zeros((h, w, 3), np.uint8)
            hsv[:, :, 1] = 255

            prev_frame7 = frame.copy()
            motion_history7 = np.zeros((h, w), np.float32)
            # print(motion_history7)
            hsv7 = np.zeros((h, w, 3), np.uint8)
            hsv7[:, :, 1] = 255

            prev_frame14 = frame.copy()
            motion_history14 = np.zeros((h, w), np.float32)
            # print(motion_history)
            hsv14 = np.zeros((h, w, 3), np.uint8)
            hsv14[:, :, 1] = 255

            totalFrames = int(cam.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT))
            frameCounter = frameCounter + 1
            print (totalFrames)

            subFrameCount = 0
            skipFrames = 1

            subFrameCount7 = 0
            skipFrames7 = 7

            subFrameCount14 = 0
            skipFrames14 = 14

            featureDesSAHMIS = None
            bowDescriptorSAHMIS = None

            while frameCounter < totalFrames:
                ret, frame = cam.read()

                """
                #################################################################################SAMHI-1
                """
                if subFrameCount == 0:
                    frame_diff = cv2.absdiff(frame, prev_frame)
                    gray_diff = cv2.cvtColor(frame_diff, cv2.COLOR_BGR2GRAY)
                    # Remove the noise and do the threshold
                    # gray_diff = cv2.morphologyEx(gray_diff, cv2.KERNEL_SMOOTH, kernel)
                    # gray_diff = cv2.dilate(gray_diff, kernel, iterations=1)
                    # gray_diff = cv2.erode(gray_diff, kernel, iterations=1)
                    # # gray_diff = cv2.morphologyEx(gray_diff, cv2.MORPH_OPEN, kernel)
                    # # gray_diff = cv2.morphologyEx(gray_diff, cv2.MORPH_CLOSE, kernel)
                    # gray_diff = cv2.erode(gray_diff, kernel, iterations=1)
                    # gray_diff = cv2.dilate(gray_diff, kernel, iterations=1)
                    # cv2.cv.Smooth(gray_diff, gray_diff, cv2.cv.CV_BLUR, 5,5)
                    # cv2.cv.MorphologyEx(gray_diff, gray_diff, None, None, cv2.cv.CV_MOP_OPEN)
                    # cv2.cv.MorphologyEx(gray_diff, gray_diff, None, None, cv2.cv.CV_MOP_CLOSE)
                    # cv2.cv.Threshold(gray_diff, gray_diff, 10, 255, cv2.cv.CV_THRESH_BINARY_INV)
                    #
                    # mhi_vis = cv2.dilate(mhi_vis, kernel, iterations=2)
                    # mhi_vis = cv2.erode(mhi_vis, kernel, iterations=2)
                    # mhi_vis = cv2.morphologyEx(mhi_vis, cv2.MORPH_OPEN, kernel)
                    # mhi_vis = cv2.morphologyEx(mhi_vis, cv2.MORPH_CLOSE, kernel)
                    # mhi_vis = cv2.erode(mhi_vis, kernel, iterations=2)
                    # mhi_vis = cv2.dilate(mhi_vis, kernel, iterations=2)
                    thrs = cv2.getTrackbarPos("threshold", "Output")
                    ret, motion_mask = cv2.threshold(gray_diff, thrs, 1, cv2.THRESH_BINARY)
                    timestamp = clock()
                    cv2.updateMotionHistory(motion_mask, motion_history, timestamp, MHI_DURATION)

                    mg_mask, mg_orient = cv2.calcMotionGradient(
                        motion_history, MAX_TIME_DELTA, MIN_TIME_DELTA, apertureSize=5
                    )
                    seg_mask, seg_bounds = cv2.segmentMotion(motion_history, timestamp, MAX_TIME_DELTA)

                    visual_name = visuals[cv2.getTrackbarPos("visual", "Output")]
                    if visual_name == "input":
                        vis = frame.copy()
                    elif visual_name == "frame_diff":
                        vis = frame_diff.copy()
                    elif visual_name == "motion_hist":
                        vis = np.uint8(
                            np.clip((motion_history - (timestamp - MHI_DURATION)) / MHI_DURATION, 0, 1) * 255
                        )
                        vis = cv2.cvtColor(vis, cv2.COLOR_GRAY2BGR)
                    elif visual_name == "grad_orient":
                        hsv[:, :, 0] = mg_orient / 2
                        hsv[:, :, 2] = mg_mask * 255
                        vis = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)

                    for i, rect in enumerate([(0, 0, w, h)] + list(seg_bounds)):
                        x, y, rw, rh = rect
                        area = rw * rh
                        if area < 64 ** 2:
                            continue
                        silh_roi = motion_mask[y : y + rh, x : x + rw]
                        orient_roi = mg_orient[y : y + rh, x : x + rw]
                        mask_roi = mg_mask[y : y + rh, x : x + rw]
                        mhi_roi = motion_history[y : y + rh, x : x + rw]
                        if cv2.norm(silh_roi, cv2.NORM_L1) < area * 0.05:
                            continue
                        angle = cv2.calcGlobalOrientation(orient_roi, mask_roi, mhi_roi, timestamp, MHI_DURATION)
                        color = ((255, 0, 0), (0, 0, 255))[i == 0]
                        draw_motion_comp(vis, rect, angle, color)

                    visCopy = vis.copy()
                    draw_str(visCopy, (20, 20), visual_name)
                    cv2.imshow("Output", visCopy)

                    prev_frame = frame.copy()
                    if 0xFF & cv2.waitKey(5) == 27:
                        break
                    cv2.waitKey(25)
                """
                #################################################################################SAMHI-7
                """
                if subFrameCount7 == 0:
                    frame_diff7 = cv2.absdiff(frame, prev_frame7)
                    gray_diff7 = cv2.cvtColor(frame_diff7, cv2.COLOR_BGR2GRAY)
                    # Remove the noise and do the threshold
                    # gray_diff = cv2.morphologyEx(gray_diff, cv2.KERNEL_SMOOTH, kernel)
                    # gray_diff = cv2.dilate(gray_diff, kernel, iterations=1)
                    # gray_diff = cv2.erode(gray_diff, kernel, iterations=1)
                    # # gray_diff = cv2.morphologyEx(gray_diff, cv2.MORPH_OPEN, kernel)
                    # # gray_diff = cv2.morphologyEx(gray_diff, cv2.MORPH_CLOSE, kernel)
                    # gray_diff = cv2.erode(gray_diff, kernel, iterations=1)
                    # gray_diff = cv2.dilate(gray_diff, kernel, iterations=1)
                    # cv2.cv.Smooth(gray_diff, gray_diff, cv2.cv.CV_BLUR, 5,5)
                    # cv2.cv.MorphologyEx(gray_diff, gray_diff, None, None, cv2.cv.CV_MOP_OPEN)
                    # cv2.cv.MorphologyEx(gray_diff, gray_diff, None, None, cv2.cv.CV_MOP_CLOSE)
                    # cv2.cv.Threshold(gray_diff, gray_diff, 10, 255, cv2.cv.CV_THRESH_BINARY_INV)
                    #
                    # mhi_vis = cv2.dilate(mhi_vis, kernel, iterations=2)
                    # mhi_vis = cv2.erode(mhi_vis, kernel, iterations=2)
                    # mhi_vis = cv2.morphologyEx(mhi_vis, cv2.MORPH_OPEN, kernel)
                    # mhi_vis = cv2.morphologyEx(mhi_vis, cv2.MORPH_CLOSE, kernel)
                    # mhi_vis = cv2.erode(mhi_vis, kernel, iterations=2)
                    # mhi_vis = cv2.dilate(mhi_vis, kernel, iterations=2)
                    thrs = cv2.getTrackbarPos("threshold", "Output")
                    ret7, motion_mask7 = cv2.threshold(gray_diff7, thrs, 1, cv2.THRESH_BINARY)
                    timestamp7 = clock()
                    cv2.updateMotionHistory(motion_mask7, motion_history7, timestamp7, MHI_DURATION)

                    mg_mask7, mg_orient7 = cv2.calcMotionGradient(
                        motion_history7, MAX_TIME_DELTA, MIN_TIME_DELTA, apertureSize=5
                    )
                    seg_mask7, seg_bounds7 = cv2.segmentMotion(motion_history7, timestamp7, MAX_TIME_DELTA)

                    visual_name7 = visuals[cv2.getTrackbarPos("visual", "Output7")]
                    if visual_name7 == "input":
                        vis7 = frame.copy()
                    elif visual_name == "frame_diff":
                        vis7 = frame_diff7.copy()
                    elif visual_name == "motion_hist":
                        vis7 = np.uint8(
                            np.clip((motion_history7 - (timestamp7 - MHI_DURATION)) / MHI_DURATION, 0, 1) * 255
                        )
                        vis7 = cv2.cvtColor(vis7, cv2.COLOR_GRAY2BGR)
                    elif visual_name == "grad_orient":
                        hsv7[:, :, 0] = mg_orient7 / 2
                        hsv7[:, :, 2] = mg_mask7 * 255
                        vis7 = cv2.cvtColor(hsv7, cv2.COLOR_HSV2BGR)

                    for i, rect in enumerate([(0, 0, w, h)] + list(seg_bounds7)):
                        x, y, rw, rh = rect
                        area = rw * rh
                        if area < 64 ** 2:
                            continue
                        silh_roi7 = motion_mask7[y : y + rh, x : x + rw]
                        orient_roi7 = mg_orient7[y : y + rh, x : x + rw]
                        mask_roi7 = mg_mask7[y : y + rh, x : x + rw]
                        mhi_roi7 = motion_history7[y : y + rh, x : x + rw]
                        if cv2.norm(silh_roi7, cv2.NORM_L1) < area * 0.05:
                            continue
                        angle7 = cv2.calcGlobalOrientation(orient_roi7, mask_roi7, mhi_roi7, timestamp7, MHI_DURATION)
                        color7 = ((255, 0, 0), (0, 0, 255))[i == 0]
                        draw_motion_comp(vis7, rect, angle7, color7)

                    visCopy7 = vis7.copy()
                    draw_str(visCopy7, (20, 20), visual_name7)
                    cv2.imshow("Output7", visCopy7)

                    prev_frame7 = frame.copy()
                    if 0xFF & cv2.waitKey(5) == 27:
                        break
                    cv2.waitKey(25)

                """
                #################################################################################SAMHI-14
                """
                if subFrameCount14 == 0:
                    frame_diff14 = cv2.absdiff(frame, prev_frame14)
                    gray_diff14 = cv2.cvtColor(frame_diff14, cv2.COLOR_BGR2GRAY)
                    # Remove the noise and do the threshold
                    # gray_diff = cv2.morphologyEx(gray_diff, cv2.KERNEL_SMOOTH, kernel)
                    # gray_diff = cv2.dilate(gray_diff, kernel, iterations=1)
                    # gray_diff = cv2.erode(gray_diff, kernel, iterations=1)
                    # # gray_diff = cv2.morphologyEx(gray_diff, cv2.MORPH_OPEN, kernel)
                    # # gray_diff = cv2.morphologyEx(gray_diff, cv2.MORPH_CLOSE, kernel)
                    # gray_diff = cv2.erode(gray_diff, kernel, iterations=1)
                    # gray_diff = cv2.dilate(gray_diff, kernel, iterations=1)
                    # cv2.cv.Smooth(gray_diff, gray_diff, cv2.cv.CV_BLUR, 5,5)
                    # cv2.cv.MorphologyEx(gray_diff, gray_diff, None, None, cv2.cv.CV_MOP_OPEN)
                    # cv2.cv.MorphologyEx(gray_diff, gray_diff, None, None, cv2.cv.CV_MOP_CLOSE)
                    # cv2.cv.Threshold(gray_diff, gray_diff, 10, 255, cv2.cv.CV_THRESH_BINARY_INV)
                    #
                    # mhi_vis = cv2.dilate(mhi_vis, kernel, iterations=2)
                    # mhi_vis = cv2.erode(mhi_vis, kernel, iterations=2)
                    # mhi_vis = cv2.morphologyEx(mhi_vis, cv2.MORPH_OPEN, kernel)
                    # mhi_vis = cv2.morphologyEx(mhi_vis, cv2.MORPH_CLOSE, kernel)
                    # mhi_vis = cv2.erode(mhi_vis, kernel, iterations=2)
                    # mhi_vis = cv2.dilate(mhi_vis, kernel, iterations=2)
                    thrs = cv2.getTrackbarPos("threshold", "Output14")
                    ret14, motion_mask14 = cv2.threshold(gray_diff14, thrs, 1, cv2.THRESH_BINARY)
                    timestamp14 = clock()
                    cv2.updateMotionHistory(motion_mask14, motion_history14, timestamp14, MHI_DURATION)

                    mg_mask14, mg_orient14 = cv2.calcMotionGradient(
                        motion_history14, MAX_TIME_DELTA, MIN_TIME_DELTA, apertureSize=5
                    )
                    seg_mask14, seg_bounds14 = cv2.segmentMotion(motion_history14, timestamp14, MAX_TIME_DELTA)

                    visual_name = visuals[cv2.getTrackbarPos("visual", "Output14")]
                    if visual_name == "input":
                        vis14 = frame.copy()
                    elif visual_name == "frame_diff":
                        vis14 = frame_diff14.copy()
                    elif visual_name == "motion_hist":
                        vis14 = np.uint8(
                            np.clip((motion_history14 - (timestamp14 - MHI_DURATION)) / MHI_DURATION, 0, 1) * 255
                        )
                        vis14 = cv2.cvtColor(vis14, cv2.COLOR_GRAY2BGR)
                    elif visual_name == "grad_orient":
                        hsv14[:, :, 0] = mg_orient14 / 2
                        hsv14[:, :, 2] = mg_mask14 * 255
                        vis14 = cv2.cvtColor(hsv14, cv2.COLOR_HSV2BGR)

                    for i, rect in enumerate([(0, 0, w, h)] + list(seg_bounds14)):
                        x, y, rw, rh = rect
                        area = rw * rh
                        if area < 64 ** 2:
                            continue
                        silh_roi14 = motion_mask14[y : y + rh, x : x + rw]
                        orient_roi14 = mg_orient14[y : y + rh, x : x + rw]
                        mask_roi14 = mg_mask14[y : y + rh, x : x + rw]
                        mhi_roi14 = motion_history14[y : y + rh, x : x + rw]
                        if cv2.norm(silh_roi14, cv2.NORM_L1) < area * 0.05:
                            continue
                        angle14 = cv2.calcGlobalOrientation(
                            orient_roi14, mask_roi14, mhi_roi14, timestamp14, MHI_DURATION
                        )
                        color14 = ((255, 0, 0), (0, 0, 255))[i == 0]
                        draw_motion_comp(vis14, rect, angle14, color14)

                    visCopy14 = vis14.copy()
                    draw_str(visCopy14, (20, 20), visual_name14)
                    cv2.imshow("Output14", visCopy14)

                    prev_frame14 = frame.copy()
                    if 0xFF & cv2.waitKey(5) == 27:
                        break
                    cv2.waitKey(25)

                subFrameCount = subFrameCount + 1
                subFrameCount7 = subFrameCount7 + 1
                subFrameCount14 = subFrameCount14 + 1

                if subFrameCount > skipFrames:
                    subFrameCount = 0

                if subFrameCount7 > skipFrames7:
                    subFrameCount7 = 0

                if subFrameCount14 > skipFrames14:
                    subFrameCount14 = 0

                frameCounter = frameCounter + 1
            with open("mhiInfo", "a+") as mhiFile:
                mhiFile.write(
                    "\n======================================================================================================\n"
                )
                for row in motion_history:
                    # print(row)
                    mhiFile.write(" ".join(str(x) for x in row) + "\n")

            if visual_name == "motion_hist":
                mhi_vis = np.uint8(np.clip((motion_history - (timestamp - MHI_DURATION)) / MHI_DURATION, 0, 1) * 255)
                mhi_vis = cv2.cvtColor(mhi_vis, cv2.COLOR_GRAY2BGR)

                mhi_vis7 = np.uint8(np.clip((motion_history7 - (timestamp7 - MHI_DURATION)) / MHI_DURATION, 0, 1) * 255)
                mhi_vis7 = cv2.cvtColor(mhi_vis7, cv2.COLOR_GRAY2BGR)

                mhi_vis14 = np.uint8(
                    np.clip((motion_history14 - (timestamp14 - MHI_DURATION)) / MHI_DURATION, 0, 1) * 255
                )
                mhi_vis14 = cv2.cvtColor(mhi_vis14, cv2.COLOR_GRAY2BGR)
            else:
                hsv[:, :, 0] = mg_orient / 2
                hsv[:, :, 2] = mg_mask * 255
                mhi_vis = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)

                hsv7[:, :, 0] = mg_orient7 / 2
                hsv7[:, :, 2] = mg_mask7 * 255
                mhi_vis7 = cv2.cvtColor(hsv7, cv2.COLOR_HSV2BGR)

                hsv14[:, :, 0] = mg_orient14 / 2
                hsv14[:, :, 2] = mg_mask14 * 255
                mhi_vis14 = cv2.cvtColor(hsv14, cv2.COLOR_HSV2BGR)

            # Remove the noise and do the threshold
            # cv2.cv.Smooth(mhi_vis, mhi_vis, cv2.cv.CV_BLUR, 5,5)
            # cv2.cv.MorphologyEx(mhi_vis, mhi_vis, None, None, cv2.cv.CV_MOP_OPEN)
            # cv2.cv.MorphologyEx(mhi_vis, mhi_vis, None, None, cv2.cv.CV_MOP_CLOSE)
            # cv2.cv.Threshold(mhi_vis, mhi_vis, 10, 255, cv2.cv.CV_THRESH_BINARY_INV)
            # #
            # mhi_vis = cv2.dilate(mhi_vis, kernel, iterations=2)
            # mhi_vis = cv2.erode(mhi_vis, kernel, iterations=2)
            # mhi_vis = cv2.morphologyEx(mhi_vis, cv2.MORPH_OPEN, kernel)
            # mhi_vis = cv2.morphologyEx(mhi_vis, cv2.MORPH_CLOSE, kernel)
            # mhi_vis = cv2.erode(mhi_vis, kernel, iterations=2)
            # mhi_vis = cv2.dilate(mhi_vis, kernel, iterations=2)

            ##Start extracting features
            sift = cv2.SIFT()
            denseDetector = cv2.FeatureDetector_create(sDetector)  ##using Dense Feature Detector

            kp = detector.detect(mhi_vis)

            kp7 = detector.detect(mhi_vis7)

            kp14 = detector.detect(mhi_vis14)

            # kp2, des2 = sift.compute(mhi_vis,kp)
            # img=cv2.drawKeypoints(mhi_vis,kp2)

            print ("KeyPoints Length:: ", len(kp))

            hasAtleastOneKP = False

            ##Check if there are any detected keypoints before processing.
            if len(kp) > 0:
                hasAtleastOneKP = True
                features = extractor.compute(mhi_vis, kp)
                featuresDes = features[1]
                # print('Descriptors:: ',featuresDes)
                print ("Descriptors Length:: ", len(featuresDes))
                print ("Descriptors Shape:: ", featuresDes.shape)

                if featureDesSAHMIS is None:
                    featureDesSAHMIS = featuresDes
                else:
                    featureDesSAHMIS = np.append(featureDesSAHMIS, featuresDes, axis=0)

                if oper == "extractTrainingVocabulary":
                    bowTrainer.add(featureDesSAHMIS)
                    saveTrainFeaturesDes.append(featureDesSAHMIS)
                    cv2.imwrite("mhiImages\\" + file.split("\\")[2] + "_mhi.jpg", mhi_vis)
                elif oper == "extractBOWDescriptor":
                    bowDescriptor = bowDE.compute(mhi_vis, kp)
                    # descriptors.push_back(bowDescriptor);
                    # print('bowDescriptor:: ',bowDescriptor)
                    print ("bowDescriptor Length:: ", len(bowDescriptor))
                    print ("bowDescriptor Shape:: ", bowDescriptor.shape)
                    if fileType == "train":
                        img = cv2.drawKeypoints(mhi_vis, kp)
                        cv2.imwrite("keyPoints\\" + file.split("\\")[2] + "_keypoints.jpg", img)
                    if bowDescriptorSAHMIS is None:
                        bowDescriptorSAHMIS = bowDescriptor
                    else:
                        # bowDescriptorSAHMIS = np.append(bowDescriptorSAHMIS,bowDescriptor)
                        bowDescriptorSAHMIS = np.sum([bowDescriptorSAHMIS, bowDescriptor], axis=0)
            else:
                featuresDes = np.zeros((1, featureSize), np.float32)
                if featureDesSAHMIS is None:
                    featureDesSAHMIS = featuresDes
                else:
                    featureDesSAHMIS = np.append(featureDesSAHMIS, featuresDes, axis=0)
                bowDescriptor = np.zeros((1, 1000), np.float32)
                if bowDescriptorSAHMIS is None:
                    bowDescriptorSAHMIS = bowDescriptor
                else:
                    # bowDescriptorSAHMIS = np.append(bowDescriptorSAHMIS,bowDescriptor)
                    bowDescriptorSAHMIS = np.sum([bowDescriptorSAHMIS, bowDescriptor], axis=0)
                print ("No SAMHI-1 Key points were detectected for this image..")

            ##Check if there are any detected keypoints before processing.
            if len(kp7) > 0:
                hasAtleastOneKP = True
                features7 = extractor.compute(mhi_vis7, kp7)
                featuresDes7 = features7[1]
                # print('Descriptors7:: ',featuresDes7)
                print ("Descriptors7 Length:: ", len(featuresDes7))
                print ("Descriptors7 Shape:: ", featuresDes7.shape)

                if featureDesSAHMIS is None:
                    featureDesSAHMIS = featuresDes7
                else:
                    featureDesSAHMIS = np.append(featureDesSAHMIS, featuresDes7, axis=0)

                if oper == "extractTrainingVocabulary":
                    cv2.imwrite("mhiImages\\" + file.split("\\")[2] + "_mhi7.jpg", mhi_vis7)
                elif oper == "extractBOWDescriptor":
                    bowDescriptor7 = bowDE.compute(mhi_vis7, kp7)
                    # descriptors.push_back(bowDescriptor);
                    # print('bowDescriptor7:: ',bowDescriptor7)
                    print ("bowDescriptor7 Length:: ", len(bowDescriptor7))
                    print ("bowDescriptor7 Shape:: ", bowDescriptor7.shape)
                    if fileType == "train":
                        img7 = cv2.drawKeypoints(mhi_vis7, kp7)
                        cv2.imwrite("keyPoints\\" + file.split("\\")[2] + "_keypoints_7.jpg", img7)
                    if bowDescriptorSAHMIS is None:
                        bowDescriptorSAHMIS = bowDescriptor7
                    else:
                        # print("bowDescriptor7=> ",bowDescriptor7)
                        # bowDescriptorSAHMIS = np.append(bowDescriptorSAHMIS,bowDescriptor7)
                        bowDescriptorSAHMIS = np.sum([bowDescriptorSAHMIS, bowDescriptor7], axis=0)
            else:
                featuresDes7 = np.zeros((1, featureSize), np.float32)
                if featureDesSAHMIS is None:
                    featureDesSAHMIS = featuresDes7
                else:
                    featureDesSAHMIS = np.append(featureDesSAHMIS, featuresDes7, axis=0)
                bowDescriptor7 = np.zeros((1, 1000), np.float32)
                if bowDescriptorSAHMIS is None:
                    bowDescriptorSAHMIS = bowDescriptor7
                else:
                    # bowDescriptorSAHMIS = np.append(bowDescriptorSAHMIS,bowDescriptor7)
                    bowDescriptorSAHMIS = np.sum([bowDescriptorSAHMIS, bowDescriptor7], axis=0)
                print ("No SAMHI-7 Key points were detectected for this image..")

            ##Check if there are any detected keypoints before processing.
            if len(kp14) > 0:
                hasAtleastOneKP = True
                features14 = extractor.compute(mhi_vis14, kp14)
                featuresDes14 = features14[1]
                # print('Descriptor14:: ',featuresDes14)
                print ("Descriptors14 Length:: ", len(featuresDes14))
                print ("Descriptors14 Shape:: ", featuresDes14.shape)

                if featureDesSAHMIS is None:
                    featureDesSAHMIS = featuresDes14
                else:
                    featureDesSAHMIS = np.append(featureDesSAHMIS, featuresDes14, axis=0)

                if oper == "extractTrainingVocabulary":
                    cv2.imwrite("mhiImages\\" + file.split("\\")[2] + "_mhi14.jpg", mhi_vis14)
                elif oper == "extractBOWDescriptor":
                    bowDescriptor14 = bowDE.compute(mhi_vis14, kp14)
                    # descriptors.push_back(bowDescriptor);
                    # print('bowDescriptor14:: ',bowDescriptor14)
                    print ("bowDescriptor14 Length:: ", len(bowDescriptor14))
                    print ("bowDescriptor14 Shape:: ", bowDescriptor7.shape)
                    if fileType == "train":
                        img14 = cv2.drawKeypoints(mhi_vis14, kp14)
                        cv2.imwrite("keyPoints\\" + file.split("\\")[2] + "_keypoints_14.jpg", img14)

                    if bowDescriptorSAHMIS is None:
                        bowDescriptorSAHMIS = bowDescriptor14
                    else:
                        # bowDescriptorSAHMIS = np.append(bowDescriptorSAHMIS,bowDescriptor14)
                        bowDescriptorSAHMIS = np.sum([bowDescriptorSAHMIS, bowDescriptor14], axis=0)
            else:
                featuresDes14 = np.zeros((1, featureSize), np.float32)
                if featureDesSAHMIS is None:
                    featureDesSAHMIS = featuresDes14
                else:
                    featureDesSAHMIS = np.append(featureDesSAHMIS, featuresDes14, axis=0)
                bowDescriptor14 = np.zeros((1, 1000), np.float32)
                if bowDescriptorSAHMIS is None:
                    bowDescriptorSAHMIS = bowDescriptor14
                else:
                    # bowDescriptorSAHMIS = np.append(bowDescriptorSAHMIS,bowDescriptor14)
                    bowDescriptorSAHMIS = np.sum([bowDescriptorSAHMIS, bowDescriptor14], axis=0)
                print ("No SAMHI-14 Key points were detectected for this image..")

            if hasAtleastOneKP:
                if oper == "extractTrainingVocabulary":
                    # print('featureDesSAHMIS:: ',featureDesSAHMIS)
                    print ("featureDesSAHMIS Length:: ", len(featureDesSAHMIS))
                    print ("featureDesSAHMIS Shape:: ", featureDesSAHMIS.shape)
                    bowTrainer.add(featureDesSAHMIS)
                    saveTrainFeaturesDes.append(featureDesSAHMIS)
                else:
                    print ("bowDescriptorSAHMIS:: ", bowDescriptorSAHMIS)
                    print ("bowDescriptorSAHMIS Length:: ", len(bowDescriptorSAHMIS))
                    print ("bowDescriptorSAHMIS Shape:: ", bowDescriptorSAHMIS.shape)
                    ##Check if the operation on training data or test data
                    if fileType == "train":
                        trainData.append(bowDescriptorSAHMIS)
                        trainLabels.append(label)
                        # trainLabels.append(label)
                        # trainLabels.append(label)
                    else:
                        testData.append(bowDescriptorSAHMIS)
                        testLabels.append(label)
                        # testLabels.append(label)
                        # testLabels.append(label)

            print (file.split("\\")[2])

            # cv2.imwrite('keyPoints\\'+file.split("\\")[2]+'_keypoints.jpg',img)
            cv2.waitKey(25)

    cv2.destroyAllWindows()
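
The three counter pairs above implement the SAMHI-1/7/14 scheme: the same video feeds three motion-history images, refreshed every 1, 7, and 14 frames respectively, giving short-, mid-, and long-range motion summaries. Stripped of the OpenCV work, the sampling logic reduces to roughly this (a sketch, not the author's exact code):

    skip = {"samhi1": 1, "samhi7": 7, "samhi14": 14}
    counts = dict((name, 0) for name in skip)
    for frameCounter in range(totalFrames):
        for name in skip:
            if counts[name] == 0:
                pass  # update this scale's MHI from the current frame here
            counts[name] += 1
            if counts[name] > skip[name]:
                counts[name] = 0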
def doTestRealTime():
    global saveTrainFeaturesDes, visuals, gSkipFrames, testData,testLabels

    print("Testing on real time video")
    cv2.namedWindow('Output')
    cv2.createTrackbar('visual', 'Output', 2, len(visuals)-1, nothing)
    cv2.createTrackbar('threshold', 'Output', DEFAULT_THRESHOLD, 255, nothing)

    cv2.namedWindow('Output7')
    cv2.createTrackbar('visual', 'Output7', 2, len(visuals)-1, nothing)
    cv2.createTrackbar('threshold', 'Output7', DEFAULT_THRESHOLD, 255, nothing)

    cv2.namedWindow('Output14')
    cv2.createTrackbar('visual', 'Output14', 2, len(visuals)-1, nothing)
    cv2.createTrackbar('threshold', 'Output14', DEFAULT_THRESHOLD, 255, nothing)
    testData = []
    testLabels = []

    printPageBreak()
    print("\n\nProcessing real time video..")

    frameCounter = 0
    cam = cv2.VideoCapture(0)

    ret, frame = cam.read()
    h, w = frame.shape[:2]
    prev_frame = frame.copy()
    motion_history = np.zeros((h, w), np.float32)
    hsv = np.zeros((h, w, 3), np.uint8)
    hsv[:,:,1] = 255

    prev_frame7 = frame.copy()
    motion_history7 = np.zeros((h, w), np.float32)
    # print(motion_history7)
    hsv7 = np.zeros((h, w, 3), np.uint8)
    hsv7[:,:,1] = 255

    prev_frame14 = frame.copy()
    motion_history14 = np.zeros((h, w), np.float32)
    # print(motion_history)
    hsv14 = np.zeros((h, w, 3), np.uint8)
    hsv14[:,:,1] = 255
    frameCounter = 0

    # totalFrames = int(cam.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT))
    frameCounter = frameCounter +1
    # print(totalFrames)

    subFrameCount = 0
    skipFrames = 1

    subFrameCount7 = 0
    skipFrames7 = 7

    subFrameCount14 = 0
    skipFrames14 = 14

    featureDesSAHMIS = None
    bowDescriptorSAHMIS = None

    timestamp = clock()
    timestamp7 = clock()
    timestamp14 = clock()


    label = ""

    ##Read all frames
    while(True):
        ret, frame = cam.read()
        frame7 = frame.copy()
        frame14 = frame.copy()

        if(subFrameCount == 0):
            frame_diff = cv2.absdiff(frame, prev_frame)
            prev_frame = frame.copy()
            gray_diff = cv2.cvtColor(frame_diff, cv2.COLOR_BGR2GRAY)
            thrs = cv2.getTrackbarPos('threshold', 'Output')
            ret, motion_mask = cv2.threshold(gray_diff, thrs, 1, cv2.THRESH_BINARY)
            timestamp = clock()
            cv2.updateMotionHistory(motion_mask, motion_history, timestamp, MHI_DURATION)

            mg_mask, mg_orient = cv2.calcMotionGradient( motion_history, MAX_TIME_DELTA, MIN_TIME_DELTA, apertureSize=5 )
            seg_mask, seg_bounds = cv2.segmentMotion(motion_history, timestamp, MAX_TIME_DELTA)

            visual_name = visuals[cv2.getTrackbarPos('visual', 'Output')]
            if visual_name == 'input':
                vis = frame.copy()
            elif visual_name == 'frame_diff':
                vis = frame_diff.copy()
            elif visual_name == 'motion_hist':
                vis = np.uint8(np.clip((motion_history-(timestamp-MHI_DURATION)) / MHI_DURATION, 0, 1)*255)
                vis = cv2.cvtColor(vis, cv2.COLOR_GRAY2BGR)
            elif visual_name == 'grad_orient':
                hsv[:,:,0] = mg_orient/2
                hsv[:,:,2] = mg_mask*255
                vis = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)

            for i, rect in enumerate([(0, 0, w, h)] + list(seg_bounds)):
                x, y, rw, rh = rect
                area = rw*rh
                if area < 64**2:
                    continue
                silh_roi   = motion_mask   [y:y+rh,x:x+rw]
                orient_roi = mg_orient     [y:y+rh,x:x+rw]
                mask_roi   = mg_mask       [y:y+rh,x:x+rw]
                mhi_roi    = motion_history[y:y+rh,x:x+rw]
                if cv2.norm(silh_roi, cv2.NORM_L1) < area*0.05:
                    continue
                angle = cv2.calcGlobalOrientation(orient_roi, mask_roi, mhi_roi, timestamp, MHI_DURATION)
                color = ((255, 0, 0), (0, 0, 255))[i == 0]
                draw_motion_comp(vis, rect, angle, color)

            visCopy = vis.copy()
            draw_str(visCopy, (20, 20), visual_name)
            cv2.imshow('Output', visCopy)

        if(subFrameCount7 == 0):
            frame_diff7 = cv2.absdiff(frame7, prev_frame7)
            prev_frame7 = frame7.copy()
            gray_diff7 = cv2.cvtColor(frame_diff7, cv2.COLOR_BGR2GRAY)
            thrs = DEFAULT_THRESHOLD
            ret7, motion_mask7 = cv2.threshold(gray_diff7, thrs, 1, cv2.THRESH_BINARY)
            timestamp7 = clock()
            cv2.updateMotionHistory(motion_mask7, motion_history7, timestamp7, MHI_DURATION)

            mg_mask7, mg_orient7 = cv2.calcMotionGradient( motion_history7, MAX_TIME_DELTA, MIN_TIME_DELTA, apertureSize=5 )
            seg_mask7, seg_bounds7 = cv2.segmentMotion(motion_history7, timestamp7, MAX_TIME_DELTA)

            visual_name = 'motion_hist' #visuals[cv2.getTrackbarPos('visual', 'Output')]
            if visual_name == 'input':
                vis7 = frame7.copy()
            elif visual_name == 'frame_diff':
                vis7 = frame_diff7.copy()
            elif visual_name == 'motion_hist':
                vis7 = np.uint8(np.clip((motion_history7-(timestamp7-MHI_DURATION)) / MHI_DURATION, 0, 1)*255)
                vis7 = cv2.cvtColor(vis7, cv2.COLOR_GRAY2BGR)
            elif visual_name == 'grad_orient':
                hsv7[:,:,0] = mg_orient7/2
                hsv7[:,:,2] = mg_mask7*255
                vis7 = cv2.cvtColor(hsv7, cv2.COLOR_HSV2BGR)

            visCopy7 = vis7.copy()
            draw_str(visCopy7, (20, 20), visual_name)
            cv2.imshow('Output7', visCopy7)


        if(subFrameCount14 == 0):
            frame_diff14 = cv2.absdiff(frame14, prev_frame14)
            prev_frame14 = frame14.copy()
            gray_diff14 = cv2.cvtColor(frame_diff14, cv2.COLOR_BGR2GRAY)
            thrs = DEFAULT_THRESHOLD
            ret14, motion_mask14 = cv2.threshold(gray_diff14, thrs, 1, cv2.THRESH_BINARY)
            timestamp14 = clock()
            cv2.updateMotionHistory(motion_mask14, motion_history14, timestamp14, MHI_DURATION)

            mg_mask14, mg_orient14 = cv2.calcMotionGradient( motion_history14, MAX_TIME_DELTA, MIN_TIME_DELTA, apertureSize=5 )
            seg_mask14, seg_bounds14 = cv2.segmentMotion(motion_history14, timestamp14, MAX_TIME_DELTA)

            visual_name = 'motion_hist' #visuals[cv2.getTrackbarPos('visual', 'Output')]
            if visual_name == 'input':
                vis14 = frame14.copy()
            elif visual_name == 'frame_diff':
                vis14 = frame_diff14.copy()
            elif visual_name == 'motion_hist':
                vis14 = np.uint8(np.clip((motion_history14-(timestamp14-MHI_DURATION)) / MHI_DURATION, 0, 1)*255)
                vis14 = cv2.cvtColor(vis14, cv2.COLOR_GRAY2BGR)
            elif visual_name == 'grad_orient':
                hsv14[:,:,0] = mg_orient14/2
                hsv14[:,:,2] = mg_mask14*255
                vis14 = cv2.cvtColor(hsv14, cv2.COLOR_HSV2BGR)

            visCopy14 = vis14.copy()
            draw_str(visCopy14, (20, 20), visual_name)
            cv2.imshow('Output14', visCopy14)

        # cv2.waitKey(0)
        hasAtleastOneKeyPoint = False

        if(subFrameCount == 0):
            mhi_vis = np.uint8(np.clip((motion_history-(timestamp-MHI_DURATION)) / MHI_DURATION, 0, 1)*255)
            mhi_vis = cv2.cvtColor(mhi_vis, cv2.COLOR_GRAY2BGR)
            kp = detector.detect(mhi_vis)
            print('KeyPoints Length:: ',len(kp))
            if len(kp) > 0:
                features = extractor.compute(mhi_vis,kp)
                featuresDes = features[1]
                hasAtleastOneKeyPoint = True
                bowDescriptor = bowDE.compute(mhi_vis, kp)

                if(bowDescriptorSAHMIS is None):
                    bowDescriptorSAHMIS = bowDescriptor
                else:
                    bowDescriptorSAHMIS = np.append(bowDescriptorSAHMIS,bowDescriptor)#,axis=0)

                ##Check if the operation on training data or test data
                # testData.append(bowDescriptor)
                # testLabels.append(label)
            else:
                print("No Key points were detectected for this image..")

        if(subFrameCount7 == 0):
            mhi_vis7 = np.uint8(np.clip((motion_history7-(timestamp7-MHI_DURATION)) / MHI_DURATION, 0, 1)*255)
            mhi_vis7 = cv2.cvtColor(mhi_vis7, cv2.COLOR_GRAY2BGR)
            kp7 = detector.detect(mhi_vis7)
            print('KeyPoints7 Length:: ',len(kp7))
            if len(kp7) > 0:
                features7 = extractor.compute(mhi_vis7,kp7)
                featuresDes7 = features7[1]
                hasAtleastOneKeyPoint = True
                bowDescriptor7 = bowDE.compute(mhi_vis7, kp7)
                if(bowDescriptorSAHMIS is None):
                    bowDescriptorSAHMIS = bowDescriptor7
                else:
                    bowDescriptorSAHMIS = np.append(bowDescriptorSAHMIS,bowDescriptor7)#,axis=0)

                ##Check if the operation on training data or test data
                # testData.append(bowDescriptor7)
            else:
                print("No Key points were detectected for this image7.")


        if(subFrameCount14 == 0):
            mhi_vis14 = np.uint8(np.clip((motion_history14-(timestamp14-MHI_DURATION)) / MHI_DURATION, 0, 1)*255)
            mhi_vis14 = cv2.cvtColor(mhi_vis14, cv2.COLOR_GRAY2BGR)
            kp14 = detector.detect(mhi_vis14)
            print('KeyPoints14 Length:: ',len(kp14))
            if len(kp14) > 0:
                features14 = extractor.compute(mhi_vis14,kp14)
                featuresDes14 = features14[1]
                hasAtleastOneKeyPoint = True
                bowDescriptor14 = bowDE.compute(mhi_vis14, kp14)
                if(bowDescriptorSAHMIS is None):
                    bowDescriptorSAHMIS = bowDescriptor14
                else:
                    bowDescriptorSAHMIS = np.append(bowDescriptorSAHMIS,bowDescriptor14)#,axis=0)

                ##Check if the operation on training data or test data
                # testData.append(bowDescriptor14)
                # testLabels.append(label)
            else:
                print("No Key points were detectected for this image14..")


        ##Start extracting features
        sift = cv2.SIFT()
        denseDetector = cv2.FeatureDetector_create(sDetector) ##using Dense Feature Detector

        ##Check if there are any detected keypoints before processing.
        if(subFrameCount14 == 0 and hasAtleastOneKeyPoint):# or frameCounter > 30):
            testData.append(bowDescriptorSAHMIS)
            testData = np.float32(testData).reshape(-1,3*dictSize)
            # testLabels = np.float32(testLabels).reshape(-1,1)
            print("testData Shape: ", testData.shape)
            # print("testLabels Shape: ", testLabels.shape)
            print("testData : ", testData)
            # print("testLabels : ", testLabels)

            result = classifier.predict_all(testData)

            #######   Check Accuracy   ########################
            printPageBreak()
            printPageBreak()

            # print("TestLabels: ", testLabels.reshape(1,-1))
            print("Results: ", result.reshape(1,-1));
            frameCounter = 0
            testData = []
            testLabels = []
            featureDesSAHMIS = None
            bowDescriptorSAHMIS = None

        draw_str(frame, (20, 20), label)
        cv2.imshow('Output', visCopy)
        cv2.imshow('Input', frame)
        # cv2.imwrite('keyPoints\\'+file.split("\\")[2]+'_keypoints.jpg',img)
        cv2.waitKey(25)

        ##Update counters
        subFrameCount = subFrameCount + 1
        subFrameCount7 = subFrameCount7 + 1
        subFrameCount14 = subFrameCount14 + 1

        if(subFrameCount14 > skipFrames14):
            subFrameCount = 0
            subFrameCount7 = 0
            subFrameCount14 = 0
            frameCounter = 0
        frameCounter = frameCounter + 1

    cv2.destroyAllWindows()
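
Because bowDescriptorSAHMIS is built with np.append, the three per-scale BoW histograms end up laid end to end in one flat vector, which is why the final reshape uses 3*dictSize columns. A toy illustration of the shape arithmetic (the dictSize value is illustrative):

    import numpy as np
    dictSize = 4                                  # illustrative; the real vocabulary is larger
    d1 = np.zeros((1, dictSize), np.float32)      # SAMHI-1 BoW histogram
    d7 = np.ones((1, dictSize), np.float32)       # SAMHI-7 BoW histogram
    d14 = np.ones((1, dictSize), np.float32)      # SAMHI-14 BoW histogram
    stacked = np.append(np.append(d1, d7), d14)   # np.append without axis flattens: shape (12,)
    testData = np.float32([stacked]).reshape(-1, 3 * dictSize)
    print(testData.shape)                         # (1, 12)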
def doTestArchive():
    global saveTrainFeaturesDes, visuals, gSkipFrames, testData,testLabels

    print("Testing on real time video")
    cv2.namedWindow('Output')
    cv2.createTrackbar('visual', 'Output', 2, len(visuals)-1, nothing)
    cv2.createTrackbar('threshold', 'Output', DEFAULT_THRESHOLD, 255, nothing)
    testData = []
    testLabels = []

    printPageBreak()
    print("\n\nProcessing real time video..")

    frameCounter = 0
    cam = cv2.VideoCapture(0)

    ret, frame = cam.read()
    h, w = frame.shape[:2]
    prev_frame = frame.copy()
    motion_history = np.zeros((h, w), np.float32)
    hsv = np.zeros((h, w, 3), np.uint8)
    hsv[:,:,1] = 255
    frameCounter = frameCounter +1
    subFrameCount = 0
    skipFrames = gSkipFrames

    label = ""

    ##Read all frames
    while(True):
        ret, frame = cam.read()
        if(subFrameCount == 0):
            frame_diff = cv2.absdiff(frame, prev_frame)
            prev_frame = frame.copy()
            gray_diff = cv2.cvtColor(frame_diff, cv2.COLOR_BGR2GRAY)
            thrs = cv2.getTrackbarPos('threshold', 'Output')
            ret, motion_mask = cv2.threshold(gray_diff, thrs, 1, cv2.THRESH_BINARY)
            timestamp = clock()
            cv2.updateMotionHistory(motion_mask, motion_history, timestamp, MHI_DURATION)

            mg_mask, mg_orient = cv2.calcMotionGradient( motion_history, MAX_TIME_DELTA, MIN_TIME_DELTA, apertureSize=5 )
            seg_mask, seg_bounds = cv2.segmentMotion(motion_history, timestamp, MAX_TIME_DELTA)

            visual_name = visuals[cv2.getTrackbarPos('visual', 'Output')]
            if visual_name == 'input':
                vis = frame.copy()
            elif visual_name == 'frame_diff':
                vis = frame_diff.copy()
            elif visual_name == 'motion_hist':
                vis = np.uint8(np.clip((motion_history-(timestamp-MHI_DURATION)) / MHI_DURATION, 0, 1)*255)
                vis = cv2.cvtColor(vis, cv2.COLOR_GRAY2BGR)
            elif visual_name == 'grad_orient':
                hsv[:,:,0] = mg_orient/2
                hsv[:,:,2] = mg_mask*255
                vis = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)

            for i, rect in enumerate([(0, 0, w, h)] + list(seg_bounds)):
                x, y, rw, rh = rect
                area = rw*rh
                if area < 64**2:
                    continue
                silh_roi   = motion_mask   [y:y+rh,x:x+rw]
                orient_roi = mg_orient     [y:y+rh,x:x+rw]
                mask_roi   = mg_mask       [y:y+rh,x:x+rw]
                mhi_roi    = motion_history[y:y+rh,x:x+rw]
                if cv2.norm(silh_roi, cv2.NORM_L1) < area*0.05:
                    continue
                angle = cv2.calcGlobalOrientation(orient_roi, mask_roi, mhi_roi, timestamp, MHI_DURATION)
                color = ((255, 0, 0), (0, 0, 255))[i == 0]
                draw_motion_comp(vis, rect, angle, color)

            visCopy = vis.copy()
            draw_str(visCopy, (20, 20), visual_name)
            cv2.imshow('Output', visCopy)

        subFrameCount = subFrameCount + 1
        if(subFrameCount > skipFrames):
            subFrameCount = 0
        frameCounter = frameCounter + 1

        if(visual_name == 'motion_hist'):
            mhi_vis = np.uint8(np.clip((motion_history-(timestamp-MHI_DURATION)) / MHI_DURATION, 0, 1)*255)
            mhi_vis = cv2.cvtColor(mhi_vis, cv2.COLOR_GRAY2BGR)
        else:
            hsv[:,:,0] = mg_orient/2
            hsv[:,:,2] = mg_mask*255
            mhi_vis = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
        ##Start extracting features
        sift = cv2.SIFT()
        denseDetector = cv2.FeatureDetector_create(sDetector) ##using Dense Feature Detector

        kp = detector.detect(mhi_vis)
        print('KeyPoints Length:: ',len(kp))

        ##Check if there are any detected keypoints before processing.
        if len(kp) > 0:
            features = extractor.compute(mhi_vis,kp)
            featuresDes = features[1]
            # print('Descriptors:: ',featuresDes)
            # print('featuresDes Length:: ',len(featuresDes))
            # print('featuresDes Shape:: ',featuresDes.shape)

            bowDescriptor = bowDE.compute(mhi_vis, kp)
            # descriptors.push_back(bowDescriptor);
            # print('bowDescriptor:: ',bowDescriptor)
            # print('bowDescriptor Length:: ',len(bowDescriptor))
            # print('bowDescriptor Shape:: ',bowDescriptor.shape)

            ##Check if the operation on training data or test data
            testData.append(bowDescriptor)
            testLabels.append(label)
        else:
            print("No Key points were detectected for this image..")

        if(subFrameCount == 0 or frameCounter > 30):
            testData = np.float32(testData).reshape(-1,dictSize)
            # testLabels = np.float32(testLabels).reshape(-1,1)
            print("testData Shape: ", testData.shape)
            # print("testLabels Shape: ", testLabels.shape)
            print("testData : ", testData)
            # print("testLabels : ", testLabels)

            result = classifier.predict_all(testData)

            #######   Check Accuracy   ########################
            printPageBreak()
            printPageBreak()

            # print("TestLabels: ", testLabels.reshape(1,-1))
            print("Results: ", result.reshape(1,-1));
            frameCounter = 0
            testData = []
            testLabels = []

        draw_str(frame, (20, 20), label)
        cv2.imshow('Output', visCopy)
        cv2.imshow('Input', frame)

        # cv2.imwrite('keyPoints\\'+file.split("\\")[2]+'_keypoints.jpg',img)
        cv2.waitKey(25)

    cv2.destroyAllWindows()
def processTrainingFiles(oper="extractTrainingVocabulary",fileType="train"):
    global saveTrainFeaturesDes, visuals, gSkipFrames

    cv2.namedWindow('Output')
    cv2.createTrackbar('visual', 'Output', 2, len(visuals)-1, nothing)
    cv2.createTrackbar('threshold', 'Output', DEFAULT_THRESHOLD, 255, nothing)

    printPageBreak()
    print("\n\nProcessing "+fileType.capitalize()+"ing Files")
    if fileType == "train":
        trainTestFiles = groupedTrainFiles
    else:
        trainTestFiles = groupedTestFiles

    for label, fileList in trainTestFiles.iteritems():
        global trainData, testData, trainLabels, testLabels
        for file in fileList:
            frameCounter = 0
            print(label,file)
            cam = cv2.VideoCapture(file)

            ret, frame = cam.read()
            h, w = frame.shape[:2]
            prev_frame = frame.copy()
            motion_history = np.zeros((h, w), np.float32)
            hsv = np.zeros((h, w, 3), np.uint8)
            hsv[:,:,1] = 255

            prev_frame7 = frame.copy()
            motion_history7 = np.zeros((h, w), np.float32)
            # print(motion_history7)
            hsv7 = np.zeros((h, w, 3), np.uint8)
            hsv7[:,:,1] = 255

            prev_frame14 = frame.copy()
            motion_history14 = np.zeros((h, w), np.float32)
            # print(motion_history)
            hsv14 = np.zeros((h, w, 3), np.uint8)
            hsv14[:,:,1] = 255
            frameCounter = 0

            totalFrames = int(cam.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT))
            frameCounter = frameCounter + 1
            print(totalFrames)

            subFrameCount = 0
            skipFrames = 1

            subFrameCount7 = 0
            skipFrames7 = 7

            subFrameCount14 = 0
            skipFrames14 = 14

            featureDesSAHMIS = None
            bowDescriptorSAHMIS = None

            timestamp = clock()
            timestamp7 = clock()
            timestamp14 = clock()


            ##Read all frames
            while(frameCounter < totalFrames):
                ret, frame = cam.read()
                frame7 = frame.copy()
                frame14 = frame.copy()

                if(subFrameCount == 0):
                    frame_diff = cv2.absdiff(frame, prev_frame)
                    prev_frame = frame.copy()
                    gray_diff = cv2.cvtColor(frame_diff, cv2.COLOR_BGR2GRAY)
                    thrs = DEFAULT_THRESHOLD
                    ret, motion_mask = cv2.threshold(gray_diff, thrs, 1, cv2.THRESH_BINARY)
                    timestamp = clock()
                    cv2.updateMotionHistory(motion_mask, motion_history, timestamp, MHI_DURATION)

                    mg_mask, mg_orient = cv2.calcMotionGradient( motion_history, MAX_TIME_DELTA, MIN_TIME_DELTA, apertureSize=5 )
                    seg_mask, seg_bounds = cv2.segmentMotion(motion_history, timestamp, MAX_TIME_DELTA)

                    visual_name = 'motion_hist' #visuals[cv2.getTrackbarPos('visual', 'Output')]
                    if visual_name == 'input':
                        vis = frame.copy()
                    elif visual_name == 'frame_diff':
                        vis = frame_diff.copy()
                    elif visual_name == 'motion_hist':
                        vis = np.uint8(np.clip((motion_history-(timestamp-MHI_DURATION)) / MHI_DURATION, 0, 1)*255)
                        vis = cv2.cvtColor(vis, cv2.COLOR_GRAY2BGR)
                    elif visual_name == 'grad_orient':
                        hsv[:,:,0] = mg_orient/2
                        hsv[:,:,2] = mg_mask*255
                        vis = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)

                    for i, rect in enumerate([(0, 0, w, h)] + list(seg_bounds)):
                        x, y, rw, rh = rect
                        area = rw*rh
                        if area < 64**2:
                            continue
                        silh_roi   = motion_mask   [y:y+rh,x:x+rw]
                        orient_roi = mg_orient     [y:y+rh,x:x+rw]
                        mask_roi   = mg_mask       [y:y+rh,x:x+rw]
                        mhi_roi    = motion_history[y:y+rh,x:x+rw]
                        if cv2.norm(silh_roi, cv2.NORM_L1) < area*0.05:
                            continue
                        angle = cv2.calcGlobalOrientation(orient_roi, mask_roi, mhi_roi, timestamp, MHI_DURATION)
                        color = ((255, 0, 0), (0, 0, 255))[i == 0]
                        draw_motion_comp(vis, rect, angle, color)

                    visCopy = vis.copy()
                    draw_str(visCopy, (20, 20), visual_name)
                    cv2.imshow('Output', visCopy)

                if(subFrameCount7 == 0):
                    frame_diff7 = cv2.absdiff(frame7, prev_frame7)
                    prev_frame7 = frame7.copy()
                    gray_diff7 = cv2.cvtColor(frame_diff7, cv2.COLOR_BGR2GRAY)
                    thrs = DEFAULT_THRESHOLD
                    ret7, motion_mask7 = cv2.threshold(gray_diff7, thrs, 1, cv2.THRESH_BINARY)
                    timestamp7 = clock()
                    cv2.updateMotionHistory(motion_mask7, motion_history7, timestamp7, MHI_DURATION)

                    mg_mask7, mg_orient7 = cv2.calcMotionGradient( motion_history7, MAX_TIME_DELTA, MIN_TIME_DELTA, apertureSize=5 )
                    seg_mask7, seg_bounds7 = cv2.segmentMotion(motion_history7, timestamp7, MAX_TIME_DELTA)

                    visual_name = 'motion_hist' #visuals[cv2.getTrackbarPos('visual', 'Output')]
                    if visual_name == 'input':
                        vis7 = frame7.copy()
                    elif visual_name == 'frame_diff':
                        vis7 = frame_diff7.copy()
                    elif visual_name == 'motion_hist':
                        vis7 = np.uint8(np.clip((motion_history7-(timestamp7-MHI_DURATION)) / MHI_DURATION, 0, 1)*255)
                        vis7 = cv2.cvtColor(vis7, cv2.COLOR_GRAY2BGR)
                    elif visual_name == 'grad_orient':
                        hsv7[:,:,0] = mg_orient7/2
                        hsv7[:,:,2] = mg_mask7*255
                        vis7 = cv2.cvtColor(hsv7, cv2.COLOR_HSV2BGR)

                    visCopy7 = vis7.copy()
                    draw_str(visCopy7, (20, 20), visual_name)
                    cv2.imshow('Output7', visCopy7)

                if(subFrameCount14 == 0):
                    frame_diff14 = cv2.absdiff(frame14, prev_frame14)
                    prev_frame14 = frame14.copy()
                    gray_diff14 = cv2.cvtColor(frame_diff14, cv2.COLOR_BGR2GRAY)
                    thrs = DEFAULT_THRESHOLD
                    ret14, motion_mask14 = cv2.threshold(gray_diff14, thrs, 1, cv2.THRESH_BINARY)
                    timestamp14 = clock()
                    cv2.updateMotionHistory(motion_mask14, motion_history14, timestamp14, MHI_DURATION)

                    mg_mask14, mg_orient14 = cv2.calcMotionGradient( motion_history14, MAX_TIME_DELTA, MIN_TIME_DELTA, apertureSize=5 )
                    seg_mask14, seg_bounds14 = cv2.segmentMotion(motion_history14, timestamp14, MAX_TIME_DELTA)

                    visual_name = 'motion_hist' #visuals[cv2.getTrackbarPos('visual', 'Output')]
                    if visual_name == 'input':
                        vis14 = frame14.copy()
                    elif visual_name == 'frame_diff':
                        vis14 = frame_diff14.copy()
                    elif visual_name == 'motion_hist':
                        vis14 = np.uint8(np.clip((motion_history14-(timestamp14-MHI_DURATION)) / MHI_DURATION, 0, 1)*255)
                        vis14 = cv2.cvtColor(vis14, cv2.COLOR_GRAY2BGR)
                    elif visual_name == 'grad_orient':
                        hsv14[:,:,0] = mg_orient14/2
                        hsv14[:,:,2] = mg_mask14*255
                        vis14 = cv2.cvtColor(hsv14, cv2.COLOR_HSV2BGR)

                    visCopy14 = vis14.copy()
                    draw_str(visCopy14, (20, 20), visual_name)
                    cv2.imshow('Output14', visCopy14)


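                ## Rebuild the three MHI visualisations on every frame (visual_name
                ## is hard-coded to 'motion_hist'), so features can still be
                ## extracted when the per-scale updates above were skipped.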
                if(visual_name == 'motion_hist'):
                    mhi_vis = np.uint8(np.clip((motion_history-(timestamp-MHI_DURATION)) / MHI_DURATION, 0, 1)*255)
                    mhi_vis = cv2.cvtColor(mhi_vis, cv2.COLOR_GRAY2BGR)
                    mhi_vis7 = np.uint8(np.clip((motion_history7-(timestamp7-MHI_DURATION)) / MHI_DURATION, 0, 1)*255)
                    mhi_vis7 = cv2.cvtColor(mhi_vis7, cv2.COLOR_GRAY2BGR)
                    mhi_vis14 = np.uint8(np.clip((motion_history14-(timestamp14-MHI_DURATION)) / MHI_DURATION, 0, 1)*255)
                    mhi_vis14 = cv2.cvtColor(mhi_vis14, cv2.COLOR_GRAY2BGR)

                ##Start extracting features. Note: `sift` and `denseDetector` are
                ##rebuilt on every frame but never used below; the outer-scope
                ##`detector` and `extractor` do the actual work.
                sift = cv2.SIFT()
                denseDetector = cv2.FeatureDetector_create(sDetector) ##using Dense Feature Detector

                kp = detector.detect(mhi_vis)
                print('KeyPoints Length:: ',len(kp))
                kp7 = detector.detect(mhi_vis7)
                print('KeyPoints7 Length:: ',len(kp7))
                kp14 = detector.detect(mhi_vis14)
                print('KeyPoints14 Length:: ',len(kp14))

                hasAtleastOneKP = False

                ##Check if there are any detected keypoints before processing.
                if len(kp) > 0:
                    hasAtleastOneKP = True
                    features = extractor.compute(mhi_vis,kp)
                    featuresDes = features[1]

                    if(featureDesSAHMIS is None):
                        featureDesSAHMIS = featuresDes
                    else:
                        featureDesSAHMIS = np.append(featureDesSAHMIS,featuresDes,axis=0)

                    print('Descriptors:: ',featuresDes)
                    print('Descriptors Length:: ',len(featuresDes))
                    print('Descriptors Shape:: ',featuresDes.shape)

                    # print('KeyPoints:: ',kp)
                    # print('Descriptors:: ',des)
                    #desFlattened = features.flatten()
                    # print('desFlattened Length:: ',len(desFlattened)
                    if(oper == "extractTrainingVocabulary"):
                        # bowTrainer.add(featuresDes)

                        saveTrainFeaturesDes.append(featuresDes)
                        cv2.imwrite('mhiImages\\'+file.split("\\")[2]+'_mhi.jpg',mhi_vis)
                        # saveTrainFeaturesDes1 = np.array(saveTrainFeaturesDes)
                        # saveTrainFeaturesDes1.dump(open('trainFeatures.dat', 'wb'))
                    elif(oper=="extractBOWDescriptor"):
                        bowDescriptor = bowDE.compute(mhi_vis, kp)
                        # descriptors.push_back(bowDescriptor);
                        # print('bowDescriptor:: ',bowDescriptor)
                        print('bowDescriptor Length:: ',len(bowDescriptor))
                        print('bowDescriptor Shape:: ',bowDescriptor.shape)

                        ##Check whether the operation is on training or test data
                        if(fileType=="train"):
                            if(bowDescriptorSAHMIS is None):
                                bowDescriptorSAHMIS = bowDescriptor
                            else:
                                bowDescriptorSAHMIS = np.append(bowDescriptorSAHMIS,bowDescriptor)#,axis=0)
                            img=cv2.drawKeypoints(mhi_vis,kp)
                            cv2.imwrite('keyPoints\\'+file.split("\\")[2]+'_keypoints.jpg',img)
                        else:
                            testData.append(bowDescriptor)
                            testLabels.append(label)
                else:
                    print("No Key points were detectected for this image..")


                ##Check if there are any detected keypoints before processing.
                if len(kp7) > 0:

                    hasAtleastOneKP = True
                    features7 = extractor.compute(mhi_vis7,kp7)
                    featuresDes7 = features7[1]

                    if(featureDesSAHMIS is None):
                        featureDesSAHMIS = featuresDes7
                    else:
                        featureDesSAHMIS = np.append(featureDesSAHMIS,featuresDes7,axis=0)
                    print('featuresDes7:: ',featuresDes7)
                    print('featuresDes7 Length:: ',len(featuresDes7))
                    print('featuresDes7 Shape:: ',featuresDes7.shape)

                    if(oper == "extractTrainingVocabulary"):
                        # bowTrainer.add(featuresDes7)
                        saveTrainFeaturesDes.append(featuresDes7)
                        cv2.imwrite('mhiImages\\'+file.split("\\")[2]+'_mhi7.jpg',mhi_vis7)
                    elif(oper=="extractBOWDescriptor"):
                        bowDescriptor7 = bowDE.compute(mhi_vis7, kp7)
                        print('bowDescriptor7 Length:: ',len(bowDescriptor7))
                        print('bowDescriptor7 Shape:: ',bowDescriptor7.shape)

                        ##Check whether the operation is on training or test data
                        if(fileType=="train"):
                            if(bowDescriptorSAHMIS is None):
                                bowDescriptorSAHMIS = bowDescriptor7
                            else:
                                bowDescriptorSAHMIS = np.append(bowDescriptorSAHMIS,bowDescriptor7)#,axis=0)
                            img7=cv2.drawKeypoints(mhi_vis7,kp7)
                            cv2.imwrite('keyPoints\\'+file.split("\\")[2]+'_keypoints7.jpg',img7)
                        else:
                            testData.append(bowDescriptor7)
                            testLabels.append(label)
                else:
                    print("No Key points were detectected for this image7..")


                ##Check if there are any detected keypoints before processing.
                if len(kp14) > 0:
                    hasAtleastOneKP = True
                    features14 = extractor.compute(mhi_vis14,kp14)
                    featuresDes14 = features14[1]
                    if(featureDesSAHMIS is None):
                        featureDesSAHMIS = featuresDes14
                    else:
                        featureDesSAHMIS = np.append(featureDesSAHMIS,featuresDes14,axis=0)
                    print('featuresDes14 Length:: ',len(featuresDes14))
                    print('featuresDes14 Shape:: ',featuresDes14.shape)

                    if(oper == "extractTrainingVocabulary"):
                        # bowTrainer.add(featuresDes14)
                        saveTrainFeaturesDes.append(featuresDes14)
                        cv2.imwrite('mhiImages\\'+file.split("\\")[2]+'_mhi14.jpg',mhi_vis14)
                        # saveTrainFeaturesDes1 = np.array(saveTrainFeaturesDes)
                        # saveTrainFeaturesDes1.dump(open('trainFeatures.dat', 'wb'))
                    elif(oper=="extractBOWDescriptor"):
                        bowDescriptor14 = bowDE.compute(mhi_vis14, kp14)
                        # descriptors.push_back(bowDescriptor);
                        # print('bowDescriptor:: ',bowDescriptor)
                        print('bowDescriptor14 Length:: ',len(bowDescriptor14))
                        print('bowDescriptor14 Shape:: ',bowDescriptor14.shape)

                        ##Check whether the operation is on training or test data
                        if(fileType=="train"):
                            if(bowDescriptorSAHMIS is None):
                                bowDescriptorSAHMIS = bowDescriptor14
                            else:
                                bowDescriptorSAHMIS = np.append(bowDescriptorSAHMIS,bowDescriptor14)#,axis=0)
                            img14=cv2.drawKeypoints(mhi_vis14,kp14)
                            cv2.imwrite('keyPoints\\'+file.split("\\")[2]+'_keypoints14.jpg',img14)
                        else:
                            testData.append(bowDescriptor14)
                            testLabels.append(label)
                else:
                    print("No Key points were detectected for this image14..")

                if(hasAtleastOneKP):
                    if(oper == "extractTrainingVocabulary"):
                        # print('featureDesSAHMIS:: ',featureDesSAHMIS)
                        print('featureDesSAHMIS Length:: ',len(featureDesSAHMIS))
                        print('featureDesSAHMIS Shape:: ',featureDesSAHMIS.shape)
                        bowTrainer.add(featureDesSAHMIS)
                        saveTrainFeaturesDes.append(featureDesSAHMIS)
                    else:
                        print('bowDescriptorSAHMIS:: ',bowDescriptorSAHMIS)
                        print('bowDescriptorSAHMIS Length:: ',len(bowDescriptorSAHMIS))
                        print('bowDescriptorSAHMIS Shape:: ',bowDescriptorSAHMIS.shape)
                        ##Check whether the operation is on training or test data
                        if(fileType=="train"):
                            trainData.append(bowDescriptorSAHMIS)
                            trainLabels.append(label)
                        else:
                            testData.append(bowDescriptorSAHMIS)
                            testLabels.append(label)
                    featureDesSAHMIS = None
                    bowDescriptorSAHMIS = None


                print(file.split("\\")[2])

                subFrameCount = subFrameCount + 1
                if(subFrameCount > skipFrames):
                    subFrameCount = 0

                subFrameCount7 = subFrameCount7 + 1
                if(subFrameCount7 > skipFrames7):
                    subFrameCount7 = 0

                subFrameCount14 = subFrameCount14 + 1
                if(subFrameCount14 > skipFrames14):
                    subFrameCount14 = 0

                frameCounter = frameCounter + 1


                # cv2.imwrite('keyPoints\\'+file.split("\\")[2]+'_keypoints.jpg',img)
                cv2.waitKey(25)

    cv2.destroyAllWindows()
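Every snippet in this collection drives the same four-call motion-template pipeline: updateMotionHistory folds each thresholded frame difference into a float32 motion-history image, calcMotionGradient derives a gradient-orientation map from it, segmentMotion splits the history into per-component bounding boxes, and calcGlobalOrientation reduces a region to a single angle. A minimal end-to-end sketch, assuming OpenCV 2.4.x (in OpenCV 3+ these functions moved into opencv_contrib and are exposed as cv2.motempl.*):

    import cv2
    import numpy as np

    MHI_DURATION = 0.5      # seconds a silhouette persists in the history
    MAX_TIME_DELTA = 0.25
    MIN_TIME_DELTA = 0.05

    cam = cv2.VideoCapture(0)
    ret, prev = cam.read()
    h, w = prev.shape[:2]
    mhi = np.zeros((h, w), np.float32)

    while True:
        ret, frame = cam.read()
        if not ret:
            break
        gray = cv2.cvtColor(cv2.absdiff(frame, prev), cv2.COLOR_BGR2GRAY)
        ret, silhouette = cv2.threshold(gray, 32, 1, cv2.THRESH_BINARY)
        t = cv2.getTickCount() / cv2.getTickFrequency()
        cv2.updateMotionHistory(silhouette, mhi, t, MHI_DURATION)  # fold in new motion
        mask, orient = cv2.calcMotionGradient(mhi, MAX_TIME_DELTA,
                                              MIN_TIME_DELTA, apertureSize=5)
        seg_mask, seg_bounds = cv2.segmentMotion(mhi, t, MAX_TIME_DELTA)
        angle = cv2.calcGlobalOrientation(orient, mask, mhi, t, MHI_DURATION)
        print('global motion angle: %.1f deg (%d components)' % (angle, len(seg_bounds)))
        prev = frame.copy()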
Example #9
0
    def detect(self):
        # print('detect...')
        # visuals = ['input', 'frame_diff', 'motion_hist', 'grad_orient']
        # cv2.createTrackbar('visual', 'motempl', 2, len(visuals)-1, nothing)
        # cv2.createTrackbar('threshold', 'motempl', DEFAULT_THRESHOLD, 255, nothing)
        height, width = self.frame.shape[:2]
        frame_diff = cv2.absdiff(self.frame, self.prev_frame)
        gray_diff = cv2.cvtColor(frame_diff, cv2.COLOR_BGR2GRAY)
        thrs = self.threshold_spin.value()
        # thrs = cv2.getTrackbarPos('threshold', 'motempl')
        ret, motion_mask = cv2.threshold(gray_diff, thrs, 1, cv2.THRESH_BINARY)
        timestamp = clock()
        cv2.updateMotionHistory(motion_mask, self.motion_history, timestamp, self.MHI_DURATION)
        mg_mask, mg_orient = cv2.calcMotionGradient(self.motion_history, self.MAX_TIME_DELTA, self.MIN_TIME_DELTA, apertureSize=5)
        seg_mask, seg_bounds = cv2.segmentMotion(self.motion_history, timestamp, self.MAX_TIME_DELTA)

        for radio in (self.visual_radio_1, self.visual_radio_2, self.visual_radio_3, self.visual_radio_4):
            if radio.isChecked():
                visual_name = str(radio.text())
                break
        else:
            visual_name = 'input'  # fallback so visual_name is always bound

        # visual_name = visuals[cv2.getTrackbarPos('visual', 'motempl')]
        # visual_name = 'input'
        if visual_name == 'input':
            vis = self.frame.copy()
        elif visual_name == 'frame_diff':
            vis = frame_diff.copy()
        elif visual_name == 'motion_hist':
            vis = np.uint8(np.clip((self.motion_history-(timestamp-self.MHI_DURATION)) / self.MHI_DURATION, 0, 1)*255)
            vis = cv2.cvtColor(vis, cv2.COLOR_GRAY2BGR)
        elif visual_name == 'grad_orient':
            self.hsv[:, :, 0] = mg_orient/2
            self.hsv[:, :, 2] = mg_mask*255
            vis = cv2.cvtColor(self.hsv, cv2.COLOR_HSV2BGR)

        for i, rect in enumerate([(0, 0, width, height)] + list(seg_bounds)):
            x, y, rw, rh = rect
            area = rw*rh
            if area < 64**2:
                continue
            silh_roi = motion_mask[y:y+rh, x:x+rw]
            orient_roi = mg_orient[y:y+rh, x:x+rw]
            mask_roi = mg_mask[y:y+rh, x:x+rw]
            mhi_roi = self.motion_history[y:y+rh, x:x+rw]
            if cv2.norm(silh_roi, cv2.NORM_L1) < area*0.05:
                continue
            angle = cv2.calcGlobalOrientation(orient_roi, mask_roi, mhi_roi, timestamp, self.MHI_DURATION)
            color = ((255, 0, 0), (0, 0, 255))[i == 0]
            self.draw_motion_comp(vis, rect, angle, color)
            if i == 0:
                # target motion detected (i == 0 is the whole-frame rect)
                if self.record_check.isChecked():
                    self.monitor_last_shoot = clock()
                    if not self.is_recording:
                        self.start_record()
                elif self.shoot_check.isChecked():
                    # print(self.monitor_last_shoot)
                    # print(clock())
                    delay = self.shoot_delay_spin.value()
                    if (not self.monitor_last_shoot) or (clock() - self.monitor_last_shoot >= delay):
                        self.shoot()
                        self.monitor_last_shoot = clock()
                if self.sound_check.isChecked():
                    self.play_sound()

        self.draw_str(vis, (20, 20), visual_name)

        self.prev_frame = self.frame.copy()
        return vis
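Several examples above call an undefined clock() helper. In the OpenCV sample code these snippets are adapted from, it is a thin wrapper around the tick counter; a minimal stand-in under that assumption:

    import cv2

    def clock():
        # wall-clock seconds, as in the helper from OpenCV's sample common.py
        return cv2.getTickCount() / cv2.getTickFrequency()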
Example #10
0
        elif visual_name == 'motion_hist':
            vis = np.uint8(np.clip((motion_history-(timestamp-MHI_DURATION)) / MHI_DURATION, 0, 1)*255)
            vis = cv2.cvtColor(vis, cv2.COLOR_GRAY2BGR)
        elif visual_name == 'grad_orient':
            hsv[:,:,0] = mg_orient/2
            hsv[:,:,2] = mg_mask*255
            vis = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)

        for i, rect in enumerate([(0, 0, w, h)] + list(seg_bounds)):
            x, y, rw, rh = rect
            area = rw*rh
            if area < 64**2:
                continue
            silh_roi   = motion_mask   [y:y+rh,x:x+rw]
            orient_roi = mg_orient     [y:y+rh,x:x+rw]
            mask_roi   = mg_mask       [y:y+rh,x:x+rw]
            mhi_roi    = motion_history[y:y+rh,x:x+rw]
            if cv2.norm(silh_roi, cv2.NORM_L1) < area*0.05:
                continue
            angle = cv2.calcGlobalOrientation(orient_roi, mask_roi, mhi_roi, timestamp, MHI_DURATION)
            color = ((255, 0, 0), (0, 0, 255))[i == 0]
            draw_motion_comp(vis, rect, angle, color)

        draw_str(vis, (20, 20), visual_name)
        cv2.imshow('motempl', vis)

        prev_frame = frame.copy()
        if 0xFF & cv2.waitKey(5) == 27:
            break
    cv2.destroyAllWindows()
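Example #10 (like several snippets above) calls draw_motion_comp() without defining it. A sketch matching the OpenCV motempl sample's behaviour: outline the motion component, then draw a line from its centre in the direction of the estimated angle:

    import math
    import cv2

    def draw_motion_comp(vis, rect, angle, color):
        x, y, w, h = rect
        cv2.rectangle(vis, (x, y), (x + w, y + h), (0, 255, 0))  # component outline
        r = min(w // 2, h // 2)
        cx, cy = x + w // 2, y + h // 2
        angle = math.radians(angle)
        # orientation indicator: centre towards the edge of the inscribed circle
        cv2.line(vis, (cx, cy),
                 (int(cx + math.cos(angle) * r), int(cy + math.sin(angle) * r)),
                 color, 3)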
Example #11
0
def main(argv=None):
    if argv is None:
        argv = sys.argv[1:]
    
    parser = argparse.ArgumentParser(description="Track moving objects in a video stream")
    
    parser.add_argument("-f", "--file", 
                        help="use given file")
    parser.add_argument("-p", "--play-only", action="store_true",
                        help="playback only. Don't do any recognition. Useful for sanity checking files or installation")
    parser.add_argument("--motion-threshold", type=int, default=32,
                        help="threshold for motion. (difference in grey values between frames)")
    parser.add_argument("--max-track-time", type=float, default=0.5,
                        help="maximum time for a motion track")
    
    args = parser.parse_args(argv)
    
    source = args.file
    
    if source is None:
        print "No video source given!"
        return
        
    video = cv2.VideoCapture()
    video.open(source)
    
    if not video.isOpened():
        print "Video not open"
        return
        
    width = int(video.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(video.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fps = video.get(cv2.CAP_PROP_FPS)
    
    print "opened {w}x{h} video @ {f}fps".format(w=width,h=height,f=fps)
    
    HISTORY_NAME = "motion history"
    MASK_NAME = "motion mask"
    ORIENTATION_NAME = "orientation"
    
    make_nth_named_window(WIN_NAME, height)
    
    if not args.play_only:
        make_nth_named_window(HISTORY_NAME, height, 1)
        # make_nth_named_window(MASK_NAME, height, 2)
        # make_nth_named_window(ORIENTATION_NAME, height, 3)
        
        motion_history = np.zeros((height, width), np.float32)
        
    prev_frame = None
    
    frame_count = 0
    frame_interval_normal = int(1000.0/fps)
    frame_interval = frame_interval_normal
    while video.grab():
        got_frame, frame = video.retrieve()
        
        if not got_frame:
            print "frame miss"
            continue
        
        frame_count += 1
        print "frame: {c}   \r".format(c=frame_count), 
        sys.stdout.flush()
        
        display = frame.copy()
        
        timestamp = float(frame_count) / fps
        
        if not args.play_only:
            if prev_frame is None:
                prev_frame = frame.copy()
                
            frame_diff = cv2.absdiff(frame, prev_frame)
            gray_diff = cv2.cvtColor(frame_diff, cv2.COLOR_BGR2GRAY)
            ret, motion_mask = cv2.threshold(gray_diff, args.motion_threshold, 1, cv2.THRESH_BINARY)
            # cv2.imshow(MASK_NAME, motion_mask)
            
            cv2.updateMotionHistory(motion_mask, motion_history, timestamp, args.max_track_time)
            cv2.imshow(HISTORY_NAME, motion_history)
            
            mgrad_mask, mgrad_orient = cv2.calcMotionGradient( motion_history, MAX_TIME_DELTA, MIN_TIME_DELTA, apertureSize=5 )
            mseg_mask, mseg_bounds = cv2.segmentMotion(motion_history, timestamp, MAX_TIME_DELTA)
            
            # cv2.imshow(ORIENTATION_NAME, mgrad_orient)
            # if frame_interval == 0:
            #     import pdb; pdb.set_trace()
            
            for i, rect in enumerate([(0, 0, width, height)] + list(mseg_bounds)):
                x, y, rw, rh = rect
                area = rw * rh
                # TODO: where does 64**2 come from? (the same cutoff appears in the other examples)
                if area < 64**2:
                    continue
                motion_roi = motion_mask[y:y+rh, x:x+rw]
                if cv2.norm(motion_roi, cv2.NORM_L1) < 0.05 * area:
                    # eliminate small things
                    continue
                mgrad_orient_roi = mgrad_orient[y:y+rh, x:x+rw]
                mgrad_mask_roi = mgrad_mask[y:y+rh, x:x+rw]
                motion_hist_roi = motion_history[y:y+rh, x:x+rw]
                angle = cv2.calcGlobalOrientation(mgrad_orient_roi, mgrad_mask_roi, motion_hist_roi, timestamp, args.max_track_time)
                
                cv2.rectangle(display, (x, y), (x+rw, y+rh), GREEN)
                cv2.putText(display, "{:.1f}".format(angle), (x, y+rh), FONT, 1, GREEN)
                            
            cv2.imshow(WIN_NAME, display)
    
            prev_frame = frame
        
        key = cv2.waitKey(frame_interval)
        if key == 27:
            return
        elif key == 32:
            # toggle pause on space
            frame_interval = 0 if frame_interval !=0 else frame_interval_normal
        elif key >= 0:
            print "\nkey: {k}\n".format(k=key)
        
    print
    video.release()
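make_nth_named_window(), used above to lay out the debug windows, is not shown (nor are WIN_NAME, GREEN and FONT, which are module-level constants elsewhere in the script). A hypothetical implementation consistent with the call sites, where n tiles the windows vertically:

    import cv2

    def make_nth_named_window(name, height, n=0):
        # hypothetical helper: create the window and stack it below the previous ones
        cv2.namedWindow(name)
        cv2.moveWindow(name, 0, n * (height + 30))  # leave ~30 px for title bars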
Example #12
0
    def process_image(self, cv_image):
        if self.motion_history is None:
            self.h, self.w = cv_image.shape[:2]
            self.prev_frame = cv_image.copy()
            self.motion_history = np.zeros((self.h, self.w), np.float32)
            self.hsv = np.zeros((self.h, self.w, 3), np.uint8)
            self.hsv[:,:,1] = 255
            self.erode_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))  # kernel shape; MORPH_ERODE only worked here because both flags equal 0
        
        color_frame = cv_image.copy()
        frame_diff = cv2.absdiff(color_frame, self.prev_frame)
        gray_diff = cv2.cvtColor(frame_diff, cv2.COLOR_BGR2GRAY)
        
        thresh = cv2.getTrackbarPos('threshold', self.node_name)
        
        ret, motion_mask = cv2.threshold(gray_diff, thresh, 1, cv2.THRESH_BINARY)
        
        motion_mask = cv2.erode(motion_mask, self.erode_kernel, iterations=2)
        motion_mask = cv2.dilate(motion_mask, self.erode_kernel, iterations=2)
                
        timestamp = clock()
        
        cv2.updateMotionHistory(motion_mask, self.motion_history, timestamp, MHI_DURATION)

        mg_mask, mg_orient = cv2.calcMotionGradient(self.motion_history, MAX_TIME_DELTA, MIN_TIME_DELTA, apertureSize=5 )
        seg_mask, seg_bounds = cv2.segmentMotion(self.motion_history, timestamp, MAX_TIME_DELTA)

        visual_name = self.visuals[cv2.getTrackbarPos('visual', self.node_name)]
        if visual_name == 'input':
            vis = cv_image.copy()
        elif visual_name == 'frame_diff':
            vis = frame_diff.copy()
        elif visual_name == 'motion_hist':
            vis = np.uint8(np.clip((self.motion_history-(timestamp-MHI_DURATION)) / MHI_DURATION, 0, 1)*255)
        elif visual_name == 'motion_hist_color':
            vis = np.uint8(np.clip((self.motion_history-(timestamp-MHI_DURATION)) / MHI_DURATION, 0, 1)*255)
            vis = cv2.cvtColor(vis, cv2.COLOR_GRAY2BGR)
        elif visual_name == 'grad_orient':
            self.hsv[:,:,0] = mg_orient/2
            self.hsv[:,:,2] = mg_mask*255
            vis = cv2.cvtColor(self.hsv, cv2.COLOR_HSV2BGR)

        max_rect_area = 0
        
        for i, rect in enumerate([(0, 0, self.w, self.h)] + list(seg_bounds)):
            x, y, rw, rh = rect
            area = rw*rh
            if area < 64**2:
                continue
            if area < 640*480 and area > max_rect_area:
                max_rect_area = area
                max_rect = rect
            silh_roi   = motion_mask   [y:y+rh,x:x+rw]
            orient_roi = mg_orient     [y:y+rh,x:x+rw]
            mask_roi   = mg_mask       [y:y+rh,x:x+rw]
            mhi_roi    = self.motion_history[y:y+rh,x:x+rw]
            if cv2.norm(silh_roi, cv2.NORM_L1) < area*0.05:
                continue
            angle = cv2.calcGlobalOrientation(orient_roi, mask_roi, mhi_roi, timestamp, MHI_DURATION)
            color = ((255, 0, 0), (0, 0, 255))[i == 0]
            #draw_motion_comp(vis, rect, angle, color)

        #draw_str(vis, (20, 20), visual_name)

        display_image = cv_image.copy()


        if max_rect_area != 0:
            x, y, w, h = max_rect
            display = color_frame[y:y+h,x:x+w] 
#            #bounding_box = cv2.boundingRect(vis)
#            #print bounding_box
#        
            if visual_name == 'motion_hist':
                display = vis.copy()
            else:
                display = cv2.bitwise_and(color_frame, vis, vis)

            draw_str(vis, (20, 20), visual_name)
            
            contour_image = vis.copy()
            
            contours, hierarchy = cv2.findContours(contour_image, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
            
            contour_points = list()
            
            if len(contours) != 0:
                for cnt in contours:
                    contour_points.append(cnt)
                
                vstack_points = np.vstack(contour_points)
                if len(vstack_points) > 5:
                    z_ellipse = cv2.fitEllipse(vstack_points)
                    cv2.ellipse(display_image, z_ellipse, (0,255,0), 2)
        
                cv2.drawContours(display_image, contours, -1, (0,255,0), 3)
            
            cv2.imshow("Contours", display_image)

        self.prev_frame = color_frame
        
        #cv2.waitKey(5)
                
        return cv_image
Example #13
0
    def process_image(self, cv_image):
        if self.motion_history is None:
            self.h, self.w = cv_image.shape[:2]
            self.prev_frame = cv_image.copy()
            self.motion_history = np.zeros((self.h, self.w), np.float32)
            self.hsv = np.zeros((self.h, self.w, 3), np.uint8)
            self.hsv[:, :, 1] = 255
            self.erode_kernel = cv2.getStructuringElement(
                cv2.MORPH_RECT, (3, 3))  # kernel shape, not a morphology op

        color_frame = cv_image.copy()
        frame_diff = cv2.absdiff(color_frame, self.prev_frame)
        gray_diff = cv2.cvtColor(frame_diff, cv2.COLOR_BGR2GRAY)

        thresh = cv2.getTrackbarPos('threshold', self.node_name)

        ret, motion_mask = cv2.threshold(gray_diff, thresh, 1,
                                         cv2.THRESH_BINARY)

        motion_mask = cv2.erode(motion_mask, self.erode_kernel, iterations=2)
        motion_mask = cv2.dilate(motion_mask, self.erode_kernel, iterations=2)

        timestamp = clock()

        cv2.updateMotionHistory(motion_mask, self.motion_history, timestamp,
                                MHI_DURATION)

        mg_mask, mg_orient = cv2.calcMotionGradient(self.motion_history,
                                                    MAX_TIME_DELTA,
                                                    MIN_TIME_DELTA,
                                                    apertureSize=5)
        seg_mask, seg_bounds = cv2.segmentMotion(self.motion_history,
                                                 timestamp, MAX_TIME_DELTA)

        visual_name = self.visuals[cv2.getTrackbarPos('visual',
                                                      self.node_name)]
        if visual_name == 'input':
            vis = cv_image.copy()
        elif visual_name == 'frame_diff':
            vis = frame_diff.copy()
        elif visual_name == 'motion_hist':
            vis = np.uint8(
                np.clip(
                    (self.motion_history -
                     (timestamp - MHI_DURATION)) / MHI_DURATION, 0, 1) * 255)
        elif visual_name == 'motion_hist_color':
            vis = np.uint8(
                np.clip(
                    (self.motion_history -
                     (timestamp - MHI_DURATION)) / MHI_DURATION, 0, 1) * 255)
            vis = cv2.cvtColor(vis, cv2.COLOR_GRAY2BGR)
        elif visual_name == 'grad_orient':
            self.hsv[:, :, 0] = mg_orient / 2
            self.hsv[:, :, 2] = mg_mask * 255
            vis = cv2.cvtColor(self.hsv, cv2.COLOR_HSV2BGR)

        max_rect_area = 0

        for i, rect in enumerate([(0, 0, self.w, self.h)] + list(seg_bounds)):
            x, y, rw, rh = rect
            area = rw * rh
            if area < 64**2:
                continue
            if area < 640 * 480 and area > max_rect_area:
                max_rect_area = area
                max_rect = rect
            silh_roi = motion_mask[y:y + rh, x:x + rw]
            orient_roi = mg_orient[y:y + rh, x:x + rw]
            mask_roi = mg_mask[y:y + rh, x:x + rw]
            mhi_roi = self.motion_history[y:y + rh, x:x + rw]
            if cv2.norm(silh_roi, cv2.NORM_L1) < area * 0.05:
                continue
            angle = cv2.calcGlobalOrientation(orient_roi, mask_roi, mhi_roi,
                                              timestamp, MHI_DURATION)
            color = ((255, 0, 0), (0, 0, 255))[i == 0]
            #draw_motion_comp(vis, rect, angle, color)

        #draw_str(vis, (20, 20), visual_name)

        display_image = cv_image.copy()

        if max_rect_area != 0:
            x, y, w, h = max_rect
            display = color_frame[y:y + h, x:x + w]
            #            #bounding_box = cv2.boundingRect(vis)
            #            #print bounding_box
            #
            if visual_name == 'motion_hist':
                display = vis.copy()
            else:
                display = cv2.bitwise_and(color_frame, vis, vis)

            draw_str(vis, (20, 20), visual_name)

            contour_image = vis.copy()

            contours, hierarchy = cv2.findContours(contour_image,
                                                   cv2.RETR_TREE,
                                                   cv2.CHAIN_APPROX_SIMPLE)

            contour_points = list()

            if len(contours) != 0:
                for cnt in contours:
                    contour_points.append(cnt)

                vstack_points = np.vstack(contour_points)
                if len(vstack_points) > 5:
                    z_ellipse = cv2.fitEllipse(vstack_points)
                    cv2.ellipse(display_image, z_ellipse, (0, 255, 0), 2)

                cv2.drawContours(display_image, contours, -1, (0, 255, 0), 3)

            cv2.imshow("Contours", display_image)

        self.prev_frame = color_frame

        #cv2.waitKey(5)

        return cv_image
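A portability note on the two process_image() examples: cv2.findContours returns (contours, hierarchy) in OpenCV 2.4 and 4.x, but (image, contours, hierarchy) in 3.x, so the two-value unpacking above only works outside the 3.x series. A version-agnostic variant of that call:

    res = cv2.findContours(contour_image, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    contours, hierarchy = res[-2], res[-1]  # last two items on every OpenCV version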