Example #1
def detect_and_show(wnd, detector, image, frame_pos, wait=True):
    detect_and_draw(detector, image)
    if frame_pos is not None:
        utils.put_frame_pos(image, frame_pos)
    wnd.imshow(image)
    if wait:
        cv2.waitKey()
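
Every example on this page overlays the current frame index on the image via utils.put_frame_pos, which is not itself reproduced here. A minimal sketch of what it plausibly looks like, assuming a cv2.putText wrapper whose signature is inferred from the calls in Examples #3 and #4 (font, color, and default anchor are guesses):

import cv2

def put_frame_pos(image, frame_pos, xy=(2, 25)):
    # Hypothetical reconstruction: draw the frame number at the given
    # anchor and return the image so the call can be chained.
    cv2.putText(image, f'frame: {frame_pos}', xy,
                cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 0), 2)
    return image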
Example #2
def main_6_video():
    video, calibration_image = get_capture_and_calibration_image_video6()
    calibrator = Calibrator.calibrate(calibration_image, 2)
    visualize_calibration(calibrator, calibration_image)
    if not calibrator.calibrated:
        print('System was not calibrated.')
        return

    detector = Detector(calibrator)
    wnd = CvNamedWindow('detection', cv2.WINDOW_NORMAL)

    video.set_pos(961)
    ellipses_count = 0
    for frame in video.frames():
        pos = video.frame_pos()
        print(pos)
        t, ellipses = detect_and_draw(detector, frame)
        ellipses_count = max(ellipses_count, len(ellipses))
        utils.put_frame_pos(frame, pos)
        put_video_duration(frame, video.frame_pos_msec())
        put_ellipses_count(frame, ellipses_count)
        wnd.imshow(frame)
        cv2.waitKey()
    cv2.waitKey()

    video.release()
    cv2.destroyAllWindows()
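
put_video_duration and put_ellipses_count are helpers from the same module that are not reproduced here. A hedged sketch, assuming they follow the same cv2.putText pattern as put_frame_pos (the names come from the example, but the text layout and anchors are guesses):

import cv2

def put_video_duration(image, pos_msec, xy=(2, 55)):
    # Assumed formatting: milliseconds rendered as mm:ss.
    secs = int(pos_msec) // 1000
    text = f'duration: {secs // 60:02d}:{secs % 60:02d}'
    cv2.putText(image, text, xy, cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 0), 2)

def put_ellipses_count(image, count, xy=(2, 85)):
    cv2.putText(image, f'ellipses: {count}', xy,
                cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 0), 2)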
Example #3
def main():
    video = VideoCapture(video_sources.video_6)
    work_area = WorkAreaView(video_sources.video_6_work_area_markers)

    vc = VideoController(10, 'pause')
    video_wnd, = Wnd.create('video')
    # h_wnd, s_wnd, v_wnd = Wnd.create('H', 'S', 'V')
    # L_wnd, a_wnd, b_wnd = Wnd.create('L', 'a', 'b')

    frames_iter = work_area.skip_non_area(video.frames())
    for frame, _ in frames_iter:
        # hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        # h_wnd.imshow(hsv[:, :, 0])
        # s_wnd.imshow(hsv[:, :, 1])
        # v_wnd.imshow(hsv[:, :, 2])

        # lab = cv2.cvtColor(frame, cv2.COLOR_BGR2Lab)
        # L_wnd.imshow(lab[:, :, 0])
        # a_wnd.imshow(lab[:, :, 1])
        # b_wnd.imshow(lab[:, :, 2])

        vis_img = utils.put_frame_pos(frame, video.frame_pos(), xy=(2, 55))
        video_wnd.imshow(vis_img)

        if vc.wait_key() == 27: break

    video.release()
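
VideoController(10, 'pause') evidently throttles playback and supports pausing; the class is not shown on this page. A minimal sketch under that reading (the constructor arguments are taken as delay in milliseconds and initial state, and space as the pause toggle; the real implementation may differ):

import cv2

class VideoController:
    def __init__(self, delay_ms, state='play'):
        self.delay_ms = delay_ms
        self.paused = (state == 'pause')

    def wait_key(self):
        # Block indefinitely while paused, otherwise wait delay_ms.
        # The pressed key code is returned so callers can test for
        # Esc (27), as the examples on this page do.
        key = cv2.waitKey(0 if self.paused else self.delay_ms) & 0xFF
        if key == ord(' '):
            self.paused = not self.paused
        return key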
Example #4
def main():
    video = VideoCapture(video_sources.video_2)
    workArea = WorkAreaView(video_sources.video_2_work_area_markers)

    vc = VideoController(10, 'pause')
    (video_wnd, bin_diff_wnd, gray_diff_wnd, colorDiffWnd,
     learned_BG_wnd) = Wnd.create('video', 'binary diff', 'gray diff',
                                  'color diff', 'Learned BG')
    colorAbsDiffWnd = Wnd('color_abs_diff')
    segmentedWnd = Wnd('segmented')

    segmenter = Segmenter()

    frames_iter = workArea.skip_non_area(video.frames())

    motionDetector = MotionDetector(next(frames_iter)[0], 3)
    backgroundModel = BackgroundModel(15)
    prevBackground = None
    for frame, _ in frames_iter:
        motionDetector.detect(frame)

        if motionDetector.motionEnded():
            # calc fgMask
            mask, gray_diff, color_diff, colorAbsDiff = calcForegroundMask(
                prevBackground, frame)
            # bin_diff_wnd.imshow(resize(mask, 0.5))
            bin_diff_wnd.imshow(cv2.cvtColor(mask, cv2.COLOR_GRAY2BGR))

            # gray_diff_wnd.imshow(resize(gray_diff, .5))
            # colorDiffWnd.imshow(resize(color_diff, .5))
            # colorAbsDiffWnd.imshow(resize(colorAbsDiff, .5))
            markers, objectsCount = segmenter.segment(mask)
            segmentedWnd.imshow(
                resize(Segmenter.markersToDisplayImage(markers, objectsCount),
                       .5))

            backgroundModel = BackgroundModel(15)

        if motionDetector.isSilence():
            backgroundModel.learn(frame, foregroundMask=None)
            learned_BG_wnd.imshow(resize(backgroundModel.learned, .5))

        if motionDetector.motionStarted():
            prevBackground = backgroundModel.learned
            backgroundModel = None
            learned_BG_wnd.imshow(resize(prevBackground, .5))

        # VIS

        vis_img = motionDetector.indicateCurrentState(frame.copy())
        vis_img = utils.put_frame_pos(vis_img, video.frame_pos(), xy=(2, 55))
        video_wnd.imshow(vis_img)
        # bin_diff_wnd.imshow(resize(motionDetector.bin_diff, .5))
        # gray_diff_wnd.imshow(resize(motionDetector.gray_diff, .5))
        # VIS END

        if vc.wait_key() == 27: break

    video.release()
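
calcForegroundMask is not reproduced on this page. Judging by the four values it returns and by the very similar inline code in Example #7 (absolute difference against the learned background, grayscale conversion, fixed threshold), a plausible sketch — the threshold of 35 is borrowed from Example #7 and the signed color_diff is a guess:

import cv2

def calcForegroundMask(background, frame):
    # Hypothetical reconstruction: per-channel absolute difference
    # against the learned background, collapsed to gray, then binarized.
    colorAbsDiff = cv2.absdiff(background, frame)
    color_diff = cv2.subtract(frame, background)
    gray_diff = cv2.cvtColor(colorAbsDiff, cv2.COLOR_BGR2GRAY)
    _, mask = cv2.threshold(gray_diff, 35, 255, cv2.THRESH_BINARY)
    return mask, gray_diff, color_diff, colorAbsDiff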
Example #5
def main_6_video_write_results(max_frames):
    max_frames = max_frames or -1
    video, calibration_image = get_capture_and_calibration_image_video6()
    print(f'FRAME COUNT: {video.frame_count()}')
    calibrator = Calibrator.calibrate(calibration_image, 2)
    # visualize_calibration(calibrator, calibration_image)
    if not calibrator.calibrated:
        print('System was not calibrated.')
        return

    fourcc = cv2.VideoWriter_fourcc(*'XVID')  # fourcc = cv2.VideoWriter_fourcc('M', 'J', 'P', 'G')
    out = cv2.VideoWriter('d:/DiskE/Computer_Vision_Task/Video_6_out_full.avi',
                          fourcc, video.fps(), video.resolution())
    detector = Detector(calibrator)

    import time
    t0 = time.time()

    ellipses_count = 0
    for frame in video.frames():
        pos = video.frame_pos()

        try:
            t, ellipses = detect_and_draw(detector, frame, False)
        except Exception:
            print(f'Error at frame {pos}')
            raise

        ellipses_count = max(ellipses_count, len(ellipses))
        utils.put_frame_pos(frame, pos)
        put_video_duration(frame, video.frame_pos_msec())
        put_ellipses_count(frame, ellipses_count)
        out.write(frame)
        if pos % 100 == 0:
            secs_per_frame = round((time.time() - t0) / pos, 2)
            print(f'Frames processed: {pos}. Secs per frame: {secs_per_frame}')
        if max_frames > 0 and pos > max_frames:
            break

    print('Done!', time.time() - t0)

    video.release()
    out.release()
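
One caveat with the writer setup above: cv2.VideoWriter reports failure only through isOpened(), and writing frames whose size disagrees with the size passed to the constructor silently produces an unusable file. A defensive sketch (open_writer is a hypothetical helper, not part of the original code):

import cv2

def open_writer(path, fps, resolution):
    # resolution must be (width, height) and must match every frame
    # later passed to write(); a mismatch fails silently.
    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    out = cv2.VideoWriter(path, fourcc, fps, resolution)
    if not out.isOpened():
        raise IOError(f'cannot open VideoWriter for {path}')
    return out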
Example #6
def main():
    video = get_video()
    prev = cv2.cvtColor(video.read(), cv2.COLOR_BGR2GRAY)
    for current in video.frames():
        current = cv2.cvtColor(current, cv2.COLOR_BGR2GRAY)
        diff = cv2.absdiff(current, prev)
        cv2.imshow('diff', diff)

        thresh_binary_val, thresh_binary = cv2.threshold(diff, 30, 255, cv2.THRESH_BINARY)
        cv2.imshow('thresh_binary', thresh_binary)
        thresh_otsu_val, thresh_otsu = cv2.threshold(diff, 30, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
        cv2.imshow('thresh_otsu', thresh_otsu)
        print('diff', diff.min(), diff.max(), 'thresh_binary', thresh_binary.min(), thresh_binary.max(),
              thresh_binary_val, 'thresh_otsu', thresh_otsu.min(), thresh_otsu.max(), thresh_otsu_val)

        prev = current.copy()

        utils.put_frame_pos(current, video.frame_pos())
        cv2.imshow('current', current)

        if cv2.waitKey() == 27:
            break
    video.release()
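
A detail worth knowing about the thresholding above: when cv2.THRESH_OTSU is set, the threshold argument (30 here) is ignored and OpenCV derives one from the image histogram, returning it as the first element of the result — which is why the code prints thresh_otsu_val next to the fixed thresh_binary_val. A self-contained check:

import cv2
import numpy as np

img = np.random.randint(0, 256, (64, 64), dtype=np.uint8)
otsu_val, _ = cv2.threshold(img, 30, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
print(otsu_val)  # Otsu's computed value, generally not 30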
Example #7
def main():
    video = VideoCapture(video_sources.video_6)
    work_area = WorkAreaView(video_sources.video_6_work_area_markers)

    vc = VideoController(10, 'pause')
    (video_wnd, bin_diff_wnd, gray_diff_wnd, frame0_diff_wnd,
     learned_BG_wnd) = Wnd.create('video', 'binary diff', 'gray diff',
                                  'diff with frame0', 'Learned BG')

    frames_iter = work_area.skip_non_area(video.frames())

    motion_detector = MotionDetector(next(frames_iter)[0], 3)
    background = BackgroundModel(motion_detector, 15)

    for frame, _ in frames_iter:
        motion_detector.detect(frame)
        if not background.done:
            background.learn()
        else:
            if motion_detector.motion_ended():
                frame0_diff = cv2.absdiff(background.learned, frame)
                gray_of_color_diff = Helper.to_gray(frame0_diff)

                frame0_diff_wnd.imshow(
                    resize(
                        np.hstack(
                            (frame0_diff, Helper.to_bgr(gray_of_color_diff))),
                        .5))

                _, binary = cv2.threshold(gray_of_color_diff, 35, 255,
                                          cv2.THRESH_BINARY)
                cv2.imshow('1 binary', resize(binary, .5))

                # VIS
        if background.done:
            learned_BG_wnd.imshow(resize(background.learned, 1))

        vis_img = motion_detector.put_current_state(frame.copy())
        vis_img = utils.put_frame_pos(vis_img, video.frame_pos(), xy=(2, 55))
        video_wnd.imshow(vis_img)
        # bin_diff_wnd.imshow(resize(motion_detector.bin_diff, .5))
        # gray_diff_wnd.imshow(resize(motion_detector.gray_diff, .5))
        # VIS END

        if vc.wait_key() == 27: break

    video.release()
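
Helper.to_gray and Helper.to_bgr are not defined on this page; from their usage they read as thin cv2.cvtColor wrappers. A sketch under that assumption:

import cv2

class Helper:
    @staticmethod
    def to_gray(image):
        return cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

    @staticmethod
    def to_bgr(gray):
        # Back to 3 channels so the result can be stacked next to a
        # color image, as done with np.hstack in the example above.
        return cv2.cvtColor(gray, cv2.COLOR_GRAY2BGR)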
Example #8
def main():
    video = VideoCapture(video_sources.video_6)

    diff_wnd = CvNamedWindow('diff')
    mask_wnd = CvNamedWindow('mask')
    input_wnd = CvNamedWindow('input')

    # prev_frame = denoise(video.read())
    prev_frame = cv2.cvtColor(denoise(video.read()), cv2.COLOR_BGR2GRAY)

    vm = VideoController(10)
    diff = None
    for frame in video.frames():
        # with utils.timeit_context('frame processing'):
        #     frame = denoise(frame)
        #     diff = cv2.absdiff(prev_frame, frame, dst=diff)
        #     ret, mask = cv2.threshold(diff, 45, 255, cv2.THRESH_BINARY)
        #     binary_mask = cv2.cvtColor(mask, cv2.COLOR_BGR2GRAY)
        #     # cn = cv2.countNonZero(binary_mask)
        #     nonzero = cv2.findNonZero(binary_mask)

        with utils.timeit_context('frame processing'):
            frame = denoise(frame)
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            diff = cv2.absdiff(prev_frame, gray, dst=diff)
            ret, mask = cv2.threshold(diff, 45, 255, cv2.THRESH_BINARY)
            # cn = cv2.countNonZero(binary_mask)
            nonzero = cv2.findNonZero(mask)

        diff_wnd.imshow(diff)
        mask_wnd.imshow(mask)
        input_wnd.imshow(utils.put_frame_pos(frame.copy(), video.frame_pos()))

        prev_frame = gray

        if not vm.wait_key():
            break

    video.release()
    cv2.destroyAllWindows()
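
denoise is another helper missing from this page. Since it runs once per frame inside a timed context, a cheap smoothing filter is a reasonable guess; a sketch assuming a median blur (the kernel size is arbitrary):

import cv2

def denoise(frame, ksize=5):
    # Hypothetical stand-in: median filtering suppresses sensor noise
    # before frame differencing, at modest per-frame cost.
    return cv2.medianBlur(frame, ksize)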