Example #1
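(All snippets below are excerpts from a larger project: they assume cv2, numpy as np, and project helpers such as Wnd/CvNamedWindow, VideoCapture, VideoController, WorkAreaView, video_sources and utils are imported at module level.)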
def main():
    video = VideoCapture(video_sources.video_6)
    work_area = WorkAreaView(video_sources.video_6_work_area_markers)

    vc = VideoController(10, 'pause')
    video_wnd, = Wnd.create('video')
    # h_wnd, s_wnd, v_wnd = Wnd.create('H', 'S', 'V')
    # L_wnd, a_wnd, b_wnd = Wnd.create('L', 'a', 'b')

    frames_iter = work_area.skip_non_area(video.frames())
    for frame, _ in frames_iter:
        # hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        # h_wnd.imshow(hsv[:, :, 0])
        # s_wnd.imshow(hsv[:, :, 1])
        # v_wnd.imshow(hsv[:, :, 2])

        # lab = cv2.cvtColor(frame, cv2.COLOR_BGR2Lab)
        # L_wnd.imshow(lab[:, :, 0])
        # a_wnd.imshow(lab[:, :, 1])
        # b_wnd.imshow(lab[:, :, 2])

        vis_img = utils.put_frame_pos(frame, video.frame_pos(), xy=(2, 55))
        video_wnd.imshow(vis_img)

        if vc.wait_key() == 27: break

    video.release()
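
The commented-out blocks above sketch per-channel inspection in HSV and Lab. A minimal self-contained version of the same idea, using plain cv2 windows instead of the project's Wnd helper (the synthetic frame is just a stand-in):

import cv2
import numpy as np

# Synthetic stand-in frame: a green rectangle on a gray background.
frame = np.full((240, 320, 3), 128, np.uint8)
cv2.rectangle(frame, (100, 80), (220, 160), (0, 200, 0), -1)

hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
lab = cv2.cvtColor(frame, cv2.COLOR_BGR2Lab)

# One window per channel, as in the commented-out code.
for name, channel in zip('HSV', cv2.split(hsv)):
    cv2.imshow(name, channel)
for name, channel in zip(('L', 'a', 'b'), cv2.split(lab)):
    cv2.imshow(name, channel)
cv2.waitKey()
cv2.destroyAllWindows()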
Example #2
def main():
    video = VideoCapture(video_sources.video_2)
    workArea = WorkAreaView(video_sources.video_2_work_area_markers)

    vc = VideoController(10, 'pause')
    (video_wnd, bin_diff_wnd, gray_diff_wnd, colorDiffWnd,
     learned_BG_wnd) = Wnd.create('video', 'binary diff', 'gray diff',
                                  'color diff', 'Learned BG')
    colorAbsDiffWnd = Wnd('color_abs_diff')
    segmentedWnd = Wnd('segmented')

    segmenter = Segmenter()

    frames_iter = workArea.skip_non_area(video.frames())

    motionDetector = MotionDetector(next(frames_iter)[0], 3)
    backgroundModel = BackgroundModel(15)
    prevBackground = None
    for frame, _ in frames_iter:
        motionDetector.detect(frame)

        if motionDetector.motionEnded():
            # calc fgMask: diff the current frame against the background
            # learned before this motion started
            mask, gray_diff, color_diff, colorAbsDiff = calcForegroundMask(
                prevBackground, frame)
            # bin_diff_wnd.imshow(resize(mask, 0.5))
            bin_diff_wnd.imshow(cv2.cvtColor(mask, cv2.COLOR_GRAY2BGR))

            # gray_diff_wnd.imshow(resize(gray_diff, .5))
            # colorDiffWnd.imshow(resize(color_diff, .5))
            # colorAbsDiffWnd.imshow(resize(colorAbsDiff, .5))
            markers, objectsCount = segmenter.segment(mask)
            segmentedWnd.imshow(
                resize(Segmenter.markersToDisplayImage(markers, objectsCount),
                       .5))

            # start learning a fresh background now that this motion has ended
            backgroundModel = BackgroundModel(15)

        if motionDetector.isSilence():
            backgroundModel.learn(frame, foregroundMask=None)
            learned_BG_wnd.imshow(resize(backgroundModel.learned, .5))

        if motionDetector.motionStarted():
            # freeze the background learned during the preceding silence
            prevBackground = backgroundModel.learned
            backgroundModel = None
            learned_BG_wnd.imshow(resize(prevBackground, .5))

        # VIS

        vis_img = motionDetector.indicateCurrentState(frame.copy())
        vis_img = utils.put_frame_pos(vis_img, video.frame_pos(), xy=(2, 55))
        video_wnd.imshow(vis_img)
        # bin_diff_wnd.imshow(resize(motionDetector.bin_diff, .5))
        # gray_diff_wnd.imshow(resize(motionDetector.gray_diff, .5))
        # VIS END

        if vc.wait_key() == 27: break

    video.release()
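
calcForegroundMask() is not shown in this example. Judging by its return values (mask, gray_diff, color_diff, colorAbsDiff), it is a background-subtraction step; a plausible minimal sketch, with the threshold value of 35 borrowed from Example #5, might be:

import cv2

def calcForegroundMask(background, frame, thresh=35):
    # Hypothetical reconstruction -- the real project helper may differ.
    color_diff = cv2.subtract(background, frame)   # signed (saturating) difference
    colorAbsDiff = cv2.absdiff(background, frame)  # per-channel absolute difference
    gray_diff = cv2.cvtColor(colorAbsDiff, cv2.COLOR_BGR2GRAY)
    _, mask = cv2.threshold(gray_diff, thresh, 255, cv2.THRESH_BINARY)
    return mask, gray_diff, color_diff, colorAbsDiff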
Example #3
def main():
    resultWnd, markers1Wnd, markers2Wnd = Wnd.create(
        result=cv2.WINDOW_NORMAL,
        markers1=cv2.WINDOW_NORMAL,
        markers2=cv2.WINDOW_NORMAL)

    # img = cv2.imread('water_coins.jpg')
    # gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # ret, thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)

    thresh = realImage()
    # thresh = testImage()

    # noise removal
    kernel = np.ones((3, 3), np.uint8)
    opening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel, iterations=2)

    # sure background area
    sure_bg = cv2.dilate(opening, kernel, iterations=3)

    # Finding sure foreground area
    dist_transform = cv2.distanceTransform(opening, cv2.DIST_L2, 5)
    # dist_transform = cv2.distanceTransform(opening, cv2.DIST_L1, 3)
    ret, sure_fg = cv2.threshold(dist_transform, 0.7 * dist_transform.max(),
                                 255, 0)
    # Finding unknown region
    sure_fg = np.uint8(sure_fg)
    unknown = cv2.subtract(sure_bg, sure_fg)

    # Marker labelling
    ret, markers = cv2.connectedComponents(sure_fg)
    # Add one to all labels so that sure background is not 0, but 1
    markers = markers + 1
    # Now, mark the region of unknown with zero
    markers[unknown == 255] = 0

    markers1Wnd.imshow(markers / markers.max())

    # markers = cv2.watershed(img, markers)
    img = cv2.cvtColor(opening, cv2.COLOR_GRAY2BGR)
    markers = cv2.watershed(img, markers)

    markers2Wnd.imshow(markers / markers.max())

    # Labels after watershed, e.g. [-1  1  2  3  4  5]:
    # -1 marks the watershed boundaries, 1 the sure background, 2+ the objects.
    img[markers == 3] = [0, 255, 0]  # highlight one segmented object in green
    img[markers == -1] = [255, 0, 0]  # boundaries in blue (BGR)

    cv2.imshow('thr', thresh)
    cv2.imshow('op', opening)
    cv2.imshow('sure_bg', sure_bg)
    cv2.imshow('sure_fg', sure_fg)
    resultWnd.imshow(img)
    cv2.waitKey()
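
realImage() and testImage() are project helpers that are not shown. Any binary image with touching blobs exercises the watershed steps above; a hypothetical testImage() could be as simple as:

import cv2
import numpy as np

def testImage():
    # Two overlapping filled circles -- the classic touching-objects
    # case that plain thresholding cannot separate but watershed can.
    img = np.zeros((200, 300), np.uint8)
    cv2.circle(img, (120, 100), 50, 255, -1)
    cv2.circle(img, (180, 100), 50, 255, -1)
    return img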
Example #4
def main():
    img = testImage()
    origWnd, dstWnd, centersWnd = CvNamedWindow.create('orig', 'dstTransform', 'centers')
    dst = cv2.distanceTransform(img, cv2.DIST_L2, cv2.DIST_MASK_5)

    dx = cv2.Sobel(dst, ddepth=cv2.CV_32F, dx=1, dy=0, ksize=5)
    dy = cv2.Sobel(dst, ddepth=cv2.CV_32F, dx=0, dy=1, ksize=5)
    grad = cv2.addWeighted(cv2.convertScaleAbs(dx), 1, cv2.convertScaleAbs(dy), 1, 0)
    grad = cv2.convertScaleAbs(grad)

    origWnd.imshow(img)
    centersWnd.imshow(grad)
    condition = (grad == 0) & (dst > 0)
    print(np.where(condition))

    cvWaitKeys()
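
The condition (grad == 0) & (dst > 0) keeps foreground pixels where the gradient of the distance transform vanishes, i.e. ridge points that approximate blob centers. testImage() and cvWaitKeys() are project helpers that are not shown (as is CvNamedWindow); plausible stand-ins for the first two, assuming cvWaitKeys() simply blocks for a key press:

import cv2
import numpy as np

def testImage():
    # Hypothetical stand-in: two separate filled blobs.
    img = np.zeros((200, 300), np.uint8)
    cv2.circle(img, (80, 100), 50, 255, -1)
    cv2.circle(img, (210, 100), 40, 255, -1)
    return img

def cvWaitKeys():
    cv2.waitKey()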
Example #5
def main():
    video = VideoCapture(video_sources.video_6)
    work_area = WorkAreaView(video_sources.video_6_work_area_markers)

    vc = VideoController(10, 'pause')
    (video_wnd, bin_diff_wnd, gray_diff_wnd, frame0_diff_wnd,
     learned_BG_wnd) = Wnd.create('video', 'binary diff', 'gray diff',
                                  'diff with frame0', 'Learned BG')

    frames_iter = work_area.skip_non_area(video.frames())

    motion_detector = MotionDetector(next(frames_iter)[0], 3)
    background = BackgroundModel(motion_detector, 15)

    for frame, _ in frames_iter:
        motion_detector.detect(frame)
        if not background.done:
            background.learn()
        else:
            if motion_detector.motion_ended():
                frame0_diff = cv2.absdiff(background.learned, frame)
                gray_of_color_diff = Helper.to_gray(frame0_diff)

                frame0_diff_wnd.imshow(
                    resize(
                        np.hstack(
                            (frame0_diff, Helper.to_bgr(gray_of_color_diff))),
                        .5))

                _, binary = cv2.threshold(gray_of_color_diff, 35, 255,
                                          cv2.THRESH_BINARY)
                cv2.imshow('1 binary', resize(binary, .5))

        # VIS
        if background.done:
            learned_BG_wnd.imshow(resize(background.learned, 1))

        vis_img = motion_detector.put_current_state(frame.copy())
        vis_img = utils.put_frame_pos(vis_img, video.frame_pos(), xy=(2, 55))
        video_wnd.imshow(vis_img)
        # bin_diff_wnd.imshow(resize(motion_detector.bin_diff, .5))
        # gray_diff_wnd.imshow(resize(motion_detector.gray_diff, .5))
        # VIS END

        if vc.wait_key() == 27: break

    video.release()
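
Helper.to_gray() and Helper.to_bgr() are project utilities that are not shown; from the way they are used above, they are presumably thin cv2.cvtColor wrappers:

import cv2

class Helper:
    # Assumed implementations -- the real project class may do more.
    @staticmethod
    def to_gray(bgr_img):
        return cv2.cvtColor(bgr_img, cv2.COLOR_BGR2GRAY)

    @staticmethod
    def to_bgr(gray_img):
        return cv2.cvtColor(gray_img, cv2.COLOR_GRAY2BGR)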
Example #6
def main():
    origWnd, dstWnd, centersWnd = CvNamedWindow.create('orig', {'dstTransform': cv2.WINDOW_NORMAL}, 'centers')

    img = testImage()
    # img = realImage()
    img = fillHoles(img)

    centers = img.copy()
    indexes, (distTransform, dx, dy) = findCentersIndexesSobel(img)
    centers[indexes] = 127

    origWnd.imshow(img)
    centersWnd.imshow(centers)
    dstWnd.imshow(visDistTransformResult(distTransform))

    def mouse_callback(evt, x, y, flags, _):
        if evt == cv2.EVENT_LBUTTONDOWN:
            print(x, y, distTransform[y, x], dx[y, x], dy[y, x])

    dstWnd.mouse_callback = mouse_callback

    cvWaitKeys()
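
fillHoles(), visDistTransformResult() and findCentersIndexesSobel() are project helpers that are not shown. The last one is presumably the Example #4 logic packaged as a function; a sketch consistent with how its return values are used above (the real code may differ):

import cv2
import numpy as np

def findCentersIndexesSobel(img):
    # Distance transform of the binary image, then its Sobel gradient;
    # foreground pixels with zero gradient approximate the blob centers.
    distTransform = cv2.distanceTransform(img, cv2.DIST_L2, cv2.DIST_MASK_5)
    dx = cv2.Sobel(distTransform, ddepth=cv2.CV_32F, dx=1, dy=0, ksize=5)
    dy = cv2.Sobel(distTransform, ddepth=cv2.CV_32F, dx=0, dy=1, ksize=5)
    grad = cv2.addWeighted(cv2.convertScaleAbs(dx), 1,
                           cv2.convertScaleAbs(dy), 1, 0)
    indexes = np.where((grad == 0) & (distTransform > 0))
    return indexes, (distTransform, dx, dy)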