Example #1
def main_6_video():
    video, calibration_image = get_capture_and_calibration_image_video6()
    calibrator = Calibrator.calibrate(calibration_image, 2)
    visualize_calibration(calibrator, calibration_image)
    if not calibrator.calibrated:
        print('System was not calibrated.')
        return

    detector = Detector(calibrator)
    wnd = CvNamedWindow('detection', cv2.WINDOW_NORMAL)

    video.set_pos(961)
    ellipses_count = 0
    for frame in video.frames():
        pos = video.frame_pos()
        print(pos)
        t, ellipses = detect_and_draw(detector, frame)
        ellipses_count = max(ellipses_count, len(ellipses))
        utils.put_frame_pos(frame, pos)
        put_video_duration(frame, video.frame_pos_msec())
        put_ellipses_count(frame, ellipses_count)
        wnd.imshow(frame)
        cv2.waitKey()
    cv2.waitKey()

    video.release()
    cv2.destroyAllWindows()
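CvNamedWindow (also aliased as Wnd in later examples) is a project helper, not part of OpenCV; a minimal sketch of the subset these examples rely on, assuming it wraps cv2.namedWindow / cv2.imshow (the Wnd.create factory and the mouse_callback property are omitted):

class CvNamedWindow:
    # assumed wrapper; names and defaults are guesses based on usage above
    def __init__(self, winname='window', flags=cv2.WINDOW_AUTOSIZE,
                 mouse_callback=None):
        self.winname = winname
        cv2.namedWindow(winname, flags)
        if mouse_callback is not None:
            cv2.setMouseCallback(winname, mouse_callback)

    def imshow(self, img):
        cv2.imshow(self.winname, img)

    def destroy(self):
        cv2.destroyWindow(self.winname)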
Example #2
def main():
    # img = testImage()
    img = realImage()
    n = 1
    sz = n * 2 + 1  # 3, 5 ...
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (sz, sz))
    iterations = 1
    mode = None  # one of: None, 'erode', 'dilate'
    wnd = CvNamedWindow()

    def images():
        while True:
            if mode == 'dilate':
                cv2.dilate(img, kernel, dst=img, iterations=iterations)
            elif mode == 'erode':
                cv2.erode(img, kernel, dst=img, iterations=iterations)

            yield img

    for im in images():
        wnd.imshow(im)
        key = cvWaitKeys(27, '1', '2')
        if key == 27:
            break
        elif key == ord('1'):
            mode = 'erode'
        elif key == ord('2'):
            mode = 'dilate'
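cvWaitKeys is another undefined helper; a plausible sketch, assuming it blocks until one of the given keys (int codes or single characters) is pressed and returns the pressed code (any key when called without arguments):

def cvWaitKeys(*keys):
    # assumed helper: normalize characters to key codes, then wait for a match
    codes = {k if isinstance(k, int) else ord(k) for k in keys}
    key = cv2.waitKey()
    while codes and key not in codes:
        key = cv2.waitKey()
    return key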
Example #3
def main():
    video = VideoCapture(video_sources.video_2)
    workArea = WorkAreaView(video_sources.video_2_work_area_markers)

    vc = VideoController(10, 'pause')
    (video_wnd, bin_diff_wnd, gray_diff_wnd, colorDiffWnd,
     learned_BG_wnd) = Wnd.create('video', 'binary diff', 'gray diff',
                                  'color diff', 'Learned BG')
    colorAbsDiffWnd = Wnd('color_abs_diff')
    segmentedWnd = Wnd('segmented')

    segmenter = Segmenter()

    frames_iter = workArea.skip_non_area(video.frames())

    motionDetector = MotionDetector(next(frames_iter)[0], 3)
    backgroundModel = BackgroundModel(15)
    prevBackground = None
    for frame, _ in frames_iter:
        motionDetector.detect(frame)

        if motionDetector.motionEnded():
            # calc fgMask
            mask, gray_diff, color_diff, colorAbsDiff = calcForegroundMask(
                prevBackground, frame)
            # bin_diff_wnd.imshow(resize(mask, 0.5))
            bin_diff_wnd.imshow(cv2.cvtColor(mask, cv2.COLOR_GRAY2BGR))

            # gray_diff_wnd.imshow(resize(gray_diff, .5))
            # colorDiffWnd.imshow(resize(color_diff, .5))
            # colorAbsDiffWnd.imshow(resize(colorAbsDiff, .5))
            markers, objectsCount = segmenter.segment(mask)
            segmentedWnd.imshow(
                resize(Segmenter.markersToDisplayImage(markers, objectsCount),
                       .5))

            backgroundModel = BackgroundModel(15)

        if motionDetector.isSilence():
            backgroundModel.learn(frame, foregroundMask=None)
            learned_BG_wnd.imshow(resize(backgroundModel.learned, .5))

        if motionDetector.motionStarted():
            prevBackground = backgroundModel.learned
            backgroundModel = None
            learned_BG_wnd.imshow(resize(prevBackground, .5))

        # VIS

        vis_img = motionDetector.indicateCurrentState(frame.copy())
        vis_img = utils.put_frame_pos(vis_img, video.frame_pos(), xy=(2, 55))
        video_wnd.imshow(vis_img)
        # bin_diff_wnd.imshow(resize(motionDetector.bin_diff, .5))
        # gray_diff_wnd.imshow(resize(motionDetector.gray_diff, .5))
        # VIS END

        if vc.wait_key() == 27: break

    video.release()
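resize here is a project helper rather than cv2.resize itself; judging by the call sites, it scales by a single factor, roughly:

def resize(img, factor):
    # assumed helper: uniform scale by one factor in both dimensions
    return cv2.resize(img, None, fx=factor, fy=factor)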
Example #4
    def __init__(self, detector, image, region_selection=None):
        self.detector = detector
        self.base_image = image
        self.buffer_image = np.empty_like(self.base_image)
        self.region_selection = None
        self.set_region_selection(region_selection)
        # separate lists; a chained assignment would alias one shared list
        self.ellipses = []
        self.polygons = []
        self.selected_ellipses = []
        self.selected_polygons = []
        self.wnd = CvNamedWindow('test', mouse_callback=self.__mc)
Example #5
def main():
    img = np.zeros((300, 300, 3), np.uint8)
    cv2.circle(img, (150, 150), 75, (0, 180, 0), -1)
    wnd = CvNamedWindow('select ROI')
    while True:
        wnd.imshow(img)
        key = cvWaitKeys(27, ord('r'), ord('s'))
        if key in (27, -1):
            break
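The 'r' and 's' keys are accepted but not handled above; if 'r' is meant to open a ROI selector (as the window name suggests), a drop-in branch for the loop could look like this (an assumption, not the original code):

        elif key == ord('r'):
            # hypothetical handler: OpenCV's built-in interactive selector
            x, y, w, h = cv2.selectROI('select ROI', img)
            if w > 0 and h > 0:
                print('selected ROI:', (x, y, w, h))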
Example #6
def main():
    from skimage.feature import peak_local_max
    from skimage.morphology import watershed
    import scipy.ndimage as ndi

    img = realImage()
    # img = testImage()
    img = fillHoles(img)

    thresh = img.copy()

    with utils.timeit_context():
        dst = ndi.distance_transform_edt(img)
        localMax = peak_local_max(dst, indices=False, min_distance=1, labels=thresh)
        markers = ndi.label(localMax)[0]
        labels = watershed(-dst, markers, mask=thresh)

    segmImg = (labels * (255 / labels.max())).astype(np.uint8)

    wnd = CvNamedWindow(flags=cv2.WINDOW_NORMAL)
    segmWnd = CvNamedWindow('segm', flags=cv2.WINDOW_NORMAL)

    wnd.imshow(img)
    segmWnd.imshow(segmImg)

    cvWaitKeys()
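This example targets an older scikit-image: watershed has since moved to skimage.segmentation, and peak_local_max no longer accepts indices=False (it now returns peak coordinates). A rough equivalent for current releases:

from skimage.segmentation import watershed
from skimage.feature import peak_local_max
import scipy.ndimage as ndi
import numpy as np

dst = ndi.distance_transform_edt(img)
coords = peak_local_max(dst, min_distance=1, labels=thresh)
localMax = np.zeros(dst.shape, dtype=bool)
localMax[tuple(coords.T)] = True
markers = ndi.label(localMax)[0]
labels = watershed(-dst, markers, mask=thresh)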
Example #7
def main():
    def roiSelectedEvent(roi, imageRoi, wnd, image):
        if roi is None:
            return
        predictions = predict_on_image(model, None, imageRoi, 0.95)
        cv2.imshow('roi', predictions)

    model = getModel()
    video = VideoCapture(video_sources.video_2)
    vc = VideoController(10, 'pause')
    wnd = Wnd('video', roiSelectedEvent=roiSelectedEvent)
    for frame in video.frames():
        cv2.destroyWindow('roi')
        wnd.imshow(frame)
        if vc.wait_key() == 27: break
Example #8
def main_2():
    video, calibration_image = get_capture_and_calibration_image_video2()
    calibrator = Calibrator.calibrate(calibration_image, 2)
    # visualize_calibration(calibrator, calibration_image)
    if not calibrator.calibrated:
        print('System was not calibrated.')
        return

    detector = Detector(calibrator)

    wnd = CvNamedWindow('detection', cv2.WINDOW_NORMAL)

    # detect_and_show('detection', detector, video.read_at_pos(442), video.frame_pos() - 1)
    # detect_and_show('detection', detector, video.read_at_pos(464), video.frame_pos() - 1)
    # detect_and_show('detection', detector, video.read_at_pos(471), video.frame_pos() - 1)
    # detect_and_show('detection', detector, video.read_at_pos(833), video.frame_pos() - 1)

    # detect_and_show('detection', detector, video.read_at_pos(497), video.frame_pos() - 1)
    # detect_and_show('detection', detector, video.read_at_pos(320), video.frame_pos() - 1)
    detect_and_show(wnd, detector, video.read_at_pos(820), video.frame_pos() - 1)

    detect_and_show(wnd, detector, video.read_at_pos(286), video.frame_pos() - 1)

    # detect_and_show('detection', detector, video.read_at_pos(1511), video.frame_pos() - 1)
    # detect_and_show('detection', detector, video.read_at_pos(1601), video.frame_pos() - 1)
    # detect_and_show('detection', detector, video.read_at_pos(1602), video.frame_pos() - 1)
    # detect_and_show('detection', detector, video.read_at_pos(1603), video.frame_pos() - 1)
    # detect_and_show('detection', detector, video.read_at_pos(1604), video.frame_pos() - 1)
    # 833

    video.release()
    cv2.destroyAllWindows()
Example #9
def main():
    video = VideoCapture(video_sources.video_6)
    work_area = WorkAreaView(video_sources.video_6_work_area_markers)

    vc = VideoController(10, 'pause')
    video_wnd, = Wnd.create('video')
    # h_wnd, s_wnd, v_wnd = Wnd.create('H', 'S', 'V')
    # L_wnd, a_wnd, b_wnd = Wnd.create('L', 'a', 'b')

    frames_iter = work_area.skip_non_area(video.frames())
    for frame, _ in frames_iter:
        # hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        # h_wnd.imshow(hsv[:, :, 0])
        # s_wnd.imshow(hsv[:, :, 1])
        # v_wnd.imshow(hsv[:, :, 2])

        # lab = cv2.cvtColor(frame, cv2.COLOR_BGR2Lab)
        # L_wnd.imshow(lab[:, :, 0])
        # a_wnd.imshow(lab[:, :, 1])
        # b_wnd.imshow(lab[:, :, 2])

        vis_img = utils.put_frame_pos(frame, video.frame_pos(), xy=(2, 55))
        video_wnd.imshow(vis_img)

        if vc.wait_key() == 27: break

    video.release()
Example #10
    def show_goodFeaturesToTrack(img):
        img_gft = img.copy()
        corners = cv2.goodFeaturesToTrack(img_gft,
                                          maxCorners=30,
                                          qualityLevel=0.01,
                                          minDistance=5)
        # goodFeaturesToTrack returns None when no corner passes qualityLevel
        if corners is None:
            return
        for ((x, y),) in corners.astype(np.int32):
            img_gft[y - 1:y + 1, x - 1:x + 1] = 127
        CvNamedWindow('gft', flags=cv2.WINDOW_NORMAL).imshow(img_gft)
Example #11
def main():
    feature_params = dict(maxCorners=100, qualityLevel=0.3, minDistance=7, blockSize=7)
    lk_params = dict(winSize=(15, 15), maxLevel=2, criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))

    video = VideoCapture(video_sources.video_2)
    wnd = CvNamedWindow('video')
    vc = VideoController(delay=50)

    prev_gray = None
    p0 = None
    tracking = False

    for frame in video.frames():
        if tracking:
            frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            # calculate optical flow
            p1, st, err = cv2.calcOpticalFlowPyrLK(prev_gray, frame_gray, p0, None, **lk_params)
            p1 = p1[st == 1]
            # draw the tracks
            for pt in p1:
                a, b = pt.ravel()
                frame = cv2.circle(frame, (int(a), int(b)), 5, (0, 255, 0), -1)
            prev_gray = frame_gray
            p0 = p1.reshape(-1, 1, 2)

        wnd.imshow(frame)

        key = vc.wait_key()
        if key == 27:
            break
        elif not tracking and key == ord('r'):  # init tracking
            roi = cv2.selectROI('roi', frame)
            cv2.destroyWindow('roi')

            if roi is None or sum(roi) == 0:
                continue

            prev_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            p0 = cv2.goodFeaturesToTrack(prev_gray, mask=roi_mask(prev_gray, roi), **feature_params)
            if p0 is not None and len(p0) > 0:
                tracking = True

    video.release()
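roi_mask is not shown; it presumably restricts goodFeaturesToTrack to the selected rectangle. A minimal sketch under that assumption:

def roi_mask(gray, roi):
    # assumed helper: 8-bit mask, non-zero only inside roi = (x, y, w, h)
    x, y, w, h = roi
    mask = np.zeros_like(gray)
    mask[y:y + h, x:x + w] = 255
    return mask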
Example #12
def main():
    resultWnd, markers1Wnd, markers2Wnd = Wnd.create(
        result=cv2.WINDOW_NORMAL,
        markers1=cv2.WINDOW_NORMAL,
        markers2=cv2.WINDOW_NORMAL)

    # img = cv2.imread('water_coins.jpg')
    # gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # ret, thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)

    thresh = realImage()
    # thresh = testImage()

    # noise removal
    kernel = np.ones((3, 3), np.uint8)
    opening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel, iterations=2)

    # sure background area
    sure_bg = cv2.dilate(opening, kernel, iterations=3)

    # Finding sure foreground area
    dist_transform = cv2.distanceTransform(opening, cv2.DIST_L2, 5)
    # dist_transform = cv2.distanceTransform(opening, cv2.DIST_L1, 3)
    ret, sure_fg = cv2.threshold(dist_transform, 0.7 * dist_transform.max(),
                                 255, 0)
    # Finding unknown region
    sure_fg = np.uint8(sure_fg)
    unknown = cv2.subtract(sure_bg, sure_fg)

    # Marker labelling
    ret, markers = cv2.connectedComponents(sure_fg)
    # Add one to all labels so that sure background is not 0, but 1
    markers = markers + 1
    # Now, mark the region of unknown with zero
    markers[unknown == 255] = 0

    markers1Wnd.imshow(markers / markers.max())

    # markers = cv2.watershed(img, markers)
    img = cv2.cvtColor(opening, cv2.COLOR_GRAY2BGR)
    markers = cv2.watershed(img, markers)

    markers2Wnd.imshow(markers / markers.max())

    # marker values present after watershed: [-1  1  2  3  4  5]
    img[markers == 3] = [0, 255, 0]
    img[markers == -1] = [255, 0, 0]

    cv2.imshow('thr', thresh)
    cv2.imshow('op', opening)
    cv2.imshow('sure_bg', sure_bg)
    cv2.imshow('sure_fg', sure_fg)
    resultWnd.imshow(img)
    cv2.waitKey()
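Dividing by markers.max() gives a quick float preview; mapping each label to its own color is usually easier to read. A small helper one could use instead (not part of the original):

def labels_to_bgr(markers):
    # one random color per label; -1 (watershed boundaries) becomes white
    colors = np.random.randint(0, 255, (markers.max() + 2, 3), dtype=np.uint8)
    colors[0] = 255
    return colors[markers + 1]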
Example #13
    def VIS_POLYGONS(img, polygons):
        if img.shape[0] > 300 or img.shape[1] > 300:
            return
        print('VIS_POLYGONS: polygons count:', len(polygons),
              [p.len for p in polygons])
        if len(polygons) == 0:
            return

        tails_colors = ((0, 127, 0), (0, 255, 0))
        points_color = (0, 0, 0)
        separator = DEBUG.separator(img)
        images = []

        im = DEBUG.__draw_polygons(img.copy(), polygons, (0, 255, 0), 1,
                                   tails_colors, points_color)
        images.extend([im, separator])

        if len(polygons) > 1:
            for p in polygons:
                im = DEBUG.__draw_polygons(img.copy(), [p], (0, 255, 0), 1,
                                           tails_colors, points_color)
                images.extend([im, separator])

        wnd = CvNamedWindow('polygons', cv2.WINDOW_NORMAL)
        wnd.imshow(np.hstack(images))
        cv2.waitKey()
        wnd.destroy()
Example #14
    def walk_points_sequence(points, is_closed=False, point_action=None):
        point_action = point_action or (lambda i, pt: print(f'{i}, {pt}'))

        max_x = points[..., 0].max() + 20
        max_y = points[..., 1].max() + 20

        img = np.zeros((max_y, max_x), dtype=np.uint8)
        cv2.polylines(img, [points], is_closed, 255)

        # show_goodFeaturesToTrack(img)

        cv2.circle(img, tuple(points[0, 0]), 3, 255, 1)
        cv2.circle(img, tuple(points[-1, 0]), 3, 127, -1)

        wnd = CvNamedWindow(flags=cv2.WINDOW_NORMAL)
        wnd.imshow(img)
        if cv2.waitKey() == 27:
            return

        for i, pt in enumerate(points):
            point_action(i, pt)
            im = img.copy()
            cv2.circle(im, tuple(pt[0]), 2, 255, 1)
            wnd.imshow(im)
            if cv2.waitKey() == 27:
                return
Example #15
def visualize(contours, image, winname):
    draw_contours(contours, image, (0, 255, 0), thickness=2, draw_measurements=False)

    wnd = CvNamedWindow(winname, cv2.WINDOW_NORMAL)
    wnd.imshow(image)
    cv2.waitKey()
    wnd.destroy()
Example #16
def main():
    wnd = CvNamedWindow('ORB', flags=cv2.WINDOW_NORMAL)
    vc = VideoController(delay=-1)
    detector = cv2.ORB_create(nfeatures=1000)
    # frames = rect_frames_sequence((200, 200), (40, 50), (5, 5), 50, step=2)
    frames = ellipse_frames_sequence((200, 200), (65, 75), (40, 50),
                                     23,
                                     steps=30,
                                     step=2)
    frame0 = next(frames)
    keypoints, descrs = detector.detectAndCompute(frame0, None)

    pts0 = np.reshape([key_pt.pt for key_pt in keypoints],
                      (-1, 1, 2)).astype(np.float32)
    wnd.imshow(draw_points(frame0.copy(), pts0))
    vc.wait_key()

    lk_params = dict(winSize=(15, 15),
                     maxLevel=2,
                     criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT,
                               10, 0.03))
    for frame in frames:
        pts1, st, err = cv2.calcOpticalFlowPyrLK(frame0, frame, pts0, None,
                                                 **lk_params)
        pts1 = pts1[st == 1]

        wnd.imshow(draw_points(frame.copy(), pts1))
        pts0 = pts1.reshape(-1, 1, 2)
        frame0 = frame
        if vc.wait_key() == 27:
            break
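pts1[st == 1] keeps every point LK claims to have found; a common extra filter, borrowed from OpenCV's lk_track.py sample (not part of this example), tracks backwards and rejects points whose round trip drifts more than a pixel:

p1, st, _ = cv2.calcOpticalFlowPyrLK(frame0, frame, pts0, None, **lk_params)
p0r, _, _ = cv2.calcOpticalFlowPyrLK(frame, frame0, p1, None, **lk_params)
good = np.abs(pts0 - p0r).reshape(-1, 2).max(axis=1) < 1
pts1 = p1[good].reshape(-1, 1, 2)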
Example #17
    def VIS_EDGES(edges):
        if DEBUG.should_not_vis(edges):
            return
        wnd = CvNamedWindow('DEBUG EDGES')
        wnd.imshow(edges)
        cv2.waitKey()
        wnd.destroy()
Example #18
    def VIS_assemble_ellipses_from_parts(bgr, part, next_part, ellipses):
        if bgr.shape[0] > 300 or bgr.shape[1] > 300:
            return
        green = (0, 255, 0)
        red = (0, 0, 255)
        blue = (255, 0, 0)

        im = bgr.copy()

        cv2.polylines(im, [part.points], False, green, 2)
        x, y = part.first_pt
        im[y - 3:y + 3, x - 3:x + 3] = [0, 255, 0]
        x, y = part.last_pt
        im[y - 3:y + 3, x - 3:x + 3] = [0, 127, 0]

        if next_part:
            cv2.polylines(im, [next_part.points], False, red, 2)
            x, y = next_part.first_pt
            im[y - 3:y + 3, x - 3:x + 3] = [0, 0, 255]
            x, y = next_part.last_pt
            im[y - 3:y + 3, x - 3:x + 3] = [0, 0, 127]

        for poly in ellipses:
            cv2.polylines(im, [poly.points], False, blue, 2)
        wnd = CvNamedWindow('DEBUG')
        wnd.imshow(im)
        cv2.waitKey()
        wnd.destroy()
Example #19
    def VIS_ALL_CONTOURS(img, contours):
        if img.shape[0] > 300 or img.shape[1] > 300:
            return

        print('contours LEN:', len(contours))
        if len(contours) == 0:
            return
        separator = DEBUG.separator(img)
        images = []
        tails_colors = ((0, 127, 0), (0, 255, 0))
        points_color = (0, 0, 0)

        im = DEBUG.__draw_contours(img.copy(), contours, (0, 255, 0), 1,
                                   tails_colors, points_color)
        images.extend([im, separator])

        if len(contours) > 1:
            for c in contours:
                im = DEBUG.__draw_contours(img.copy(), [c], (0, 255, 0), 1,
                                           tails_colors, points_color)
                images.extend([im, separator])
                print(f'VIS_ALL_CONTOURS: contour len {c.len()}')

        wnd = CvNamedWindow('contours', cv2.WINDOW_NORMAL)
        wnd.imshow(np.hstack(images))
        cv2.waitKey()
        wnd.destroy()
Example #20
def main():
    wnd = CvNamedWindow('frames', flags=cv2.WINDOW_NORMAL)
    vc = VideoController(delay=-1)

    # frames = rect_frames_sequence((200, 200), (40, 50), (5, 5), 50, step=2)
    frames = ellipse_frames_sequence((200, 200), (45, 55), (40, 50),
                                     23,
                                     steps=30,
                                     step=2)
    frame0 = next(frames)

    feature_params = dict(maxCorners=100,
                          qualityLevel=0.3,
                          minDistance=7,
                          blockSize=7)
    pts0 = cv2.goodFeaturesToTrack(frame0, mask=None, **feature_params)
    # TODO: add point at center of rect
    # additional_pts = (
    #     [[5 + 40 / 2, 5 + 50 / 2]],
    #     [[4, 4]],
    #     [[3, 3]],
    #     [[5, 5]],
    #     [[2, 2]],
    #     [[0, 0]],
    #     [[-1, -1]],
    #     #-------------
    #     [[47, 57]],
    #     [[49, 59]],
    #     #-------------
    #     [[4, 6]],
    #     [[4, 8]],
    #     [[4, 9]],
    #     [[4, 11]],
    #     [[4, 12]],
    #     [[4, 13]],
    # )
    # pts0 = np.append(pts0, additional_pts, axis=0).astype(np.float32)

    if pts0 is None:
        print('No good features to track')
        return

    wnd.imshow(draw_points(frame0.copy(), pts0))
    vc.wait_key()

    lk_params = dict(winSize=(15, 15),
                     maxLevel=2,
                     criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT,
                               10, 0.03))
    for frame in frames:
        pts1, st, err = cv2.calcOpticalFlowPyrLK(frame0, frame, pts0, None,
                                                 **lk_params)
        pts1 = pts1[st == 1]

        wnd.imshow(draw_points(frame.copy(), pts1))
        pts0 = pts1.reshape(-1, 1, 2)
        frame0 = frame
        if vc.wait_key() == 27:
            break
Example #21
    def show_contour_parts(contour, parts):
        points = contour.points()
        max_x = points[..., 0].max() + 20
        max_y = points[..., 1].max() + 20
        separator = np.full((max_y, 2), 127, dtype=np.uint8)

        images = []
        for poly in parts:
            img = np.zeros((max_y, max_x), dtype=np.uint8)
            poly_points = poly.points
            if len(poly_points.shape) == 3:
                poly_points = poly_points[:, 0]
            cv2.polylines(img, [poly_points], False, 255, 1)
            for (x, y) in poly_points:
                r = 2
                img[y - r:y + r, x - r:x + r] = 200
            images.extend([img, separator])

        if len(images) == 0:
            return
        wnd = CvNamedWindow('parts')
        wnd.imshow(np.hstack(images))
Example #22
def main():
    # img = testImage()
    img = realImage()

    segmenter = Segmenter()

    # iters = 1000
    # images = [img.copy() for _ in range(iters)]
    # with utils.timeit_context():
    #     for i in range(1000):
    #         segmenter.segment2(images[i])
    #
    # images = [img.copy() for _ in range(iters)]
    # with utils.timeit_context():
    #     for i in range(1000):
    #         segmenter.segment(images[i])

    markers = segmenter.segment(img.copy())

    wnd = Wnd('labels')
    wnd.imshow(segmenter.markersToDisplayImage(markers))
    waitKeys()
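utils.timeit_context, used for the commented-out benchmark here and for the timing in the watershed example, is presumably a timing context manager along these lines:

import time
from contextlib import contextmanager

@contextmanager
def timeit_context(name='block'):
    # assumed helper: print wall-clock time spent inside the with-block
    start = time.perf_counter()
    yield
    print(f'{name}: {time.perf_counter() - start:.3f} s')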
Example #23
def main_2_test():
    def test_region(frame_num, region):
        col, row, d_col, d_row = region
        detect_and_show(wnd, detector, video.read_at_pos(frame_num)[row:row + d_row, col:col + d_col], None,
                        True)

    video, calibration_image = get_capture_and_calibration_image_video2()
    calibrator = Calibrator.calibrate(calibration_image, 2)
    # visualize_calibration(calibrator, calibration_image)
    if not calibrator.calibrated:
        print('System was not calibrated.')
        return

    wnd = CvNamedWindow('detection', cv2.WINDOW_NORMAL)

    detector = Detector(calibrator)

    test_region(442, (474, 545, 134, 74))
    test_region(442, (403, 545, 148, 74))
    test_region(442, (403, 545, 204, 74))
    test_region(442, (434, 542, 163, 76))
    test_region(442, (472, 540, 78, 78))
    #
    # test_region(464, (596, 422, 187, 136))
    # test_region(464, (652, 422, 73, 136))
    # test_region(464, (652, 432, 73, 70))

    # test_region(833, (838, 485, 68, 73))
    # test_region(833, (770, 321, 68, 122))
    # test_region(833, (770, 256, 71, 132))

    # test_region(833, (598, 317, 68, 68))
    # test_region(833, (601, 317, 66, 122))
    # test_region(833, (561, 317, 106, 122))

    # test_region(1511, (721, 151, 131, 132))
    # test_region(1511, (723, 151, 127, 77))
    # test_region(1511, (658, 151, 250, 132))

    # test_region(1511, (779, 151, 71, 77))
    # test_region(1511, (780, 157, 66, 67))

    test_region(497, (958, 173, 74, 65))

    # test_region(1601, (308, 428, 70, 70))
    # test_region(1601, (308, 373, 76, 66))
    # test_region(1601, (308, 317, 136, 187))
    # test_region(1601, (318, 160, 76, 66))

    video.release()
    cv2.destroyAllWindows()
Example #24
def main_6():
    video, calibration_image = get_capture_and_calibration_image_video6()
    calibrator = Calibrator.calibrate(calibration_image, 2)
    # visualize_calibration(calibrator, calibration_image)
    if not calibrator.calibrated:
        print('System was not calibrated.')
        return

    detector = Detector(calibrator)
    wnd = CvNamedWindow('detection', cv2.WINDOW_NORMAL)
    detect_and_show(wnd, detector, video.read_at_pos(229), video.frame_pos() - 1)

    video.release()
    cv2.destroyAllWindows()
Example #25
def visualize_contours(contours, image, winname):
    contours = sorted(contours, key=lambda c: c.len(), reverse=True)
    wnd = CvNamedWindow(winname, cv2.WINDOW_NORMAL)
    for part in contours:
        cv2.drawContours(image, [part.points()], -1, utils.random_color(), 2)
        print('part', part.len())
        wnd.imshow(image)
        cv2.waitKey()
    wnd.destroy()
Example #26
def main():
    img = testImage()
    origWnd, dstWnd, centersWnd = CvNamedWindow.create('orig', 'dstTransform', 'centers')
    dst = cv2.distanceTransform(img, cv2.DIST_L2, cv2.DIST_MASK_5)

    dx = cv2.Sobel(dst, ddepth=cv2.CV_32F, dx=1, dy=0, ksize=5)
    dy = cv2.Sobel(dst, ddepth=cv2.CV_32F, dx=0, dy=1, ksize=5)
    grad = cv2.addWeighted(cv2.convertScaleAbs(dx), 1, cv2.convertScaleAbs(dy), 1, 0)
    grad = cv2.convertScaleAbs(grad)

    origWnd.imshow(img)
    centersWnd.imshow(grad)
    condition = (grad == 0) & (dst > 0)
    print(np.where(condition))

    cvWaitKeys()
Example #27
def main():
    video = VideoCapture(video_sources.video_6)
    work_area = WorkAreaView(video_sources.video_6_work_area_markers)

    vc = VideoController(10, 'pause')
    (video_wnd, bin_diff_wnd, gray_diff_wnd, frame0_diff_wnd,
     learned_BG_wnd) = Wnd.create('video', 'binary diff', 'gray diff',
                                  'diff with frame0', 'Learned BG')

    frames_iter = work_area.skip_non_area(video.frames())

    motion_detector = MotionDetector(next(frames_iter)[0], 3)
    background = BackgroundModel(motion_detector, 15)

    for frame, _ in frames_iter:
        motion_detector.detect(frame)
        if not background.done:
            background.learn()
        else:
            if motion_detector.motion_ended():
                frame0_diff = cv2.absdiff(background.learned, frame)
                gray_of_color_diff = Helper.to_gray(frame0_diff)

                frame0_diff_wnd.imshow(
                    resize(
                        np.hstack(
                            (frame0_diff, Helper.to_bgr(gray_of_color_diff))),
                        .5))

                _, binary = cv2.threshold(gray_of_color_diff, 35, 255,
                                          cv2.THRESH_BINARY)
                cv2.imshow('1 binary', resize(binary, .5))

                # VIS
        if background.done:
            learned_BG_wnd.imshow(resize(background.learned, 1))

        vis_img = motion_detector.put_current_state(frame.copy())
        vis_img = utils.put_frame_pos(vis_img, video.frame_pos(), xy=(2, 55))
        video_wnd.imshow(vis_img)
        # bin_diff_wnd.imshow(resize(motion_detector.bin_diff, .5))
        # gray_diff_wnd.imshow(resize(motion_detector.gray_diff, .5))
        # VIS END

        if vc.wait_key() == 27: break

    video.release()
Example #28
def main_6_test():
    def test_region(frame_num, region):
        col, row, d_col, d_row = region
        detect_and_show(wnd, detector, video.read_at_pos(frame_num)[row:row + d_row, col:col + d_col], None,
                        True)

    video, calibration_image = get_capture_and_calibration_image_video6()
    calibrator = Calibrator.calibrate(calibration_image, 2)
    # visualize_calibration(calibrator, calibration_image)
    if not calibrator.calibrated:
        print('System was not calibrated.')
        return
    wnd = CvNamedWindow('detection', cv2.WINDOW_NORMAL)
    detector = Detector(calibrator)
    # 1201 1218 3001
    test_region(1660, (1406, 444, 128, 120))

    video.release()
    cv2.destroyAllWindows()
Example #29
def main():
    origWnd, dstWnd, centersWnd = CvNamedWindow.create('orig', {'dstTransform': cv2.WINDOW_NORMAL}, 'centers')

    img = testImage()
    # img = realImage()
    img = fillHoles(img)

    centers = img.copy()
    indexes, (distTransform, dx, dy) = findCentersIndexesSobel(img)
    centers[indexes] = 127

    origWnd.imshow(img)
    centersWnd.imshow(centers)
    dstWnd.imshow(visDistTransformResult(distTransform))

    def mouse_callback(evt, x, y, flags, _):
        if evt == cv2.EVENT_LBUTTONDOWN:
            print(x, y, distTransform[y, x], dx[y, x], dy[y, x])

    dstWnd.mouse_callback = mouse_callback

    cvWaitKeys()
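visDistTransformResult is assumed to turn the float distance map into something cv2.imshow can display sensibly; min-max normalization to 8 bits is the usual approach:

def visDistTransformResult(dist):
    # assumed helper: stretch float32 distances to a viewable 8-bit image
    return cv2.normalize(dist, None, 0, 255, cv2.NORM_MINMAX).astype(np.uint8)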
Example #30
    def VIS_CONTOURS(edges, contours):
        print(f'DEBUG.VIS_CONTOURS: contours count {len(contours)}')
        if DEBUG.should_not_vis(edges):
            return

        im = np.zeros_like(edges)
        cv2.drawContours(im, contours, -1, 255, 1)

        images = [im]
        for c in contours:
            im = np.zeros_like(edges)
            cv2.drawContours(im, [c], -1, 255, 1)
            images.append(im)

        wnd = CvNamedWindow('DEBUG CONTOURS')
        wnd.imshow(np.hstack(images))
        cv2.waitKey()
        wnd.destroy()