Example #1
def _find_initial_grid_points_contours(preproc,
                                       transform,
                                       pattern_specs,
                                       det_params,
                                       vis=None):
    print('WARNING - FINDING INITIAL GRID POINTS BY CONTOURS IS DEPRECATED')
    pyutils.tic('initial grid estimate - contour')  #TODO remove
    ctpl = pattern_specs.calibration_template  # Alias
    coords_dst = points2numpy(ctpl.refpts_full_marker)
    coords_src = points2numpy(transform.marker_corners)
    H = cv2.getPerspectiveTransform(coords_src, coords_dst)
    if H is None:
        return None, vis
    h, w = ctpl.tpl_full.shape[:2]
    # Per the OpenCV docs, findContours finds white objects on a black background!
    warped_img = cv2.warpPerspective(preproc.wb, H, (w, h),
                                     flags=cv2.INTER_CUBIC)
    warped_mask = cv2.warpPerspective(
        np.ones(preproc.wb.shape[:2], dtype=np.uint8), H, (w, h),
        flags=cv2.INTER_NEAREST)
    cnts = cv2.findContours(warped_img, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    cnts = cnts[0] if len(cnts) == 2 else cnts[1]
    vis_alt = imutils.ensure_c3(warped_img.copy())
    idx = 0
    expected_circle_area = (ctpl.dia_circle_px / 2)**2 * np.pi
    exp_circ_area_lower = 0.5 * expected_circle_area
    exp_circ_area_upper = 2 * expected_circle_area
    for shape in cnts:
        area = cv2.contourArea(shape)
        if area < exp_circ_area_lower or area > exp_circ_area_upper:
            color = (255, 0, 0)
        else:
            color = (0, 0, 255)
            # continue
        # Centroid
        M = cv2.moments(shape)
        try:
            cx = np.round(M['m10'] / M['m00'])
            cy = np.round(M['m01'] / M['m00'])
        except ZeroDivisionError:
            continue

        idx += 1
        if det_params.debug:
            cv2.drawContours(vis_alt, [shape], 0, color, -1)
            cv2.circle(vis_alt, (int(cx), int(cy)), 1, (255, 255, 0), -1)
            if idx % 10 == 0:
                imvis.imshow(vis_alt, 'Points by contour', wait_ms=10)
    if det_params.debug:
        imvis.imshow(vis_alt, 'Points by contour', wait_ms=10)

    initial_estimates = list()
    #TODO match the points (a hedged matching sketch follows below this function)
    #TODO draw debug on vis
    pyutils.toc('initial grid estimate - contour')  #TODO remove
    return initial_estimates, vis
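
# The 'match the points' TODO above is still open. The sketch below outlines
# one plausible (untested) approach: greedy nearest-neighbor assignment of the
# detected centroids to the template's reference grid coordinates. Both
# `ref_grid_px` (grid coordinates in template pixels) and `max_dist_px` are
# assumptions for illustration, not part of the actual pattern specification.
def _match_centroids_to_grid(centroids, ref_grid_px, max_dist_px=10.0):
    matches = list()
    if len(centroids) == 0:
        return matches
    centroids = np.asarray(centroids, dtype=float)
    for gidx, ref in enumerate(ref_grid_px):
        # Distance from this reference grid point to every detected centroid
        dists = np.linalg.norm(centroids - np.asarray(ref, dtype=float), axis=1)
        nn = int(np.argmin(dists))
        if dists[nn] <= max_dist_px:
            matches.append((gidx, centroids[nn]))
    return matches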
Example #2
    def _process_next_frameset(self, capture, frameset):
        # assert len(frameset) == 1  # Currently, we assume there's only one mobotix camera connected!
        if frameset[0] is None:
            self._streamer.stop()
            return

        if self._args.verbose:
            # Measure time between received framesets
            if self._mean_ms_between_framesets is None:
                # First frameset - there's nothing to measure yet
                ms_between_framesets = 0
                self._mean_ms_between_framesets = 0
            elif self._mean_ms_between_framesets == 0:
                # Seed the moving average with the first actual measurement
                # (seeding with 0 would bias the reported average framerate)
                ms_between_framesets = pyutils.ttoc('[frameset received]')
                self._mean_ms_between_framesets = ms_between_framesets
            else:
                # Exponentially weighted moving average
                ms_between_framesets = pyutils.ttoc('[frameset received]')
                self._mean_ms_between_framesets = (
                    0.9 * self._mean_ms_between_framesets
                    + 0.1 * ms_between_framesets)
            if ms_between_framesets > 0:
                print(
                    'Frameset received after {:.2f} ms, avg. framerate {:.1f}'.
                    format(ms_between_framesets,
                           1000.0 / self._mean_ms_between_framesets))
            pyutils.tic('[frameset received]')

        vis_frames = list()
        vis_labels = list()
        for idx, frame in enumerate(frameset):
            frame_lbl = capture.frame_label(idx)
            vis_frame = imvis.pseudocolor(frame, [0, 5000], color_map=colormaps.colormap_turbo_rgb)\
                if capture.is_depth(idx) or capture.is_infrared(idx) else frame
            vis_frames.append(vis_frame)
            vis_labels.append(frame_lbl)

        # Overlay labels
        vis_frames = [
            imvis.draw_text_box(frame, label,
                                (frame.shape[1] // 2, 40),
                                text_anchor='north',
                                bg_color=(220, 0, 0),
                                font_color=(0, 0, 0))
            for frame, label in zip(vis_frames, vis_labels)
        ]

        # Forward to storage processes
        self._store(capture, frameset)

        # Display the live stream
        collage = imvis.make_collage(vis_frames,
                                     num_images_per_row=2,
                                     padding=0,
                                     fixed_size_per_image=(640, 480))
        k = imvis.imshow(collage, "Live view", wait_ms=10)
        if k == ord('q') or k == 27:
            self._streamer.stop()
Example #3
def test_tictoc(capsys):
    import time
    tic()
    time.sleep(0.5)
    # Time into variable
    passed_ms = ttoc(seconds=False)
    passed_s = ttoc(seconds=True)
    assert passed_ms >= 500.0
    assert passed_s >= 0.5
    # Log time to stdout
    toc(seconds=False)
    captured = capsys.readouterr()
    assert captured.out.startswith('[default] Elapsed time: ')
    assert captured.out.endswith(' ms\n')
    tic('test')
    toc('test', seconds=True)
    captured = capsys.readouterr()
    assert captured.out.startswith('[test] Elapsed time: ')
    assert captured.out.endswith(' s\n')
    # Test toc_nsec
    tic(label='nsec')
    toc_nsec(label='nsec', nsec=0.5, seconds=False)
    toc_nsec(label='nsec', nsec=0.5, seconds=False)
    toc_nsec(label='nsec', nsec=0.5, seconds=False)
    captured = capsys.readouterr()
    assert captured.out.startswith('[nsec] Elapsed time: ')
    assert captured.out.endswith(' ms\n')
    assert captured.out.count('\n') == 1
    time.sleep(0.5)
    toc_nsec(label='nsec', nsec=0.5, seconds=True)
    toc_nsec(label='nsec', nsec=0.5, seconds=False)
    captured = capsys.readouterr()
    assert captured.out.startswith('[nsec] Elapsed time: ')
    assert captured.out.endswith(' s\n')
    assert captured.out.count('\n') == 1
Example #4
def demo():
    #TODO separate assets folder, use abspath
    img = imutils.imread('flamingo.jpg')
    rect = (180, 170, 120, 143)
    target_template = imutils.roi(img, rect)
    imvis.imshow(target_template, 'Template', wait_ms=10)
    warped, H_gt = _generate_warped_image(img, -45, -25, 20, 30, -30, -360)
    imvis.imshow(warped, 'Simulated Warp', wait_ms=10)

    # Initial estimate H0
    H0 = np.eye(3, dtype=float)
    H0[0, 2] = rect[0]
    H0[1, 2] = rect[1]
    _logger.info(f'Initial estimate, H0:\n{H0}')

    # print('H0\n', H0)
    # print('H_gt\n', H_gt)

    verbose = True
    pyutils.tic('FC')
    align = Alignment(target_template,
                      Method.FC,
                      full_reference_image=img,
                      num_pyramid_levels=5,
                      verbose=verbose)
    align.set_true_warp(H_gt)
    H_est, result = align.align(warped, H0)
    pyutils.toc('FC')
    imvis.imshow(result, 'Result FC', wait_ms=10)

    pyutils.tic('IC')
    align = Alignment(target_template,
                      Method.IC,
                      full_reference_image=img,
                      num_pyramid_levels=3,
                      verbose=verbose)
    align.set_true_warp(H_gt)
    H_est, result = align.align(warped, H0)
    pyutils.toc('IC')
    imvis.imshow(result, 'Result IC', wait_ms=10)

    pyutils.tic('ESM')
    align = Alignment(target_template,
                      Method.ESM,
                      full_reference_image=img,
                      num_pyramid_levels=5,
                      verbose=verbose)
    align.set_true_warp(H_gt)
    H_est, result = align.align(warped, H0)
    pyutils.toc('ESM')
    imvis.imshow(result, 'Result ESM', wait_ms=-1)
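
# `_generate_warped_image` is not shown in this snippet. The sketch below is a
# guess at such a helper, NOT the actual implementation: it interprets the six
# parameters as Euler angles (degrees) about the x/y/z axes plus a 3D
# translation and builds the plane-induced homography
# H = K (R - t n^T / d) K^{-1} for the image plane. Parameter order, units,
# focal length and sign conventions are all assumptions.
def _generate_warped_image_sketch(img, rx_deg, ry_deg, rz_deg, tx, ty, tz):
    import cv2
    h, w = img.shape[:2]
    f = float(max(w, h))  # Synthetic focal length in pixels
    K = np.array([[f, 0.0, w / 2.0],
                  [0.0, f, h / 2.0],
                  [0.0, 0.0, 1.0]])
    ax, ay, az = np.deg2rad([rx_deg, ry_deg, rz_deg])
    Rx = np.array([[1.0, 0.0, 0.0],
                   [0.0, np.cos(ax), -np.sin(ax)],
                   [0.0, np.sin(ax), np.cos(ax)]])
    Ry = np.array([[np.cos(ay), 0.0, np.sin(ay)],
                   [0.0, 1.0, 0.0],
                   [-np.sin(ay), 0.0, np.cos(ay)]])
    Rz = np.array([[np.cos(az), -np.sin(az), 0.0],
                   [np.sin(az), np.cos(az), 0.0],
                   [0.0, 0.0, 1.0]])
    R = Rz @ Ry @ Rx
    # Plane-induced homography for the image plane (normal n = (0, 0, 1)^T at
    # distance d = f from the camera)
    t = np.array([[tx], [ty], [tz]], dtype=float)
    n = np.array([[0.0, 0.0, 1.0]])
    H = K @ (R - t @ n / f) @ np.linalg.inv(K)
    H /= H[2, 2]
    warped = cv2.warpPerspective(img, H, (w, h), flags=cv2.INTER_LINEAR)
    return warped, H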
Example #5
def test_tictoc(capsys):
    import time
    import pytest  # Needed for pytest.raises below (if not imported at module level)
    tic()
    # We should be able to immediately call toc:
    toc()
    time.sleep(0.5)
    # Time into variable
    passed_ms = ttoc(seconds=False)
    passed_s = ttoc(seconds=True)
    assert passed_ms >= 500.0
    assert passed_s >= 0.5
    # Log time to stdout
    toc(seconds=False)
    captured = capsys.readouterr()
    assert captured.out.startswith('[default] Elapsed time: ')
    assert captured.out.endswith(' ms\n')
    tic('test')
    toc('test', seconds=True)
    captured = capsys.readouterr()
    assert captured.out.startswith('[test] Elapsed time: ')
    assert captured.out.endswith(' s\n')
    # Test toc_nsec
    tic(label='nsec')
    toc_nsec(label='nsec', nsec=0.5, seconds=False)
    toc_nsec(label='nsec', nsec=0.5, seconds=False)
    toc_nsec(label='nsec', nsec=0.5, seconds=False)
    captured = capsys.readouterr()
    assert captured.out.startswith('[nsec] Elapsed time: ')
    assert captured.out.endswith(' ms\n')
    assert captured.out.count('\n') == 1
    time.sleep(0.5)
    toc_nsec(label='nsec', nsec=0.5, seconds=True)
    toc_nsec(label='nsec', nsec=0.5, seconds=False)
    captured = capsys.readouterr()
    assert captured.out.startswith('[nsec] Elapsed time: ')
    assert captured.out.endswith(' s\n')
    assert captured.out.count('\n') == 1
    # Call invalid timer
    with pytest.raises(KeyError):
        toc('no-such-timer')
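
# The tic/toc helpers exercised above are not shown in this snippet. Below is
# a minimal sketch that matches the behavior the test asserts (labelled
# timers, '[label] Elapsed time: ...' output in ms or s, rate-limited
# toc_nsec, KeyError for unknown labels). It is NOT the actual pyutils
# implementation; names carry a _sketch suffix to avoid shadowing the real
# helpers.
import time as _time

_timers_sketch = {}
_last_print_sketch = {}

def tic_sketch(label='default'):
    _timers_sketch[label] = _time.perf_counter()

def ttoc_sketch(label='default', seconds=False):
    # Raises KeyError if tic_sketch was never called for this label
    elapsed = _time.perf_counter() - _timers_sketch[label]
    return elapsed if seconds else 1000.0 * elapsed

def toc_sketch(label='default', seconds=False):
    print('[{}] Elapsed time: {:.3f} {}'.format(
        label, ttoc_sketch(label, seconds=seconds), 's' if seconds else 'ms'))

def toc_nsec_sketch(label='default', nsec=0.5, seconds=False):
    # Rate-limited logging: print at most once every nsec seconds per label
    now = _time.perf_counter()
    if now - _last_print_sketch.get(label, float('-inf')) >= nsec:
        _last_print_sketch[label] = now
        toc_sketch(label, seconds=seconds)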
Example #6
def demo_align():
    cfg_base_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                                 '..', 'data', 'data-best')
    cfg_file = os.path.join(cfg_base_path, 'k4a-manual-alignment.cfg')

    streamer = best.MulticamStepper(cfg_file,
                                    True,
                                    cfg_file_rel_path_base_dir=cfg_base_path,
                                    verbose=True)
    capture = streamer.start()

    _, frameset = streamer.next_frameset()

    K_color = capture.intrinsics(0)
    K_depth = capture.intrinsics(1)
    Rt_stereo = capture.stereo_transformation(1)
    D_color = capture.distortion_coefficients(0)
    D_depth = capture.distortion_coefficients(1)

    alignment = None
    num_frames_processed = 1
    while streamer.is_available():
        capture, frameset = streamer.next_frameset()
        if frameset is None:
            print('Invalid frameset received, terminating now!')
            break
        color, depth, infrared = frameset

        if alignment is None:
            alignment = best.RgbdAlignment(K_color, K_depth, Rt_stereo[0],
                                           Rt_stereo[1], img_resolution(color),
                                           img_resolution(depth), D_color,
                                           D_depth)
        # pyutils.tic('cpp-align')
        # aligned_depth_cpp = alignment.align_d2c(depth)
        # pyutils.toc('cpp-align')

        pyutils.tic('cpp-align-di')
        aligned_depth_cpp, aligned_ir_cpp = alignment.align_di2c(
            depth, infrared)
        pyutils.toc('cpp-align-di')

        vis_frames = [color, vis_depth(depth), vis_infrared(infrared)]
        vis_labels = ['RGB', 'Depth (original)', 'IR (original)']

        ## The cpp version takes care of properly interpolating depth values during alignment
        # takes about 3-5ms per 640x480 depth
        # Additionally aligning the intensity image adds another 0.8-1ms to the cpp processing time
        ## The python version is a naive reprojection + binning (without depth ordering, filtering, interpolation, etc.)
        # takes about 20-30 ms (~4-5 times slower than cpp)
        # pyutils.tic('py-align')
        # aligned_depth_py = align_depth_to_color(depth, K_color, K_depth, Rt_stereo, color.shape[1], color.shape[0])
        # pyutils.toc('py-align')

        aligned_depth = aligned_depth_cpp
        vis_aligned_depth = vis_depth(aligned_depth)
        vis_frames.append(vis_aligned_depth)
        vis_labels.append('Aligned Depth')

        aligned_ir = aligned_ir_cpp
        vis_aligned_ir = vis_infrared(aligned_ir)
        # vis_frames.append(vis_aligned_ir)
        # vis_labels.append('Aligned IR')

        # color_resized = cv2.resize(color, (aligned_depth.shape[1], aligned_depth.shape[0]))
        # vis_frames.append(color_resized)
        # vis_labels.append('color resized')

        # vis_frames.append(imvis.overlay(vis_aligned_depth, color_resized, 0.7))#, aligned_depth > 0))
        vis_frames.append(imvis.overlay(vis_aligned_depth, color, 0.7))
        vis_labels.append('RGB+D')

        vis_frames.append(imvis.overlay(vis_aligned_ir, color, 0.7))
        vis_labels.append('RGB+IR')

        # Visualization (rescale, overlay a label, show a single collage)
        bg_color = (180, 180, 180)
        vis_frames = [
            imutils.aspect_aware_resize(f, (640, 480),
                                        padding_value=bg_color)[0]
            for f in vis_frames
        ]
        vis_frames = overlay_labels(vis_frames, vis_labels)

        img_per_row = (len(vis_frames) + 1) // 2  # Round up for an odd frame count
        collage = imvis.make_collage(vis_frames,
                                     bg_color=bg_color,
                                     num_images_per_row=img_per_row)
        k = imvis.imshow(collage, title='Stream', wait_ms=20)
        if k == ord('q') or k == 27:
            break

        num_frames_processed += 1
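
# The commented-out `align_depth_to_color` call above refers to the naive
# Python alignment. Below is a minimal sketch of such a reprojection +
# binning, assuming `Rt_stereo` is an (R, t) pair mapping depth-camera to
# color-camera coordinates, 3x3 numpy intrinsics, and no lens distortion.
# This is NOT the actual implementation:
def align_depth_to_color_sketch(depth, K_color, K_depth, Rt_stereo, out_w, out_h):
    import numpy as np
    R, t = Rt_stereo[0], Rt_stereo[1]
    h, w = depth.shape[:2]
    # Back-project every valid depth pixel to a 3D point in the depth frame
    u, v = np.meshgrid(np.arange(w), np.arange(h))
    z = depth.astype(np.float64).ravel()
    valid = z > 0
    u, v, z = u.ravel()[valid], v.ravel()[valid], z[valid]
    x = (u - K_depth[0, 2]) * z / K_depth[0, 0]
    y = (v - K_depth[1, 2]) * z / K_depth[1, 1]
    # Transform into the color camera frame and project onto its image plane
    pts_c = R @ np.stack((x, y, z)) + np.asarray(t).reshape(3, 1)
    uc = np.round(pts_c[0] / pts_c[2] * K_color[0, 0] + K_color[0, 2]).astype(int)
    vc = np.round(pts_c[1] / pts_c[2] * K_color[1, 1] + K_color[1, 2]).astype(int)
    # Naive binning: later points overwrite earlier ones (no depth ordering,
    # filtering or interpolation - cf. the runtime notes above)
    inside = (uc >= 0) & (uc < out_w) & (vc >= 0) & (vc < out_h) & (pts_c[2] > 0)
    aligned = np.zeros((out_h, out_w), dtype=depth.dtype)
    aligned[vc[inside], uc[inside]] = pts_c[2][inside].astype(depth.dtype)
    return aligned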
Example #7
def _find_initial_grid_points_correlation(preproc,
                                          transform,
                                          pattern_specs,
                                          det_params,
                                          vis=None):
    pyutils.tic('initial grid estimate - correlation')  #TODO remove
    ctpl = pattern_specs.calibration_template  # Alias
    coords_dst = points2numpy(ctpl.refpts_full_marker)
    coords_src = points2numpy(transform.marker_corners)
    H = cv2.getPerspectiveTransform(coords_src, coords_dst)
    if H is None:
        return None, vis
    h, w = ctpl.tpl_full.shape[:2]
    warped_img = cv2.warpPerspective(preproc.bw, H, (w, h),
                                     flags=cv2.INTER_CUBIC)
    warped_mask = cv2.warpPerspective(
        np.ones(preproc.bw.shape[:2], dtype=np.uint8), H, (w, h),
        flags=cv2.INTER_NEAREST)

    ncc = cv2.matchTemplate(
        warped_img, ctpl.tpl_cropped_circle,
        cv2.TM_CCOEFF_NORMED)  # Note: a matchTemplate mask would have to be the same size as the template
    ncc[ncc < det_params.grid_ccoeff_thresh_initial] = 0

    if det_params.debug:
        overlay = imutils.ensure_c3(
            imvis.overlay(ctpl.tpl_full, 0.3, warped_img, warped_mask))
        warped_img_corners = pru.apply_projection(
            H, points2numpy(image_corners(preproc.bw), Nx2=False))
        for i in range(4):
            pt1 = numpy2cvpt(warped_img_corners[:, i])
            pt2 = numpy2cvpt(warped_img_corners[:, (i + 1) % 4])
            cv2.line(overlay, pt1, pt2, color=(0, 0, 255), thickness=3)

    #FIXME FIXME FIXME
    # Idea: detect blobs in the thresholded NCC response - this could replace
    # the greedy NMS loop below. The barycenter/centroid of each blob gives
    # the top-left corner (then compute the relative offset to get the
    # initial corner guess). A hedged sketch follows below this function.
    initial_estimates = list()
    tpl = ctpl.tpl_cropped_circle
    while True:
        y, x = np.unravel_index(ncc.argmax(), ncc.shape)
        # print('Next', y, x, ncc[y, x], det_params.grid_ccoeff_thresh_initial, ncc.shape)
        if ncc[y, x] < det_params.grid_ccoeff_thresh_initial:
            break
        initial_estimates.append(
            CalibrationGridPoint(x=x, y=y, score=ncc[y, x]))
        # Clear the NCC peak around the detected template
        left = x - tpl.shape[1] // 2
        top = y - tpl.shape[0] // 2
        left, top, nms_w, nms_h = imutils.clip_rect_to_image(
            (left, top, tpl.shape[1], tpl.shape[0]), ncc.shape[1],
            ncc.shape[0])
        right = left + nms_w
        bottom = top + nms_h
        ncc[top:bottom, left:right] = 0
        if det_params.debug:
            cv2.rectangle(overlay, (x, y),
                          (x + ctpl.tpl_cropped_circle.shape[1],
                           y + ctpl.tpl_cropped_circle.shape[0]),
                          (255, 0, 255))
            if len(initial_estimates) % 20 == 0:
                # imvis.imshow(imvis.pseudocolor(ncc, [-1, 1]), 'NCC Result', wait_ms=10)
                imvis.imshow(overlay, 'Points by correlation', wait_ms=10)

    if vis is not None:
        cv2.drawContours(vis, [transform.shape['hull']], 0, (200, 0, 200), 3)
    if det_params.debug:
        print('Check "Points by correlation". Press key to continue')
        imvis.imshow(overlay, 'Points by correlation', wait_ms=-1)
    pyutils.toc('initial grid estimate - correlation')  #TODO remove
    return initial_estimates, vis
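
# A minimal sketch of the blob-based alternative from the FIXME above:
# threshold the NCC response, extract connected components, and use each
# blob's centroid as the top-left corner estimate (matchTemplate peaks are
# template top-left positions). An untested replacement for the greedy NMS
# loop, not part of the actual detector:
def _grid_points_from_ncc_blobs(ncc, thresh):
    mask = (ncc >= thresh).astype(np.uint8)
    num_labels, _, _, centroids = cv2.connectedComponentsWithStats(
        mask, connectivity=8)
    estimates = list()
    for lbl in range(1, num_labels):  # Label 0 is the background
        x, y = int(round(centroids[lbl][0])), int(round(centroids[lbl][1]))
        estimates.append(CalibrationGridPoint(x=x, y=y, score=float(ncc[y, x])))
    return estimates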
Example #8
def demo(pseudocolor=True, inspect=False):
    imgs = [imutils.imread('../data/ninja-01.jpg'),
            imutils.imread('../data/ninja-02.jpg')]

    bgmodels = list()
    bgmodel = bgm.BackgroundModel()
    bgmodel.approximate_median_bgm(
        adaptation_step=5,
        fg_report_threshold=50,
        median_on_grayscale=True)
    bgmodels.append(bgmodel)

    bgmodel = bgm.BackgroundModel()
    bgmodel.block_mean_bgm(
        block_size=(16, 16),
        block_overlap=0.5,
        update_rate=0.01,
        fg_report_threshold=50,
        channel='grayscale')
    bgmodels.append(bgmodel)

    bgmodel = bgm.BackgroundModel()
    bgmodel.gaussian_mixture_bgm(
        history=500,
        detect_shadows=True,
        var_thresh=100,
        comp_thresh=0.05)
    bgmodels.append(bgmodel)

    bgmodel = bgm.BackgroundModel()
    bgmodel.normalized_rgb_bgm(
        report_as_binary=False,
        # binary_reporting_threshold=20,
        update_rate=0.1,
        alpha=0.1,
        beta=1.0)
    bgmodels.append(bgmodel)

    print("""
! Note that the reported initialization time for the first BGM includes
  library initialization - maybe OpenCV (haven't tracked that down yet).
    """)
    fg_masks = list()
    for bgmodel in bgmodels:
        pyutils.tic('init')
        bgmodel.init(imgs[0])
        t_init = pyutils.ttoc('init')
        pyutils.tic('apply')
        mask = bgmodel.report_changes(imgs[1])
        t_apply = pyutils.ttoc('apply')

        if pseudocolor:
            fg_masks.append(imvis.pseudocolor(mask, limits=None, color_map=colormaps.colormap_viridis_rgb))
        else:
            fg_masks.append(mask)
        print('init/apply: {:17s} {:7.2f} ms, {:7.2f} ms'.format(bgmodel.name(), t_init, t_apply))

        if inspect:
            import iminspect
            iminspect.show(mask)

    padding = 10
    # Add alpha channel to render the README visualization nicely for web display
    fg_masks[0] = np.dstack((fg_masks[0], 255 * np.ones(fg_masks[0].shape[:2], dtype=np.uint8)))
    collage = imvis.make_collage(fg_masks,
        padding=padding,
        fixed_size_per_image=(200, 266),
        bg_color=(0, 0, 0, 0),
        num_images_per_row=2)
    input_seq = cv2.resize(imutils.imread('../data/ninja-seq.png'), (200, 266))
    collage = imvis.make_collage([input_seq, collage],
        padding=padding, bg_color=(0, 0, 0, 0))

    # Overlay names
    height, width = collage.shape[:2]
    collage = imvis.draw_text_box(collage, 'Input',
            (input_seq.shape[1] // 2, height // 3 + 20), text_anchor='center',
            bg_color=(255, 255, 255), font_color=(-1, -1, -1), font_scale=1.0,
            font_thickness=1, padding=5, fill_opacity=0.8)

    names = [bg.name() for bg in bgmodels]
    mask_width = (width - input_seq.shape[1] - padding) // 2
    for i in range(len(names)):
        pos = (input_seq.shape[1] + padding + (i % 2) * (mask_width + padding) + mask_width // 2,
            (i // 2) * (266 + padding) + 266 - 10)
        collage = imvis.draw_text_box(collage, names[i],
            pos, text_anchor='south', bg_color=(255, 255, 255),
            font_color=(-1, -1, -1), font_scale=1.0,
            font_thickness=1, padding=5, fill_opacity=0.8)

    imvis.imshow(collage, title="Background Subtraction", wait_ms=-1)
    imutils.imsave('../../doc/example-bgm.png', collage)
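
# For reference: the approximate median BGM configured above follows the
# McFarlane & Schofield update rule - nudge each (grayscale) background pixel
# toward the current frame by a fixed step and report pixels deviating by more
# than a threshold. A minimal numpy sketch, NOT the library implementation:
def approximate_median_sketch(frames, adaptation_step=5, fg_report_threshold=50):
    bg = frames[0].astype(np.int16)
    masks = list()
    for frame in frames[1:]:
        f = frame.astype(np.int16)
        # Move the background estimate one step toward the current frame
        bg += adaptation_step * np.sign(f - bg)
        # Foreground = pixels deviating strongly from the background model
        masks.append((np.abs(f - bg) > fg_report_threshold).astype(np.uint8) * 255)
    return np.clip(bg, 0, 255).astype(np.uint8), masks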