def test_tictoc(capsys):
    """Exercise the tic/toc stopwatch utilities (value query, stdout logging, rate-limited logging)."""
    import time

    def expect_output(prefix, suffix, n_lines=None):
        # Drain captured stdout and verify the printed timing message.
        out = capsys.readouterr().out
        assert out.startswith(prefix)
        assert out.endswith(suffix)
        if n_lines is not None:
            assert out.count('\n') == n_lines

    tic()
    time.sleep(0.5)
    # Query the elapsed time programmatically (no printing).
    elapsed_ms = ttoc(seconds=False)
    elapsed_s = ttoc(seconds=True)
    assert elapsed_ms >= 500.0
    assert elapsed_s >= 0.5
    # Log the elapsed time of the default timer to stdout.
    toc(seconds=False)
    expect_output('[default] Elapsed time: ', ' ms\n')
    tic('test')
    toc('test', seconds=True)
    expect_output('[test] Elapsed time: ', ' s\n')
    # toc_nsec must mute repeated outputs within the given interval:
    # only the first of these three calls may print.
    tic(label='nsec')
    toc_nsec(label='nsec', nsec=0.5, seconds=False)
    toc_nsec(label='nsec', nsec=0.5, seconds=False)
    toc_nsec(label='nsec', nsec=0.5, seconds=False)
    expect_output('[nsec] Elapsed time: ', ' ms\n', n_lines=1)
    time.sleep(0.5)
    # After the mute interval elapsed, exactly one more line is printed.
    toc_nsec(label='nsec', nsec=0.5, seconds=True)
    toc_nsec(label='nsec', nsec=0.5, seconds=False)
    expect_output('[nsec] Elapsed time: ', ' s\n', n_lines=1)
def _find_initial_grid_points_contours(preproc, transform, pattern_specs, det_params, vis=None):
    """Estimate initial calibration grid points via contour detection (DEPRECATED).

    Warps the preprocessed image into the reference template frame, finds
    contours, and keeps those whose area roughly matches the expected
    calibration circle. Returns a tuple ``(initial_estimates, vis)``;
    ``(None, vis)`` if no homography could be computed.
    """
    print('WARNING - FINDING INITIAL GRID POINTS BY CONTOURS IS DEPRECATED')
    pyutils.tic('initial grid estimate - contour')  # TODO remove
    ctpl = pattern_specs.calibration_template  # Alias
    # Homography mapping the observed marker corners onto the reference template.
    coords_dst = points2numpy(ctpl.refpts_full_marker)
    coords_src = points2numpy(transform.marker_corners)
    H = cv2.getPerspectiveTransform(coords_src, coords_dst)
    if H is None:
        return None, vis
    h, w = ctpl.tpl_full.shape[:2]
    # OpenCV doc: finding contours is finding white objects from black background!
    # NOTE(review): this uses `preproc.wb` while the correlation-based variant
    # uses `preproc.bw` - confirm which attribute is intended.
    # FIX: interpolation must be passed via `flags=`; the 4th positional
    # parameter of cv2.warpPerspective is `dst`, not the interpolation flag.
    warped_img = cv2.warpPerspective(preproc.wb, H, (w, h), flags=cv2.INTER_CUBIC)
    # (The previously computed warped mask was never used here and has been removed.)
    cnts = cv2.findContours(warped_img, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    # findContours returns (contours, hierarchy) in OpenCV >= 4.x but
    # (image, contours, hierarchy) in 3.x - handle both.
    cnts = cnts[0] if len(cnts) == 2 else cnts[1]
    vis_alt = imutils.ensure_c3(warped_img.copy())
    idx = 0
    # Accept contours whose area lies within [0.5, 2] x the expected circle area.
    expected_circle_area = (ctpl.dia_circle_px / 2)**2 * np.pi
    exp_circ_area_lower = 0.5 * expected_circle_area
    exp_circ_area_upper = 2 * expected_circle_area
    for shape in cnts:
        area = cv2.contourArea(shape)
        if area < exp_circ_area_lower or area > exp_circ_area_upper:
            color = (255, 0, 0)  # Debug color: rejected (implausible area)
        else:
            color = (0, 0, 255)  # Debug color: plausible circle candidate
        # Centroid from contour moments; m00 == 0 means a degenerate contour.
        M = cv2.moments(shape)
        try:
            cx = np.round(M['m10'] / M['m00'])
            cy = np.round(M['m01'] / M['m00'])
        except ZeroDivisionError:
            continue
        idx += 1
        if det_params.debug:
            cv2.drawContours(vis_alt, [shape], 0, color, -1)
            cv2.circle(vis_alt, (int(cx), int(cy)), 1, (255, 255, 0), -1)
            if idx % 10 == 0:
                imvis.imshow(vis_alt, 'Points by contour', wait_ms=10)
    if det_params.debug:
        imvis.imshow(vis_alt, 'Points by contour', wait_ms=10)
    initial_estimates = list()
    # TODO match the points
    # TODO draw debug on vis
    pyutils.toc('initial grid estimate - contour')  # TODO remove
    return initial_estimates, vis
def demo():
    """Demo: align a synthetically warped patch with the FC, IC and ESM methods."""
    # TODO separate assets folder, use abspath
    img = imutils.imread('flamingo.jpg')
    rect = (180, 170, 120, 143)
    target_template = imutils.roi(img, rect)
    imvis.imshow(target_template, 'Template', wait_ms=10)
    warped, H_gt = _generate_warped_image(img, -45, -25, 20, 30, -30, -360)
    imvis.imshow(warped, 'Simulated Warp', wait_ms=10)
    # Initial estimate H0: identity translated to the template's top-left corner.
    H0 = np.eye(3, dtype=float)
    H0[0, 2] = rect[0]
    H0[1, 2] = rect[1]
    _logger.info(f'Initial estimate, H0:\n{H0}')
    verbose = True
    # (method, timer label, pyramid levels, imshow wait) - the final run blocks
    # until a key is pressed (wait_ms=-1).
    configurations = [
        (Method.FC, 'FC', 5, 10),
        (Method.IC, 'IC', 3, 10),
        (Method.ESM, 'ESM', 5, -1),
    ]
    for method, label, num_levels, wait_ms in configurations:
        pyutils.tic(label)
        align = Alignment(target_template, method,
                          full_reference_image=img,
                          num_pyramid_levels=num_levels,
                          verbose=verbose)
        align.set_true_warp(H_gt)
        H_est, result = align.align(warped, H0)
        pyutils.toc(label)
        imvis.imshow(result, f'Result {label}', wait_ms=wait_ms)
def test_tictoc(capsys):
    """Exercise the tic/toc stopwatch utilities, including immediate toc and unknown labels.

    NOTE(review): this redefines ``test_tictoc`` and shadows the earlier test
    of the same name in this module - consider renaming one of them.
    """
    import time

    def expect_output(prefix, suffix, n_lines=None):
        # Drain captured stdout and verify the printed timing message.
        out = capsys.readouterr().out
        assert out.startswith(prefix)
        assert out.endswith(suffix)
        if n_lines is not None:
            assert out.count('\n') == n_lines

    tic()
    # We should be able to immediately call toc:
    toc()
    time.sleep(0.5)
    # Query the elapsed time programmatically (no printing).
    elapsed_ms = ttoc(seconds=False)
    elapsed_s = ttoc(seconds=True)
    assert elapsed_ms >= 500.0
    assert elapsed_s >= 0.5
    # Log the elapsed time of the default timer to stdout.
    toc(seconds=False)
    expect_output('[default] Elapsed time: ', ' ms\n')
    tic('test')
    toc('test', seconds=True)
    expect_output('[test] Elapsed time: ', ' s\n')
    # toc_nsec must mute repeated outputs within the given interval:
    # only the first of these three calls may print.
    tic(label='nsec')
    toc_nsec(label='nsec', nsec=0.5, seconds=False)
    toc_nsec(label='nsec', nsec=0.5, seconds=False)
    toc_nsec(label='nsec', nsec=0.5, seconds=False)
    expect_output('[nsec] Elapsed time: ', ' ms\n', n_lines=1)
    time.sleep(0.5)
    # After the mute interval elapsed, exactly one more line is printed.
    toc_nsec(label='nsec', nsec=0.5, seconds=True)
    toc_nsec(label='nsec', nsec=0.5, seconds=False)
    expect_output('[nsec] Elapsed time: ', ' s\n', n_lines=1)
    # Querying a timer that was never started must raise.
    with pytest.raises(KeyError):
        toc('no-such-timer')
def demo_align():
    """Demo: stream an RGB-D(+IR) sensor and visualize C++-side depth/IR-to-color alignment.

    Loads a multicam configuration, then for each frameset aligns the depth
    and infrared images into the color camera's frame and shows a labeled
    collage until the stream ends or the user presses 'q'/ESC.
    """
    # Configuration file lives relative to this script's directory.
    cfg_base_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'data', 'data-best')
    cfg_file = os.path.join(cfg_base_path, 'k4a-manual-alignment.cfg')
    streamer = best.MulticamStepper(cfg_file, True, cfg_file_rel_path_base_dir=cfg_base_path, verbose=True)
    capture = streamer.start()
    # Skip the first frameset; only the capture's calibration is needed here.
    _, frameset = streamer.next_frameset()
    # Calibration: sensor 0 is the color camera, sensor 1 the depth camera
    # (per the intrinsics/distortion queries below).
    K_color = capture.intrinsics(0)
    K_depth = capture.intrinsics(1)
    Rt_stereo = capture.stereo_transformation(1)
    D_color = capture.distortion_coefficients(0)
    D_depth = capture.distortion_coefficients(1)
    alignment = None  # Lazily constructed once the first frame reveals the image resolutions.
    num_frames_processed = 1
    while streamer.is_available():
        capture, frameset = streamer.next_frameset()
        if frameset is None:
            print('Invalid frameset received, terminating now!')
            break
        color, depth, infrared = frameset
        if alignment is None:
            # Rt_stereo is indexable: presumably (R, t) - TODO confirm against the best API.
            alignment = best.RgbdAlignment(K_color, K_depth, Rt_stereo[0], Rt_stereo[1],
                                           img_resolution(color), img_resolution(depth),
                                           D_color, D_depth)
        # pyutils.tic('cpp-align')
        # aligned_depth_cpp = alignment.align_d2c(depth)
        # pyutils.toc('cpp-align')
        # Align depth AND infrared into the color frame in one C++ call.
        pyutils.tic('cpp-align-di')
        aligned_depth_cpp, aligned_ir_cpp = alignment.align_di2c(
            depth, infrared)
        pyutils.toc('cpp-align-di')
        vis_frames = [color, vis_depth(depth), vis_infrared(infrared)]
        vis_labels = ['RGB', 'Depth (original)', 'IR (original)']
        ## The cpp version takes care of properly interpolating depth values during alignment
        # takes about 3-5ms per 640x480 depth
        # Additionally aligning the intensity image adds another 0.8-1ms to the cpp processing time
        ## The python version is a naive reprojection + binning (without depth ordering, filtering, interpolation, etc.)
        # takes about 20-30 ms (~4-5 times slower than cpp)
        # pyutils.tic('py-align')
        # aligned_depth_py = align_depth_to_color(depth, K_color, K_depth, Rt_stereo, color.shape[1], color.shape[0])
        # pyutils.toc('py-align')
        aligned_depth = aligned_depth_cpp
        vis_aligned_depth = vis_depth(aligned_depth)
        vis_frames.append(vis_aligned_depth)
        vis_labels.append('Aligned Depth')
        aligned_ir = aligned_ir_cpp
        vis_aligned_ir = vis_infrared(aligned_ir)
        # vis_frames.append(vis_aligned_ir)
        # vis_labels.append('Aligned IR')
        # color_resized = cv2.resize(color, (aligned_depth.shape[1], aligned_depth.shape[0]))
        # vis_frames.append(color_resized)
        # vis_labels.append('color resized')
        # vis_frames.append(imvis.overlay(vis_aligned_depth, color_resized, 0.7))#, aligned_depth > 0))
        # Blend the aligned depth/IR over the color image for visual inspection.
        vis_frames.append(imvis.overlay(vis_aligned_depth, color, 0.7))
        vis_labels.append('RGB+D')
        vis_frames.append(imvis.overlay(vis_aligned_ir, color, 0.7))
        vis_labels.append('RGB+IR')
        # Visualization (rescale, overlay a label, show a single collage)
        bg_color = (180, 180, 180)
        vis_frames = [
            imutils.aspect_aware_resize(f, (640, 480), padding_value=bg_color)[0]
            for f in vis_frames
        ]
        vis_frames = overlay_labels(vis_frames, vis_labels)
        # Arrange the frames in two rows (rounding up for odd counts).
        img_per_row = len(vis_frames) // 2
        img_per_row = img_per_row if len(
            vis_frames) % 2 == 0 else img_per_row + 1
        collage = imvis.make_collage(vis_frames,
                                     bg_color=bg_color,
                                     num_images_per_row=img_per_row)
        # 'q' or ESC terminates the stream loop.
        k = imvis.imshow(collage, title='Stream', wait_ms=20)
        if k == ord('q') or k == 27:
            break
        num_frames_processed += 1
def _find_initial_grid_points_correlation(preproc, transform, pattern_specs, det_params, vis=None):
    """Estimate initial calibration grid points via normalized cross-correlation.

    Warps the preprocessed image into the reference template frame, matches
    the cropped circle template (TM_CCOEFF_NORMED), and greedily extracts NCC
    peaks above ``det_params.grid_ccoeff_thresh_initial`` with non-maximum
    suppression. Returns ``(initial_estimates, vis)``; ``(None, vis)`` if no
    homography could be computed.
    """
    pyutils.tic('initial grid estimate - correlation')  # TODO remove
    ctpl = pattern_specs.calibration_template  # Alias
    # Homography mapping the observed marker corners onto the reference template.
    coords_dst = points2numpy(ctpl.refpts_full_marker)
    coords_src = points2numpy(transform.marker_corners)
    H = cv2.getPerspectiveTransform(coords_src, coords_dst)
    if H is None:
        return None, vis
    h, w = ctpl.tpl_full.shape[:2]
    # FIX: interpolation must be passed via `flags=`; the 4th positional
    # parameter of cv2.warpPerspective is `dst`, not the interpolation flag.
    warped_img = cv2.warpPerspective(preproc.bw, H, (w, h), flags=cv2.INTER_CUBIC)
    # Validity mask of the warped image region (used only for debug overlay).
    warped_mask = cv2.warpPerspective(
        np.ones(preproc.bw.shape[:2], dtype=np.uint8), H, (w, h),
        flags=cv2.INTER_NEAREST)
    ncc = cv2.matchTemplate(
        warped_img, ctpl.tpl_cropped_circle,
        cv2.TM_CCOEFF_NORMED)  # mask must be template size??
    # Suppress sub-threshold responses up front.
    ncc[ncc < det_params.grid_ccoeff_thresh_initial] = 0
    if det_params.debug:
        overlay = imutils.ensure_c3(
            imvis.overlay(ctpl.tpl_full, 0.3, warped_img, warped_mask))
        # Draw the projected image boundary into the debug overlay.
        warped_img_corners = pru.apply_projection(
            H, points2numpy(image_corners(preproc.bw), Nx2=False))
        for i in range(4):
            pt1 = numpy2cvpt(warped_img_corners[:, i])
            pt2 = numpy2cvpt(warped_img_corners[:, (i + 1) % 4])
            cv2.line(overlay, pt1, pt2, color=(0, 0, 255), thickness=3)
    # FIXME FIXME FIXME
    # Idea: detect blobs in thresholded NCC - this could replace the greedy
    # NMS below; the barycenter/centroid of each blob gives the top-left
    # corner (then compute the relative offset to get the initial corner guess).
    initial_estimates = list()
    tpl = ctpl.tpl_cropped_circle
    # NOTE(review): if grid_ccoeff_thresh_initial <= 0 this loop never breaks,
    # because suppressed responses are set to exactly 0 - confirm the
    # threshold is always positive.
    while True:
        # Strongest remaining NCC response.
        y, x = np.unravel_index(ncc.argmax(), ncc.shape)
        if ncc[y, x] < det_params.grid_ccoeff_thresh_initial:
            break
        initial_estimates.append(
            CalibrationGridPoint(x=x, y=y, score=ncc[y, x]))
        # Greedy NMS: clear the NCC peak around the detected template.
        left = x - tpl.shape[1] // 2
        top = y - tpl.shape[0] // 2
        left, top, nms_w, nms_h = imutils.clip_rect_to_image(
            (left, top, tpl.shape[1], tpl.shape[0]),
            ncc.shape[1], ncc.shape[0])
        right = left + nms_w
        bottom = top + nms_h
        ncc[top:bottom, left:right] = 0
        if det_params.debug:
            cv2.rectangle(overlay, (x, y),
                          (x + ctpl.tpl_cropped_circle.shape[1],
                           y + ctpl.tpl_cropped_circle.shape[0]),
                          (255, 0, 255))
            if len(initial_estimates) % 20 == 0:
                # imvis.imshow(imvis.pseudocolor(ncc, [-1, 1]), 'NCC Result', wait_ms=10)
                imvis.imshow(overlay, 'Points by correlation', wait_ms=10)
    if vis is not None:
        cv2.drawContours(vis, [transform.shape['hull']], 0, (200, 0, 200), 3)
    if det_params.debug:
        print('Check "Points by correlation". Press key to continue')
        imvis.imshow(overlay, 'Points by correlation', wait_ms=-1)
    pyutils.toc('initial grid estimate - correlation')  # TODO remove
    return initial_estimates, vis