def test_it_should_convert_to_parameters(self):
    """A 4x4 homogeneous matrix round-trips into the expected 6-vector
    [tx, ty, tz, rx, ry, rz] (translation then Euler angles), to 4 decimals."""
    homogeneous = np.array([
        [-0.7597, -0.0316, 0.6496, 10],
        [-0.5236, -0.5626, -0.6398, 1],
        [0.3856, -0.8262, 0.4108, 2.2],
        [0, 0, 0, 1],
    ])
    expected = np.array([10, 1, 2.2, 1, 0.707, 3.1])
    parameters = Transform.from_matrix(homogeneous).to_parameters()
    np.testing.assert_almost_equal(parameters, expected, 4)
def icp(A, B, init_pose=None, max_iterations=20, tolerance=0.001):
    """The Iterative Closest Point method.

    Repeatedly matches each source point to its nearest destination point,
    solves for the rigid transform that best aligns the matched pairs, and
    applies it, stopping once the mean match distance stops improving.

    Input:
        A: Nx3 numpy array of source 3D points
        B: Nx3 numpy array of destination 3D points
        init_pose: optional 4x4 homogeneous transformation applied to A first
        max_iterations: exit algorithm after max_iterations
        tolerance: convergence criterion on the change in mean error
    Output:
        transform: final alignment of A onto B, returned as a Transform
            built from the accumulated 4x4 homogeneous matrix
            (the original docstring wrongly described this as the raw matrix)
        distances: Euclidean distances (errors) of the nearest neighbors
            from the last iteration performed
    """
    # Work in homogeneous coordinates; copy so the originals are untouched.
    src = np.ones((4, A.shape[0]))
    dst = np.ones((4, B.shape[0]))
    src[0:3, :] = np.copy(A.T)
    dst[0:3, :] = np.copy(B.T)

    # Apply the initial pose estimation, if provided.
    if init_pose is not None:
        src = np.dot(init_pose, src)

    prev_error = 0
    distances = np.array([])  # defined even if max_iterations <= 0 (original raised NameError)
    for _ in range(max_iterations):
        # Find the nearest neighbours between the current source and destination points.
        distances, indices = nearest_neighbor(src[0:3, :].T, dst[0:3, :].T)

        # Compute the best rigid transform between the current source and
        # its matched destination points.
        T, _, _ = best_fit_transform(src[0:3, :].T, dst[0:3, indices].T)

        # Update the current source.
        src = np.dot(T, src)

        # Stop once the mean error no longer changes by more than tolerance.
        mean_error = np.mean(distances)
        if abs(prev_error - mean_error) < tolerance:
            break
        prev_error = mean_error

    # Final transformation: maps the original A onto the aligned source.
    T, _, _ = best_fit_transform(A, src[0:3, :].T)
    return Transform.from_matrix(T), distances
window, (camera.width, camera.height)) vpRender.load_ambiant_occlusion_map(MODELS[0]["ambiant_occlusion_model"]) cv2.namedWindow('image') cv2.createTrackbar('transparency', 'image', 0, 100, trackbar) # todo, read from file? detection_offset = Transform() rgbd_record = False save_next_rgbd_pose = False lock_offset = False if PRELOAD: dataset.load() offset_path = os.path.join(dataset.path, "offset.npy") if os.path.exists(offset_path): detection_offset = Transform.from_matrix(np.load(offset_path)) lock_offset = True while True: start_time = time.time() bgr, depth = sensor.get_frame() bgr = cv2.resize(bgr, (int(1920 / ratio), int(1080 / ratio))) depth = cv2.resize(depth, (int(1920 / ratio), int(1080 / ratio))) screen = bgr.copy() if rgbd_record: # here we add a dummy pose, we will compute the pose as a post operation dataset.add_pose(bgr, depth, Transform()) else: detection = detector.detect(screen) # Draw a color rectangle around screen : red no detection, green strong detection
vpRender = ModelRenderer(MODELS[0]["model_path"], SHADER_PATH, camera, window) vpRender.load_ambiant_occlusion_map(MODELS[0]["ambiant_occlusion_model"]) cv2.namedWindow('image') cv2.createTrackbar('transparency', 'image', 0, 100, trackbar) # todo, read from file? detection_offset = Transform() rgbd_record = False save_next_rgbd_pose = False lock_offset = False if PRELOAD: dataset.load() if dataset.size(): detection_offset = Transform.from_matrix( np.load(os.path.join(dataset.path, "offset.npy"))) lock_offset = True while True: start_time = time.time() bgr, depth = sensor.get_frame() bgr = cv2.resize(bgr, (int(1920 / ratio), int(1080 / ratio))) depth = cv2.resize(depth, (int(1920 / ratio), int(1080 / ratio))) screen = bgr.copy() if rgbd_record: # here we add a dummy pose, we will compute the pose as a post operation dataset.add_pose(bgr, depth, Transform()) else: screen = bgr.copy() detection = detector.detect(screen)