def test_point_to_plane_gradICP_transform1(self):
    device = torch.device("cuda")
    channels_first = False
    colors, depths, intrinsics, poses = load_test_data(channels_first, batch_size=1)
    rgbdimages = RGBDImages(
        colors.to(device),
        depths.to(device),
        intrinsics.to(device),
        poses.to(device),
        channels_first=channels_first,
    )
    sigma = 0.6
    src_pointclouds = pointclouds_from_rgbdimages(rgbdimages[:, 0]).to(device)

    # Ground-truth transform: rotation of 0.2 rad about the x-axis plus a small translation.
    rad = 0.2
    transform = torch.tensor(
        [
            [1.0, 0.0, 0.0, 0.05],
            [0.0, np.cos(rad), -np.sin(rad), 0.03],
            [0.0, np.sin(rad), np.cos(rad), 0.01],
            [0.0, 0.0, 0.0, 1.0],
        ],
        device=device,
        dtype=colors.dtype,
    )
    # transform = torch.tensor(
    #     [
    #         [np.cos(rad), -np.sin(rad), 0.0, 0.05],
    #         [np.sin(rad), np.cos(rad), 0.0, 0.03],
    #         [0.0, 0.0, 1.0, 0.01],
    #         [0.0, 0.0, 0.0, 1.0],
    #     ],
    #     device=device,
    #     dtype=colors.dtype,
    # )
    tgt_pointclouds = src_pointclouds.transform(transform)

    src_pc = src_pointclouds.points_padded
    tgt_pc = tgt_pointclouds.points_padded
    tgt_normals = tgt_pointclouds.normals_padded

    # gradICP should recover the ground-truth transform from an identity initialization.
    initial_transform = torch.eye(4, device=device)
    numiters = 100
    damp = 1e-8
    dist_thresh = None
    t, idx = point_to_plane_gradICP(
        src_pc,
        tgt_pc,
        tgt_normals,
        initial_transform,
        numiters,
        damp,
        dist_thresh,
    )
    assert t.shape == transform.shape
    assert_allclose(t, transform)
def test_gradICP_provide(self):
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    channels_first = False
    colors, depths, intrinsics, poses = load_test_data(channels_first, batch_size=1)
    rgbdimages = RGBDImages(
        colors.to(device),
        depths.to(device),
        intrinsics.to(device),
        poses.to(device),
        channels_first=channels_first,
    )
    sigma = 0.6
    src_pointclouds = pointclouds_from_rgbdimages(rgbdimages[:, 0]).to(device)

    # Ground-truth transform: rotation of 0.1 rad about the z-axis plus a small translation.
    rad = 0.1
    transform = torch.tensor(
        [
            [np.cos(rad), -np.sin(rad), 0.0, 0.05],
            [np.sin(rad), np.cos(rad), 0.0, 0.03],
            [0.0, 0.0, 1.0, 0.01],
            [0.0, 0.0, 0.0, 1.0],
        ],
        device=device,
        dtype=colors.dtype,
    )
    tgt_pointclouds = src_pointclouds.transform(transform)

    numiters = 30
    damp = 1e-8
    dist_thresh = 0.2
    lambda_max = 2.0
    B = 1.0
    B2 = 1.0
    nu = 200.0
    odom = GradICPOdometryProvider(
        numiters=numiters,
        damp=damp,
        dist_thresh=dist_thresh,
        lambda_max=lambda_max,
        B=B,
        B2=B2,
        nu=nu,
    )
    # The provider returns a (B, 1, 4, 4) batch of transforms; squeeze to 4x4 for comparison.
    odom_transform = odom.provide(tgt_pointclouds, src_pointclouds)
    odom_transform = odom_transform.squeeze(1).squeeze(0)

    assert odom_transform.shape == transform.shape
    assert_allclose(odom_transform, transform)
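# Hedged sketch (not part of the original suite): the tests above build SE(3)
# matrices inline. A small helper like the hypothetical `make_z_rotation_transform`
# below captures the same pattern -- a rotation of `rad` radians about the z-axis
# composed with a translation (tx, ty, tz) -- and could stand in for the inline tensors.
def make_z_rotation_transform(rad, tx, ty, tz, device, dtype):
    return torch.tensor(
        [
            [np.cos(rad), -np.sin(rad), 0.0, tx],
            [np.sin(rad), np.cos(rad), 0.0, ty],
            [0.0, 0.0, 1.0, tz],
            [0.0, 0.0, 0.0, 1.0],
        ],
        device=device,
        dtype=dtype,
    )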
def test_pointclouds_from_rgbdimages(self, channels_first, device):
    device = default_to_cpu_if_no_gpu(device)
    colors, depths, intrinsics, poses = load_test_data(channels_first)
    rgbdimages = RGBDImages(
        colors.to(device),
        depths.to(device),
        intrinsics.to(device),
        poses.to(device),
        channels_first=channels_first,
    )
    pointclouds = pointclouds_from_rgbdimages(rgbdimages[:, 0]).to(device)

    # Project the pointcloud back to the image plane; the projected (u, v)
    # coordinates should coincide with the meshgrid of valid-depth pixels.
    projected_pointclouds = pointclouds.pinhole_projection(
        intrinsics.to(device).squeeze(1)
    )
    proj0 = projected_pointclouds.points_list[0][..., :-1]
    meshgrid = (
        create_meshgrid(rgbdimages.shape[2], rgbdimages.shape[3], False)
        .to(device)
        .squeeze(0)
    )
    meshgrid = torch.cat(
        [
            meshgrid[..., 1:],
            meshgrid[..., 0:1],
        ],
        -1,
    )
    groundtruth = meshgrid[rgbdimages[0, 0].valid_depth_mask.squeeze()]
    assert_allclose(proj0.round().float(), groundtruth.float())

    # Without filtering missing depths: every filtered point must appear,
    # in order, among the unfiltered points.
    pointclouds2 = pointclouds_from_rgbdimages(
        rgbdimages[:, 0], filter_missing_depths=False
    ).to(device)
    for b in range(len(pointclouds)):
        filtered_points = pointclouds.points_list[b]
        unfiltered_points = pointclouds2.points_list[b]
        m = 0
        for n in range(len(filtered_points)):
            while not ((filtered_points[n] - unfiltered_points[m]) ** 2).sum() < 1e-12:
                m += 1
            assert ((filtered_points[n] - unfiltered_points[m]) ** 2).sum() < 1e-12
            m += 1
def test_raises_errors(self, device):
    device = default_to_cpu_if_no_gpu(device)
    channels_first = False
    colors, depths, intrinsics, poses = load_test_data(channels_first)
    rgbdimages = RGBDImages(
        colors.to(device),
        depths.to(device),
        intrinsics.to(device),
        poses.to(device),
        channels_first=channels_first,
    )
    sigma = 0.6

    # Passing a raw tensor instead of an RGBDImages instance should raise.
    with pytest.raises(
        TypeError, match="Expected rgbdimages to be of type gradslam.RGBDImages"
    ):
        pointclouds = pointclouds_from_rgbdimages(depths).to(device)

    # Passing a multi-frame sequence (length > 1) should raise.
    with pytest.raises(
        ValueError, match="Expected rgbdimages to have sequence length of 1"
    ):
        pointclouds = pointclouds_from_rgbdimages(rgbdimages).to(device)
def init_rgbdimages(
    use_poses=True,
    channels_first=False,
    device: str = "cpu",
):
    device = torch.device(device)
    colors, depths, intrinsics, poses = load_test_data(channels_first)
    if use_poses:
        rgbdimages = RGBDImages(
            colors.to(device),
            depths.to(device),
            intrinsics.to(device),
            poses.to(device),
            channels_first=channels_first,
        )
    else:
        rgbdimages = RGBDImages(
            colors.to(device),
            depths.to(device),
            intrinsics.to(device),
            channels_first=channels_first,
        )
    return rgbdimages, colors, depths, intrinsics, poses
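# Hedged smoke test (not from the original file) exercising the init_rgbdimages
# helper above. It assumes only what the surrounding tests already rely on:
# RGBDImages exposes a .shape whose first two entries are the batch and
# sequence dimensions of the input tensors; adjust if the fixtures differ.
def test_init_rgbdimages_smoke():
    rgbdimages, colors, depths, intrinsics, poses = init_rgbdimages(
        use_poses=True, channels_first=False, device="cpu"
    )
    assert rgbdimages.shape[0] == colors.shape[0]
    assert rgbdimages.shape[1] == colors.shape[1]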