Example #1
    def test_groundtruth_raises_value_error(self, device):
        device = default_to_cpu_if_no_gpu(device)
        channels_first = False
        rgbdimages = TestGroundTruth.init_rgbdimages(
            device=device, channels_first=channels_first)
        nopose_rgbdimages = TestGroundTruth.init_rgbdimages(device=device)
        nopose_rgbdimages.poses = None
        batch_rgbdimages = RGBDImages(
            rgbdimages.rgb_image.repeat(2, 1, 1, 1, 1),
            rgbdimages.depth_image.repeat(2, 1, 1, 1, 1),
            rgbdimages.intrinsics.repeat(2, 1, 1, 1),
            rgbdimages.poses.repeat(2, 1, 1, 1),
            channels_first=channels_first,
        ).to(device)
        odom = GroundTruthOdometryProvider()
        # Either frame missing its poses should raise.
        with pytest.raises(ValueError):
            odom.provide(rgbdimages[:, 0], nopose_rgbdimages[:, 1])
        with pytest.raises(ValueError):
            odom.provide(nopose_rgbdimages[:, 0], rgbdimages[:, 1])
        # Sequence lengths other than 1 should raise.
        with pytest.raises(ValueError):
            odom.provide(rgbdimages[:, 0], rgbdimages)
        with pytest.raises(ValueError):
            odom.provide(rgbdimages, rgbdimages[:, 1])
        # Mismatched batch sizes (1 vs 2) should raise.
        with pytest.raises(ValueError):
            odom.provide(rgbdimages, batch_rgbdimages)
Example #2
    def test_point_to_plane_gradICP_transform1(self):
        device = torch.device("cuda")  # this test assumes a CUDA-capable GPU
        channels_first = False
        colors, depths, intrinsics, poses = load_test_data(channels_first,
                                                           batch_size=1)
        rgbdimages = RGBDImages(
            colors.to(device),
            depths.to(device),
            intrinsics.to(device),
            poses.to(device),
            channels_first=channels_first,
        )
        sigma = 0.6
        src_pointclouds = pointclouds_from_rgbdimages(
            rgbdimages[:, 0]).to(device)
        rad = 0.2
        transform = torch.tensor(
            [
                [1.0, 0.0, 0.0, 0.05],
                [0.0, np.cos(rad), -np.sin(rad), 0.03],
                [0.0, np.sin(rad), np.cos(rad), 0.01],
                [0.0, 0.0, 0.0, 1.0],
            ],
            device=device,
            dtype=colors.dtype,
        )
        # (A z-axis rotation, as in example #4 below, works as an
        # alternative test transform.)
        tgt_pointclouds = src_pointclouds.transform(transform)

        src_pc = src_pointclouds.points_padded
        tgt_pc = tgt_pointclouds.points_padded
        tgt_normals = tgt_pointclouds.normals_padded
        initial_transform = torch.eye(4, device=device)
        numiters = 100
        damp = 1e-8
        dist_thresh = None
        t, idx = point_to_plane_gradICP(
            src_pc,
            tgt_pc,
            tgt_normals,
            initial_transform,
            numiters,
            damp,
            dist_thresh,
        )

        assert t.shape == transform.shape
        assert_allclose(t, transform)
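The 4x4 matrix assembled by hand above is a homogeneous SE(3) transform: a rotation about the x-axis by `rad`, followed by a small translation. A hypothetical helper (for illustration only; not part of gradslam) that builds such matrices:

    import math

    def se3_about_x(rad, tx, ty, tz, device=None, dtype=torch.float32):
        # Rotation about the x-axis by `rad`, plus translation (tx, ty, tz).
        T = torch.eye(4, device=device, dtype=dtype)
        T[1, 1] = math.cos(rad)
        T[1, 2] = -math.sin(rad)
        T[2, 1] = math.sin(rad)
        T[2, 2] = math.cos(rad)
        T[0, 3], T[1, 3], T[2, 3] = tx, ty, tz
        return T

    # transform above == se3_about_x(0.2, 0.05, 0.03, 0.01,
    #                                device=device, dtype=colors.dtype)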
Example #3
def init_rgbdimages(
    use_poses=True,
    channels_first=False,
    device: str = "cpu",
):
    # Test helper: build an RGBDImages object from the shared test data,
    # with or without ground-truth poses.
    device = torch.device(device)
    colors, depths, intrinsics, poses = load_test_data(channels_first)
    if use_poses:
        rgbdimages = RGBDImages(
            colors.to(device),
            depths.to(device),
            intrinsics.to(device),
            poses.to(device),
            channels_first=channels_first,
        )
    else:
        rgbdimages = RGBDImages(
            colors.to(device),
            depths.to(device),
            intrinsics.to(device),
            channels_first=channels_first,
        )
    return rgbdimages, colors, depths, intrinsics, poses
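A hypothetical call site for this helper; the final assertion assumes RGBDImages leaves `poses` as None when none are passed, consistent with `nopose_rgbdimages.poses = None` in example #1:

    rgbdimages, colors, depths, intrinsics, poses = init_rgbdimages(
        use_poses=False, device="cpu")
    assert rgbdimages.poses is None  # no poses were supplied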
Example #4
    def test_gradICP_provide(self):
        device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        channels_first = False
        colors, depths, intrinsics, poses = load_test_data(channels_first,
                                                           batch_size=1)
        rgbdimages = RGBDImages(
            colors.to(device),
            depths.to(device),
            intrinsics.to(device),
            poses.to(device),
            channels_first=channels_first,
        )
        sigma = 0.6
        src_pointclouds = pointclouds_from_rgbdimages(
            rgbdimages[:, 0]).to(device)
        rad = 0.1
        transform = torch.tensor(
            [
                [np.cos(rad), -np.sin(rad), 0.0, 0.05],
                [np.sin(rad), np.cos(rad), 0.0, 0.03],
                [0.0, 0.0, 1.0, 0.01],
                [0.0, 0.0, 0.0, 1.0],
            ],
            device=device,
            dtype=colors.dtype,
        )
        tgt_pointclouds = src_pointclouds.transform(transform)

        numiters = 30
        damp = 1e-8
        dist_thresh = 0.2
        lambda_max = 2.0
        B = 1.0
        B2 = 1.0
        nu = 200.0
        odom = GradICPOdometryProvider(
            numiters=numiters,
            damp=damp,
            dist_thresh=dist_thresh,
            lambda_max=lambda_max,
            B=B,
            B2=B2,
            nu=nu,
        )
        odom_transform = odom.provide(tgt_pointclouds, src_pointclouds)
        odom_transform = odom_transform.squeeze(1).squeeze(0)

        assert odom_transform.shape == transform.shape
        assert_allclose(odom_transform, transform)
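Independent of the assert_allclose comparison, a recovered rigid transform can be sanity-checked on its own: the top-left 3x3 block must be a rotation matrix (orthonormal, determinant +1). A hedged extra check:

        # The 3x3 block of a rigid transform is orthonormal with det +1.
        R = odom_transform[:3, :3]
        I3 = torch.eye(3, device=R.device, dtype=R.dtype)
        assert torch.allclose(R @ R.T, I3, atol=1e-4)
        assert abs(torch.linalg.det(R).item() - 1.0) < 1e-4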
Example #5
    def test_pointclouds_from_rgbdimages(self, channels_first, device):
        device = default_to_cpu_if_no_gpu(device)
        colors, depths, intrinsics, poses = load_test_data(channels_first)
        rgbdimages = RGBDImages(
            colors.to(device),
            depths.to(device),
            intrinsics.to(device),
            poses.to(device),
            channels_first=channels_first,
        )

        pointclouds = pointclouds_from_rgbdimages(rgbdimages[:, 0]).to(device)
        projected_pointclouds = pointclouds.pinhole_projection(
            intrinsics.to(device).squeeze(1))
        proj0 = projected_pointclouds.points_list[0][..., :-1]
        meshgrid = create_meshgrid(
            rgbdimages.shape[2], rgbdimages.shape[3], False).to(device).squeeze(0)
        # Reorder the two coordinate channels to match the projection's
        # (u, v) layout.
        meshgrid = torch.cat([meshgrid[..., 1:], meshgrid[..., 0:1]], -1)
        groundtruth = meshgrid[rgbdimages[0, 0].valid_depth_mask.squeeze()]

        assert_allclose(proj0.round().float(), groundtruth.float())

        # without filtering missing depths
        pointclouds2 = pointclouds_from_rgbdimages(
            rgbdimages[:, 0], filter_missing_depths=False).to(device)

        # Each filtered cloud should appear, in order, as a subsequence of the
        # corresponding unfiltered cloud (the filter only drops points with
        # missing depth).
        for b in range(len(pointclouds)):
            filtered_points = pointclouds.points_list[b]
            unfiltered_points = pointclouds2.points_list[b]
            m = 0
            for n in range(len(filtered_points)):
                # Advance m until the unfiltered point matches filtered_points[n].
                while not ((filtered_points[n] - unfiltered_points[m])**2).sum() < 1e-12:
                    m += 1
                assert ((filtered_points[n] - unfiltered_points[m])**2).sum() < 1e-12
                m += 1
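The nested loop verifies that each filtered cloud is an in-order subsequence of the unfiltered one. A hypothetical vectorized membership check (it drops the ordering constraint) could replace the inner loop:

            # Every filtered point should coincide with some unfiltered point.
            d = torch.cdist(filtered_points, unfiltered_points)  # (N, M) distances
            assert bool((d.min(dim=1).values < 1e-6).all())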
Example #6
    def test_downsample_rgbdimages_raises_type_error(self):
        device = default_to_cpu_if_no_gpu("cuda")
        image = (torch.tensor(
            [
                [
                    [0.0, 0.0, 0.0],
                    [1.0, 1.0, 1.0],
                    [2.0, 2.0, 2.0],
                    [3.0, 3.0, 3.0],
                ],
                [
                    [4.0, 4.0, 4.0],
                    [5.0, 5.0, 5.0],
                    [6.0, 6.0, 6.0],
                    [7.0, 7.0, 7.0],
                ],
                [
                    [8.0, 8.0, 8.0],
                    [9.0, 9.0, 9.0],
                    [10.0, 10.0, 10.0],
                    [11.0, 11.0, 11.0],
                ],
            ],
            device=device,
            dtype=torch.float,
        ).unsqueeze(0).unsqueeze(0))

        depth = torch.ones_like(image[..., :1])
        intrinsics = torch.eye(4).unsqueeze(0).unsqueeze(0).to(device)
        poses = torch.eye(4).unsqueeze(0).unsqueeze(0).to(device)
        rgbdimages = RGBDImages(image,
                                depth,
                                intrinsics,
                                poses,
                                channels_first=False).to(device)
        ds_ratio = 2
        # A valid call succeeds; a non-RGBDImages input or a non-integer
        # ratio should raise TypeError.
        ds_pointclouds = downsample_rgbdimages(rgbdimages, ds_ratio)
        with pytest.raises(TypeError):
            downsample_rgbdimages("a", ds_ratio)
        with pytest.raises(TypeError):
            downsample_rgbdimages(rgbdimages, "a")
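Given the 3x4 test image, the valid call should keep only pixels at even row and column indices (cf. example #11 below), so a quick hedged shape check could precede the error cases:

        # ds_ratio=2 on a 3x4 image keeps rows {0, 2} x cols {0, 2}: 4 points.
        assert ds_pointclouds.points_padded.shape[1] == 4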
Example #7
    def test_raises_errors(self, device):
        device = default_to_cpu_if_no_gpu(device)
        channels_first = False
        colors, depths, intrinsics, poses = load_test_data(channels_first)
        rgbdimages = RGBDImages(
            colors.to(device),
            depths.to(device),
            intrinsics.to(device),
            poses.to(device),
            channels_first=channels_first,
        )

        sigma = 0.6
        with pytest.raises(
                TypeError,
                match="Expected rgbdimages to be of type gradslam.RGBDImages"):
            pointclouds = pointclouds_from_rgbdimages(depths).to(device)

        with pytest.raises(
                ValueError,
                match="Expected rgbdimages to have sequence length of 1"):
            pointclouds = pointclouds_from_rgbdimages(rgbdimages).to(device)
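For contrast, the call that does not raise: pointclouds_from_rgbdimages expects a sequence length of 1, so slicing out a single time step first works, as in examples #2, #4, and #5:

        pointclouds = pointclouds_from_rgbdimages(rgbdimages[:, 0]).to(device)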
Example #8
def init_rgbdimages(
    channels_first: bool = False,
    device: str = "cpu",
):
    # Test helper: two frames whose ground-truth poses are different
    # z-axis rotations.
    device = torch.device(device)
    colors = torch.rand(1, 2, 32, 32, 3)
    depths = torch.rand(1, 2, 32, 32, 1)
    intrinsics = torch.rand(1, 1, 4, 4)
    rad1 = 0.1
    rad2 = 0.7
    poses = torch.tensor(
        [
            [
                [np.cos(rad1), -np.sin(rad1), 0.0, 0.05],
                [np.sin(rad1), np.cos(rad1), 0.0, 0.03],
                [0.0, 0.0, 1.0, 0.01],
                [0.0, 0.0, 0.0, 1.0],
            ],
            [
                [np.cos(rad2), -np.sin(rad2), 0.0, 0.05],
                [np.sin(rad2), np.cos(rad2), 0.0, 0.03],
                [0.0, 0.0, 1.0, 0.01],
                [0.0, 0.0, 0.0, 1.0],
            ],
        ],
        device=device,
        dtype=colors.dtype,
    ).unsqueeze(0)
    return RGBDImages(
        colors.to(device),
        depths.to(device),
        intrinsics.to(device),
        poses.to(device),
        channels_first=channels_first,
    ).to(device)
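A usage sketch: recovering the ground-truth relative motion between the two frames. This assumes the `.poses` attribute layout (batch, seq, 4, 4) seen in example #1, and uses the common inv(T0) @ T1 convention for relative pose; since both poses here are z-axis rotations, the relative rotation is by rad2 - rad1 about z, plus the induced translation.

    rgbdimages = init_rgbdimages()
    poses = rgbdimages.poses  # assumed (batch, seq, 4, 4)
    rel = torch.inverse(poses[0, 0]) @ poses[0, 1]  # frame-0 -> frame-1 motion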
Example #9
    # Scannet needs to have been extracted in our format
    dataset = Scannet(
        args.scannet_path,
        args.scannet_meta_path,
        (
            "scene0333_00",
            "scene0636_00",
        ),
        start=0,
        end=4,
        height=240,
        width=320,
    )
    loader = DataLoader(dataset=dataset, batch_size=2)
    colors, depths, intrinsics, poses, *_ = next(iter(loader))

    # create rgbdimages object
    rgbdimages = RGBDImages(colors,
                            depths,
                            intrinsics,
                            poses,
                            channels_first=False)

    # SLAM
    slam = PointFusion(odom=args.odometry, dsratio=4, device=device)
    pointclouds, recovered_poses = slam(rgbdimages)

    # visualization
    o3d.visualization.draw_geometries([pointclouds.open3d(0)])
    o3d.visualization.draw_geometries([pointclouds.open3d(1)])
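Beyond the interactive viewers, the reconstructed maps can be saved with Open3D's standard point-cloud I/O (the output file names here are arbitrary):

    o3d.io.write_point_cloud("scene0333_00_map.ply", pointclouds.open3d(0))
    o3d.io.write_point_cloud("scene0636_00_map.ply", pointclouds.open3d(1))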
Example #10
    help="Odometry method to use. Supported options:\n"
    " gt = Ground Truth odometry\n"
    " icp = Iterative Closest Point\n"
    " gradicp (*default) = Differentiable Iterative Closest Point\n",
)
args = parser.parse_args()

if __name__ == "__main__":
    # select device
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # load dataset
    if args.dataset == "icl":
        dataset = ICL(args.dataset_path, seqlen=10, height=120, width=160)
    elif args.dataset == "tum":
        dataset = TUM(args.dataset_path, seqlen=10, height=120, width=160)
    loader = DataLoader(dataset=dataset, batch_size=2)
    colors, depths, intrinsics, poses, *_ = next(iter(loader))

    # create rgbdimages object
    rgbdimages = RGBDImages(colors, depths, intrinsics, poses)

    # SLAM
    slam = ICPSLAM(odom=args.odometry, dsratio=4, device=device)
    pointclouds, recovered_poses = slam(rgbdimages)

    # visualization
    o3d.visualization.draw_geometries([pointclouds.open3d(0)])
    o3d.visualization.draw_geometries([pointclouds.open3d(1)])
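A hypothetical invocation of this script; the flag names mirror the args.* attributes read above (args.dataset, args.dataset_path, args.odometry), while the script file name is a placeholder:

    # python icpslam_example.py --dataset tum --dataset_path /path/to/TUM --odometry gradicp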
Example #11
    def test_downsample_rgbdimages(self, device):
        device = default_to_cpu_if_no_gpu(device)
        image = (torch.tensor(
            [
                [
                    [0.0, 0.0, 0.0],
                    [1.0, 1.0, 1.0],
                    [2.0, 2.0, 2.0],
                    [3.0, 3.0, 3.0],
                ],
                [
                    [4.0, 4.0, 4.0],
                    [5.0, 5.0, 5.0],
                    [6.0, 6.0, 6.0],
                    [7.0, 7.0, 7.0],
                ],
                [
                    [8.0, 8.0, 8.0],
                    [9.0, 9.0, 9.0],
                    [10.0, 10.0, 10.0],
                    [11.0, 11.0, 11.0],
                ],
            ],
            device=device,
            dtype=torch.float,
        ).unsqueeze(0).unsqueeze(0))

        depth = torch.ones_like(image[..., :1])
        intrinsics = torch.eye(4).unsqueeze(0).unsqueeze(0).to(device)
        poses = torch.eye(4).unsqueeze(0).unsqueeze(0).to(device)
        rgbdimages = RGBDImages(image,
                                depth,
                                intrinsics,
                                poses,
                                channels_first=False).to(device)
        ds_ratio = 2
        ds_pointclouds = downsample_rgbdimages(rgbdimages, ds_ratio)
        # With identity intrinsics/pose and unit depth, pixel (u, v)
        # back-projects to (u, v, 1); ds_ratio=2 keeps the pixels at even
        # (row, col) indices.
        groundtruth_points = torch.tensor(
            [
                [0.0, 0.0, 1.0],
                [2.0, 0.0, 1.0],
                [0.0, 2.0, 1.0],
                [2.0, 2.0, 1.0],
            ],
            device=device,
            dtype=torch.float,
        ).unsqueeze(0)
        groundtruth_colors = torch.tensor(
            [
                [0.0, 0.0, 0.0],
                [2.0, 2.0, 2.0],
                [8.0, 8.0, 8.0],
                [10.0, 10.0, 10.0],
            ],
            device=device,
            dtype=torch.float,
        ).unsqueeze(0)
        groundtruth_normals = rgbdimages.normal_map[
            ..., ::ds_ratio, ::ds_ratio, :]
        groundtruth_normals = groundtruth_normals.reshape(
            1, ds_pointclouds.normals_padded.shape[1], 3)

        assert ds_pointclouds.points_padded.shape == groundtruth_points.shape
        assert ds_pointclouds.colors_padded.shape == groundtruth_colors.shape
        assert ds_pointclouds.normals_padded.shape == groundtruth_normals.shape
        assert_allclose(ds_pointclouds.points_padded, groundtruth_points)
        assert_allclose(ds_pointclouds.colors_padded, groundtruth_colors)
        assert_allclose(ds_pointclouds.normals_padded, groundtruth_normals)
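A hedged re-derivation of groundtruth_points: with identity intrinsics, identity pose, and unit depth, the back-projected point at pixel (u, v) is (u, v, 1), so the downsampled cloud is exactly the even pixel grid (torch.meshgrid's indexing argument needs PyTorch >= 1.10):

        # u in {0, 2} (of width 4), v in {0, 2} (of height 3).
        us, vs = torch.meshgrid(
            torch.arange(0, 4, 2), torch.arange(0, 3, 2), indexing="xy")
        pts = torch.stack([us, vs, torch.ones_like(us)], -1)
        assert torch.equal(
            pts.reshape(1, -1, 3).float(), groundtruth_points.cpu())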