Example #1
    def test_cached_synchronized_scene_dataset(self):
        """Test cached synchronized scene dataset"""

        # Initialize synchronized dataset with 2 datums
        scenes_dataset_json = os.path.join(self.DGP_TEST_DATASET_DIR,
                                           "test_scene",
                                           "scene_dataset_v1.0.json")

        # Initialize the dataset and record the set of newly cached files.
        dataset_args = (scenes_dataset_json, )
        dataset_kwargs = dict(split='train',
                              datum_names=('LIDAR', 'CAMERA_01'),
                              requested_annotations=("bounding_box_2d",
                                                     "bounding_box_3d"))
        dataset = diskcache(protocol='pkl')(SynchronizedSceneDataset)(
            *dataset_args, **dataset_kwargs)
        cached_files = set(glob.glob(os.path.join(DGP_CACHE_DIR, '*.pkl')))

        # There are only 2 scenes and 6 samples in the train and val splits.
        assert_true(len(dataset) == 6)

        # Reinitialize dataset, this should load the cached version.
        cached_dataset = diskcache(protocol='pkl')(SynchronizedSceneDataset)(
            *dataset_args, **dataset_kwargs)
        # Check that the set of cached files has not changed.
        assert_true(
            set(cached_files) == set(
                glob.glob(os.path.join(DGP_CACHE_DIR, '*.pkl'))))
        assert_true(len(cached_dataset) == len(dataset))
        assert_true(cached_dataset.datum_index == dataset.datum_index)
        assert_true(
            cached_dataset.dataset_item_index == dataset.dataset_item_index)
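For reference, the diskcache(protocol='pkl') wrapper used above follows a pickle-to-disk memoization pattern: the constructed dataset is serialized under a key derived from the constructor arguments, and reconstructing with the same arguments reloads the pickle instead of rebuilding the index. A minimal sketch of that pattern (the cache directory, key scheme, and decorator name are illustrative assumptions, not dgp's actual implementation):

import hashlib
import os
import pickle

CACHE_DIR = '/tmp/dgp_cache'  # stand-in for DGP_CACHE_DIR

def diskcache_sketch(protocol='pkl'):
    """Hypothetical decorator: pickle the constructed object to disk,
    keyed by the constructor arguments, and reload it on cache hits."""
    def wrap(cls):
        def construct(*args, **kwargs):
            key = hashlib.md5(
                repr((cls.__name__, args, sorted(kwargs.items()))).encode()).hexdigest()
            path = os.path.join(CACHE_DIR, '{}.{}'.format(key, protocol))
            if os.path.exists(path):
                with open(path, 'rb') as f:
                    return pickle.load(f)
            obj = cls(*args, **kwargs)
            os.makedirs(CACHE_DIR, exist_ok=True)
            with open(path, 'wb') as f:
                pickle.dump(obj, f)
            return obj
        return construct
    return wrap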
Example #2
    def test_camera_utils(self):
        """Test camera class in dgp.utils.torch_extension.Camera"""
        fx = fy = 500.
        B, H, W = 10, 480, 640
        cx = W / 2 - 0.5
        cy = H / 2 - 0.5
        inv_depth = torch.rand((B, 1, H, W))

        # Create a camera at identity and reconstruct point cloud from depth
        cam = Camera.from_params(fx, fy, cx, cy, p_cw=None, B=B)
        X = cam.reconstruct(1. / (inv_depth + 1e-6))
        assert_true(tuple(X.shape) == (B, 3, H, W))

        # Project the point cloud back into the image
        uv_pred = cam.project(X)
        assert_true(tuple(uv_pred.shape) == (B, 2, H, W))

        # The image grid and the projected points should be identical since
        # we reconstructed and projected without any rotation/translation.
        grid = image_grid(B,
                          H,
                          W,
                          inv_depth.dtype,
                          inv_depth.device,
                          normalized=True)
        uv = grid[:, :2]
        assert_true(np.allclose(uv.numpy(), uv_pred.numpy(), atol=1e-6))

        # Backproject ray from the sampled 2d image points
        sparse_uv2d = image_grid(B,
                                 H,
                                 W,
                                 inv_depth.dtype,
                                 inv_depth.device,
                                 normalized=False)[:, :2, ::10, ::10]
        sparse_uv2d = sparse_uv2d.contiguous().view(B, 2, -1)

        # Unproject to 3d rays (x, y, 1): B3N
        sparse_rays = cam.unproject(sparse_uv2d)
        sparse_inv_depth = torch.rand((B, 1, sparse_uv2d.shape[-1]))
        sparse_X = sparse_rays * sparse_inv_depth.repeat([1, 3, 1])
        assert_true(tuple(sparse_X.shape) == (B, 3, sparse_uv2d.shape[-1]))

        # Check if cam.project() without input shape raises an error
        with assert_raises(AssertionError) as _:
            sparse_uv2d_pred = cam.project(sparse_rays)

        # Camera project provides uv in normalized coordinates
        sparse_uv2d_pred = cam.project(sparse_X, shape=(H, W))

        # Normalize uv2d
        sparse_uv2d_norm = sparse_uv2d.clone()
        sparse_uv2d_norm[:, 0] = 2 * sparse_uv2d[:, 0] / (W - 1) - 1.
        sparse_uv2d_norm[:, 1] = 2 * sparse_uv2d[:, 1] / (H - 1) - 1.
        assert_true(
            np.allclose(sparse_uv2d_norm.numpy(),
                        sparse_uv2d_pred.numpy(),
                        atol=1e-6))
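The normalization asserted above maps a pixel coordinate u in [0, W-1] to [-1, 1] via 2u / (W - 1) - 1. A self-contained NumPy sketch of the same pinhole round trip and normalization, with no dgp dependency (the pixel and depth values are arbitrary):

import numpy as np

# Pinhole round trip: backproject a pixel at depth d, then reproject it.
fx = fy = 500.
H, W = 480, 640
cx, cy = W / 2 - 0.5, H / 2 - 0.5
K = np.array([[fx, 0., cx], [0., fy, cy], [0., 0., 1.]])

u, v, d = 100., 200., 5.
X = d * np.linalg.inv(K).dot([u, v, 1.])  # 3D point along the pixel's ray
u_, v_, w_ = K.dot(X)
u_, v_ = u_ / w_, v_ / w_                 # reprojected pixel coordinates
assert np.allclose([u, v], [u_, v_], atol=1e-6)

# Normalized image coordinates in [-1, 1], matching the test's convention
u_norm = 2 * u_ / (W - 1) - 1.
v_norm = 2 * v_ / (H - 1) - 1.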
Example #3
    def test_quaternion_rotation_conversions(self):
        """Test quaternion <-> rotation matrix round-trip conversions"""
        q_wxyz = torch.from_numpy(make_random_quaternion())
        R = quaternion_to_rotation_matrix(q_wxyz)
        q_wxyz_ = rotation_matrix_to_quaternion(R)
        # Check that either q == q' or q == -q' (since q and -q represent
        # the same rotation)
        assert_true(
            np.allclose(q_wxyz.numpy(), q_wxyz_.numpy(), atol=1e-6)
            or np.allclose(q_wxyz.numpy(), -q_wxyz_.numpy(), atol=1e-6))
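The two-sided check is necessary because unit quaternions double-cover SO(3): q and -q encode the same rotation, so a matrix-to-quaternion conversion may legitimately return either sign. A quick NumPy verification of that fact using the standard (w, x, y, z) convention (quat_to_rotmat is a local helper, not a dgp function):

import numpy as np

def quat_to_rotmat(q):
    """Rotation matrix from a unit quaternion in (w, x, y, z) order."""
    w, x, y, z = q
    return np.array([
        [1 - 2 * (y * y + z * z), 2 * (x * y - w * z), 2 * (x * z + w * y)],
        [2 * (x * y + w * z), 1 - 2 * (x * x + z * z), 2 * (y * z - w * x)],
        [2 * (x * z - w * y), 2 * (y * z + w * x), 1 - 2 * (x * x + y * y)],
    ])

q = np.random.randn(4)
q /= np.linalg.norm(q)
# Every term of R is quadratic in the quaternion components, so negating
# q leaves the rotation matrix unchanged.
assert np.allclose(quat_to_rotmat(q), quat_to_rotmat(-q), atol=1e-12)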
Example #4
    def test_pose_utils_equivalence(self):
        """Test pose transform equivalance with dgp.utils.geometry"""
        poses_np = [
            NumpyPose(wxyz=pvec[:4], tvec=pvec[4:]) for pvec in self.pvecs
        ]
        poses = [Pose(tf) for tf in self.tfs]
        qposes = [QuaternionPose.from_matrix(tf) for tf in self.tfs]

        # Check if the pose construction and conversion to homogeneous matrices
        # are consistent across all implementations
        for p1, p2, p3 in zip(poses_np, poses, qposes):
            assert_true(np.allclose(p1.matrix, p2.matrix.numpy(), atol=1e-6))
            assert_true(np.allclose(p1.matrix, p3.matrix.numpy(), atol=1e-6))
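For context, each pvec packs a wxyz quaternion followed by a 3-vector translation, and all three pose classes should reduce to the same 4x4 homogeneous matrix. A sketch of that construction using SciPy, assuming scipy is available (note that scipy.spatial.transform.Rotation uses xyzw quaternion order, while the pvec layout mirrors the NumpyPose(wxyz=..., tvec=...) call above):

import numpy as np
from scipy.spatial.transform import Rotation

def matrix_from_pvec(pvec):
    """4x4 homogeneous matrix from a 7-vector (qw, qx, qy, qz, tx, ty, tz)."""
    w, x, y, z = pvec[:4]
    T = np.eye(4)
    # SciPy expects (x, y, z, w) quaternion order; pvec stores (w, x, y, z)
    T[:3, :3] = Rotation.from_quat([x, y, z, w]).as_matrix()
    T[:3, 3] = pvec[4:]
    return T

# Usage: build a random pvec (reordering scipy's xyzw output to wxyz)
pvec = np.concatenate([Rotation.random().as_quat()[[3, 0, 1, 2]],
                       np.random.randn(3)])
T = matrix_from_pvec(pvec)
assert np.allclose(T[:3, :3] @ T[:3, :3].T, np.eye(3), atol=1e-9)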
Example #5
    def test_prediction_agent_dataset_lite(self):
        """Test agent dataset loading"""
        expected_lidar_fields = set([
            'timestamp',
            'datum_name',
            'extrinsics',
            'pose',
            'point_cloud',
            'extra_channels',
            'datum_type',
        ])

        expected_camera_fields = set([
            'timestamp',
            'datum_name',
            'rgb',
            'intrinsics',
            'extrinsics',
            'pose',
            'datum_type',
        ])

        dataset = AgentDatasetLite(self.test_scene_json,
                                   self.agent_json,
                                   split='train',
                                   requested_agent_type='agent_3d',
                                   datum_names=['lidar', 'CAMERA_01'],
                                   requested_main_agent_classes=('Car',
                                                                 'Person'),
                                   requested_feature_types=("parked_car", ),
                                   batch_per_agent=False)
        # Check length of dataset
        assert len(dataset) == 6

        for item in dataset:
            for datum in item[0]['datums']:
                if datum['datum_name'] == 'LIDAR':
                    # Check LIDAR fields
                    assert_true(set(datum.keys()) == expected_lidar_fields)
                elif datum['datum_name'].startswith('CAMERA_'):
                    # CAMERA_01 should have intrinsics/extrinsics set
                    assert_true(datum['intrinsics'].shape == (3, 3))
                    assert_true(datum['extrinsics'].matrix.shape == (4, 4))
                    # Check CAMERA fields
                    assert_true(set(datum.keys()) == expected_camera_fields)
                else:
                    raise RuntimeError('Unexpected datum_name {}'.format(
                        datum['datum_name']))
Example #6
    def test_labeled_synchronized_scene_dataset(self):
        """Test synchronized scene dataset"""
        expected_camera_fields = set([
            'rgb',
            'timestamp',
            'datum_name',
            'pose',
            'intrinsics',
            'extrinsics',
            'bounding_box_2d',
            'bounding_box_3d',
            'depth',
            'datum_type',
        ])
        expected_lidar_fields = set([
            'point_cloud',
            'timestamp',
            'datum_name',
            'pose',
            'extrinsics',
            'bounding_box_2d',
            'bounding_box_3d',
            'extra_channels',
            'datum_type',
        ])
        expected_metadata_fields = set([
            'scene_index', 'sample_index_in_scene', 'log_id', 'timestamp',
            'scene_name', 'scene_description'
        ])

        # Initialize synchronized dataset with 2 datums
        scenes_dataset_json = os.path.join(self.DGP_TEST_DATASET_DIR,
                                           "test_scene",
                                           "scene_dataset_v1.0.json")
        dataset = SynchronizedSceneDataset(
            scenes_dataset_json,
            split='train',
            datum_names=['LIDAR', 'CAMERA_01'],
            forward_context=1,
            backward_context=1,
            generate_depth_from_datum='LIDAR',
            requested_annotations=("bounding_box_2d", "bounding_box_3d"))

        # There are 2 scenes with 3 samples each in the train split.
        # With a forward and backward context of 1 each, every scene
        # yields one item with the full context window, so the dataset
        # contains 2 items.
        assert len(dataset) == 2

        # Iterate through labeled dataset and check expected fields
        assert dataset.calibration_table is not None
        for idx, item in enumerate(dataset):
            # Context size is 3 (forward + backward + reference)
            assert_true(len(item) == 3)

            # Check both datum and time-dimensions for expected fields
            im_size = None
            for t_item in item:
                # Two selected datums
                assert_true(len(t_item) == 2)
                for datum in t_item:
                    if datum['datum_name'] == 'LIDAR':
                        # LIDAR should have point_cloud set
                        assert_true(set(datum.keys()) == expected_lidar_fields)
                        assert_true(isinstance(datum, OrderedDict))
                    elif datum['datum_name'].startswith('CAMERA_'):
                        # CAMERA_01 should have intrinsics/extrinsics set
                        assert_true(isinstance(datum, OrderedDict))
                        assert_true(datum['intrinsics'].shape == (3, 3))
                        assert_true(isinstance(datum['extrinsics'], Pose))
                        assert_true(isinstance(datum['pose'], Pose))
                        # Check image sizes for context frames
                        assert_true(
                            set(datum.keys()) == expected_camera_fields)
                        if im_size is None:
                            im_size = datum['rgb'].size
                        assert_true(datum['rgb'].size == im_size)
                    else:
                        raise RuntimeError('Unexpected datum_name {}'.format(
                            datum['datum_name']))

            # Retrieve metadata about dataset item at index=idx
            metadata = dataset.get_scene_metadata(idx)
            assert_true(metadata.keys() == expected_metadata_fields)
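The expected length follows from a sliding-window count: a scene with n samples yields max(0, n - backward - forward) items once a full context window is required. A sketch of that arithmetic, assuming the test data's 2 train scenes with 3 samples each:

def num_context_items(samples_per_scene, backward=1, forward=1):
    # One item per sample that has a full backward/forward context window.
    return sum(max(0, n - backward - forward) for n in samples_per_scene)

assert num_context_items([3, 3], backward=1, forward=1) == 2  # == len(dataset)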
Example #7
    def _test_labeled_dataset(dataset):
        expected_camera_fields = set([
            'rgb',
            'timestamp',
            'datum_name',
            'pose',
            'intrinsics',
            'extrinsics',
            'bounding_box_2d',
            'bounding_box_3d',
            'datum_type',
        ])
        expected_lidar_fields = set([
            'point_cloud',
            'timestamp',
            'datum_name',
            'pose',
            'extrinsics',
            'bounding_box_2d',
            'bounding_box_3d',
            'extra_channels',
            'datum_type',
        ])

        # Iterate through labeled dataset and check expected fields
        assert dataset.calibration_table is not None
        for _, item in enumerate(dataset):
            # Context size is 3 (forward + backward + reference)
            assert_true(len(item) == 3)

            # Check both datum and time-dimensions for expected fields
            im_size = None
            for t_item in item:
                # Four selected datums
                assert_true(len(t_item) == 4)
                for datum in t_item:
                    if datum['datum_name'] == 'LIDAR':
                        # LIDAR should have point_cloud set
                        assert_true(set(datum.keys()) == expected_lidar_fields)
                    elif datum['datum_name'].startswith('CAMERA_'):
                        # Camera datums should have intrinsics/extrinsics set
                        assert_true(datum['intrinsics'].shape == (3, 3))
                        assert_true(datum['extrinsics'].matrix.shape == (4, 4))
                        # Check image sizes for context frames
                        assert_true(
                            set(datum.keys()) == expected_camera_fields)
                        if im_size is None:
                            im_size = datum['rgb'].size
                        assert_true(datum['rgb'].size == im_size)
                    else:
                        raise RuntimeError('Unexpected datum_name {}'.format(
                            datum['datum_name']))
Example #8
    def test_pose_utils(self):
        """Test pose class in dgp.utils.torch_extension.Pose and dgp.utils.torch_extension.QuaternionPose"""

        # Test pose transforms
        npposes = [
            NumpyPose(wxyz=pvec[:4], tvec=pvec[4:]) for pvec in self.pvecs
        ]
        poses = [Pose(tf) for tf in self.tfs]
        qposes = [QuaternionPose.from_matrix(tf) for tf in self.tfs]

        # Test matrix composition of transformations
        final_pose_np = functools.reduce(lambda x, y: x @ y,
                                         [tf.numpy() for tf in self.tfs])
        final_pose_torch = functools.reduce(lambda x, y: x @ y, self.tfs)
        assert_true(
            np.allclose(final_pose_np, final_pose_torch.numpy(), atol=1e-6))

        # Test Pose manifold composition of transformations
        final_pose_NumpyPose = functools.reduce(lambda x, y: x * y, npposes)
        final_pose_Pose = functools.reduce(lambda x, y: x * y, poses)
        final_pose_QuaternionPose = functools.reduce(lambda x, y: x * y,
                                                     qposes)
        assert_true(
            np.allclose(final_pose_np, final_pose_NumpyPose.matrix, atol=1e-6))
        assert_true(
            np.allclose(final_pose_np,
                        final_pose_Pose.matrix.numpy(),
                        atol=1e-6))
        assert_true(
            np.allclose(final_pose_np,
                        final_pose_QuaternionPose.matrix.numpy(),
                        atol=1e-6))

        def make_random_points(B=1, N=100):
            return torch.from_numpy(np.random.rand(B, 3, N)).type(torch.float)

        # Test single point cloud transformations for some implementations
        X = make_random_points()
        Xt_ = X[0].numpy()
        X_ = Xt_.T

        # Test point cloud transformations
        X1 = final_pose_Pose * X
        X2 = final_pose_QuaternionPose * X
        X3 = final_pose_NumpyPose * X_
        X4 = final_pose_np.dot(np.vstack([Xt_, np.ones((1, len(X_)))]))

        assert_true(np.allclose(X1.numpy(), X2.numpy(), atol=1e-6))
        assert_true(np.allclose(X1.squeeze().numpy().T, X3, atol=1e-6))
        assert_true(np.allclose(X1.squeeze().numpy(), X4[:3, :], atol=1e-6))
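The agreement among these implementations reduces to two identities for rigid transforms: composing 4x4 homogeneous matrices multiplies the rotations and accumulates the translations, and the homogeneous product on lifted points equals R X + t. A self-contained NumPy check of both:

import numpy as np

def random_se3():
    """Random 4x4 rigid transform (proper rotation plus translation)."""
    Q, _ = np.linalg.qr(np.random.randn(3, 3))
    Q *= np.sign(np.linalg.det(Q))  # ensure a proper rotation (det = +1)
    T = np.eye(4)
    T[:3, :3], T[:3, 3] = Q, np.random.randn(3)
    return T

T1, T2 = random_se3(), random_se3()
T12 = T1 @ T2
# (R1, t1) * (R2, t2) = (R1 R2, R1 t2 + t1)
R12 = T1[:3, :3] @ T2[:3, :3]
t12 = T1[:3, :3] @ T2[:3, 3] + T1[:3, 3]
assert np.allclose(T12[:3, :3], R12, atol=1e-12)
assert np.allclose(T12[:3, 3], t12, atol=1e-12)

# Transforming points: X' = R X + t equals the homogeneous product.
X = np.random.randn(3, 100)
Xh = np.vstack([X, np.ones((1, X.shape[1]))])
assert np.allclose((T12 @ Xh)[:3], R12 @ X + t12[:, None], atol=1e-12)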
Example #9
    def test_labeled_synchronized_scene_dataset(self):
        """Test synchronized scene dataset"""
        expected_camera_fields = set([
            'rgb', 'timestamp', 'datum_name', 'pose', 'intrinsics',
            'extrinsics', 'bounding_box_2d', 'bounding_box_3d', 'class_ids',
            'instance_ids', 'depth'
        ])
        expected_lidar_fields = set([
            'point_cloud', 'timestamp', 'datum_name', 'pose', 'extrinsics',
            'bounding_box_3d', 'class_ids', 'instance_ids', 'extra_channels'
        ])
        expected_metadata_fields = set([
            'scene_index', 'sample_index_in_scene', 'log_id', 'timestamp',
            'scene_name', 'scene_description'
        ])

        # Initialize synchronized dataset with 2 datums
        scenes_dataset_json = os.path.join(self.DGP_TEST_DATASET_DIR,
                                           "test_scene",
                                           "scene_dataset_v1.0.json")
        dataset = SynchronizedSceneDataset(
            scenes_dataset_json,
            split='train',
            forward_context=1,
            backward_context=1,
            generate_depth_from_datum='LIDAR',
            requested_annotations=("bounding_box_2d", "bounding_box_3d"))
        dataset.select_datums(['LIDAR', 'CAMERA_01'])
        dataset.prefetch()

        # There are 2 scenes with 3 samples each in the train split.
        # With a forward and backward context of 1 each, every scene
        # yields one item with the full context window, so the dataset
        # contains 2 items.
        assert len(dataset) == 2

        # Iterate through labeled dataset and check expected fields
        assert dataset.calibration_table is not None
        for idx, item in enumerate(dataset):
            # Context size is 3 (forward + backward + reference)
            assert_true(len(item) == 3)

            # Two selected datums
            for t_item in item:
                assert_true(len(t_item) == 2)

            # LIDAR should have point_cloud set
            for t_item in item:
                assert_true(set(t_item[0].keys()) == expected_lidar_fields)
                assert_true(isinstance(t_item[0], OrderedDict))

            # CAMERA_01 should have intrinsics/extrinsics set
            im_size = None
            for t_item in item:
                assert_true(isinstance(t_item[1], OrderedDict))
                assert_true(t_item[1]['intrinsics'].shape == (3, 3))
                assert_true(isinstance(t_item[1]['extrinsics'], Pose))
                assert_true(isinstance(t_item[1]['pose'], Pose))
                # Check image sizes for context frames
                assert_true(set(t_item[1].keys()) == expected_camera_fields)
                if im_size is None:
                    im_size = t_item[1]['rgb'].size
                assert_true(t_item[1]['rgb'].size == im_size)

            # Retrieve metadata about dataset item at index=idx
            metadata = dataset.get_scene_metadata(idx)
            assert_true(metadata.keys() == expected_metadata_fields)

        # Make sure you cannot select unavailable datums
        with assert_raises(AssertionError) as _:
            dataset.select_datums(['FAKE_LIDAR_NAME'])
Example #10
    def _test_labeled_dataset(dataset):
        expected_camera_fields = set([
            'rgb', 'timestamp', 'datum_name', 'pose', 'intrinsics',
            'extrinsics', 'bounding_box_2d', 'bounding_box_3d', 'class_ids',
            'instance_ids'
        ])
        expected_lidar_fields = set([
            'point_cloud', 'timestamp', 'datum_name', 'pose', 'extrinsics',
            'bounding_box_3d', 'class_ids', 'instance_ids', 'extra_channels'
        ])

        # Iterate through labeled dataset and check expected fields
        assert dataset.calibration_table is not None
        for _, item in enumerate(dataset):
            # Context size is 3 (forward + backward + reference)
            assert_true(len(item) == 3)

            # Check both datum and time-dimensions for expected fields
            for t_item in item:
                # Four selected datums
                assert_true(len(t_item) == 4)

            # LIDAR should have point_cloud set
            for t_item in item:
                assert_true(set(t_item[0].keys()) == expected_lidar_fields)

            # CAMERA_01 should have intrinsics/extrinsics set
            im_size = None
            for t_item in item:
                assert_true(t_item[1]['intrinsics'].shape == (3, 3))
                assert_true(t_item[1]['extrinsics'].matrix.shape == (4, 4))
                # Check image sizes for context frames
                assert_true(set(t_item[1].keys()) == expected_camera_fields)
                if im_size is None:
                    im_size = t_item[1]['rgb'].size
                assert_true(t_item[1]['rgb'].size == im_size)