Example #1
    def test_transform_from(self):
        '''Test transform_from.'''
        t = Vector3(1, 1, 1)
        R = Rot3()
        T = Pose3(R, t).inverse()
        cloud = PointCloud(np.zeros((3, 1)))
        new_cloud = cloud.transform_from(T)
        np.testing.assert_array_equal(new_cloud.points(),
                                      [[-1.0], [-1.0], [-1.0]])
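A minimal numpy check of the expected value (not part of the original test): with R = I and t = (1, 1, 1), Pose3(R, t).inverse() has rotation R.T = I and translation -R.T @ t = (-1, -1, -1), so the single point at the origin lands at (-1, -1, -1).

import numpy as np

R = np.eye(3)
t = np.array([1.0, 1.0, 1.0])
# inverse pose: (R, t)^-1 = (R.T, -R.T @ t)
R_inv, t_inv = R.T, -R.T @ t
p = np.zeros(3)  # the single point in the cloud above
print(R_inv @ p + t_inv)  # [-1. -1. -1.], matching the expected array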
Example #2
    def test_to_xml(self):
        '''
        Test conversion to xml.
        '''
        rot = Rot3(R=np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]))
        rot_xml = rot.to_xml()
        expected_xml = '<rotation><c1>1, 4, 7</c1><c2>2, 5, 8</c2><c3>3, 6, 9</c3>\
</rotation>'

        self.assertEqual(ET.tostring(rot_xml, encoding='unicode'), expected_xml)
Example #3
    def test_register(self):
        '''Test registering point clouds in different frames.'''
        t = Vector3(1, 1, 1)
        R = Rot3()
        T = Pose3(R, t).inverse()
        cloud = PointCloud(np.zeros((3, 1)))
        registered_cloud = PointCloud.register([(Pose3(), cloud), (T, cloud)])
        np.testing.assert_array_equal(
            registered_cloud.points(),
            np.column_stack([Vector3(0, 0, 0),
                             Vector3(-1.0, -1.0, -1.0)]))
Example #4
    def test_ENU_camera_w_photosphere(self):
        # Make sure we agree with conventions at
        # https://developers.google.com/streetview/spherical-metadata
        pitch = 0.1
        roll = 0.1
        wRt = Rot3.ENU_camera(pitch=pitch, roll=roll)

        expected_wRr = Rot3.Rx(pitch) * Rot3.Ry(roll)  # from rotated to ENU
        # But our tilted frame uses the camera convention.
        # TODO: maybe it should not! Why do we do this?
        expected_wRt = Rot3(expected_wRr * ENU_R_CAMERA)
        np.testing.assert_array_almost_equal(
            wRt.matrix(), expected_wRt.matrix(), decimal=2
        )
Example #5
    def test_from_xml(self):
        '''Conversion from xml'''

        # test common case
        s = '<rotation><c1>1, 4, 7</c1><c2>2, 5, 8</c2><c3>3, 6, 9</c3></rotation>'
        rot_xml = ET.fromstring(s)
        rot = Rot3.from_xml(rot_xml)
        expected_rot = Rot3(R=np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]))
        rot.assert_almost_equal(expected_rot)

        # test missing field
        s = '<rotation><c1>1, 4, 7</c1><c3>3, 6, 9</c3></rotation>'
        self.assertRaises(ValueError, Rot3.from_xml, ET.fromstring(s))

        # test incorrect field
        s = '<rotation><foobar>1, 4, 7</foobar><c2>2, 5, 8</c2><c3>3, 6, 9</c3>\
</rotation>'

        self.assertRaises(ValueError, Rot3.from_xml, ET.fromstring(s))
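Combining Examples #2 and #5, a hedged round-trip sketch that uses only the calls already shown above (to_xml, from_xml, assert_almost_equal):

import numpy as np
import xml.etree.ElementTree as ET
from sumo.geometry.rot3 import Rot3

rot = Rot3(R=np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]))
xml_string = ET.tostring(rot.to_xml(), encoding='unicode')  # serialize (Example #2)
recovered = Rot3.from_xml(ET.fromstring(xml_string))        # parse back (Example #5)
recovered.assert_almost_equal(rot)                          # round trip preserves R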
Example #6
    def cylindrical_symmetric_rot(self, det_rot, gt_rot, axis):
        """
        Compute rot that zeroes out the rotation error between 
        <det_rot> and <gt_rot> along axis <axis>.

        Inputs
        det_rot (Rot3) - rotation for detection
        gt_rot (Rot3) - rotation for ground truth
        axis (int) - cylindrical rotation axis (0=x, 1=y, 2=z)

        Return:
        Rot3 - new rotation matrix with gt set to det rotation on target axis

        Algorithm: 
        1. convert to zyx Euler angles
        2. set gt to detection value for target axis
        3. convert back to matrix rep
        """
        det_euler = utils.matrix_to_euler(det_rot.R)
        gt_euler = utils.matrix_to_euler(gt_rot.R)
        # note e[0] is z
        det_euler[2 - axis] = gt_euler[2 - axis]
        return Rot3(utils.euler_to_matrix(det_euler))
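A standalone sketch of the docstring's algorithm, using scipy.spatial.transform.Rotation as a stand-in for utils.matrix_to_euler / utils.euler_to_matrix (an assumption; the sumo helpers may differ in convention, but the index-swap idea is the same):

import numpy as np
from scipy.spatial.transform import Rotation

det = Rotation.from_euler('zyx', [0.3, 0.1, 0.0]).as_matrix()  # detection
gt = Rotation.from_euler('zyx', [0.9, 0.1, 0.0]).as_matrix()   # ground truth
axis = 2  # z
det_euler = Rotation.from_matrix(det).as_euler('zyx')
gt_euler = Rotation.from_matrix(gt).as_euler('zyx')
det_euler[2 - axis] = gt_euler[2 - axis]  # e[0] is the z angle, as noted above
aligned = Rotation.from_euler('zyx', det_euler).as_matrix()
print(np.allclose(aligned, gt))  # True: the error about z has been zeroed out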
Example #7
    def setUp(self):
        # Camera with z-axis looking at world y-axis
        wRc = np.transpose(
            np.array([[1, 0, 0], [0, 0, -1], [0, 1, 0]], dtype=float)
        )
        self.rot = Rot3(wRc)
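A quick hedged check of the comment above, written outside the test class: the third column of wRc is the camera z-axis expressed in world coordinates, and it should equal the world y-axis.

import numpy as np
from sumo.geometry.rot3 import Rot3

wRc = np.transpose(np.array([[1, 0, 0], [0, 0, -1], [0, 1, 0]], dtype=float))
rot = Rot3(wRc)
print(rot.matrix() @ np.array([0.0, 0.0, 1.0]))  # [0. 1. 0.] -> world y-axis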
Example #8
print('colored_category.category_id_to_rgb(133):\n',
      colored_category.category_id_to_rgb(133))
print('colored_category.category_name_to_rgb(\'shoes\'):\n',
      colored_category.category_name_to_rgb('shoes'))
print('colored_category.LUT:\n', colored_category.LUT)
lut = colored_category.LUT
print('lut.shape[0]: ', lut.shape[0], 'lut.shape[1]: ', lut.shape[1])

from sumo.base.vector import Vector2, Vector2f, Vector3, Vector3f, on_left, unitize
print('unitize(Vector3(100, 201, 50)): ', unitize(Vector3(100, 201, 50)))

# rot3_tests
from sumo.geometry.rot3 import Rot3, ENU_R_CAMERA
import numpy as np
wRc = np.transpose(np.array([[1, 0, 0], [0, 0, -1], [0, 1, 0]], dtype=float))
rot = Rot3(wRc)
print(rot.matrix())

# MultiImageTiff
from sumo.geometry.inverse_depth import depth_image_of_inverse_depth_map
from sumo.images.multi_image_tiff import MultiImageTiff, MultiImagePageType

tiff_path = parutil.get_file_path(
    '/mnt/lustre/sunjiankai/Dataset/sample_data/sumo-input/sumo-input.tif')
multi = MultiImageTiff.load(tiff_path)
print('multi.rgb.shape: ', multi.rgb.shape, 'multi.range.shape (Depth): ',
      multi.range.shape, 'multi.category.shape (Category): ',
      multi.category.shape, 'multi.instance.shape (Instance): ',
      multi.instance.shape)
# multi.rgb.shape:  (1024, 6144, 3) multi.range.shape:  (1024, 6144) multi.category.shape:  (1024, 6144) multi.instance.shape:  (1024, 6144)
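The depth_image_of_inverse_depth_map import above is never exercised in this script. A hedged follow-up, assuming multi.range holds the inverse-depth page (as the import suggests) and that the function takes that array as its only argument:

depth = depth_image_of_inverse_depth_map(multi.range)  # assumed signature
print('depth.shape: ', depth.shape)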