Code example #1
    def _anno_to_2d_bbox(self, anno, pc_file, cam_front, lidar_top,
                         ego_pose_cam, ego_pose_lidar, cam_intrinsic):
        # Make pixel indexes 0-based
        dists = []
        nusc_box = self.nusc.get_box(anno['token'])

        # Move them to the ego-pose frame.
        nusc_box.translate(-np.array(ego_pose_cam['translation']))
        nusc_box.rotate(Quaternion(ego_pose_cam['rotation']).inverse)

        # Move them to the calibrated sensor frame.
        nusc_box.translate(-np.array(cam_front['translation']))
        nusc_box.rotate(Quaternion(cam_front['rotation']).inverse)

        dists.append(np.linalg.norm(nusc_box.center))
        # Filter out the corners that are not in front of the calibrated sensor.
        # corners() returns a 3x8 matrix; the first four corners face forward, the last four face backward:
        #   (0, 1) top, forward
        #   (2, 3) bottom, forward
        #   (4, 5) top, backward
        #   (6, 7) bottom, backward
        corners_3d = nusc_box.corners()
        # Mean z (depth) over the first four (forward-facing) corners
        dists.append(np.mean(corners_3d[2, :4]))
        # z is the height axis in the ego/lidar frame, while in the camera frame y points down
        # and z points forward (depth), so the z > 0 test below keeps the corners that are in
        # front of the camera.
        in_front = np.argwhere(corners_3d[2, :] > 0).flatten()
        corners_3d = corners_3d[:, in_front]
        #print(corners_3d)
        #above    = np.argwhere(corners_3d[2, :] > 0).flatten()
        #corners_3d = corners_3d[:, above]
        # Project 3d box to 2d.
        corner_coords = view_points(corners_3d, cam_intrinsic,
                                    True).T[:, :2].tolist()
        #print(corner_coords)
        # Keep only corners that fall within the image.

        polygon_from_2d_box = MultiPoint(corner_coords).convex_hull
        img_canvas = box(0, 0, self._imwidth - 1, self._imheight - 1)

        if polygon_from_2d_box.intersects(img_canvas):
            img_intersection = polygon_from_2d_box.intersection(img_canvas)
            intersection_coords = np.array(
                [coord for coord in img_intersection.exterior.coords])

            min_x = min(intersection_coords[:, 0])
            min_y = min(intersection_coords[:, 1])
            max_x = max(intersection_coords[:, 0])
            max_y = max(intersection_coords[:, 1])
            #print('contained pts {}'.format(contained_points))
            return [min_x, min_y, max_x, max_y], dists
        else:
            return None, dists
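
Examples later on delegate this canvas clipping to a post_process_coords helper. A minimal sketch of such a helper, mirroring the inline logic above and assuming shapely plus a default canvas of 1600x900 (the nuScenes camera resolution), might look like this:

from typing import List, Tuple, Union

import numpy as np
from shapely.geometry import MultiPoint, box


def post_process_coords(corner_coords: List,
                        imsize: Tuple[int, int] = (1600, 900)
                        ) -> Union[Tuple[float, float, float, float], None]:
    """Clip the convex hull of the projected corners to the image canvas.

    Returns (min_x, min_y, max_x, max_y), or None if the hull misses the image.
    """
    polygon_from_2d_box = MultiPoint(corner_coords).convex_hull
    img_canvas = box(0, 0, imsize[0], imsize[1])

    if not polygon_from_2d_box.intersects(img_canvas):
        return None

    img_intersection = polygon_from_2d_box.intersection(img_canvas)
    intersection_coords = np.array(list(img_intersection.exterior.coords))

    min_x, min_y = intersection_coords.min(axis=0)
    max_x, max_y = intersection_coords.max(axis=0)
    return min_x, min_y, max_x, max_y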
Code example #2
File: relative_cambridge.py  Project: radaimi/RPNet
def get_relative_pose_np(r1, r2, t1, t2):
    
    r1, r2, t1, t2 = r1.astype('f4'), r2.astype('f4'), t1.astype('f4'), t2.astype('f4')
    pose = []
    for i in range(t1.shape[0]):
        
        rr = (Quaternion(r2[i]) * Quaternion(r1[i]).inverse).elements
        R2 = Quaternion(r2[i]).rotation_matrix
        new_c2 = - np.dot(R2, t2[i])
        rt = np.dot(R2, t1[i]) + new_c2   
        
        pose.append(np.concatenate((rt / np.linalg.norm(rt), rr)))
    return np.array(pose)
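
A small usage sketch for get_relative_pose_np, assuming the function above is in scope. Each row of r1/r2 holds quaternion elements in (w, x, y, z) order and each row of t1/t2 a camera position; the values below are purely illustrative.

import numpy as np
from pyquaternion import Quaternion

r1 = np.array([Quaternion(axis=[0, 0, 1], degrees=0).elements])   # identity orientation
r2 = np.array([Quaternion(axis=[0, 0, 1], degrees=90).elements])  # 90 degree yaw
t1 = np.array([[0., 0., 0.]])
t2 = np.array([[1., 0., 0.]])

rel = get_relative_pose_np(r1, r2, t1, t2)
print(rel.shape)  # (1, 7): unit-norm relative translation (3) followed by relative quaternion (4)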
Code example #3
    def test_x_yaw(self):
        omega_body = np.array([0.1, 0, 0])
        roll, pitch, yaw = [0, 0, 90]
        omega_ned = np.array([0, 0.1, 0])

        q = helper.degrees_to_quat_rotation(roll, pitch, yaw)
        T = helper.T(q)
        q_dot_ned = T @ omega_body

        py_quat = Quaternion(*q)
        py_quat_dot = Quaternion(*q_dot_ned)
        omega_ned_res = (2 * py_quat_dot * py_quat.conjugate).elements[1:]

        for a, b in zip(omega_ned, omega_ned_res):
            self.assertAlmostEqual(a, b)
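
The assertion above relies on the kinematic identity omega_world = 2 * q_dot * q_conjugate. A standalone check of that identity with pyquaternion alone (no helper module), under the convention that q rotates body coordinates into the world/NED frame, might look like this:

import numpy as np
from pyquaternion import Quaternion

q = Quaternion(axis=[0, 0, 1], degrees=90)       # attitude: 90 degree yaw
omega_world = np.array([0.0, 0.1, 0.0])          # angular rate in the world (NED) frame, rad/s

# For a world-frame rate, q_dot = 0.5 * (0, omega) * q ...
q_dot = 0.5 * Quaternion(0.0, *omega_world) * q
# ... so the rate is recovered as 2 * q_dot * q.conjugate, exactly as in the test.
omega_recovered = (2 * q_dot * q.conjugate).elements[1:]

assert np.allclose(omega_world, omega_recovered)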
Code example #4
 def __init__(self):
     # State matrix
     # The first four entries are the attitude quaternion and the last three are gyro biases.
     self.x = np.zeros(shape=(State.Size, 1))
     # Uncertainty covariance matrix
     self.P = np.eye(State.Size)
     # State transition matrix
     self.F = np.zeros(shape=(State.Size, State.Size))
     # Measurement jacobian matrix
     self.H = np.zeros(shape=(Meas.Size, State.Size))
     # Measurement covariance matrix
     self.R = np.eye(Meas.Size)
     # Process noise covariance matrix
     self.Q = np.eye(State.Size)
     self.initializeMatrices()
     # IMU rotation
     self.imuRotation = Quaternion(axis=[1., 0., 0.], degrees=180.0).rotation_matrix
     # Initialize node
     self.ekfNodeHandle = rospy.init_node('ekf_estimator')
     # Subscribers
     # We will be listening on the IMU topic
     self.imuSubscriber = rospy.Subscriber('imu/data_raw', Imu, self.omImuMessageReceived)
     # The Madgwick filter output is treated as a rough ground truth for accuracy comparison.
     # You will need the imu_tools package.
     self.magdwickFilterSub = rospy.Subscriber('imu/data', Imu, self.onMagdwickOutputReceived)
     # Publisher
     self.cubePublisher = rospy.Publisher('cubevis', Marker, queue_size=10)
     self.posePublisher = rospy.Publisher('pose', PoseStamped, queue_size=10)
     # Madgwick
     self.magdwickOutPublisher = rospy.Publisher('magdwick', PoseStamped, queue_size=10)
     self.cubeMagdwickPublisher = rospy.Publisher('cubeMadgwick', Marker, queue_size=10)
     self.timeStamp = None
     rospy.spin()
Code example #5
def draw_obj(obj_config, obj_cat, ax):
    for i in range(len(obj_config)):
        T3, T2, T1, D, W, H, R10, R11, R12, R13, R14, R15, dx, dy, dz, R20, R21, R22, R23, R24, R25 = obj_config[
            i]

        R = rotation_matrix(R10, R11, R12, R13, R14, R15)
        R2 = rotation_matrix(R20, R21, R22, R23, R24, R25)
        R = Q._from_matrix(R2) * Q._from_matrix(R)
        verts = np.array([[T3, T2, T1], [T3, T2, H + T1], [T3, W + T2, T1],
                          [T3, W + T2, H + T1], [D + T3, T2, T1],
                          [D + T3, T2, H + T1], [D + T3, W + T2, T1],
                          [D + T3, W + T2, H + T1]])
        verts = np.matmul(R.rotation_matrix, verts.T).T
        verts += np.array([[dx, dy, dz]])
        edges = [[0, 1, 3, 2, 0], [4, 5, 7, 6, 4], [0, 4], [1, 5], [2, 6],
                 [3, 7]]

        for e in edges:
            ax.plot(verts[e, 0], verts[e, 1], verts[e, 2], color='red')
Code example #6
def get_frame_data( data ):
    """ -------------------------------------------------------------------------------------------------------------
    get the necessary data for a given sample from the nuScenes database
    see https://www.nuscenes.org/data-format for the various dictionary keys used here

    data:           [dict] nuScenes sample_data record of a sweep

    return:         [tuple] <cam_trans> <cam_rot> <cam_model> <ego_trans> <ego_rot>

    ------------------------------------------------------------------------------------------------------------- """
    cam_token   = data[ 'calibrated_sensor_token' ]                 # camera data token
    cam_data    = nu.get( 'calibrated_sensor', cam_token )          # data related with the camera in this frame
    cam_trans   = np.array( cam_data[ 'translation' ] )             # camera 3D translation
    cam_rot     = Quaternion( cam_data[ 'rotation' ] )              # camera 3D rotation
    cam_model   = np.array( cam_data[ 'camera_intrinsic' ] )        # camera intrinsic model
    ego_token   = data[ 'ego_pose_token' ]                          # ego pose token
    ego_pose    = nu.get( 'ego_pose', ego_token )                   # ego pose during this frame
    ego_trans   = np.array( ego_pose[ 'translation' ] )             # ego 3D translation
    ego_rot     = Quaternion( ego_pose[ 'rotation' ] )              # ego 3D rotation

    return cam_trans, cam_rot, cam_model, ego_trans, ego_rot
Code example #7
 def publishVisualizationMarker(self):
     # Visualization marker
     marker = Marker()
     marker.header.frame_id = "odom"
     marker.header.stamp = rospy.Time()
     marker.type = Marker.CUBE
     marker.action = Marker.ADD
     # Position
     marker.pose.position.x = 1.
     marker.pose.position.y = 1.
     marker.pose.position.z = 1.
     # Orientation
     flip = Quaternion([0., 0., 0., 1.])
     # markerOrient = flip * Quaternion(self.x[0:4, 0])
     markerOrient = Quaternion(self.x[0:4, 0])
     marker.pose.orientation.w = markerOrient[0]
     marker.pose.orientation.x = markerOrient[1]
     marker.pose.orientation.y = markerOrient[2]
     marker.pose.orientation.z = markerOrient[3]
     marker.scale.x = 0.7
     marker.scale.y = 1.
     marker.scale.z = 0.1
     marker.color.a = 1.0
     marker.color.r = 0.0
     marker.color.g = 1.0
     marker.color.b = 0.0
     # Publish the axes marker
     self.cubePublisher.publish(marker)
     # Pose 
     poseMsg = PoseStamped()
     poseMsg.header.frame_id = "odom"
     poseMsg.pose.position.x = 1.
     poseMsg.pose.position.y = 1.
     poseMsg.pose.position.z = 1.
     # Orientation
     poseMsg.pose.orientation.w = markerOrient[0]
     poseMsg.pose.orientation.x = markerOrient[1]
     poseMsg.pose.orientation.y = markerOrient[2]
     poseMsg.pose.orientation.z = markerOrient[3]
     self.posePublisher.publish(poseMsg)
Code example #8
 def predict(self, gyro, dt):
     # If you want to use the less accurate Euler integration instead of integrating the
     # quaternion via the axis-angle representation, uncomment the line below and make sure
     # the exponential (axis-angle) integration further down is deactivated.
     # self.useEulerIntegration(gyro, dt)
     unbiasedGyro = np.subtract(gyro, self.x[State.BX:])
     # Quaternion integration
     # Integrate quaternion using the axis angle representation
     rotationVec = unbiasedGyro * dt
     angle = np.linalg.norm(rotationVec)
     if not np.isclose(angle, 0):
         quat = Quaternion(axis=rotationVec, angle=angle)
     else:
         quat = Quaternion([1., 0., 0., 0.])
     result = Quaternion(self.x[State.QW:State.BX, 0]) * quat
     self.x[State.QW:State.BX, 0] = result.normalised.elements
     if(self.x[0, 0] < 0):
         self.x[0, 0] = -self.x[0, 0]
         self.x[1, 0] = -self.x[1, 0]
         self.x[2, 0] = -self.x[2, 0]
         self.x[3, 0] = -self.x[3, 0]
     self.computeStateTransitionMatrix(gyro, dt)
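
For reference, a minimal comparison of the two integration schemes mentioned in the comments above, assuming a constant body-frame rate over the step and using pyquaternion only (independent of the EKF state layout):

import numpy as np
from pyquaternion import Quaternion

q = Quaternion(axis=[1, 0, 0], degrees=10)  # current attitude
gyro = np.array([0.0, 0.0, 0.5])            # unbiased body-frame rate, rad/s
dt = 0.01

# First-order (Euler) update: q += 0.5 * q * (0, w) * dt, then renormalise.
q_euler = (q + 0.5 * dt * q * Quaternion(0.0, *gyro)).normalised

# Axis-angle (exponential) update, exact when the rate is constant over dt.
rotation_vec = gyro * dt
angle = np.linalg.norm(rotation_vec)
q_exact = q * Quaternion(axis=rotation_vec, angle=angle) if not np.isclose(angle, 0) else q

print(Quaternion.absolute_distance(q_euler, q_exact))  # small for small dt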
Code example #9
 def onMagdwickOutputReceived(self, imuData:Imu):
     """
     Publish a cube and pose marker to visualize the Madgwick algorithm
     output.
     """
     # Visualization marker
     marker = Marker()
     marker.header.frame_id = "odom"
     marker.header.stamp = rospy.Time()
     marker.type = Marker.CUBE
     marker.action = Marker.ADD
     # Position
     marker.pose.position.x = 0.
     marker.pose.position.y = 0.
     marker.pose.position.z = 0.
     # Orientation
     flip = Quaternion([0., 0., 0., 1.])
     # markerOrient = flip * Quaternion(self.x[0:4, 0])
     marker.pose.orientation.w = imuData.orientation.w
     marker.pose.orientation.x = imuData.orientation.x
     marker.pose.orientation.y = imuData.orientation.y
     marker.pose.orientation.z = imuData.orientation.z
     marker.scale.x = 0.7
     marker.scale.y = 1.
     marker.scale.z = 0.1
     marker.color.a = 1.0
     marker.color.r = 0.0
     marker.color.g = 1.0
     marker.color.b = 0.0
     # Publish the axes marker
     self.cubeMagdwickPublisher.publish(marker)
     # Pose 
     poseMsg = PoseStamped()
     poseMsg.header.frame_id = "odom"
     poseMsg.pose.position.x = 0.
     poseMsg.pose.position.y = 0.
     poseMsg.pose.position.z = 0.
     # Orientation
     poseMsg.pose.orientation.w = imuData.orientation.w
     poseMsg.pose.orientation.x = imuData.orientation.x
     poseMsg.pose.orientation.y = imuData.orientation.y
     poseMsg.pose.orientation.z = imuData.orientation.z
     self.magdwickOutPublisher.publish(poseMsg)
Code example #10
    def _rel_pose_after_rotation(self, each_pose):
        assert each_pose.shape[0] == 14
        
        rotat0 = Quaternion(axis=[0, 0, 1], degrees=0)
        rotat1 = Quaternion(axis=[0, 0, 1], degrees=-90)
        rotat2 = Quaternion(axis=[0, 0, 1], degrees=-180)
        rotat3 = Quaternion(axis=[0, 0, 1], degrees=-270)
        
        rotats = [rotat0, rotat1, rotat2, rotat3]
        
        q1 = Quaternion(each_pose[3:7])
        q2 = Quaternion(each_pose[10:14])
        t1 = each_pose[:3]
        t2 = each_pose[7:10]
        relative_rotation, relative_translation = [], []

        pose1 = []
        pose2 = []
        for i in range(4):
            new_q1 =  rotats[i] * q1
            pose1.append(np.concatenate((t1, new_q1.elements)))
            for j in range(4):
                new_q2 = rotats[j] * q2

                if i == 0:
                    pose2.append(np.concatenate((t2, new_q2.elements)))
                
                relative_rotation.append((new_q2 * new_q1.inverse).elements)
                relative_translation.append(self.get_relative_T_in_cam2_ref(new_q2.rotation_matrix, t1, t2))

        relative_rotation = np.array(relative_rotation)
        relative_translation = np.array(relative_translation)
        
        pose1 = np.array(pose1)
        pose2 = np.array(pose2)
        
        assert pose1.shape[0] == pose2.shape[0] == 4 and pose2.shape[1] == pose1.shape[1] == 7

        rel_pose = np.hstack((relative_translation, relative_rotation)).reshape((16, 7))
        rel_pose = np.vstack((rel_pose, pose1, pose2, each_pose.reshape((2, 7)) ))    
        return rel_pose.reshape((1, 26, 7))
Code example #11
def convert_bvh_to_quaternion(bvh_q) -> Quaternion:
    return Quaternion(w=bvh_q[0], x=bvh_q[1], y=bvh_q[2], z=bvh_q[3])
Code example #12
def get_relative_pose(idx):
    img1, img2 = annot[idx, 0], annot[idx, 8]

    f1 = txt['/'.join(img1.split('/')[-2:]).replace('.png', '.jpg')][0]
    f2 = txt['/'.join(img2.split('/')[-2:]).replace('.png', '.jpg')][0]

    im1 = cv2.imread(img1, 0)  # @UndefinedVariable
    im2 = cv2.imread(img2, 0)  # @UndefinedVariable

    assert im1.shape[0] < im1.shape[1]

    sift = cv2.xfeatures2d.SIFT_create()  # @UndefinedVariable

    kp1, des1 = sift.detectAndCompute(im1, None)
    kp2, des2 = sift.detectAndCompute(im2, None)

    # create BFMatcher object
    bf = cv2.BFMatcher(cv2.NORM_L2, crossCheck=True)  # @UndefinedVariable

    # Match descriptors.
    try:
        matches = bf.match(des1, des2)
    except cv2.error:  # @UndefinedVariable
        return np.array([0.333, 0.333, 0.3334, 0.25, 0.25, 0.25, 0.25], 'f4')

    # Sort them in the order of their distance.
    matches = sorted(matches, key=lambda x: x.distance)[:100]

    # Draw first 10 matches.
    #     draw_params = dict(#matchColor = (0,255,0), # draw matches in green color
    #                            singlePointColor = None,
    #                             flags = 2)
    #     img3 = cv2.drawMatches(im1, kp1, im2, kp2, matches[:20], None, **draw_params)
    #     plt.figure(figsize=(20,20))
    #     plt.imshow(img3)
    #     plt.show()

    mat1 = get_cameraMatrix(f1, cx=1920, cy=1080)
    mat2 = get_cameraMatrix(f2, cx=1920, cy=1080)

    src = np.float32([kp1[m.queryIdx].pt for m in matches]).reshape(-1, 2)
    dst = np.float32([kp2[m.trainIdx].pt for m in matches]).reshape(-1, 2)

    src = normalize_point(src, mat1)
    dst = normalize_point(dst, mat2)

    src = src[:, :2]
    dst = dst[:, :2]

    tmp_results = []

    for _threshold in [1e-2, 1e-3, 1e-4]:

        try:
            E, mask = cv2.findEssentialMat(
                np.copy(src),
                np.copy(dst),
                cameraMatrix=np.identity(3, 'f4'),
                threshold=_threshold)  # @UndefinedVariable
            _, R, t, mask, _ = cv2.recoverPose(
                E,
                np.copy(src),
                np.copy(dst),
                mask=mask,
                cameraMatrix=np.identity(3, 'f4'),
                distanceThresh=50)  # @UndefinedVariable
        except cv2.error:  # @UndefinedVariable
            tmp_results.append(
                np.array([0.333, 0.333, 0.3334, 0.25, 0.25, 0.25, 0.25], 'f4'))
            continue
        tmp_results.append(
            np.concatenate((t.flatten(), np.array(
                Quaternion(matrix=R).elements).astype('f4').flatten())))

    return np.array(tmp_results)
Code example #13
def get_2d_boxes(sample_data_token: str,
                 visibilities: List[str]) -> List[OrderedDict]:
    """
    Get the 2D annotation records for a given `sample_data_token`.
    :param sample_data_token: Sample data token belonging to a camera keyframe.
    :param visibilities: Visibility filter.
    :return: List of 2D annotation records that belong to the input `sample_data_token`
    """

    # Get the sample data and the sample corresponding to that sample data.
    sd_rec = nusc.get('sample_data', sample_data_token)

    assert sd_rec[
        'sensor_modality'] == 'camera', 'Error: get_2d_boxes only works for camera sample_data!'
    if not sd_rec['is_key_frame']:
        raise ValueError(
            'The 2D re-projections are available only for keyframes.')

    s_rec = nusc.get('sample', sd_rec['sample_token'])

    # Get the calibrated sensor and ego pose record to get the transformation matrices.
    cs_rec = nusc.get('calibrated_sensor', sd_rec['calibrated_sensor_token'])
    pose_rec = nusc.get('ego_pose', sd_rec['ego_pose_token'])
    camera_intrinsic = np.array(cs_rec['camera_intrinsic'])

    # Get all the annotations with the specified visibilities.
    ann_recs = [
        nusc.get('sample_annotation', token) for token in s_rec['anns']
    ]
    ann_recs = [
        ann_rec for ann_rec in ann_recs
        if (ann_rec['visibility_token'] in visibilities)
    ]

    repro_recs = []

    for ann_rec in ann_recs:
        # Augment sample_annotation with token information.
        ann_rec['sample_annotation_token'] = ann_rec['token']
        ann_rec['sample_data_token'] = sample_data_token

        # Get the box in global coordinates.
        box = nusc.get_box(ann_rec['token'])

        # Move them to the ego-pose frame.
        box.translate(-np.array(pose_rec['translation']))
        box.rotate(Quaternion(pose_rec['rotation']).inverse)

        # Move them to the calibrated sensor frame.
        box.translate(-np.array(cs_rec['translation']))
        box.rotate(Quaternion(cs_rec['rotation']).inverse)

        # Filter out the corners that are not in front of the calibrated sensor.
        corners_3d = box.corners()
        in_front = np.argwhere(corners_3d[2, :] > 0).flatten()
        corners_3d = corners_3d[:, in_front]

        # Project 3d box to 2d.
        corner_coords = view_points(corners_3d, camera_intrinsic,
                                    True).T[:, :2].tolist()

        # Keep only corners that fall within the image.
        final_coords = post_process_coords(corner_coords)

        # Skip if the convex hull of the re-projected corners does not intersect the image canvas.
        if final_coords is None:
            continue
        else:
            min_x, min_y, max_x, max_y = final_coords

        # Generate dictionary record to be included in the .json file.
        repro_rec = generate_record(ann_rec, min_x, min_y, max_x, max_y,
                                    sample_data_token, sd_rec['filename'])
        repro_recs.append(repro_rec)

    return repro_recs
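
A hypothetical driver for the function above, assuming the nuscenes-devkit is installed, the helpers it uses (view_points, post_process_coords, generate_record) are defined as in the surrounding project, and the dataroot below is replaced with a real path:

from nuscenes.nuscenes import NuScenes

nusc = NuScenes(version='v1.0-mini', dataroot='/path/to/nuscenes', verbose=False)

sample = nusc.sample[0]
cam_token = sample['data']['CAM_FRONT']

# Keep annotations with visibility levels 2-4 (roughly 40% visible or better).
records = get_2d_boxes(cam_token, visibilities=['2', '3', '4'])
print(len(records), 'reprojected 2D boxes')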
Code example #14
File: relative_cambridge.py  Project: radaimi/RPNet
def get_relative_pose(idx):
    img1, img2 = annot[idx, 0], annot[idx, 8]

    f1 = txt['/'.join(img1.split('/')[-2:]).replace('.png', '.jpg')][0]
    f2 = txt['/'.join(img2.split('/')[-2:]).replace('.png', '.jpg')][0]
    
    im1 = images[idx, 0]
    im2 = images[idx, 1]

    assert im1.shape[0] < im1.shape[1]

    if center_crop:
        im1 = im1[16:240, 116:340]
        im2 = im2[16:240, 116:340]
    
    sift = cv2.xfeatures2d.SIFT_create()  # @UndefinedVariable
    
    kp1, des1 = sift.detectAndCompute(im1, None)
    kp2, des2 = sift.detectAndCompute(im2, None)
    
    # create BFMatcher object
    bf = cv2.BFMatcher(cv2.NORM_L2, crossCheck=True)  # @UndefinedVariable

    # Match descriptors.
    try:
        matches = bf.match(des1, des2)
    except cv2.error:  # @UndefinedVariable
        return np.array([0.333, 0.333, 0.3334, 0.25, 0.25, 0.25, 0.25], 'f4')


    # Sort them in the order of their distance.
    matches = sorted(matches, key = lambda x:x.distance)[:100]

    # Draw first 10 matches.
#     draw_params = dict(#matchColor = (0,255,0), # draw matches in green color
#                            singlePointColor = None,
#                             flags = 2)
#     img3 = cv2.drawMatches(im1, kp1, im2, kp2, matches[:20], None, **draw_params)
#     plt.figure(figsize=(20,20))
#     plt.imshow(img3)
#     plt.show()
    if center_crop:
        mat1 = get_cameraMatrix(f1, cx = 224-0.5, cy = 224)
        mat2 = get_cameraMatrix(f2, cx = 224-0.5, cy = 224)        
    else:
        mat1 = get_cameraMatrix(f1, cx = 455, cy = 256)
        mat2 = get_cameraMatrix(f2, cx = 455, cy = 256)
    
    src = np.float32([ kp1[m.queryIdx].pt for m in matches ]).reshape(-1,2)
    dst = np.float32([ kp2[m.trainIdx].pt for m in matches ]).reshape(-1,2)
    
    src = normalize_point(src, mat1)
    dst = normalize_point(dst, mat2)
    
    src = src[:, :2]
    dst = dst[:, :2]
    
    try:
        E, mask = cv2.findEssentialMat(src, dst, cameraMatrix = np.identity(3, 'f4'), threshold = _threshold)  # @UndefinedVariable

        _, R, t, mask, _ = cv2.recoverPose(E, src, dst, mask = mask, cameraMatrix = np.identity(3, 'f4'), distanceThresh = 50)  # @UndefinedVariable
    except cv2.error:  # @UndefinedVariable
        return np.array([0.333, 0.333, 0.3334, 0.25, 0.25, 0.25, 0.25], 'f4')

#     matchesMask = mask.ravel().tolist()
#     draw_params = dict(#matchColor = (0,255,0), # draw matches in green color
#                            singlePointColor = None,
#                            matchesMask = matchesMask, # draw only inliers
#                            flags = 2)
#     img3 = cv2.drawMatches(im1, kp1, im2, kp2, matches, None, **draw_params)
#     plt.figure(figsize=(20,20))
#     plt.imshow(img3)
#     plt.show()

    return np.concatenate((t.flatten(), np.array(Quaternion(matrix=R).elements).astype('f4').flatten()))
Code example #15
def convert_xyz_euler_to_quaternion(euler) -> Quaternion:
    q = p.getQuaternionFromEuler(euler)
    return Quaternion(x=q[1], y=q[2], z=q[3], w=q[0])
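
The reordering is the whole point of this helper: here p is assumed to be pybullet, whose getQuaternionFromEuler returns components as (x, y, z, w), while pyquaternion stores them as (w, x, y, z). A small sanity check, assuming pybullet is installed:

import math

import pybullet as p
from pyquaternion import Quaternion

q_xyzw = p.getQuaternionFromEuler([0.0, 0.0, math.pi / 2])  # 90 degree yaw, returned as (x, y, z, w)
q = Quaternion(x=q_xyzw[0], y=q_xyzw[1], z=q_xyzw[2], w=q_xyzw[3])
print(q.yaw_pitch_roll)  # approximately (pi/2, 0, 0)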
Code example #16
LeftToeBase = 7  # type= JOINT_SPHERICAL
Spine = 8  # type= JOINT_FIXED
Spine1 = 9  # type= JOINT_SPHERICAL
Spine2 = 10  # type= JOINT_SPHERICAL
Neck = 11  # type= JOINT_SPHERICAL
Head = 12  # type= JOINT_SPHERICAL
RightShoulder = 13  # type= JOINT_SPHERICAL
RightArm = 14  # type= JOINT_SPHERICAL
RightForeArm = 15  # type= JOINT_SPHERICAL
RightHand = 16  # type= JOINT_FIXED
LeftShoulder = 17  # type= JOINT_SPHERICAL
LeftArm = 18  # type= JOINT_SPHERICAL
LeftForeArm = 19  # type= JOINT_SPHERICAL
LeftHand = 20  # type= JOINT_FIXED

LegOrientation = Quaternion(w=0, x=-0.707, y=0, z=0.707)  # x -90 z 180

FootOrientation = Quaternion(w=0.422618, x=-0.906308, y=0, z=0)  # x -130
ToeBaseOrientation = Quaternion(w=0.707, x=-0.707, y=0, z=0)  # x -90
SpineOrientation = Quaternion(w=0.999048, x=0.043619, y=0, z=0)  # x 5
Spine1Orientation = Quaternion(w=0.999048, x=0.043619, y=0, z=0)  # x 5
Spine2Orientation = Quaternion(w=0.997564, x=0.069756, y=0, z=0)  # x 8
NeckOrientation = Quaternion(w=0.996195, x=-0.087156, y=0, z=0)  # x -10
HeadOrientation = Quaternion(w=0.92388, x=-0.382683, y=0, z=0)  # x -45

# RightShoulderOrientation = Quaternion(w=-0.5, x=0.5, y=-0.5, z=0.5)   # x -90 z 270
# RightArmOrientation = Quaternion(w=-0.5, x=0.5, y=-0.5, z=0.5)
# RightForeArmOrientation = Quaternion(w=-0.5, x=0.5, y=-0.5, z=0.5)
# RightHandOrientation = Quaternion(w=-0.5, x=0.5, y=-0.5, z=0.5)
# LeftShoulderOrientation = Quaternion(w=0.5, x=-0.5, y=-0.5, z=0.5)   # x -90 z 90
# LeftArmOrientation = Quaternion(w=0.5, x=-0.5, y=-0.5, z=0.5)
Code example #17
            est_pose = []
            for each_annot in annot:
                name1, name2 = '/'.join(
                    each_annot[0].split('/')[-2:]), '/'.join(
                        each_annot[8].split('/')[-2:])

                pose1 = abspose[name1.replace('relative_cambridge',
                                              'absolute_cambridge')]
                pose2 = abspose[name2.replace('relative_cambridge',
                                              'absolute_cambridge')]

                q1, q2 = pose1[3:], pose2[3:]
                t1, t2 = pose1[:3], pose2[:3]

                rr = (Quaternion(q2) * Quaternion(q1).inverse).normalised

                R2 = Quaternion(q2).rotation_matrix
                new_c2 = -np.dot(R2, t2)
                rt = np.dot(R2, t1) + new_c2
                est_pose.append(np.concatenate((rt, rr.elements)))

            est_pose = np.array(est_pose)

            tm = np.linalg.norm(est_pose[:, :3] - gt_pose[:, :3], axis=1)
            d = np.abs(np.sum(est_pose[:, 3:] * gt_pose[:, 3:], axis=1))
            np.putmask(d, d > 1, 1)
            np.putmask(d, d < -1, -1)
            rd = 2 * np.arccos(d) * 180 / math.pi

            est_pose[:, :3] = est_pose[:, :3] / np.linalg.norm(
                est_pose[:, :3], axis=1, keepdims=True)
Code example #18
def get_2d_boxes(sample_data_token: str, visibilities: List[str], mode: str) -> List[OrderedDict]:
    """
    Get the 2D annotation records for a given `sample_data_token`.
    :param sample_data_token: Sample data token belonging to a camera keyframe.
    :param visibilities: Visibility filter.
    :param mode: 'xywh' or 'xyxy'
    :return: List of 2D annotation records that belong to the input `sample_data_token`
    """

    # Get the sample data and the sample corresponding to that sample data.
    sd_rec = nusc.get('sample_data', sample_data_token)

    assert sd_rec['sensor_modality'] == 'camera', 'Error: get_2d_boxes only works for camera sample_data!'
    if not sd_rec['is_key_frame']:
        raise ValueError('The 2D re-projections are available only for keyframes.')

    s_rec = nusc.get('sample', sd_rec['sample_token'])

    # Get the calibrated sensor and ego pose record to get the transformation matrices.
    cs_rec = nusc.get('calibrated_sensor', sd_rec['calibrated_sensor_token'])
    pose_rec = nusc.get('ego_pose', sd_rec['ego_pose_token'])
    camera_intrinsic = np.array(cs_rec['camera_intrinsic'])

    # Get all the annotations with the specified visibilities.
    ann_recs = [nusc.get('sample_annotation', token) for token in s_rec['anns']]
    ann_recs = [ann_rec for ann_rec in ann_recs if (ann_rec['visibility_token'] in visibilities)]

    sd_annotations = []

    for ann_rec in ann_recs:
        # Get the 3D box in global coordinates.
        box = nusc.get_box(ann_rec['token'])

        # Calculate distance from vehicle to box
        ego_translation = (box.center[0] - pose_rec['translation'][0],
                           box.center[1] - pose_rec['translation'][1],
                           box.center[2] - pose_rec['translation'][2])
        ego_dist = np.sqrt(np.sum(np.array(ego_translation[:2]) ** 2))
        dist = round(ego_dist,2)

        # Move them to the ego-pose frame.
        box.translate(-np.array(pose_rec['translation']))
        box.rotate(Quaternion(pose_rec['rotation']).inverse)

        # Move them to the calibrated sensor frame.
        box.translate(-np.array(cs_rec['translation']))
        box.rotate(Quaternion(cs_rec['rotation']).inverse)

        # Filter out the corners that are not in front of the calibrated sensor.
        corners_3d = box.corners()
        in_front = np.argwhere(corners_3d[2, :] > 0).flatten()
        corners_3d = corners_3d[:, in_front]

        # Project 3d box to 2d.
        corner_coords = view_points(corners_3d, camera_intrinsic, True).T[:, :2].tolist()

        # Keep only corners that fall within the image.
        final_coords = post_process_coords(corner_coords)

        # Skip if the convex hull of the re-projected corners does not intersect the image canvas.
        if final_coords is None:
            continue

        min_x, min_y, max_x, max_y = final_coords
        if mode == 'xywh':
            bbox = [min_x, min_y, max_x-min_x, max_y-min_y]
        else:
            bbox = [min_x, min_y, max_x, max_y]
        
        ## Generate 2D record to be included in the .json file.
        ann_2d = {}
        ann_2d['sample_data_token'] = sample_data_token
        ann_2d['bbox'] = np.around(bbox, 2).tolist()
        ann_2d['distance'] = dist
        ann_2d['category_name'] = ann_rec['category_name']
        ann_2d['num_lidar_pts'] = ann_rec['num_lidar_pts']
        ann_2d['num_radar_pts'] = ann_rec['num_radar_pts']
        ann_2d['visibility_token'] = ann_rec['visibility_token']

        sd_annotations.append(ann_2d)

    return sd_annotations
Code example #19
def change_of_basis(orientation: Quaternion,
                    conventions: Quaternion) -> Quaternion:
    return conventions * orientation * conventions.inverse
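
An illustrative call, assuming the function above is in scope: conjugating by a 90-degree yaw remaps a rotation about the x-axis into one about the y-axis (the values are examples only).

import numpy as np
from pyquaternion import Quaternion

orientation = Quaternion(axis=[1, 0, 0], degrees=30)  # 30 degrees about x
conventions = Quaternion(axis=[0, 0, 1], degrees=90)  # basis change that maps x onto y

remapped = change_of_basis(orientation, conventions)
print(remapped.axis, np.degrees(remapped.angle))  # approximately [0, 1, 0] and 30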
Code example #20
    def get_2d_boxes(self, index: int, visibilities: List[str]) -> List[List]:
        """
        Get the 2D annotation records for the camera keyframe at the given sample index.
        :param index: Index into `self.sample_tokens`.
        :param visibilities: Visibility filter.
        :return: List of [category, min_x, min_y, max_x, max_y] records (xyxy format,
            absolute pixel coordinates) that belong to the corresponding `sample_data_token`

        """
        # Getting data from the nuScenes database
        sample_token = self.sample_tokens[index]
        sample = self.nusc.get('sample', sample_token)

        # Getting the sample_data_token for sensor['CAM_FRONT']
        sample_data_token = sample['data'][self.camera_sensors[0]]

        # Get the sample data and the sample corresponding to that sample data.
        sd_rec = self.nusc.get('sample_data', sample_data_token)

        assert sd_rec[
            'sensor_modality'] == 'camera', 'Error: get_2d_boxes only works for camera sample_data!'
        if not sd_rec['is_key_frame']:
            raise ValueError(
                'The 2D re-projections are available only for keyframes.')

        s_rec = self.nusc.get('sample', sd_rec['sample_token'])

        # Get the calibrated sensor and ego pose record to get the transformation matrices.
        cs_rec = self.nusc.get('calibrated_sensor',
                               sd_rec['calibrated_sensor_token'])
        pose_rec = self.nusc.get('ego_pose', sd_rec['ego_pose_token'])
        camera_intrinsic = np.array(cs_rec['camera_intrinsic'])

        # Get all the annotations with the specified visibilities.
        ann_recs = [
            self.nusc.get('sample_annotation', token)
            for token in s_rec['anns']
        ]
        ann_recs = [
            ann_rec for ann_rec in ann_recs
            if (ann_rec['visibility_token'] in visibilities)
        ]

        repro_recs = []
        bboxes = []

        for ann_rec in ann_recs:
            # Augment sample_annotation with token information.
            ann_rec['sample_annotation_token'] = ann_rec['token']
            ann_rec['sample_data_token'] = sample_data_token

            # Get the box in global coordinates.
            box = self.nusc.get_box(ann_rec['token'])

            # Move them to the ego-pose frame.
            box.translate(-np.array(pose_rec['translation']))
            box.rotate(Quaternion(pose_rec['rotation']).inverse)

            # Move them to the calibrated sensor frame.
            box.translate(-np.array(cs_rec['translation']))
            box.rotate(Quaternion(cs_rec['rotation']).inverse)

            # Filter out the corners that are not in front of the calibrated sensor.
            corners_3d = box.corners()
            in_front = np.argwhere(corners_3d[2, :] > 0).flatten()
            corners_3d = corners_3d[:, in_front]

            # Project 3d box to 2d.
            corner_coords = view_points(corners_3d, camera_intrinsic,
                                        True).T[:, :2].tolist()

            # Keep only corners that fall within the image.
            final_coords = self.post_process_coords(corner_coords)

            # Skip if the convex hull of the re-projected corners does not intersect the image canvas.
            if final_coords is None:
                continue
            else:
                min_x, min_y, max_x, max_y = final_coords
                min_x = min_x / 1600 * self.image_max_side
                max_x = max_x / 1600 * self.image_max_side
                min_y = min_y / 900 * self.image_min_side
                max_y = max_y / 900 * self.image_min_side
                category = ann_rec['category_name']
                # Category filtering happens here.
                if category in list(self.classes.keys()):
                    category_digit = self.classes[category]
                    bbox = [category_digit, min_x, min_y, max_x, max_y]
                    bboxes.append(bbox)
                else:
                    continue
        if bboxes == []:
            bboxes.append([-1, 0, 0, 0, 0])  # -1 marks a frame without annotations; skip it in visualization or training
            print(sample_token)
            print("This frame has no gt_box!")
            return bboxes
        return bboxes
Code example #21
 def normalizeQuaternion(self):
     """
     Normalize the quaternion in the state vector
     """
     self.x[State.QW:State.BX, 0] = Quaternion(self.x[State.QW:State.BX, 0]).normalised.elements
Code example #22
def get_2d_boxes(sample_data_token: str,
                 visibilities: List[str]) -> List[OrderedDict]:
    """
    Get the 2D annotation records for a given `sample_data_token`.
    :param sample_data_token: Sample data token belonging to a keyframe.
    :param visibilities: Visibility filter.
    :return: List of 2D annotation records that belong to the input `sample_data_token`
    """

    # Get the sample data and the sample corresponding to that sample data.
    sd_rec = nusc.get('sample_data', sample_data_token)

    if not sd_rec['is_key_frame']:
        raise ValueError(
            'The 2D re-projections are available only for keyframes.')

    s_rec = nusc.get('sample', sd_rec['sample_token'])

    # Get the calibrated sensor and ego pose record to get the transformation matrices.
    cs_rec = nusc.get('calibrated_sensor', sd_rec['calibrated_sensor_token'])
    pose_rec = nusc.get('ego_pose', sd_rec['ego_pose_token'])
    camera_intrinsic = np.array(cs_rec['camera_intrinsic'])

    # Get all the annotations with the specified visibilities.
    ann_recs = [
        nusc.get('sample_annotation', token) for token in s_rec['anns']
    ]
    ann_recs = [
        ann_rec for ann_rec in ann_recs
        if (ann_rec['visibility_token'] in visibilities)
    ]

    # Keep only the vehicle.car category
    ann_recs = [
        ann_rec for ann_rec in ann_recs
        if (ann_rec['category_name'] == "vehicle.car")
    ]

    # Determine the tokens for the parked and stopped attributes
    for att in nusc.attribute:
        if att['name'] == 'vehicle.parked':
            parked_token = att['token']
        if att['name'] == 'vehicle.stopped':
            stopped_token = att['token']

    # Keep only annotations with the parked or stopped attribute
    ann_recs = [
        ann_rec for ann_rec in ann_recs
        if (parked_token in ann_rec['attribute_tokens']
            or stopped_token in ann_rec['attribute_tokens'])
    ]

    repro_recs = [sd_rec['filename']]

    for ann_rec in ann_recs:

        if parked_token in ann_rec['attribute_tokens']:
            vehicle_state = 1
        elif stopped_token in ann_rec['attribute_tokens']:
            vehicle_state = 2

        ann_rec['sample_annotation_token'] = ann_rec['token']
        ann_rec['sample_data_token'] = sample_data_token

        box = nusc.get_box(ann_rec['token'])

        # Move them to the ego-pose frame.
        box.translate(-np.array(pose_rec['translation']))
        box.rotate(Quaternion(pose_rec['rotation']).inverse)

        # Move them to the calibrated sensor frame.
        box.translate(-np.array(cs_rec['translation']))
        box.rotate(Quaternion(cs_rec['rotation']).inverse)

        # Filter out the corners that are not in front of the calibrated sensor.
        corners_3d = box.corners()
        in_front = np.argwhere(corners_3d[2, :] > 0).flatten()
        corners_3d = corners_3d[:, in_front]

        # Project 3d box to 2d.
        corner_coords = view_points(corners_3d, camera_intrinsic,
                                    True).T[:, :2].tolist()

        # Keep only corners that fall within the image.
        final_coords = post_process_coords(corner_coords)

        # Skip if the convex hull of the re-projected corners does not intersect the image canvas.
        if final_coords is None:
            continue
        else:
            min_x, min_y, max_x, max_y = final_coords

        # Generate dictionary record to be included in the .json file.
        #repro_rec = generate_record(ann_rec, min_x, min_y, max_x, max_y, sample_data_token, sd_rec['filename'])

        repro_rec = [
            vehicle_state, (max_x + min_x) / (2 * 1600),
            (max_y + min_y) / (2 * 900), (max_x - min_x) / (1600),
            (max_y - min_y) / (900)
        ]
        repro_recs.append(repro_rec)
    if (len(repro_recs) == 1):
        return []
    return repro_recs
Code example #23
File: make_nusc_box_set.py  Project: YiZhu89/3D-BBox
def get_2d_boxes(sample_data_token: str, visibilities: List[str],
                 dataroot: str, box_image_dir: str) -> List[OrderedDict]:
    """
    Get the 2D annotation records for a given `sample_data_token`.
    :param sample_data_token: Sample data token belonging to a camera keyframe.
    :param visibilities: Visibility filter.
    :return: List of 2D annotation records that belong to the input `sample_data_token`
    """

    # Get the sample data and the sample corresponding to that sample data.
    sd_rec = nusc.get('sample_data', sample_data_token)
    s_rec = nusc.get('sample', sd_rec['sample_token'])

    # Get the calibrated sensor and ego pose record to get the transformation matrices.
    cs_rec = nusc.get('calibrated_sensor', sd_rec['calibrated_sensor_token'])
    pose_rec = nusc.get('ego_pose', sd_rec['ego_pose_token'])
    camera_intrinsic = np.array(cs_rec['camera_intrinsic'])

    # Get all the annotations with the specified visibilities and in the detection benchmark
    ann_recs = [
        nusc.get('sample_annotation', token) for token in s_rec['anns']
    ]
    ann_recs = [ann_rec for ann_rec in ann_recs if (ann_rec['visibility_token'] in visibilities and \
                                                    ann_rec['category_name'] in category2class.keys())]

    repro_recs = []
    scene_image = Image.open(os.path.join(dataroot, sd_rec['filename']))
    for i, ann_rec in enumerate(ann_recs):
        # Augment sample_annotation with token information.
        ann_rec['sample_annotation_token'] = ann_rec['token']
        ann_rec['sample_data_token'] = sample_data_token

        # Get the box in global coordinates.
        box = nusc.get_box(ann_rec['token'])

        # Move them to the ego-pose frame.
        box.translate(-np.array(pose_rec['translation']))
        box.rotate(Quaternion(pose_rec['rotation']).inverse)

        # Move them to the calibrated sensor frame.
        box.translate(-np.array(cs_rec['translation']))
        box.rotate(Quaternion(cs_rec['rotation']).inverse)

        # Filter out the corners that are not in front of the calibrated sensor.
        corners_3d = box.corners()
        in_front = np.argwhere(corners_3d[2, :] > 0).flatten()
        corners_3d = corners_3d[:, in_front]

        # Project 3d box to 2d.
        corner_coords = view_points(corners_3d, camera_intrinsic,
                                    True).T[:, :2].tolist()

        # Keep only corners that fall within the image.
        final_coords = post_process_coords(corner_coords)

        # Skip if the convex hull of the re-projected corners does not intersect the image canvas.
        if final_coords is None:
            continue
        else:
            min_x, min_y, max_x, max_y = final_coords
            if max_x - min_x < 25.0 or max_y - min_y < 25.0:
                continue

        # Generate dictionary record to be included in the .json file.
        repro_rec = generate_record(ann_rec, min_x, min_y, max_x, max_y,
                                    sample_data_token, sd_rec['filename'])

        # add dimensions, location
        repro_rec['dimensions'] = box.wlh.tolist()
        repro_rec['location'] = box.center.tolist()

        # add theta_l (approximate)
        theta_ray = np.arctan2(repro_rec['location'][2],
                               repro_rec['location'][0])
        front = box.orientation.rotate(np.array([1.0, 0.0, 0.0]))
        ry = -np.arctan2(front[2], front[0])
        theta_l = np.pi - theta_ray - ry
        if theta_l > np.pi:
            theta_l -= 2.0 * np.pi
        if theta_l < -np.pi:
            theta_l += 2.0 * np.pi
        repro_rec['theta_l'] = theta_l

        # save box image
        box_image = scene_image.resize((224, 224), box=final_coords)
        box_image_file = os.path.join(
            box_image_dir, 'camera__{}__{}.png'.format(sample_data_token, i))
        box_image.save(box_image_file)
        repro_rec['box_image_file'] = os.path.join(
            *box_image_file.split('/')[-4:])

        repro_recs.append(repro_rec)

    return repro_recs
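
As a side note, the two if-statements that wrap theta_l above keep the angle inside [-pi, pi]; an equivalent branch-free formulation (shown only as a sketch) is:

import numpy as np

def wrap_to_pi(theta: float) -> float:
    # Wrap an angle to [-pi, pi] via the periodicity of sin/cos.
    return np.arctan2(np.sin(theta), np.cos(theta))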