Example #1
 def test_is_unit(self):
     q1 = Quaternion()
     q2 = Quaternion(1.0, 0, 0, 0.0001)
     self.assertTrue(q1.is_unit())
     self.assertFalse(q2.is_unit())
     self.assertTrue(q2.is_unit(0.001))
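pyquaternion's default is_unit() tolerance is very tight, which is why the slightly perturbed q2 above fails the plain check but passes once the looser 1e-3 tolerance is supplied. A minimal standalone sketch of the same behaviour:

from pyquaternion import Quaternion

q2 = Quaternion(1.0, 0, 0, 0.0001)
print(abs(1.0 - q2.norm))               # ~5e-9: close to unit length, but not exact
print(q2.is_unit(), q2.is_unit(0.001))  # False, True
print(q2.normalised.is_unit())          # True once normalised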
Example #2
def pointcloud_color_from_image(nusc: NuScenes, pointsensor_token: str,
                                camera_token: str):
    """
    Given a point sensor (lidar/radar) token and camera sample_data token, load point-cloud and map it to the image
    plane, then retrieve the colors of the closest image pixels.
    :param nusc: NuScenes instance.
    :param pointsensor_token: Lidar/radar sample_data token.
    :param camera_token: Camera sample data token.
    :return (coloring <np.float: 3, n>, mask <np.bool: m>). Returns the colors for n points that reproject into the
        image out of m total points. The mask indicates which points are selected.
    """

    cam = nusc.get('sample_data', camera_token)
    pointsensor = nusc.get('sample_data', pointsensor_token)

    pc = LidarPointCloud.from_file(
        osp.join(nusc.dataroot, pointsensor['filename']))
    im = Image.open(osp.join(nusc.dataroot, cam['filename']))

    # Points live in the point sensor frame, so they need to be transformed via the global frame to the image plane.
    # First step: transform the point-cloud to the ego vehicle frame for the timestamp of the sweep.
    cs_record = nusc.get('calibrated_sensor',
                         pointsensor['calibrated_sensor_token'])
    pc.rotate(Quaternion(cs_record['rotation']).rotation_matrix)
    pc.translate(np.array(cs_record['translation']))

    # Second step: transform to the global frame.
    poserecord = nusc.get('ego_pose', pointsensor['ego_pose_token'])
    pc.rotate(Quaternion(poserecord['rotation']).rotation_matrix)
    pc.translate(np.array(poserecord['translation']))

    # Third step: transform into the ego vehicle frame for the timestamp of the image.
    poserecord = nusc.get('ego_pose', cam['ego_pose_token'])
    pc.translate(-np.array(poserecord['translation']))
    pc.rotate(Quaternion(poserecord['rotation']).rotation_matrix.T)

    # Fourth step: transform into the camera.
    cs_record = nusc.get('calibrated_sensor', cam['calibrated_sensor_token'])
    pc.translate(-np.array(cs_record['translation']))
    pc.rotate(Quaternion(cs_record['rotation']).rotation_matrix.T)

    # Fifth step: actually take a "picture" of the point cloud.
    # Grab the depths (camera frame z axis points away from the camera).
    depths = pc.points[2, :]

    # Take the actual picture (matrix multiplication with camera-matrix + renormalization).
    points = view_points(pc.points[:3, :],
                         np.array(cs_record['camera_intrinsic']),
                         normalize=True)

    # Remove points that are either outside or behind the camera. Leave a margin of 1 pixel for aesthetic reasons.
    mask = np.ones(depths.shape[0], dtype=bool)
    mask = np.logical_and(mask, depths > 0)
    mask = np.logical_and(mask, points[0, :] > 1)
    mask = np.logical_and(mask, points[0, :] < im.size[0] - 1)
    mask = np.logical_and(mask, points[1, :] > 1)
    mask = np.logical_and(mask, points[1, :] < im.size[1] - 1)
    points = points[:, mask]

    # Pick the colors of the points
    im_data = np.array(im)
    coloring = np.zeros(points.shape)
    for i, p in enumerate(points.transpose()):
        point = p[:2].round().astype(np.int32)
        coloring[:, i] = im_data[point[1], point[0], :]

    return coloring, mask
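The four rotate/translate pairs above chain sensor -> ego (lidar timestamp) -> global -> ego (camera timestamp) -> camera. Below is a hedged sketch of the same chain written with 4x4 homogeneous matrices; record_to_matrix and the cs_lidar/pose_lidar/pose_cam/cs_cam names are illustrative stand-ins for the records fetched above, not part of the original code.

import numpy as np
from pyquaternion import Quaternion

def record_to_matrix(record):
    # Build a 4x4 pose from a nuScenes-style record ('rotation' wxyz, 'translation' xyz).
    T = np.eye(4)
    T[:3, :3] = Quaternion(record['rotation']).rotation_matrix
    T[:3, 3] = np.array(record['translation'])
    return T

# With cs_lidar / pose_lidar / pose_cam / cs_cam fetched as in the function above:
# T_total = inv(record_to_matrix(cs_cam)) @ inv(record_to_matrix(pose_cam)) \
#           @ record_to_matrix(pose_lidar) @ record_to_matrix(cs_lidar)
# points_cam = T_total @ homogeneous_points_in_sensor_frame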
Example #3
    def generate_propagation_parameters(self):
        '''
        ---------------------------------------------
        generate_propagation_parameters()
        ---------------------------------------------
        
        This method finds the parameters to pass to the opencl kernel to correctly
        define the sampling grid. 

        '''
        # Find the unit normal vector to the plane.

        unit_normal = (1 /
                       np.linalg.norm(self.normal_vector)) * self.normal_vector

        # Define the quaternion required to rotate the parallel vector by an angle of pi/2.

        quaternion = Quaternion(
            axis=[unit_normal[0], unit_normal[1], unit_normal[2]],
            angle=tau / 4)

        # Rotate the parallel vector to get a new vector which is perpendicular to the normal and the parallel vector.

        self.vector_2 = quaternion.rotate(self.parallel_vector)

        # Find the norm of these vectors.

        parallel_vector_norm = np.linalg.norm(self.parallel_vector)
        vector_2_norm = np.linalg.norm(self.vector_2)

        # Find the unit vectors.

        unit_parallel_vector = (1 /
                                parallel_vector_norm) * self.parallel_vector

        unit_vector_2 = (1 / vector_2_norm) * self.vector_2

        # Compute the angle between these vectors to check that it is equal to pi/2.

        angle_1 = int(
            np.round(
                np.rad2deg(
                    np.arccos(np.dot(unit_parallel_vector, unit_vector_2))),
                1))

        # Compute the angle between each of these vectors and the normal to ensure that they parameterise the plane perpendicular to the normal.

        normal_vector_norm = np.linalg.norm(self.normal_vector)
        angle_2 = int(
            np.round(
                np.rad2deg(
                    np.arccos(
                        np.dot(self.parallel_vector, self.normal_vector) /
                        (parallel_vector_norm * normal_vector_norm))), 1))
        angle_3 = int(
            np.round(
                np.rad2deg(
                    np.arccos(
                        np.dot(self.vector_2, self.normal_vector) /
                        (vector_2_norm * normal_vector_norm))), 1))

        # Throw error if they are not.

        if angle_1 != 90 or angle_2 != 90 or angle_3 != 90:
            raise Exception(
                'Could not parameterise the requested sampling grid correctly!'
            )

        self.x_lim = np.float32(self.N_x / 2)
        self.y_lim = np.float32(self.N_y / 2)
        self.vx1 = np.float32(self.parallel_vector[0])
        self.vy1 = np.float32(self.parallel_vector[1])
        self.vz1 = np.float32(self.parallel_vector[2])
        self.x0 = np.float32(self.origin[0])
        self.y0 = np.float32(self.origin[1])
        self.z0 = np.float32(self.origin[2])
        self.vx2 = np.float32(np.round(self.vector_2[0], 4))
        self.vy2 = np.float32(np.round(self.vector_2[1], 4))
        self.vz2 = np.float32(np.round(self.vector_2[2], 4))
Example #4
def normalise(q):
    # Hypothetical wrapper: only this return statement survives in the excerpt.
    return q.normalised


# setup 3D plot
fig = plt.figure('Quaternion attitude control')
ay = fig.add_subplot(111, projection='3d')
ay.set_xlim([-5, 1])
ay.set_ylim([-5, 1])
ay.set_zlim([-5, 1])
ay.set_xlabel('X')
ay.set_ylabel('Y')
ay.set_zlabel('Z')

# initialize state
steps = 0
q = qd = Quaternion()  # attitude state
v = np.array([0., 0., 0.])  # velocity state
p = np.array([0., 0., 0.])  # position state

# specify goal and parameters
pd = np.array([-5, 0, 0])  # desired position
yd = radians(180)  # desired yaw
dt = 0.2  # time steps

# run simulation until the goal is reached
while steps < 1000 and (not np.isclose(p, pd).all() or
                        (q.inverse * qd).degrees > 0.1):
    # plot current vehicle body state abstraction
    plotrotc(q, p)

    # run minimal position & velocity control
Example #5
def load_calib_data(path_total_dataset, name_camera_calib, tf_tree):
    """
    :param path_total_dataset: Path to dataset root dir
    :param name_camera_calib: Camera calib file containing image intrinsic
    :param tf_tree: TF (transformation) tree containing translations from velodyne to cameras
    :return:
    """

    with open(os.path.join(path_total_dataset, name_camera_calib), 'r') as f:
        data_camera = json.load(f)

    with open(os.path.join(path_total_dataset, tf_tree), 'r') as f:
        data_extrinsics = json.load(f)

    calib_dict = {
        'calib_cam_stereo_left.json': 'cam_stereo_left_optical',
        'calib_cam_stereo_right.json': 'cam_stereo_right_optical',
        'calib_gated_bwv.json': 'bwv_cam_optical'
    }

    cam_name = calib_dict[name_camera_calib]

    # Scan data extrinsics for transformation from lidar to camera
    important_translations = ['lidar_hdl64_s3_roof', 'radar_ars300', cam_name]
    translations = []

    for item in data_extrinsics:
        if item['child_frame_id'] in important_translations:
            translations.append(item)
            if item['child_frame_id'] == cam_name:
                T_cam = item['transform']
            elif item['child_frame_id'] == 'lidar_hdl64_s3_roof':
                T_velodyne = item['transform']
            elif item['child_frame_id'] == 'radar_ars300':
                T_radar = item['transform']

    # Use pyquaternion to set up rotation matrices. The rad-to-deg style scaling of
    # the quaternion components below only changes the quaternion's norm, and
    # pyquaternion normalises before building the rotation matrix, so the resulting
    # matrices are unaffected by it.
    R_c_quaternion = Quaternion(w=T_cam['rotation']['w'] * 360 / 2 / np.pi,
                                x=T_cam['rotation']['x'] * 360 / 2 / np.pi,
                                y=T_cam['rotation']['y'] * 360 / 2 / np.pi,
                                z=T_cam['rotation']['z'] * 360 / 2 / np.pi)
    R_v_quaternion = Quaternion(
        w=T_velodyne['rotation']['w'] * 360 / 2 / np.pi,
        x=T_velodyne['rotation']['x'] * 360 / 2 / np.pi,
        y=T_velodyne['rotation']['y'] * 360 / 2 / np.pi,
        z=T_velodyne['rotation']['z'] * 360 / 2 / np.pi)

    # Setup quaternion values as 3x3 orthogonal rotation matrices
    R_c_matrix = R_c_quaternion.rotation_matrix
    R_v_matrix = R_v_quaternion.rotation_matrix

    # Setup translation Vectors
    Tr_cam = np.asarray([
        T_cam['translation']['x'], T_cam['translation']['y'],
        T_cam['translation']['z']
    ])
    Tr_velodyne = np.asarray([
        T_velodyne['translation']['x'], T_velodyne['translation']['y'],
        T_velodyne['translation']['z']
    ])
    Tr_radar = np.asarray([
        T_radar['translation']['x'], T_radar['translation']['y'],
        T_radar['translation']['z']
    ])

    # Set up homogeneous pose matrices. Each ROS TF transform maps a child frame
    # into its parent frame, so the camera pose is inverted further below to map
    # points into the camera frame.
    zero_to_camera = np.zeros((3, 4))
    zero_to_camera[0:3, 0:3] = R_c_matrix
    zero_to_camera[0:3, 3] = Tr_cam
    zero_to_camera = np.vstack((zero_to_camera, np.array([0, 0, 0, 1])))

    zero_to_velodyne = np.zeros((3, 4))
    zero_to_velodyne[0:3, 0:3] = R_v_matrix
    zero_to_velodyne[0:3, 3] = Tr_velodyne
    zero_to_velodyne = np.vstack((zero_to_velodyne, np.array([0, 0, 0, 1])))

    zero_to_radar = zero_to_velodyne.copy()
    zero_to_radar[0:3, 3] = Tr_radar

    # Calculate total extrinsic transformation to camera
    velodyne_to_camera = np.matmul(np.linalg.inv(zero_to_camera),
                                   zero_to_velodyne)
    camera_to_velodyne = np.matmul(np.linalg.inv(zero_to_velodyne),
                                   zero_to_camera)
    radar_to_camera = np.matmul(np.linalg.inv(zero_to_camera), zero_to_radar)

    # Read projection matrix P and camera rectification matrix R
    P = np.reshape(data_camera['P'], [3, 4])

    # In our case rectification matrix R has to be equal to the identity as the projection matrix P contains the
    # R matrix w.r.t KITTI definition
    R = np.identity(4)

    # Calculate total transformation matrix from velodyne to camera
    vtc = np.matmul(np.matmul(P, R), velodyne_to_camera)

    return velodyne_to_camera, camera_to_velodyne, P, R, vtc, radar_to_camera, zero_to_camera
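A hedged usage sketch for the returned matrices: vtc is the 3x4 product P @ R @ velodyne_to_camera, so homogeneous lidar points can be projected straight to pixel coordinates. The helper name and point array below are illustrative, not part of the dataset tooling.

import numpy as np

def project_lidar_points(points_xyz, vtc):
    # points_xyz: (N, 3) lidar points; vtc: (3, 4) matrix from load_calib_data.
    pts_h = np.hstack([points_xyz, np.ones((len(points_xyz), 1))])  # (N, 4) homogeneous
    uvw = vtc @ pts_h.T                                             # (3, N)
    return (uvw[:2] / uvw[2:3]).T                                   # (N, 2) pixel coordinates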
Example #6
CAM_EGO = CAM_SAMPLE['ego_pose_token']
EGO = nusc.get('ego_pose', token=CAM_EGO)

CAM_SAMPLE_next_token = first_token_meta['next']
next_sample = nusc.get('sample', token=CAM_SAMPLE_next_token)
CAM_SAMPLE_next = nusc.get('sample_data', token=next_sample['data'][sensor])
CAM_EGO_next = CAM_SAMPLE_next['ego_pose_token']
EGO_next = nusc.get('ego_pose', token=CAM_EGO_next)

current_to_next = get_relative_pose(EGO, EGO_next)

H_pose1 = np.zeros((4, 4))
H_pose2 = np.zeros((4, 4))
H_pose1[3, 3] = 1
H_pose2[3, 3] = 1
pose1_rot = Quaternion(EGO['rotation']).rotation_matrix
pose1_tran = EGO['translation']
H_pose1[0:3, 0:3] = pose1_rot
H_pose1[0:3, 3] = pose1_tran

pose2_rot = Quaternion(EGO_next['rotation']).rotation_matrix
pose2_tran = EGO_next['translation']
H_pose2[0:3, 0:3] = pose2_rot
H_pose2[0:3, 3] = pose2_tran

H2_recovered = np.dot(H_pose1, current_to_next)

# Rebuild the first pose matrix from another pose record; `pose1` is assumed to be
# an ego_pose-style dict (it is not defined in this excerpt).
pose1_rot = Quaternion(pose1['rotation']).rotation_matrix
pose1_tran = pose1['translation']
H_pose1[0:3, 0:3] = pose1_rot
H_pose1[0:3, 3] = pose1_tran
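get_relative_pose itself is not shown in this fragment. Here is a hedged sketch of what the H2_recovered check above assumes it computes; the helper names are illustrative.

import numpy as np
from pyquaternion import Quaternion

def pose_to_matrix(ego_pose):
    H = np.eye(4)
    H[:3, :3] = Quaternion(ego_pose['rotation']).rotation_matrix
    H[:3, 3] = ego_pose['translation']
    return H

def get_relative_pose_sketch(ego_a, ego_b):
    # Relative transform taking pose A to pose B: H_ab = inv(H_a) @ H_b,
    # so that H_a @ H_ab == H_b, which is the H2_recovered identity above.
    return np.linalg.inv(pose_to_matrix(ego_a)) @ pose_to_matrix(ego_b)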
Example #7
    loc = Localization(name, config['model'], config, build_db=args.build_db)

    query_file = f'{args.slice}.queries_with_intrinsics.txt'
    queries, query_dataset = loc.init_queries(query_file, config['cmu'])

    logging.info('Starting evaluation')
    metrics, results = evaluate(loc,
                                queries,
                                query_dataset,
                                max_iter=args.max_iter)
    logging.info('Evaluation metrics: \n' + pformat(metrics))

    output = {'config': config, 'metrics': metrics}
    output_dir = Path(EXPER_PATH, 'eval/cmu')
    output_dir.mkdir(exist_ok=True, parents=True)
    eval_filename = f'{args.eval_name}_{args.slice}'
    eval_path = Path(output_dir, f'{eval_filename}.yaml')
    with open(eval_path, 'w') as f:
        yaml.dump(output, f, default_flow_style=False)

    if args.export_poses:
        poses_path = Path(output_dir, f'{eval_filename}_poses.txt')
        with open(poses_path, 'w') as f:
            for query, result in zip(queries, results):
                query_T_w = np.linalg.inv(result.T)
                qvec_nvm = list(Quaternion(matrix=query_T_w))
                pos_nvm = query_T_w[:3, 3].tolist()
                name = '/'.join(query.name.split('/')[-2:])
                line = name + ' ' + ' '.join(map(str, qvec_nvm + pos_nvm))
                f.write(line + '\n')
Example #8
 def test_sym_distance(self):
     q = Quaternion(scalar=0, vector=[1,0,0])
     p = Quaternion(scalar=0, vector=[0,1,0])
     self.assertEqual(pi/2, Quaternion.sym_distance(q,p))
     q = Quaternion(angle=pi/2, axis=[1,0,0])
     p = Quaternion(angle=pi/2, axis=[0,1,0])
     self.assertAlmostEqual(pi/3, Quaternion.sym_distance(q,p), places=6)
     q = Quaternion(scalar=0, vector=[1,0,0])
     p = Quaternion(scalar=0, vector=[0,-1,0])
     self.assertEqual(pi/2, Quaternion.sym_distance(q,p))
     q = Quaternion(scalar=1, vector=[1,1,1])
     p = Quaternion(scalar=-1, vector=[-1,-1,-1])
     p._normalise()
     q._normalise()
     self.assertAlmostEqual(pi, Quaternion.sym_distance(q,p), places=8)
Example #9
 def test_slerp(self):
     q1 = Quaternion(axis=[1, 0, 0], angle=0.0)
     q2 = Quaternion(axis=[1, 0, 0], angle=pi/2)
     q3 = Quaternion.slerp(q1, q2, 0.5)
     self.assertEqual(q3, Quaternion(axis=[1,0,0], angle=pi/4))
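Slerp at t=0.5 between the identity and a 90 degree rotation about x is a 45 degree rotation about x, which is exactly what the assertion checks. The same call run standalone:

from math import pi
from pyquaternion import Quaternion

q_mid = Quaternion.slerp(Quaternion(axis=[1, 0, 0], angle=0.0),
                         Quaternion(axis=[1, 0, 0], angle=pi / 2), 0.5)
print(q_mid.degrees)  # ~45.0, about the x axis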
Example #10
 def test_log(self):
     from math import log
     q = Quaternion(axis=[1,0,0], angle=pi)
     log_q = Quaternion.log(q)
     self.assertEqual(log_q, Quaternion(scalar=0, vector=[pi/2,0,0]))
Example #11
 def test_absolute_distance(self):
     q = Quaternion(scalar=0, vector=[1,0,0])
     p = Quaternion(scalar=0, vector=[0,1,0])
     self.assertEqual((q-p).norm, Quaternion.absolute_distance(q,p))
     q = Quaternion(angle=pi/2, axis=[1,0,0])
     p = Quaternion(angle=pi/2, axis=[0,1,0])
     self.assertEqual((q-p).norm, Quaternion.absolute_distance(q,p))
     q = Quaternion(scalar=0, vector=[1,0,0])
     p = Quaternion(scalar=-1, vector=[0,-1,0])
     self.assertEqual((q+p).norm, Quaternion.absolute_distance(q,p))
     q = Quaternion(scalar=1, vector=[1,1,1])
     p = Quaternion(scalar=-1, vector=[-1,-1,-1])
     p._normalise()
     q._normalise()
     self.assertAlmostEqual(0, Quaternion.absolute_distance(q,p), places=8)
Example #12
 def test_exp(self):
     from math import exp
     q = Quaternion(axis=[1,0,0], angle=pi)
     exp_q = Quaternion.exp(q)
     self.assertEqual(exp_q, exp(0) * Quaternion(scalar=cos(1.0), vector=[sin(1.0), 0,0]))
Example #13
    def test_rotate(self):
        q  = Quaternion(axis=[1,1,1], angle=2*pi/3)
        q2 = Quaternion(axis=[1, 0, 0], angle=-pi)
        q3 = Quaternion(axis=[1, 0, 0], angle=pi)
        precision = ALMOST_EQUAL_TOLERANCE
        for r in [1, 3.8976, -69.7, -0.000001]:
            # use np.testing.assert_almost_equal() to compare float sequences
            np.testing.assert_almost_equal(q.rotate((r, 0, 0)), (0, r, 0), decimal=ALMOST_EQUAL_TOLERANCE)
            np.testing.assert_almost_equal(q.rotate([0, r, 0]), [0, 0, r], decimal=ALMOST_EQUAL_TOLERANCE)
            np.testing.assert_almost_equal(q.rotate(np.array([0, 0, r])), np.array([r, 0, 0]), decimal=ALMOST_EQUAL_TOLERANCE)
            self.assertEqual(q.rotate(Quaternion(vector=[-r, 0, 0])), Quaternion(vector=[0, -r, 0]))
            np.testing.assert_almost_equal(q.rotate([0, -r, 0]), [0, 0, -r], decimal=ALMOST_EQUAL_TOLERANCE)
            self.assertEqual(q.rotate(Quaternion(vector=[0, 0, -r])), Quaternion(vector=[-r, 0, 0]))

            np.testing.assert_almost_equal(q2.rotate((r, 0, 0)), q3.rotate((r, 0, 0)), decimal=ALMOST_EQUAL_TOLERANCE)
            np.testing.assert_almost_equal(q2.rotate((0, r, 0)), q3.rotate((0, r, 0)), decimal=ALMOST_EQUAL_TOLERANCE)
            np.testing.assert_almost_equal(q2.rotate((0, 0, r)), q3.rotate((0, 0, r)), decimal=ALMOST_EQUAL_TOLERANCE)
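The first quaternion in the test (a 120 degree turn about [1, 1, 1]) cyclically permutes the coordinate axes, x -> y -> z -> x, which is what each assertion above encodes. A quick standalone check:

import numpy as np
from pyquaternion import Quaternion

q = Quaternion(axis=[1, 1, 1], angle=2 * np.pi / 3)
print(np.round(q.rotate([1.0, 0.0, 0.0]), 6))  # [0. 1. 0.]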
Example #14
 def test_output_of_elements(self):
     r = randomElements()
     q = Quaternion(*r)
     self.assertEqual(tuple(q.elements), r)
Example #15
def record(client, start_pos, end_pos, start_quat, end_quat, N):
    # generate positions
    pos_diff = end_pos - start_pos
    pos_diff = np.repeat(np.reshape(pos_diff, [1, 3]), N, 0)
    inc = np.reshape(np.linspace(0, 1, num=N), [N, 1])
    pos_diff *= inc
    start_pos = np.repeat(np.reshape(start_pos, [1, 3]), N, 0)
    pos = start_pos + pos_diff

    # generate orientations
    qs = Quaternion.intermediates(start_quat,
                                  end_quat,
                                  N,
                                  include_endpoints=False)

    # combine
    for i, q in enumerate(qs):
        print(pos[i, :], q.rotation_matrix)
        responses = get_image_and_depth(client, pos[i, :], q)
        for j, response in enumerate(responses):
            if response.pixels_as_float:
                airsim.write_pfm(
                    os.path.normpath('depth{:07d}.pfm'.format(i + 1)),
                    airsim.get_pfm_array(response))
            else:
                airsim.write_file(
                    os.path.normpath('image{:07d}.png'.format(i + 1)),
                    response.image_data_uint8)

        R = q.rotation_matrix
        rx = R[1, :]
        ry = -R[2, :]
        rz = -R[0, :]

        def rotm(rx, ry, rz):
            return np.array([-rz, rx, -ry])

        savemat("pose{:07d}.mat".format(i + 1), {'C': pos[i, :], 'R': R})

        thetas = [-20, -10, 10, 20]
        for theta in thetas:
            th = np.radians(theta)
            R = rotm(rx * cos(th) + ry * sin(th), -rx * sin(th) + ry * cos(th),
                     rz)
            response = get_image(client, pos[i, :], Quaternion(matrix=R))
            airsim.write_file(
                os.path.normpath('image{:07d}_rotz{}.png'.format(i + 1,
                                                                 theta)),
                response.image_data_uint8)
            savemat("pose{:07d}_rotz{}.mat".format(i + 1, theta), {
                'C': pos[i, :],
                'R': R
            })

            R = rotm(-rz * sin(th) + rx * cos(th), ry,
                     rz * cos(th) + rx * sin(th))
            response = get_image(client, pos[i, :], Quaternion(matrix=R))
            airsim.write_file(
                os.path.normpath('image{:07d}_roty{}.png'.format(i + 1,
                                                                 theta)),
                response.image_data_uint8)
            savemat("pose{:07d}_roty{}.mat".format(i + 1, theta), {
                'C': pos[i, :],
                'R': R
            })

            R = rotm(rx,
                     ry * cos(th) + rz * sin(th), -ry * sin(th) + rz * cos(th))
            response = get_image(client, pos[i, :], Quaternion(matrix=R))
            airsim.write_file(
                os.path.normpath('image{:07d}_rotx{}.png'.format(i + 1,
                                                                 theta)),
                response.image_data_uint8)
            savemat("pose{:07d}_rotx{}.mat".format(i + 1, theta), {
                'C': pos[i, :],
                'R': R
            })
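Quaternion.intermediates, used above to generate the camera orientations, yields evenly spaced slerp samples between the two endpoint rotations. A small standalone run with illustrative values:

from pyquaternion import Quaternion

qa = Quaternion(axis=[0, 0, 1], degrees=0)
qb = Quaternion(axis=[0, 0, 1], degrees=90)
for q in Quaternion.intermediates(qa, qb, 2, include_endpoints=True):
    print(round(q.degrees, 1))  # 0.0, 30.0, 60.0, 90.0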
Example #16
def extract_db(config, dir_meta, cameras):
    """

    :param config: config from config.yaml
    :param dir_meta: determine extraction of train or test
    :param cameras:
    :return:
    """
    dataset = []  # all images of train or test
    all_joints = dt.vicon_joints
    for dir, meta in dir_meta:  # one action contains 8 cam views
        meta_sub = meta['subject']
        meta_act = config['action_reverse_map'][
            meta['action']]  # action string name
        meta_subact = meta['subaction']

        gt_pos_path = os.path.join(dir, 'gt_skel_gbl_pos.txt')
        gt_ori_path = os.path.join(dir, 'gt_skel_gbl_ori.txt')
        calib_imu_bone_path = os.path.join(
            dir, 's{}_{}{}_calib_imu_bone.txt'.format(meta_sub, meta_act,
                                                      meta_subact))
        calib_imu_ref_path = os.path.join(
            dir, 's{}_{}{}_calib_imu_ref.txt'.format(meta_sub, meta_act,
                                                     meta_subact))
        imu_data_path = os.path.join(
            dir, 's{}_{}{}_Xsens.sensors'.format(meta_sub, meta_act,
                                                 meta_subact))
        bvh_path = os.path.join(
            dir, '{}{}_BlenderZXY_YmZ.bvh'.format(meta_act, meta_subact))
        gt_pos = dt.parse_vicon_gt_pos(gt_pos_path)
        gt_ori = dt.parse_vicon_gt_ori(gt_ori_path)
        imu_data = dt.parse_sensor_6axis(imu_data_path)
        calib_imu_bone = dt.parse_calib_imu_bone(calib_imu_bone_path)
        calib_imu_ref = dt.parse_calib_imu_ref(calib_imu_ref_path)
        bone_info = dt.parse_imu_bone_info(bvh_path)
        canvas_size = (1079., 1919.)  # height width

        filtered_joints = config['joints_filter']

        # bone vector / orientation, not camera related
        bones = [
            'Head', 'Sternum', 'Pelvis', 'L_UpArm', 'R_UpArm', 'L_LowArm',
            'R_LowArm', 'L_UpLeg', 'R_UpLeg', 'L_LowLeg', 'R_LowLeg'
        ]
        # obtain ref for all bones
        bone_refs = dict()
        for bone in bones:
            joint_p = bone_info[bone][0]
            joint_c = bone_info[bone][1]
            bone_vec = np.array(bone_info[bone][2]) * 25.4
            q_TI = calib_imu_ref[bone]
            q_bi = calib_imu_bone[bone]
            q_TI = Quaternion(q_TI)
            q_bi = Quaternion(q_bi)
            q_ib = q_bi.conjugate
            bone_refs[bone] = {
                'joint_p': joint_p,
                'joint_c': joint_c,
                'bone_vec': bone_vec,
                'q_TI': q_TI,
                'q_ib': q_ib
            }

        bone_vectors = dict()  # of all frames

        for c in range(8):
            mp4_file_name = 'TC_S{}_{}{}_cam{}.mp4'.format(
                meta_sub, meta_act, meta_subact, c + 1)
            mp4_file_path = os.path.join(dir, mp4_file_name)
            cam = cameras[c]
            vid_info = ffprobe(mp4_file_path)
            vid_frame_num = int(vid_info['video']['@nb_frames'])

            # print(mp4_file_name, vid_info['video']['@nb_frames'], len(gt_pos)- int(vid_info['video']['@nb_frames']),
            #       vid_info['video']['@bit_rate'])

            out_path = os.path.join(config['db_out_dir'], 'marked')
            out_path = os.path.join(
                out_path, 'sub{}_{}_{}_cam{}'.format(meta_sub, meta_act,
                                                     meta_subact, c + 1))
            if config['save_visualization']:
                if not os.path.exists(out_path):
                    os.makedirs(out_path)

            # where to save extract frames
            seq_dir_name = 's_{:0>2}_act_{:0>2}_subact_{:0>2}_ca_{:0>2}'.format(
                meta_sub, meta['action'], meta_subact, c + 1)
            seq_dir_path = os.path.join(config['db_out_dir'], seq_dir_name)
            if config['save_frame']:
                if not os.path.exists(seq_dir_path):
                    os.makedirs(seq_dir_path)

            vid_ff = vreader(mp4_file_path)
            min_frame_to_iter = min(vid_frame_num, len(gt_pos), len(gt_ori),
                                    len(imu_data))
            for idx in tqdm(range(min_frame_to_iter)):
                pose3d = np.zeros([3, len(all_joints)])
                for idx_j, j in enumerate(all_joints):
                    pose3d[:, idx_j] = gt_pos[idx][j]
                pose3d = pose3d * 0.0254  # inch to meter
                pose2d = project_pose3d_to_2d(pose3d, cam, do_distor_corr=True)

                if config['save_visualization'] or config['save_frame']:
                    aframe = next(vid_ff)
                if config[
                        'save_visualization']:  # skeleton visualization save to disk
                    out_file_path = os.path.join(out_path,
                                                 '{:0>6d}.jpg'.format(idx))
                    marked_img = _visualize_one_frame(
                        aframe, pose2d)  # todo vis box on image
                    img_4save = cv2.cvtColor(marked_img, cv2.COLOR_RGB2BGR)
                    cv2.imwrite(out_file_path, img_4save)

                # cropping box information
                p2d, p3d_cam, p3d, vis = filter_and_project_2d_pose(
                    gt_pos[idx],
                    filtered_joints,
                    cam,
                    canvas_size,
                    do_distor_corr=True)
                mvpose_vis = np.reshape([vis / 2., vis / 2., vis / 2.],
                                        (3, -1))
                # vis follows the COCO protocol; divide by 2 and replicate 3 times to match the mvpose format
                root_joint = project_pose3d_to_cam(
                    np.reshape(gt_pos[idx]['Hips'], (3, -1)) * 0.0254, cam)
                tl_joint = np.copy(root_joint)  # shape (3,1)
                br_joint = np.copy(root_joint)
                tl_joint[0, 0] -= 1.0000
                tl_joint[1, 0] -= 0.9000
                br_joint[0, 0] += 1.0000
                br_joint[1, 0] += 1.1000
                bbox_25d = np.concatenate((root_joint, tl_joint, br_joint),
                                          axis=1)
                bbox = project_cam_to_uv(
                    bbox_25d, cam,
                    do_distor_corr=True)  # contain 3 point: center, tl, br

                box_center = tuple(bbox[:, 0])  # (x, y)
                box_scale = tuple((bbox[:, 2] - bbox[:, 1]) / 200.)
                box = tuple(np.concatenate([bbox[:, 2], bbox[:, 1]
                                            ]))  # (x_tl, y_tl, x_br, y_br)

                frame_file_name = '{:0>6d}.jpg'.format(idx)
                frame_file_path = os.path.join(seq_dir_path, frame_file_name)
                if config['save_frame']:  # save video frame to disk
                    frame_to_cv = cv2.cvtColor(aframe, cv2.COLOR_RGB2BGR)
                    cv2.imwrite(frame_file_path, frame_to_cv)

                # notice: Difference between totalcapture and h36m project,
                # (1) joints_3d in mm
                # (2) camera['T'] in mm
                # (3) in totalcapture: point_Camera = R.dot(point_Tracking) + T (point and T in m);
                #     in h36m: point_Camera = R.dot(point_Tracking - T)  (point and T in mm)
                #     aka in h36m: point_Tracking = R^{-1}.dot(point_Camera) + T
                # (4) coordinates shape is (num_cords, 3), aka row vector, but I like col vector more
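                # Deriving the conversion below: equating the TotalCapture model
                # p_cam = R.dot(p_track) + T (metres) with the h36m model
                # p_cam = R.dot(p_track - T_h36m) gives T_h36m = -R.T.dot(T),
                # which is what the two 'T' assignments compute, together with
                # the metre-to-millimetre factor of 1000.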
                cam_in_h36m_format = copy.deepcopy(cam)
                cam_in_h36m_format['R'] = cam_in_h36m_format['R']
                cam_in_h36m_format['T'] = cam_in_h36m_format['T'] * 1000. * (
                    -1.)
                cam_in_h36m_format['T'] = cam_in_h36m_format['R'].T.dot(
                    cam_in_h36m_format['T'])
                del cam_in_h36m_format['intri_mat']
                del cam_in_h36m_format['extri_mat']

                # bone vector
                # avoid parsing in each view, only in first view
                if idx not in bone_vectors:
                    bone_vector_of_one_frame = dict()
                    for bone in bones:
                        q_TI = bone_refs[bone]['q_TI']
                        q_ib = bone_refs[bone]['q_ib']
                        bone_vec = bone_refs[bone]['bone_vec']

                        ori = imu_data[idx][bone][0]
                        q_Ii = Quaternion(ori)
                        q_Tb = q_TI * q_Ii * q_ib
                        rotated_bone_vec = q_Tb.rotate(bone_vec)
                        bone_vector_of_one_frame[bone] = rotated_bone_vec
                    bone_vectors[idx] = bone_vector_of_one_frame

                dataitem = {
                    'image': os.path.join(seq_dir_name,
                                          '{:0>6d}.jpg'.format(idx)),
                    'joints_2d': p2d.T,
                    'joints_3d':
                    (p3d_cam *
                     1000.).T,  # 3d pose in camera frame, for psm evaluation
                    'joints_vis': mvpose_vis.T,  # 0: invisible, 1: visible.
                    'center': box_center,
                    'scale': box_scale,
                    'box': box,
                    'video_id': mp4_file_name,  # mp4 file name  # todo
                    'image_id': idx,
                    'subject': meta['subject'],
                    'action': meta['action'],
                    'subaction': meta['subaction'],
                    'camera_id': c,  # start from 0
                    'camera': cam_in_h36m_format,
                    'source': 'totalcapture',
                    'bone_vec': bone_vectors[idx],
                    'joints_gt': p3d.T * 1000.  # groundtruth in tracking frame
                }

                dataset.append(dataitem)

    return dataset
Example #17
import time
import math
import numpy as np
from pyquaternion import Quaternion
from isecc import transform
from isecc import starparse
from isecc import symops
from isecc import checks
from isecc import utils
from isecc.isecc_classes import Particle

I1Quaternions = symops.getSymOps()
my_vector = np.array( [0, 0.618, 1] )


my_arctan = np.arctan( np.true_divide( my_vector[1], my_vector[2] ) ) 
quaternion_toZ = Quaternion(axis=(1.0,0.0,0.0), radians=my_arctan)


my_particle = Particle( 'dummy.mrc', 0.256, -0.432, 58.46, 126.2, -70.5 )
my_particle.add_defocus_info( 32350.43, 30611.52, 13.18, 0 )

print( my_particle )

my_particle.rotateParticle(I1Quaternions[0], quaternion_toZ)
print( vars(my_particle) )

my_particle.defineSubparticle(my_vector)
print( vars(my_particle) )

Example #18
def yaw2quaternion(yaw: float) -> Quaternion:
    """
    Convert a yaw angle in radians into a quaternion about the z-axis.
    """
    return Quaternion(axis=[0, 0, 1], radians=yaw)
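The reverse mapping, recovering yaw from a quaternion, is what other examples here (e.g. Example #22) rely on via the yaw_pitch_roll property:

from pyquaternion import Quaternion

q = Quaternion(axis=[0, 0, 1], radians=1.25)  # same as yaw2quaternion(1.25)
print(q.yaw_pitch_roll)                       # (~1.25, 0.0, 0.0) for a pure z rotation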
Example #19
def map_pointcloud_to_image(nusc,
                            pointsensor_token: str,
                            camera_token: str,
                            min_dist: float = 1.0,
                            render_intensity: bool = False,
                            show_lidarseg: bool = False):
    """
    Given a point sensor (lidar/radar) token and camera sample_data token, load pointcloud and map it to the image
    plane.
    :param nusc: NuScenes instance.
    :param pointsensor_token: Lidar/radar sample_data token.
    :param camera_token: Camera sample_data token.
    :param min_dist: Distance from the camera below which points are discarded.
    :param render_intensity: Whether to render lidar intensity instead of point depth.
    :param show_lidarseg: Whether to render lidar segmentation labels instead of point depth.
    :return (pointcloud <np.float: 2, n>, coloring <np.float: n>, image <Image>).
    """

    cam = nusc.get('sample_data', camera_token)
    pointsensor = nusc.get('sample_data', pointsensor_token)
    pcl_path = osp.join(nusc.dataroot, pointsensor['filename'])
    if pointsensor['sensor_modality'] == 'lidar':
        # Ensure that lidar pointcloud is from a keyframe.
        assert pointsensor['is_key_frame'], \
            'Error: Only pointclouds which are keyframes have lidar segmentation labels. Rendering aborted.'
        pc = LidarPointCloud.from_file(pcl_path)
    else:
        pc = RadarPointCloud.from_file(pcl_path)
    im = Image.open(osp.join(nusc.dataroot, cam['filename']))

    # Points live in the point sensor frame, so they need to be transformed via the global frame to the image plane.
    # First step: transform the pointcloud to the ego vehicle frame for the timestamp of the sweep.
    cs_record = nusc.get('calibrated_sensor',
                         pointsensor['calibrated_sensor_token'])
    pc.rotate(Quaternion(cs_record['rotation']).rotation_matrix)
    pc.translate(np.array(cs_record['translation']))

    # Second step: transform from ego to the global frame.
    poserecord = nusc.get('ego_pose', pointsensor['ego_pose_token'])
    pc.rotate(Quaternion(poserecord['rotation']).rotation_matrix)
    pc.translate(np.array(poserecord['translation']))

    # Third step: transform from global into the ego vehicle frame for the timestamp of the image.
    poserecord = nusc.get('ego_pose', cam['ego_pose_token'])
    pc.translate(-np.array(poserecord['translation']))
    pc.rotate(Quaternion(poserecord['rotation']).rotation_matrix.T)

    # Fourth step: transform from ego into the camera.
    cs_record = nusc.get('calibrated_sensor', cam['calibrated_sensor_token'])
    pc.translate(-np.array(cs_record['translation']))
    pc.rotate(Quaternion(cs_record['rotation']).rotation_matrix.T)

    # Fifth step: actually take a "picture" of the point cloud.
    # Grab the depths (camera frame z axis points away from the camera).
    depths = pc.points[2, :]

    if render_intensity:
        assert pointsensor['sensor_modality'] == 'lidar', 'Error: Can only render intensity for lidar, ' \
                                                          'not %s!' % pointsensor['sensor_modality']
        # Retrieve the color from the intensities.
        # Performs arbitrary scaling to achieve more visually pleasing results.
        intensities = pc.points[3, :]
        intensities = (intensities - np.min(intensities)) / (
            np.max(intensities) - np.min(intensities))
        intensities = intensities**0.1
        intensities = np.maximum(0, intensities - 0.5)
        coloring = intensities

    else:
        # Retrieve the color from the depth.
        coloring = depths

    # Take the actual picture (matrix multiplication with camera-matrix + renormalization).
    points = view_points(pc.points[:3, :],
                         np.array(cs_record['camera_intrinsic']),
                         normalize=True)

    # Remove points that are either outside or behind the camera. Leave a margin of 1 pixel for aesthetic reasons.
    # Also make sure points are at least 1m in front of the camera to avoid seeing the lidar points on the camera
    # casing for non-keyframes which are slightly out of sync.
    mask = np.ones(depths.shape[0], dtype=bool)
    mask = np.logical_and(mask, depths > min_dist)
    mask = np.logical_and(mask, points[0, :] > 1)
    mask = np.logical_and(mask, points[0, :] < im.size[0] - 1)
    mask = np.logical_and(mask, points[1, :] > 1)
    mask = np.logical_and(mask, points[1, :] < im.size[1] - 1)
    points = points[:, mask]
    coloring = coloring[mask]

    return points, coloring, im
Example #20
def convert_track(args):

    tracking_result_path = os.path.join(args.work_dir, 'output.json')

    print(f'Loading tracking result from {tracking_result_path}')

    with open(tracking_result_path, 'rb') as f:
        tracking_result = json.load(f)

    with open(args.gt_anns, 'rb') as f:
        gt = json.load(f)

    nusc = NuScenes(version=f'{args.version}',
                    dataroot=f'{args.root}', verbose=True)

    if args.version == 'v1.0-mini':
        start_seq_id = seq_map['mini_val_seq_id']
        vid_num = 2
    elif args.version == 'v1.0-trainval':
        start_seq_id = seq_map['val_seq_id']
        vid_num = 150
    else:
        start_seq_id = seq_map['test_seq_id']
        vid_num = 150

    for sensor in USED_SENSOR:
        # grep scene token
        for vid_info in tracking_result['videos']:
            for gt_vid_info in gt['videos']:
                if f"{int(vid_info['id']) - vid_num*(SENSOR_ID[sensor]-1) + start_seq_id:05}_{SENSOR_ID[sensor]}" == gt_vid_info['name']:
                    vid_info['scene_token'] = gt_vid_info['scene_token']
                    vid_info['sensor'] = sensor

    del gt

    nusc_annos = {
        "results": {},
        "meta": None,
    }

    first_frame_id = -1
    memo = []
    first_frame_token = []

    print("Begin Converting Tracking Result")
    for i in tqdm(range(len(tracking_result['videos']))):

        sensor = tracking_result['videos'][i]['sensor']
        scene = nusc.get('scene', tracking_result['videos'][i]['scene_token'])

        # find start frame for each video
        for frame in tracking_result['images']:
            if frame['first_frame'] and not frame['id'] in memo:
                first_frame_id = frame['id']
                memo.append(frame['id'])
                break

        # First sample (key frame)
        token = scene['first_sample_token']
        sample = nusc.get('sample', token)

        first_frame_token.append(token)

        for sample_id in range(scene['nbr_samples']):
            annos = []
            img_id = first_frame_id + sample_id
            pose_dict = tracking_result['images'][img_id]['pose']
            projection = tracking_result['images'][img_id]['cali']

            cam_to_global = R.from_euler(
                'xyz', pose_dict['rotation']).as_matrix()

            outputs = [ann for ann in tracking_result['annotations'] if ann['image_id'] == img_id]

            for output in outputs:
                # remove non-tracking objects
                cat = ''
                for key in model_cats:
                    if cats_mapping[output['category_id']] == key:
                        cat = key.lower()
                        break

                if cat == '':
                    continue

                # move to world coordinate
                translation = np.dot(
                    cam_to_global, np.array(output['translation']))
                translation += np.array(pose_dict['position'])

                quat = Quaternion(axis=[0, 1, 0], radians=output['roty'])
                x, y, z, w = R.from_matrix(cam_to_global).as_quat()
                rotation = Quaternion([w, x, y, z]) * quat

                tracking_id = str(output['instance_id'])

                h, w, l = output['dimension']

                nusc_anno = {
                    "sample_token": token,
                    "translation": translation.tolist(),
                    "size": [w, l, h],
                    "rotation": rotation.elements.tolist(),
                    "velocity": [0., 0.],
                    "tracking_id": tracking_id,
                    "tracking_name": cat,
                    "tracking_score": output['score'] * output['uncertainty'],
                }

                # align six camera
                if nusc_annos["results"].get(token) is not None:
                    nms_flag = 0
                    for all_cam_ann in nusc_annos["results"][token]:
                        if nusc_anno['tracking_name'] == all_cam_ann['tracking_name']:
                            translation = nusc_anno['translation']
                            ref_translation = all_cam_ann['translation']
                            translation_diff = (translation[0] - ref_translation[0],
                                                translation[1] - ref_translation[1],
                                                translation[2] - ref_translation[2])
                            if nusc_anno['tracking_name'] in ['pedestrian']:
                                nms_dist = 1
                            else:
                                nms_dist = 2
                            if np.sqrt(np.sum(np.array(translation_diff[:2]) ** 2)) < nms_dist:
                                if all_cam_ann['tracking_score'] < nusc_anno['tracking_score']:
                                    # Plain reassignment of the loop variable would not
                                    # modify the stored annotation, so update it in place.
                                    all_cam_ann.update(nusc_anno)
                                nms_flag = 1
                                break
                    if nms_flag == 0:
                        annos.append(nusc_anno)
                else:
                    annos.append(nusc_anno)

            if nusc_annos["results"].get(token) is not None:
                annos += nusc_annos["results"][token]
            nusc_annos["results"].update({token: annos})

            token = sample['next']
            if token != '':
                sample = nusc.get('sample', token)

    nusc_annos["meta"] = {
        "use_camera": True,
        "use_lidar": False,
        "use_radar": False,
        "use_map": False,
        "use_external": False,
    }

    with open(os.path.join(args.work_dir, 'tracking_result.json'), "w") as f:
        json.dump(nusc_annos, f)

    return nusc
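The conversion inside the loop above hinges on component order: scipy's as_quat() returns (x, y, z, w) while pyquaternion expects (w, x, y, z), hence the reordering before composing with quat. A hedged standalone check of that reordering (requires scipy):

import numpy as np
from pyquaternion import Quaternion
from scipy.spatial.transform import Rotation as R

Rm = R.from_euler('xyz', [0.1, 0.2, 0.3]).as_matrix()
x, y, z, w = R.from_matrix(Rm).as_quat()
print(np.allclose(Quaternion([w, x, y, z]).rotation_matrix, Rm))  # True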
Example #21
dtheda_xh = phierr * d2r
dtheda_yh = thetaerr * d2r
dtheda_zh = psierr * d2r
# initial gyro bias estimates
bgx_h = 0
bgy_h = 0
bgz_h = 0
# initial quaternion
dq11 = -dtheda_xh / 2
dq21 = -dtheda_yh / 2
dq31 = -dtheda_zh / 2
q2 = -dq11
q3 = -dq21
q4 = -dq31
q1 = np.sqrt(1 - np.square(q2) - np.square(q3) - np.square(q4))
dQerr = Quaternion(q1, q2, q3, q4)
Q_E_B = Quaternion(1, 0, 0, 0)
QE_B_m = dQerr.normalised * Q_E_B.normalised
# initial state matrix
bgx0 = gyro_bias_flag * 0.05 * d2r
bgy0 = gyro_bias_flag * (-0.05) * d2r
bgz0 = gyro_bias_flag * 0.05 * d2r

s6_xz_h = np.zeros([6, 1])
s6_xz_h[0, 0] = phierr * d2r
s6_xz_h[1, 0] = thetaerr * d2r
s6_xz_h[2, 0] = psierr * d2r
s6_xz_h[3, 0] = bgx0
s6_xz_h[4, 0] = bgy0
s6_xz_h[5, 0] = bgz0
# initial P matrix
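Example #21 is cut off at the P-matrix initialisation. The earlier lines build a first-order small-angle error quaternion whose vector part is half the attitude error angles and whose scalar part is recovered from the unit-norm constraint, then compose it with the reference attitude. A quick hedged check of that construction with made-up error angles:

import numpy as np
from pyquaternion import Quaternion

dtheta = np.array([0.01, -0.02, 0.015])     # small attitude errors [rad], illustrative
vec = dtheta / 2                            # vector part, as q2..q4 above
w = np.sqrt(1.0 - np.sum(vec ** 2))         # scalar part from the unit-norm constraint
dq = Quaternion(w, *vec)
print((dq.normalised * Quaternion(1, 0, 0, 0).normalised).norm)  # ~1.0, as for QE_B_m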
Example #22
def process_scene(sample_token, processed_sample_tokens, env, nusc, helper,
                  data_path, data_class, half_dt, dynamic):
    data = pd.DataFrame(columns=[
        'frame_id', 'type', 'node_id', 'robot', 'x', 'y', 'z', 'length',
        'width', 'height', 'heading'
    ])

    samples = aggregate_samples(nusc,
                                start=sample_token,
                                data_class=data_class)

    attribute_dict = defaultdict(set)

    frame_id = 0
    for sample in samples:
        annotation_tokens = sample['anns']
        for annotation_token in annotation_tokens:
            annotation = nusc.get('sample_annotation', annotation_token)
            category = annotation['category_name']
            if len(annotation['attribute_tokens']):
                attribute = nusc.get('attribute',
                                     annotation['attribute_tokens'][0])['name']
            else:
                if 'vehicle' in category:
                    attribute = ''
                else:
                    continue

            if 'pedestrian' in category and 'stroller' not in category and 'wheelchair' not in category:
                our_category = env.NodeType.PEDESTRIAN
            elif 'vehicle' in category and 'bicycle' not in category and 'motorcycle' not in category:  # and 'parked' not in attribute:
                our_category = env.NodeType.VEHICLE
            else:
                continue

            attribute_dict[annotation['instance_token']].add(attribute)

            data_point = pd.Series({
                'frame_id':
                frame_id,
                'type':
                our_category,
                'node_id':
                annotation['instance_token'],
                'robot':
                False,
                'x':
                annotation['translation'][0],
                'y':
                annotation['translation'][1],
                'z':
                annotation['translation'][2],
                'length':
                annotation['size'][0],
                'width':
                annotation['size'][1],
                'height':
                annotation['size'][2],
                'heading':
                Quaternion(annotation['rotation']).yaw_pitch_roll[0]
            })
            data = data.append(data_point, ignore_index=True)

        # Ego Vehicle
        our_category = env.NodeType.VEHICLE
        sample_data = nusc.get('sample_data', sample['data']['CAM_FRONT'])
        annotation = nusc.get('ego_pose', sample_data['ego_pose_token'])
        data_point = pd.Series({
            'frame_id':
            frame_id,
            'type':
            our_category,
            'node_id':
            'ego',
            'robot':
            True,
            'x':
            annotation['translation'][0],
            'y':
            annotation['translation'][1],
            'z':
            annotation['translation'][2],
            'length':
            4,
            'width':
            1.7,
            'height':
            1.5,
            'heading':
            Quaternion(annotation['rotation']).yaw_pitch_roll[0],
            'orientation':
            None
        })
        data = data.append(data_point, ignore_index=True)

        processed_sample_tokens.add(sample['token'])
        frame_id += 1

    if len(data.index) == 0:
        return None

    data.sort_values('frame_id', inplace=True)
    max_timesteps = data['frame_id'].max()

    x_min = np.round(data['x'].min() - 50)
    x_max = np.round(data['x'].max() + 50)
    y_min = np.round(data['y'].min() - 50)
    y_max = np.round(data['y'].max() + 50)

    data['x'] = data['x'] - x_min
    data['y'] = data['y'] - y_min

    scene = Scene(timesteps=max_timesteps + 1,
                  dt=dt,
                  name=sample_token,
                  aug_func=augment,
                  sample_tokens=[sample['token'] for sample in samples],
                  x_min=x_min,
                  y_min=y_min)

    # Generate Maps
    map_name = helper.get_map_name_from_sample_token(sample_token)
    nusc_map = NuScenesMap(dataroot=data_path, map_name=map_name)

    type_map = dict()
    x_size = x_max - x_min
    y_size = y_max - y_min
    patch_box = (x_min + 0.5 * (x_max - x_min), y_min + 0.5 * (y_max - y_min),
                 y_size, x_size)
    patch_angle = 0  # Default orientation where North is up
    canvas_size = (np.round(3 * y_size).astype(int),
                   np.round(3 * x_size).astype(int))
    homography = np.array([[3., 0., 0.], [0., 3., 0.], [0., 0., 3.]])
    layer_names = [
        'lane', 'road_segment', 'drivable_area', 'road_divider',
        'lane_divider', 'stop_line', 'ped_crossing', 'stop_line',
        'ped_crossing', 'walkway'
    ]
    map_mask = (nusc_map.get_map_mask(patch_box, patch_angle, layer_names,
                                      canvas_size) * 255.0).astype(np.uint8)
    map_mask = np.swapaxes(map_mask, 1, 2)  # x axis comes first
    # PEDESTRIANS
    map_mask_pedestrian = np.stack(
        (map_mask[9], map_mask[8], np.max(map_mask[:3], axis=0)), axis=0)
    type_map['PEDESTRIAN'] = GeometricMap(data=map_mask_pedestrian,
                                          homography=homography,
                                          description=', '.join(layer_names))
    # VEHICLES
    map_mask_vehicle = np.stack(
        (np.max(map_mask[:3], axis=0), map_mask[3], map_mask[4]), axis=0)
    type_map['VEHICLE'] = GeometricMap(data=map_mask_vehicle,
                                       homography=homography,
                                       description=', '.join(layer_names))

    map_mask_plot = np.stack(
        ((np.max(map_mask[:3], axis=0) -
          (map_mask[3] + 0.5 * map_mask[4]).clip(max=255)).clip(min=0).astype(
              np.uint8), map_mask[8], map_mask[9]),
        axis=0)
    type_map['VISUALIZATION'] = GeometricMap(
        data=map_mask_plot,
        homography=homography,
        description=', '.join(layer_names))

    scene.map = type_map
    del map_mask
    del map_mask_pedestrian
    del map_mask_vehicle
    del map_mask_plot

    for node_id in pd.unique(data['node_id']):
        node_frequency_multiplier = 1
        node_df = data[data['node_id'] == node_id]

        if dynamic:
            if 'vehicle.parked' in attribute_dict[
                    node_id] or 'vehicle.stopped' in attribute_dict[node_id]:
                continue

        elif len(attribute_dict[node_id]
                 ) == 1 and 'vehicle.parked' in attribute_dict[node_id]:
            # Catching instances of vehicles that were parked and then moving, but not allowing
            # only parked vehicles through.
            continue

        if node_df['x'].shape[0] < 2:
            continue

        if not np.all(np.diff(node_df['frame_id']) == 1):
            min_index = node_df['frame_id'].min()
            max_index = node_df['frame_id'].max()
            node_df = node_df.set_index('frame_id').reindex(
                range(min_index, max_index + 1)).interpolate().reset_index()
            node_df['type'] = node_df['type'].mode()[0]
            node_df['node_id'] = node_id
            node_df['robot'] = False
            # print('Occlusion')
            # continue  # TODO Make better

        node_values = node_df[['x', 'y']].values
        x = node_values[:, 0]
        y = node_values[:, 1]
        heading = node_df['heading'].values
        if node_df.iloc[0][
                'type'] == env.NodeType.VEHICLE and not node_id == 'ego':
            # Kalman filter Agent
            vx = derivative_of(x, scene.dt)
            vy = derivative_of(y, scene.dt)
            velocity = np.linalg.norm(np.stack((vx, vy), axis=-1), axis=-1)

            filter_veh = NonlinearKinematicBicycle(dt=scene.dt,
                                                   sMeasurement=1.0)
            P_matrix = None
            for i in range(len(x)):
                if i == 0:  # initalize KF
                    # initial P_matrix
                    P_matrix = np.identity(4)
                elif i < len(x):
                    # assign new est values
                    x[i] = x_vec_est_new[0][0]
                    y[i] = x_vec_est_new[1][0]
                    heading[i] = x_vec_est_new[2][0]
                    velocity[i] = x_vec_est_new[3][0]

                if i < len(x) - 1:  # no action on last data
                    # filtering
                    x_vec_est = np.array([[x[i]], [y[i]], [heading[i]],
                                          [velocity[i]]])
                    z_new = np.array([[x[i + 1]], [y[i + 1]], [heading[i + 1]],
                                      [velocity[i + 1]]])
                    x_vec_est_new, P_matrix_new = filter_veh.predict_and_update(
                        x_vec_est=x_vec_est,
                        u_vec=np.array([[0.], [0.]]),
                        P_matrix=P_matrix,
                        z_new=z_new)
                    P_matrix = P_matrix_new

            curvature, pl, _ = trajectory_curvature(np.stack((x, y), axis=-1))
            if pl < 1.0:  # vehicle is "not" moving
                x = x[0].repeat(max_timesteps + 1)
                y = y[0].repeat(max_timesteps + 1)
                heading = heading[0].repeat(max_timesteps + 1)
            global total
            global curv_0_2
            global curv_0_1
            total += 1
            if pl > 1.0:
                if curvature > .2:
                    curv_0_2 += 1
                    node_frequency_multiplier = 3 * int(
                        np.floor(total / curv_0_2))
                elif curvature > .1:
                    curv_0_1 += 1
                    node_frequency_multiplier = 3 * int(
                        np.floor(total / curv_0_1))

        if half_dt:
            t_old = np.linspace(0, 1, x.shape[0])
            t_new = np.linspace(0, 1, 2 * x.shape[0])
            x = np.interp(t_new, t_old, x)
            y = np.interp(t_new, t_old, y)
            heading = np.interp(
                t_new, t_old, heading + np.pi, period=2 * np.pi) - np.pi

            vx = derivative_of(x, scene.dt / 2)
            vy = derivative_of(y, scene.dt / 2)
            ax = derivative_of(vx, scene.dt / 2)
            ay = derivative_of(vy, scene.dt / 2)
        else:
            vx = derivative_of(x, scene.dt)
            vy = derivative_of(y, scene.dt)
            ax = derivative_of(vx, scene.dt)
            ay = derivative_of(vy, scene.dt)

        if node_df.iloc[0]['type'] == env.NodeType.VEHICLE:
            v = np.stack((vx, vy), axis=-1)
            v_norm = np.linalg.norm(np.stack((vx, vy), axis=-1),
                                    axis=-1,
                                    keepdims=True)
            heading_v = np.divide(v,
                                  v_norm,
                                  out=np.zeros_like(v),
                                  where=(v_norm > 1.))
            heading_x = heading_v[:, 0]
            heading_y = heading_v[:, 1]

            a_norm = np.divide(ax * vx + ay * vy,
                               v_norm[..., 0],
                               out=np.zeros_like(ax),
                               where=(v_norm[..., 0] > 1.))

            if half_dt:
                d_heading = derivative_of(heading, scene.dt / 2, radian=True)
            else:
                d_heading = derivative_of(heading, scene.dt, radian=True)

            data_dict = {
                ('position', 'x'):
                x,
                ('position', 'y'):
                y,
                ('velocity', 'x'):
                vx,
                ('velocity', 'y'):
                vy,
                ('velocity', 'norm'):
                np.linalg.norm(np.stack((vx, vy), axis=-1), axis=-1),
                ('acceleration', 'x'):
                ax,
                ('acceleration', 'y'):
                ay,
                ('acceleration', 'norm'):
                a_norm,
                ('heading', 'x'):
                heading_x,
                ('heading', 'y'):
                heading_y,
                ('heading', '°'):
                heading,
                ('heading', 'd°'):
                d_heading
            }
            node_data = pd.DataFrame(data_dict, columns=data_columns_vehicle)
        else:
            data_dict = {
                ('position', 'x'): x,
                ('position', 'y'): y,
                ('velocity', 'x'): vx,
                ('velocity', 'y'): vy,
                ('acceleration', 'x'): ax,
                ('acceleration', 'y'): ay
            }
            node_data = pd.DataFrame(data_dict,
                                     columns=data_columns_pedestrian)

        node = Node(node_type=node_df.iloc[0]['type'],
                    node_id=0 if node_id == 'ego' else abs(hash(node_id)),
                    data=node_data,
                    frequency_multiplier=node_frequency_multiplier,
                    description=node_id)

        node.first_timestep = node_df['frame_id'].iloc[0]
        if half_dt:
            node.first_timestep *= 2

        if node_df.iloc[0]['robot'] == True:
            node.is_robot = True
            scene.robot = node

        scene.nodes.append(node)

    if half_dt:
        # We are interpolating to make the overall dt half of what it was.
        scene.dt /= 2.0
        scene.timesteps *= 2

    return scene
Example #23
    def pc_to_sensor(pc_orig,
                     cs_record,
                     global_coordinates=False,
                     ego_pose=None):
        """
        Transform the input point cloud from global/vehicle coordinates to
        sensor coordinates
        """
        assert pc_orig is not None, 'Pointcloud cannot be None. Nothing to translate'
        assert global_coordinates is False or (global_coordinates and ego_pose is not None), \
            'when in global coordinates, ego_pose is required'
        ## Copy is required to prevent the original point cloud from being modified
        pc = copy.deepcopy(pc_orig)

        if isinstance(pc, PointCloud):
            if global_coordinates:
                ## Transform from global to vehicle
                pc.translate(-np.array(ego_pose['translation']))
                pc.rotate(Quaternion(ego_pose['rotation']).rotation_matrix.T)

            ## Transform from vehicle to sensor
            pc.translate(-np.array(cs_record['translation']))
            pc.rotate(Quaternion(cs_record['rotation']).rotation_matrix.T)

        elif isinstance(pc, np.ndarray):
            if global_coordinates:
                ## Transform from global to vehicle
                for i in range(3):
                    pc[i, :] = pc[i, :] - np.array(ego_pose['translation'])[i]
                pc[:3, :] = np.dot(
                    Quaternion(ego_pose['rotation']).rotation_matrix.T,
                    pc[:3, :])
            ## Transform from vehicle to sensor
            for i in range(3):
                pc[i, :] = pc[i, :] - np.array(cs_record['translation'])[i]
            pc[:3, :] = np.dot(
                Quaternion(cs_record['rotation']).rotation_matrix.T, pc[:3, :])

        elif isinstance(pc, list):
            if len(pc) == 0:
                return []
            if isinstance(pc[0], Box):
                new_list = []
                for box in pc:
                    if global_coordinates:
                        ## Transform from global to vehicle
                        box.translate(-np.array(ego_pose['translation']))
                        box.rotate(Quaternion(ego_pose['rotation']).inverse)

                    ## Transform from vehicle to sensor
                    box.translate(-np.array(cs_record['translation']))
                    box.rotate(Quaternion(cs_record['rotation']).inverse)
                    new_list.append(box)
                return new_list

        elif isinstance(pc, Box):
            if global_coordinates:
                ## Transform from global to vehicle
                pc.translate(-np.array(ego_pose['translation']))
                pc.rotate(Quaternion(ego_pose['rotation']).inverse)

            ## Transform from vehicle to sensor
            pc.translate(-np.array(cs_record['translation']))
            pc.rotate(Quaternion(cs_record['rotation']).inverse)
        else:
            raise TypeError(
                'cannot transform object with type {}'.format(type(pc)))

        return pc
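
A hedged usage sketch for pc_to_sensor, assuming it is exposed as a static helper and given nuScenes-style calibrated_sensor and ego_pose records (dicts with 'translation' and 'rotation' fields, as used above); the 4xN array and all numeric values are placeholders:

import numpy as np

cs_record = {'translation': [1.70, 0.0, 1.51],
             'rotation': [0.7071068, 0.0, 0.0, -0.7071068]}   # w, x, y, z
ego_pose = {'translation': [411.3, 1180.9, 0.0],
            'rotation': [1.0, 0.0, 0.0, 0.0]}

points_global = np.random.rand(4, 100)   # rows: x, y, z, intensity
points_sensor = pc_to_sensor(points_global, cs_record,
                             global_coordinates=True, ego_pose=ego_pose)
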
Example #24
0
 def computeEndEffQuat(self):
     quat = Quaternion(matrix=self.Hmat[5])
     self.endeffQuat = quat.elements
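
computeEndEffQuat leans on the Quaternion matrix constructor (pyquaternion-style, as in the other examples here); a minimal stand-alone sketch of that conversion for a 90-degree rotation about z, with Hmat and the surrounding class left out:

import numpy as np
from pyquaternion import Quaternion

# 90-degree rotation about the z-axis as a 3x3 rotation matrix.
R = np.array([[0., -1., 0.],
              [1.,  0., 0.],
              [0.,  0., 1.]])

quat = Quaternion(matrix=R)   # also accepts a 4x4 homogeneous transform
print(quat.elements)          # [w, x, y, z] ≈ [0.7071, 0., 0., 0.7071]
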
Example #25
0
def main():
    parser = getparser()
    args = parser.parse_args()
    f_index = args.frame_index
    if os.path.splitext(f_index)[1] == '.csv':
        frame_index = pd.read_csv(f_index)
    else:
        frame_index = pd.read_pickle(f_index)
    logging.info("sample fn {}".format(
        glob.glob(
            os.path.join(args.camera_folder,
                         '*{}*.tsai'.format(frame_index['name'].values[0])))))
    cam_list = [
        glob.glob(
            os.path.join(args.camera_folder,
                         '*{}*.tsai'.format(os.path.basename(frame))))[0]
        for frame in frame_index['name'].values
    ]
    if not args.gcp_folder:
        gcp_folder = args.camera_folder
    else:
        gcp_folder = args.gcp_folder
    if not os.path.exists(args.outfol):
        os.makedirs(args.outfol)
    gcp_list = [
        glob.glob(
            os.path.join(gcp_folder,
                         '*{}*.gcp'.format(os.path.basename(frame))))[0]
        for frame in frame_index['name'].values
    ]
    CX, CY, CZ = [
        frame_index.x_sat_ecef_km.values * 1000,
        frame_index.y_sat_ecef_km.values * 1000,
        frame_index.z_sat_ecef_km.values * 1000
    ]
    rotation_matrices = [
        Quaternion(matrix=(
            np.reshape(asp.read_tsai_dict(x)['rotation_matrix'], (3, 3))))
        for x in cam_list
    ]
    fu, fv = asp.read_tsai_dict(cam_list[0])['focal_length']
    cu, cv = asp.read_tsai_dict(cam_list[0])['optical_center']
    pitch = asp.read_tsai_dict(cam_list[0])['pitch']
    q1 = [x[0] for x in rotation_matrices]
    q2 = [x[1] for x in rotation_matrices]
    q3 = [x[2] for x in rotation_matrices]
    q4 = [x[3] for x in rotation_matrices]
    for idx, row in frame_index.iterrows():
        identifier = os.path.basename(row['name'])
        gcp = pd.read_csv(glob.glob(
            os.path.join(gcp_folder, '*{}*.gcp'.format(identifier)))[0],
                          header=None,
                          sep=' ')
        im_x, im_y = [gcp[8].values, gcp[9].values]
        lon, lat, ht = [gcp[2].values, gcp[1].values, gcp[3].values]
        X, Y, Z = geolib.ll2ecef(lon, lat, ht)
        CX_idx, CY_idx, CZ_idx = [CX[idx], CY[idx], CZ[idx]]
        q1_idx, q2_idx, q3_idx, q4_idx = [q1[idx], q2[idx], q3[idx], q4[idx]]
        #tpl_int = (q1_idx,q2_idx,q3_idx,q4_idx)
        print(idx)
        Q1, Q2, Q3, Q4 = optimiser_quaternion(q1_idx, q2_idx, q3_idx, q4_idx,
                                              CX_idx, CY_idx, CZ_idx, cu, cv,
                                              fu, fv, pitch, X, Y, Z, im_x,
                                              im_y)
        rot_mat = Quaternion([Q1, Q2, Q3, Q4]).rotation_matrix
        out_cam = os.path.join(args.outfol, '{}_scipy.tsai'.format(identifier))
        asp.make_tsai(out_cam, cu, cv, fu, fv, rot_mat,
                      [CX_idx, CY_idx, CZ_idx], pitch)
    logging.info("Successfully created optimised camera models at {}".format(
        args.outfol))
Example #26
0
 def quatError2AngularVel(self, quatError_array):
     quatError = Quaternion(numpy.array(quatError_array))
     theta = quatError.angle
     angular_vel = theta * quatError.elements[1:4] / quatError.norm
     return angular_vel
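
The helper above maps a quaternion error to an angular-velocity-like vector from its raw elements; for comparison, pyquaternion also exposes the axis-angle decomposition directly. A hedged alternative sketch (using .axis/.angle rather than the element-wise formula above):

from pyquaternion import Quaternion

q_err = Quaternion(axis=[0., 0., 1.], angle=0.1)   # small error rotation about z
rot_vec = q_err.angle * q_err.axis                 # axis-angle "rotation vector"
print(rot_vec)                                     # ≈ [0., 0., 0.1]
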
Example #27
0
def export_scene_pointcloud(nusc: NuScenes,
                            out_path: str,
                            scene_token: str,
                            channel: str = 'LIDAR_TOP',
                            min_dist: float = 3.0,
                            max_dist: float = 30.0,
                            verbose: bool = True):
    """
    Export fused point clouds of a scene to a Wavefront OBJ file.
    This point-cloud can be viewed in your favorite 3D rendering tool, e.g. Meshlab or Maya.
    :param nusc: NuScenes instance.
    :param out_path: Output path to write the point-cloud to.
    :param scene_token: Unique identifier of scene to render.
    :param channel: Channel to render.
    :param min_dist: Minimum distance to ego vehicle below which points are dropped.
    :param max_dist: Maximum distance to ego vehicle above which points are dropped.
    :param verbose: Whether to print messages to stdout.
    """

    # Check inputs.
    valid_channels = [
        'LIDAR_TOP', 'RADAR_FRONT', 'RADAR_FRONT_RIGHT', 'RADAR_FRONT_LEFT',
        'RADAR_BACK_LEFT', 'RADAR_BACK_RIGHT'
    ]
    camera_channels = [
        'CAM_FRONT_LEFT', 'CAM_FRONT', 'CAM_FRONT_RIGHT', 'CAM_BACK_LEFT',
        'CAM_BACK', 'CAM_BACK_RIGHT'
    ]
    assert channel in valid_channels, 'Input channel {} not valid.'.format(
        channel)

    # Get records from DB.
    scene_rec = nusc.get('scene', scene_token)
    start_sample_rec = nusc.get('sample', scene_rec['first_sample_token'])
    sd_rec = nusc.get('sample_data', start_sample_rec['data'][channel])

    # Make list of frames
    cur_sd_rec = sd_rec
    sd_tokens = []
    while cur_sd_rec['next'] != '':
        cur_sd_rec = nusc.get('sample_data', cur_sd_rec['next'])
        sd_tokens.append(cur_sd_rec['token'])

    # Write point-cloud.
    with open(out_path, 'w') as f:
        f.write("OBJ File:\n")

        for sd_token in tqdm(sd_tokens):
            if verbose:
                print('Processing {}'.format(sd_rec['filename']))
            sc_rec = nusc.get('sample_data', sd_token)
            sample_rec = nusc.get('sample', sc_rec['sample_token'])
            lidar_token = sd_rec['token']
            lidar_rec = nusc.get('sample_data', lidar_token)
            pc = LidarPointCloud.from_file(
                osp.join(nusc.dataroot, lidar_rec['filename']))

            # Get point cloud colors.
            coloring = np.ones((3, pc.points.shape[1])) * -1
            for channel in camera_channels:
                camera_token = sample_rec['data'][channel]
                cam_coloring, cam_mask = pointcloud_color_from_image(
                    nusc, lidar_token, camera_token)
                coloring[:, cam_mask] = cam_coloring

            # Points live in the sensor frame, so they need to be transformed to the global frame.
            # First step: transform the point cloud to the ego vehicle frame for the timestamp of the sweep.
            cs_record = nusc.get('calibrated_sensor',
                                 lidar_rec['calibrated_sensor_token'])
            pc.rotate(Quaternion(cs_record['rotation']).rotation_matrix)
            pc.translate(np.array(cs_record['translation']))

            # Optional: filter by distance to remove the ego vehicle.
            dists_origin = np.sqrt(np.sum(pc.points[:3, :]**2, axis=0))
            keep = np.logical_and(min_dist <= dists_origin,
                                  dists_origin <= max_dist)
            pc.points = pc.points[:, keep]
            coloring = coloring[:, keep]
            if verbose:
                print('Distance filter: Keeping %d of %d points...' %
                      (keep.sum(), len(keep)))

            # Second step: transform to the global frame.
            poserecord = nusc.get('ego_pose', lidar_rec['ego_pose_token'])
            pc.rotate(Quaternion(poserecord['rotation']).rotation_matrix)
            pc.translate(np.array(poserecord['translation']))

            # Write points to file
            for (v, c) in zip(pc.points.transpose(), coloring.transpose()):
                if (c == -1).any():
                    # Ignore points without a color.
                    pass
                else:
                    f.write(
                        "v {v[0]:.8f} {v[1]:.8f} {v[2]:.8f} {c[0]:.4f} {c[1]:.4f} {c[2]:.4f}\n"
                        .format(v=v, c=c / 255.0))

            if not sd_rec['next'] == "":
                sd_rec = nusc.get('sample_data', sd_rec['next'])
Example #28
0
 def normalizeQuat(self, quat_array):
     quat = Quaternion(numpy.array(quat_array))
     return quat.normalised.elements
Example #29
0
def main():
    imu_params = scipy.io.loadmat('./IMUParams.mat')
    acc_scale = imu_params['IMUParams'][0]
    acc_bias = imu_params['IMUParams'][1]
    Parser = argparse.ArgumentParser()

    Parser.add_argument('--imu_data',
                        default=' ',
                        help="The path of data file being used")
    Parser.add_argument('--vicon_data',
                        default=' ',
                        help="The path of vicon data file being used")
    Args = Parser.parse_args()

    imu_data = Args.imu_data
    vicon_data = Args.vicon_data

    data = scipy.io.loadmat(imu_data)
    # print data

    time_stamp = data['ts']
    # print "ts_imu, ", time_stamp[0]

    if vicon_data != ' ':
        imu_data_vicon = scipy.io.loadmat(vicon_data)
        ts_vicon = imu_data_vicon['ts']
        # print "ts_vicon ", ts_vicon[0][0]-time_stamp[0][0]

    acc = data['vals'][:3, :]
    acc = np.float32(acc)
    gyro = np.float32(data['vals'][-3:, :])

    # print gyro.shape
    # input('a')
    align_num = 0
    align_minus = 160
    acc_all, gyro_all = process_data(acc, acc_scale, acc_bias, gyro)
    q_est = Quaternion(1, 0, 0, 0)
    q_init = Quaternion(1, 0, 0, 0)
    angles = np.zeros((align_num, 3))
    acc_pose = np.array([0, 0, 0])
    gyro_pose = np.array([0, 0, 0])
    mk = madgwick(q=q_init)
    thresh = acc_all.shape[1] - 1
    i = 0

    while i <= thresh:
        acc = np.reshape(acc_all[:, i], (3, 1))
        gyro = np.reshape(gyro_all[:, i], (3, 1))

        q, acc_int, gyro_int = mk.imu_update(acc, gyro, q_est)

        phi, theta, psi = quat_to_euler(quaternConj(q))
        temp = np.array([phi, theta, psi])
        angles = np.vstack((angles, temp))

        phi, theta, psi = quat_to_euler(quaternConj(acc_int))
        temp = np.array([phi, theta, psi])
        acc_pose = np.vstack((acc_pose, temp))

        phi, theta, psi = quat_to_euler(quaternConj(gyro_int))
        temp = np.array([phi, theta, psi])
        gyro_pose = np.vstack((gyro_pose, temp))

        q_est = Quaternion(q[0], q[1], q[2], q[3])
        i += 1

    angles = angles[1:]

    num = 0
    acc_all = acc_all[num:]
    gyro_all = gyro_all[num:]
    if vicon_data != ' ':
        # Plot vicon data
        rots = imu_data_vicon['rots']
        vicon_data = np.zeros((3, rots.shape[2] + align_minus),
                              dtype=np.float32)
        for ang in range(rots.shape[2]):
            vicon_data[:, ang +
                       align_minus] = (180 / np.pi) * rot2eul(rots[:, :, ang])

        # print(vicon_data.shape)
        t_vicon = np.linspace(1, vicon_data.shape[1], num=vicon_data.shape[1])
    # input('aaaa')
    # print("befpre angle shape = ", angles.shape)

    # app_angles = np.zeros((1,align_num))
    # angles = np.concatenate(app_angles,angles)
    # print("after angle shape = ", angles.shape)
    t = np.linspace(1, int(acc_all.shape[1]), num=int(acc_all.shape[1]))
    t_angles = np.linspace(1, int(angles.shape[0]), num=int(angles.shape[0]))
    fig = plt.figure(1)
    # plt.rcParams['figure.figsize'] = [10,10]
    # if vicon_data is not ' ':
    # 	a1 = plt.subplot(3,4,1)
    # 	plt.plot(t[:thresh],acc_all[0,:thresh],'r-')
    # 	a1.title.set_text('X-accel')

    # 	a2 = plt.subplot(3,4,2)
    # 	plt.plot(t[:thresh],gyro_all[0,:thresh],'r-')
    # 	a2.title.set_text('X-gyro')

    # 	# print t.shape,angles[:,0].shape

    # 	a3 = plt.subplot(3,4,3)
    # 	plt.plot(t_angles,angles[:,0],'r-')
    # 	a3.title.set_text('X-madgwick')

    # 	a10 = plt.subplot(3,4,4)
    # 	plt.plot(t_vicon,vicon_data[0,:],'r-')
    # 	a10.title.set_text('X-Vicon')

    # 	a4 = plt.subplot(3,4,5)
    # 	plt.plot(t[:thresh],acc_all[1,:thresh],'g-')
    # 	a4.title.set_text('Y-accel')

    # 	a5 = plt.subplot(3,4,6)
    # 	plt.plot(t[:thresh],gyro_all[1,:thresh],'g-')
    # 	a5.title.set_text('Y-gyro')

    # 	a6 = plt.subplot(3,4,7)
    # 	plt.plot(t_angles,angles[:,1],'g-')
    # 	a6.title.set_text('Y-madgwick')

    # 	a11 = plt.subplot(3,4,8)
    # 	plt.plot(t_vicon,vicon_data[1,:],'g-')
    # 	a11.title.set_text('Y-Vicon')

    # 	a7 = plt.subplot(3,4,9)
    # 	plt.plot(t[:thresh],acc_all[2,:thresh],'b-')
    # 	a7.title.set_text('Z-accel')

    # 	a8 = plt.subplot(3,4,10)
    # 	plt.plot(t[:thresh],gyro_all[2,:thresh],'b-')
    # 	a8.title.set_text('Z-gyro')

    # 	a9 = plt.subplot(3,4,11)
    # 	plt.plot(t_angles,angles[:,2],'b-')
    # 	a9.title.set_text('Z-madgwick')

    # 	a12 = plt.subplot(3,4,12)
    # 	plt.plot(t_vicon,vicon_data[2,:],'b-')
    # 	a12.title.set_text('Z-Vicon')

    # else:
    # 	a1 = plt.subplot(3,3,1)
    # 	plt.plot(t[:thresh],acc_all[0,:thresh],'r-')
    # 	a1.title.set_text('X-accel')

    # 	a2 = plt.subplot(3,3,2)
    # 	plt.plot(t[:thresh],gyro_all[0,:thresh],'r-')
    # 	a2.title.set_text('X-gyro')

    # 	# print t.shape,angles[:,0].shape

    # 	a3 = plt.subplot(3,3,3)
    # 	plt.plot(t_angles,angles[:,0],'r-')
    # 	a3.title.set_text('X-madgwick')

    # 	a4 = plt.subplot(3,3,4)
    # 	plt.plot(t[:thresh],acc_all[1,:thresh],'g-')
    # 	a4.title.set_text('Y-accel')

    # 	a5 = plt.subplot(3,3,5)
    # 	plt.plot(t[:thresh],gyro_all[1,:thresh],'g-')
    # 	a5.title.set_text('Y-gyro')

    # 	# print t.shape,angles[:,0].shape

    # 	a6 = plt.subplot(3,3,6)
    # 	plt.plot(t_angles,angles[:,1],'g-')
    # 	a6.title.set_text('Y-madgwick')

    # 	a7 = plt.subplot(3,3,7)
    # 	plt.plot(t[:thresh],acc_all[2,:thresh],'b-')
    # 	a7.title.set_text('Z-accel')

    # 	a8 = plt.subplot(3,3,8)
    # 	plt.plot(t[:thresh],gyro_all[2,:thresh],'b-')
    # 	a8.title.set_text('Z-gyro')

    # 	# print t.shape,angles[:,0].shape

    # 	a9 = plt.subplot(3,3,9)
    # 	plt.plot(t_angles,angles[:,2],'b-')
    # 	a9.title.set_text('Z-madgwick')

    a1 = plt.subplot(3, 1, 1)
    line1, = a1.plot(t_angles, angles[:, 0], 'r-')
    line1.set_label('Madgwick')
    # vicon_data was reassigned to the Euler-angle array above, so check the original argument.
    if Args.vicon_data != ' ':
        line2, = a1.plot(t_vicon, vicon_data[0, :], '-b')
        line2.set_label('Vicon')
        a1.title.set_text('X-data')
    a1.legend()

    a2 = plt.subplot(3, 1, 2)
    line3, = a2.plot(t_angles, angles[:, 1], 'r-')
    line3.set_label('Madgwick')
    if Args.vicon_data != ' ':
        line4, = a2.plot(t_vicon, vicon_data[1, :], '-b')
        line4.set_label('Vicon')
        a2.title.set_text('Y-data')
    a2.legend()

    a3 = plt.subplot(3, 1, 3)
    line5, = a3.plot(t_angles, angles[:, 2], 'r-')
    line5.set_label('Madgwick')
    if Args.vicon_data != ' ':
        line6, = a3.plot(t_vicon, vicon_data[2, :], '-b')
        line6.set_label('Vicon')
        a3.title.set_text('Z-data')
    a3.legend()
    # print(len(angles))

    plt.show()
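
quaternConj and quat_to_euler above are project-specific helpers; as a rough stand-in (conventions may differ), pyquaternion exposes the conjugate and a yaw/pitch/roll decomposition directly. A hedged sketch:

import numpy as np
from pyquaternion import Quaternion

q = Quaternion(0.9986, 0.0262, 0.0349, 0.0175).normalised
yaw, pitch, roll = q.conjugate.yaw_pitch_roll   # radians
print(np.degrees([roll, pitch, yaw]))
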
Example #30
0
 def test_init_random(self):
     r1 = Quaternion.random()
     r2 = Quaternion.random()
     self.assertAlmostEqual(r1.norm, 1.0, ALMOST_EQUAL_TOLERANCE)
     self.assertIsInstance(r1, Quaternion)