Example #1
    def from_file_multisweep(
            cls,
            nusc,
            sample_rec: Dict,
            chan: str,
            ref_chan: str,
            nsweeps: int = 26,
            min_distance: float = 1.0) -> Tuple[PointCloud, np.ndarray]:
        """
        Return a point cloud that aggregates multiple sweeps.
        As every sweep is in a different coordinate frame, we need to map the coordinates to a single reference frame.
        As every sweep has a different timestamp, we need to account for that in the transformations and timestamps.
        :param nusc: A NuScenes instance.
        :param sample_rec: The current sample.
        :param chan: The radar channel from which we track back n sweeps to aggregate the point cloud.
        :param ref_chan: The reference channel of the current sample_rec that the point clouds are mapped to.
        :param nsweeps: Number of sweeps to aggregate.
        :param min_distance: Distance below which points are discarded.
        :return: (all_pc, all_times). The aggregated point cloud and timestamps.
        """

        # Init
        points = np.zeros((cls.nbr_dims(), 0))
        all_pc = cls(points)
        all_times = np.zeros((1, 0))

        # Get reference pose and timestamp
        ref_sd_token = sample_rec['data'][ref_chan]
        ref_sd_rec = nusc.get('sample_data', ref_sd_token)
        ref_pose_rec = nusc.get('ego_pose', ref_sd_rec['ego_pose_token'])
        ref_cs_rec = nusc.get('calibrated_sensor',
                              ref_sd_rec['calibrated_sensor_token'])
        ref_time = 1e-6 * ref_sd_rec['timestamp']

        # Homogeneous transform from ego car frame to reference frame
        ref_from_car = transform_matrix(ref_cs_rec['translation'],
                                        Quaternion(ref_cs_rec['rotation']),
                                        inverse=True)

        # Homogeneous transformation matrix from the global frame to the reference ego car frame
        car_from_global = transform_matrix(ref_pose_rec['translation'],
                                           Quaternion(
                                               ref_pose_rec['rotation']),
                                           inverse=True)

        # Aggregate current and previous sweeps.
        sample_data_token = sample_rec['data'][chan]
        current_sd_rec = nusc.get('sample_data', sample_data_token)
        for _ in range(nsweeps):
            # Load up the pointcloud.
            current_pc = cls.from_file(
                osp.join(nusc.dataroot, current_sd_rec['filename']))

            # Get past pose.
            current_pose_rec = nusc.get('ego_pose',
                                        current_sd_rec['ego_pose_token'])
            global_from_car = transform_matrix(
                current_pose_rec['translation'],
                Quaternion(current_pose_rec['rotation']),
                inverse=False)

            # Homogeneous transformation matrix from sensor coordinate frame to ego car frame.
            current_cs_rec = nusc.get(
                'calibrated_sensor', current_sd_rec['calibrated_sensor_token'])
            car_from_current = transform_matrix(
                current_cs_rec['translation'],
                Quaternion(current_cs_rec['rotation']),
                inverse=False)

            # Fuse four transformation matrices into one and perform transform.
            trans_matrix = reduce(np.dot, [
                ref_from_car, car_from_global, global_from_car,
                car_from_current
            ])
            current_pc.transform(trans_matrix)

            # Remove close points and add timevector.
            current_pc.remove_close(min_distance)
            time_lag = ref_time - 1e-6 * current_sd_rec[
                'timestamp']  # positive difference
            times = time_lag * np.ones((1, current_pc.nbr_points()))
            all_times = np.hstack((all_times, times))

            # Merge with key pc.
            all_pc.points = np.hstack((all_pc.points, current_pc.points))

            # Abort if there are no previous sweeps.
            if current_sd_rec['prev'] == '':
                break
            else:
                current_sd_rec = nusc.get('sample_data',
                                          current_sd_rec['prev'])

        return all_pc, all_times
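
A minimal usage sketch for the method above, assuming the standard nuscenes-devkit (the dataroot path, channel names and sample index are placeholders; in the devkit this classmethod is inherited by RadarPointCloud and LidarPointCloud):

# Usage sketch (assumptions: nuscenes-devkit installed, placeholder dataroot).
from nuscenes.nuscenes import NuScenes
from nuscenes.utils.data_classes import RadarPointCloud

nusc = NuScenes(version='v1.0-mini', dataroot='/data/sets/nuscenes', verbose=False)
sample_rec = nusc.sample[0]

# Aggregate radar sweeps from RADAR_FRONT, expressed in the LIDAR_TOP frame of this sample.
all_pc, all_times = RadarPointCloud.from_file_multisweep(
    nusc, sample_rec, chan='RADAR_FRONT', ref_chan='LIDAR_TOP', nsweeps=5)
print(all_pc.points.shape, all_times.shape)  # e.g. (18, N) and (1, N) for radar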
Example #2
 def test_init_default(self):
     q = Quaternion()
     self.assertIsInstance(q, Quaternion)
     self.assertEqual(q, Quaternion(1., 0., 0., 0.))
Example #3
 def test_output_of_elements(self):
     r = randomElements()
     q = Quaternion(*r)
     self.assertEqual(tuple(q.elements), r)
Example #4
 def test_float(self):
     a, b, c, d = randomElements()
     q = Quaternion(a, b, c, d)
     self.assertEqual(float(q), a)
Example #5
    def test_equality(self):
        r = randomElements()
        self.assertEqual(Quaternion(*r), Quaternion(*r))
        q = Quaternion(*r)
        self.assertEqual(q, q)
        # Equality should work with other types, if they can be interpreted as quaternions
        self.assertEqual(q, r)
        self.assertEqual(Quaternion(1., 0., 0., 0.), 1.0)
        self.assertEqual(Quaternion(1., 0., 0., 0.), "1.0")
        self.assertNotEqual(q, q + Quaternion(0.0, 0.002, 0.0, 0.0))

        # Equality should also cover small rounding and floating point errors
        self.assertEqual(Quaternion(1., 0., 0., 0.), Quaternion(1.0 - 1e-14, 0., 0., 0.))
        self.assertNotEqual(Quaternion(1., 0., 0., 0.), Quaternion(1.0 - 1e-12, 0., 0., 0.))
        self.assertNotEqual(Quaternion(160., 0., 0., 0.), Quaternion(160.0 - 1e-10, 0., 0., 0.))
        self.assertNotEqual(Quaternion(1600., 0., 0., 0.), Quaternion(1600.0 - 1e-9, 0., 0., 0.))

        with self.assertRaises(TypeError):
            q == None
        with self.assertRaises(ValueError):
            q == 's'
Example #6
    def test_init_from_explicit_rotation_params(self):
        vx = random()
        vy = random()
        vz = random()
        theta = random() * 2.0 * pi

        v1 = (vx, vy, vz) # tuple format
        v2 = [vx, vy, vz] # list format
        v3 = np.array(v2) # array format

        q1 = Quaternion(axis=v1, angle=theta)
        q2 = Quaternion(axis=v2, radians=theta)
        q3 = Quaternion(axis=v3, degrees=theta / pi * 180)

        # normalise v to a unit vector
        v3 = v3 / np.linalg.norm(v3)

        q4 = Quaternion(angle=theta, axis=v3)

        # Construct the true quaternion
        t = theta / 2.0

        a = cos(t)
        b = v3[0] * sin(t)
        c = v3[1] * sin(t)
        d = v3[2] * sin(t)

        truth = Quaternion(a, b, c, d)

        self.assertEqual(q1, truth)
        self.assertEqual(q2, truth)
        self.assertEqual(q3, truth)
        self.assertEqual(q4, truth)
        self.assertEqual(Quaternion(axis=v3, angle=0), Quaternion())
        self.assertEqual(Quaternion(axis=v3, radians=0), Quaternion())
        self.assertEqual(Quaternion(axis=v3, degrees=0), Quaternion())
        self.assertEqual(Quaternion(axis=v3), Quaternion())

        # Result should be a versor (Unit Quaternion)
        self.assertAlmostEqual(q1.norm, 1.0, ALMOST_EQUAL_TOLERANCE)
        self.assertAlmostEqual(q2.norm, 1.0, ALMOST_EQUAL_TOLERANCE)
        self.assertAlmostEqual(q3.norm, 1.0, ALMOST_EQUAL_TOLERANCE)
        self.assertAlmostEqual(q4.norm, 1.0, ALMOST_EQUAL_TOLERANCE)


        with self.assertRaises(ValueError):
            q = Quaternion(angle=theta)
        with self.assertRaises(ValueError):
            q = Quaternion(axis=[b, c], angle=theta)
        with self.assertRaises(ValueError):
            q = Quaternion(axis=(b, c, d, d), angle=theta)
        with self.assertRaises(ZeroDivisionError):
            q = Quaternion(axis=[0., 0., 0.], angle=theta)
Example #7
 def test_repr(self):
     a, b, c, d = np.array(randomElements()) # Numpy seems to increase precision of floats (C magic?)
     q = Quaternion(a, b, c, d)
     string = "Quaternion(" + repr(a) + ", " + repr(b) + ", " + repr(c) + ", " + repr(d) + ")"
     self.assertEqual(string, repr(q))
Example #8
def QP_abbirb6640(q, v, check_flag=False, collision_checker=None):
    # Init the joystick

    # Initialize Robot Parameters
    ex, ey, ez, n, P, q_ver, H, ttype, dq_bounds = robotParams()
    # joint limits
    lower_limit = np.transpose(
        np.array([
            -360 * np.pi / 180, -360 * np.pi / 180, -2 * np.pi,
            -300 * np.pi / 180, -180 * np.pi / 180, -2 * np.pi
        ]))
    upper_limit = np.transpose(
        np.array([
            360 * np.pi / 180, 360 * np.pi / 180, 360 * np.pi / 180,
            300 * np.pi / 180, 180 * np.pi / 180, 2 * np.pi
        ]))

    # Initialize Control Parameters
    # initial joint angles

    pos_v = np.zeros((3, 1))
    ang_v = np.array([1, 0, 0, 0])
    dq = np.zeros((int(n), 1))

    # inequality constraints
    # we use h to calculate the sigma, so h is a vector of shape (17, 1)
    # h consists of 6 lower-bound, 6 upper-bound and up to 5 distance constraints
    h = np.zeros((17, 1))
    sigma = np.zeros((17, 1))
    dhdq = np.vstack((np.hstack((np.eye(6), np.zeros((6, 1)), np.zeros(
        (6, 1)))), np.hstack((-np.eye(6), np.zeros((6, 1)), np.zeros(
            (6, 1)))), np.zeros((5, 8))))

    # velocities
    w_t = np.zeros((3, 1))
    v_t = np.zeros((3, 1))

    # keyboard controls
    # define position and angle step
    inc_pos_v = 0.01  # m/s
    inc_ang_v = 0.5 * np.pi / 180  # rad/s

    # optimization params
    er = 0.05
    ep = 0.05
    epsilon = 0  # legacy param for newton iters

    # parameters for inequality constraints
    c = 0.5
    eta = 0.1
    epsilon_in = 0.15
    E = 0.005

    pp, RR = fwdkin_alljoints(q, ttype, H, P, n)
    orien_tmp = Quaternion(matrix=RR[:, :, -1])
    orien_tmp = np.array(
        [orien_tmp[0], orien_tmp[1], orien_tmp[2],
         orien_tmp[3]]).reshape(1, 4)

    # create a handle of these parameters for interactive modifications
    obj = ControlParams(ex, ey, ez, n, P, H, ttype,
                        dq_bounds, q, dq, pp[:, -1], orien_tmp, pos_v,
                        ang_v.reshape(1, 4), w_t, v_t, epsilon, inc_pos_v,
                        inc_ang_v, 0, er, ep, 0)

    J_eef = getJacobian(obj.params['controls']['q'],
                        obj.params['defi']['ttype'], obj.params['defi']['H'],
                        obj.params['defi']['P'], obj.params['defi']['n'])

    # desired rotational velocity
    vr = v[0:3]

    # desired linear velocity
    vp = v[3:6]

    Q = getqp_H_new(J_eef, vr.reshape(3, 1), vp.reshape(3, 1),
                    obj.params['opt']['er'], obj.params['opt']['ep'])

    # make sure Q is symmetric
    Q = 0.5 * (Q + Q.T)

    f = getqp_f_new(obj.params['opt']['er'], obj.params['opt']['ep'])
    f = f.reshape((8, ))

    # bounds for qp
    if obj.params['opt']['upper_dq_bounds']:
        bound = obj.params['defi']['dq_bounds'][1, :]
    else:
        bound = obj.params['defi']['dq_bounds'][0, :]

    LB = np.vstack((-0.1 * bound.reshape(6, 1), 0, 0))
    UB = np.vstack((0.1 * bound.reshape(6, 1), 1, 1))

    # inequality constrains A and b
    h[0:6] = obj.params['controls']['q'] - lower_limit.reshape(6, 1)
    h[6:12] = upper_limit.reshape(6, 1) - obj.params['controls']['q']
    sigma[0:12] = inequality_bound(h[0:12], c, eta, epsilon_in, E)

    # collision constraint
    if check_flag:
        # (Closest_Pt, Closest_Pt_env) = collision_checker.collision_report()
        closest_Pts = collision_checker.collision_report()
        i = 12
        for key, value in closest_Pts.items():
            Closest_Pt, Closest_Pt_env = value
            dist = np.linalg.norm(Closest_Pt_env - Closest_Pt)

            ## TODO: check several points instead of one point
            dx = Closest_Pt_env[0] - Closest_Pt[0]
            dy = Closest_Pt_env[1] - Closest_Pt[1]
            dz = Closest_Pt_env[2] - Closest_Pt[2]
            # dz = 0
            """ """
            dist = np.sqrt(dx**2 + dy**2 + dz**2)
            # assert dist == dist1
            # derivative of dist w.r.t time
            der = np.array([dx / dist, dy / dist, dz / dist])
            h[i] = dist
            dhdq[i, 0:6] = np.dot(-der[None, :], J_eef[3:6, :])
            # TODO: here I changed the equation of the sigma
            eta = 0.03
            sigma[i] = inequality_bound(h[i], c, eta, epsilon_in, E)
            # sigma[i] =  h[i]
            i += 1
            # break

    A = np.vstack((dhdq, np.eye(8), -np.eye(8)))
    b = np.vstack((sigma, LB, -UB))

    # solve the quadprog problem
    try:
        dq_sln = quadprog.solve_qp(Q, -f, A.T, b.reshape((33, )))[0]
    except:
        return [0, 0, 0, 0, 0, 0]

    if len(dq_sln) < obj.params['defi']['n']:
        obj.params['controls']['dq'] = np.zeros((6, 1))
        V_scaled = 0
        print('No Solution')
        dq_sln = np.zeros((int(n), 1))
    else:
        obj.params['controls']['dq'] = dq_sln[0:int(obj.params['defi']['n'])]
        obj.params['controls']['dq'] = obj.params['controls']['dq'].reshape(
            (6, 1))
        #print dq_sln
    #        V_scaled = dq_sln[-1]*V_desired
    #        vr_scaled = dq_sln[-2]*vr.reshape(3,1)

    #print np.dot(np.linalg.pinv(J_eef),v)

    # get the linear velocity and angular velocity
    V_linear = np.dot(J_eef[3:6, :], obj.params['controls']['dq'])
    V_rot = np.dot(J_eef[0:3, :], obj.params['controls']['dq'])

    return dq_sln[0:6]
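
For reference, a tiny standalone sketch of the quadprog call convention used above (toy numbers, not the robot problem): solve_qp minimizes 1/2 x^T G x - a^T x subject to C^T x >= b, which is why the constraint matrix A (one row per constraint) is passed transposed.

import numpy as np
import quadprog

G = np.eye(2)                       # quadratic term; must be symmetric positive definite
a = np.array([1.0, 1.0])            # linear term (solve_qp minimizes 1/2 x'Gx - a'x)
A = np.eye(2)                       # one constraint per row, as with dhdq above
b = np.array([0.2, 0.2])            # enforce x[0] >= 0.2 and x[1] >= 0.2

x = quadprog.solve_qp(G, a, A.T, b)[0]   # A is transposed because quadprog expects constraints as columns
print(x)  # [1., 1.]: the unconstrained minimum already satisfies the bounds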
Example #9
def main():

    # OpenRAVE_obj = OpenRAVEObject()

    # Initialize Robot Parameters
    ex, ey, ez, n, P, q_ver, H, ttype, dq_bounds = robotParams()

    # Initialize Control Parameters
    # initial joint angles
    """ """
    #    Q_all = np.array([[-1.5832209587097168, 0.35797879099845886, -0.018985196948051453, 3.492117684800178e-05, 1.2321044206619263, -0.012819867581129074],
    #    [-1.0001745223999023, 0.3400745689868927, 0.27556654810905457, -0.01793256774544716, 0.8835334777832031, -0.06672105938196182],
    #    [-0.4340008497238159, 0.33434557914733887, 0.2755616009235382, -0.01793256774544716, 0.8835381269454956, -0.06672105938196182],
    #    [-0.08396485447883606, 0.33433452248573303, 0.2755637466907501, -0.017921535298228264, 0.8835418224334717, -0.06673289835453033],
    #    [0.027317576110363007, 0.7219697833061218, -0.3032461106777191, -0.025421472266316414, 1.0768276453018188, 0.044921014457941055],
    #    [0.5158949494361877, 1.2091666460037231, -0.9566897749900818, -0.04086354747414589, 1.269188404083252, 0.5332122445106506]])
    #    Q_all = np.array([[-1.4735171794891357, 0.43576669692993164, 0.025293484330177307, 0.012627056799829006, 1.0394766330718994, 0.0606585256755352],
    #    [-1.040715515613556, 0.43576669692993164, 0.025293484330177307, 0.012627056799829006, 1.0394766330718994, 0.0606585256755352],
    #    [-0.6079138517379761, 0.4357639253139496, 0.025260837748646736, 0.030262663960456848, 1.039481282234192, 0.060406409204006195],
    #    [-0.607966959476471, 0.5187827348709106, 0.12658417224884033, 0.03462858498096466, 0.8553994297981262, -0.6607409119606018],
    #    [-0.4619627594947815, 1.0956398248672485, -0.7398647665977478, 0.01724303886294365, 1.1420093774795532, -0.49965518712997437]])

    Q_all = np.array([[
        -1.5832209587097168, 0.35797879099845886, -0.018985196948051453,
        3.492117684800178e-05, 1.2321044206619263, -0.012819867581129074
    ],
                      [
                          -1.040715515613556, 0.35797879099845886,
                          -0.018985196948051453, 3.492117684800178e-05,
                          1.2321044206619263, -0.012819867581129074
                      ],
                      [
                          -0.6079138517379761, 0.35797879099845886,
                          -0.018985196948051453, 3.492117684800178e-05,
                          1.2321044206619263, -0.012819867581129074
                      ],
                      [
                          -0.607966959476471, 0.5187827348709106,
                          0.12658417224884033, 0.03462858498096466,
                          0.8553994297981262, -0.6607409119606018
                      ],
                      [
                          -0.4619627594947815, 1.0956398248672485,
                          -0.7398647665977478, 0.01724303886294365,
                          1.1420093774795532, -0.49965518712997437
                      ]])
    test_q = np.array([4.8046852, 2.92482, 1.002, 4.2031852, 1.4458, 1.3233])
    test_q = np.array([3.464029, 2.65480, 0.514936, 3.10589, 1.3006, 2.48233])
    test_q = np.array([
        0.0019249955138216472, 3.1432288997635376, 3.139667720753884,
        3.1451626210310444, 3.1404027532147456, 0.004759989010934167
    ])
    test_q = np.array([0., pi, pi, pi, pi, 0])

    # calibrate 1:
    # real x: -0.00213531428017 y: 0.0643087550998 z: 1.18114089966
    # predict x: -1.71608523e-16 y: 6.43000000e-02 z: 1.18114496e+00
    # test_q = np.array([0., pi, pi, 0, 0, 0])

    # calibrate 2:
    # real: x: 0.498562157154 y: 0.0647798776627 z: 0.679222285748
    # our predict: x: 0.49564496 y: 0.0643 z: 0.6855
    # test_q = np.array([0., pi, 1.5*pi, 0, 0, 0])

    # calibrate 3:
    # real x: 0.398912936449 y: -0.305943399668 z: 0.677314043045
    # our predict x: 0.39594088 y: -0.30500695 z: 0.6855
    # test_q = np.array([pi/4., pi, 1.5*pi, 0, 0, 0])

    # calibrate 4:
    # real: x: 0.349278271198 y: -0.256594508886 z: 0.431439548731
    # our predict: x: 0.34898628 y: -0.25805235 z: 0.43767752
    # test_q = np.array([pi/4., pi, pi*5/3, 0, 0, 0]) # 45, 180, 300, 0, 0, 0

    # calibrate 5:
    # real: x: 0.382904559374 y: -0.165705829859 z: 0.657457232475
    # predict: x: 0.38003075 y: -0.16491893 z:0.66580688
    test_q = np.array([pi / 4., pi, pi * 5 / 3, 0, pi / 2,
                       0])  # 45, 180, 300, 0, 90, 0

    # calibrate 6:
    # real x: 0.50574274 y: -0.27025385 z: 0.85814328
    # predict: match
    test_q = np.array([
        0.5149363115205982, 2.7840728646265798, 4.284077575957208,
        -0.5259787760854754, 0.860368012295942, 0.1713596023130354
    ])

    # current bullet environment joint angle
    test_q = np.array([
        2.08477017189754, 2.94851560508792, 0.9403603166942158,
        4.0745507140341335, 0.7758782280277694, 8.224071010650652 - 2 * pi
    ])
    test_q = np.array([
        2.9654557064658054, 2.9599693283041333, 0.5794236254734035,
        3.9531707557671565, 1.8754357233632242, 1.953975539702969
    ])

    test_q = test_q.reshape(6, 1)
    # q = Q_all[0,:].reshape(6, 1)
    q = test_q
    Checker = CollisionCheck()
    Checker.simulation2()

    # without checker: [-0.17453293 -0.005       0.07432923 -0.03735876 -0.18331453  0.03045566]
    # with collision checker: [-0.0049198  -0.1234     -0.00917626  0.02173149 -0.0389669  -0.09331467]

    result = QP_abbirb6640(q, np.array([0, 0, 0, 0, 0.3, 0]), True, Checker)
    print(result)

    R, pos = fwdkin(test_q, ttype, H, P, n)
    print(pos)
    R, pos = fwdkin(test_q + np.array(result), ttype, H, P, n)
    print(pos)

    # sys.exit(1)

    orien = Quaternion(matrix=R)
    orien = np.array([orien[0], orien[1], orien[2], orien[3]]).reshape(1, 4)

    pos_v = np.zeros((3, 1))
    ang_v = np.array([1, 0, 0, 0])
    dq = np.zeros((int(n), 1))

    # joint limits
    lower_limit = np.transpose(
        np.array([
            -170 * np.pi / 180, -65 * np.pi / 180, -np.pi, -300 * np.pi / 180,
            -120 * np.pi / 180, -2 * np.pi
        ]))
    upper_limit = np.transpose(
        np.array([
            170 * np.pi / 180, 85 * np.pi / 180, 70 * np.pi / 180,
            300 * np.pi / 180, 120 * np.pi / 180, 2 * np.pi
        ]))

    # inequality constraints
    h = np.zeros((15, 1))
    sigma = np.zeros((13, 1))
    dhdq = np.vstack((np.hstack((np.eye(6), np.zeros((6, 1)), np.zeros(
        (6, 1)))), np.hstack((-np.eye(6), np.zeros((6, 1)), np.zeros(
            (6, 1)))), np.zeros((1, 8))))

    # velocities
    w_t = np.zeros((3, 1))
    v_t = np.zeros((3, 1))

    # keyboard controls
    # define position and angle step
    inc_pos_v = 0.01  # m/s
    inc_ang_v = 0.5 * np.pi / 180  # rad/s

    # optimization params
    er = 0.05
    ep = 0.05
    epsilon = 0  # legacy param for newton iters

    # parameters for inequality constraints
    c = 0.5
    eta = 0.1
    epsilon_in = 0.15
    E = 0.005

    Ke = 1

    # create a handle of these parameters for interactive modifications
    obj = ControlParams(ex, ey, ez, n, P, H,
                        ttype, dq_bounds, q, dq, pos, orien, pos_v,
                        ang_v.reshape(1, 4), w_t, v_t, epsilon, inc_pos_v,
                        inc_ang_v, 0, er, ep, 0)

    dt = 0
    counter = 0

    # dL = 1.0/200.0
    dL = 1.0 / 5.0
    Lambda = np.linspace(0, 1, int(1.0 / dL) + 1)  # np.linspace(0, 1, 1001)

    pos_b = pos
    R_b = R
    q_b = q
    eu_b = np.array(euler_from_matrix(R_b))

    Joint_all = []

    Checker = CollisionCheck()
    Checker.simulation2()

    # Q_all: is the path for the arm
    # for iq in range(Q_all.shape[0]-1):
    # here we just define two point, start point and end point:
    my_R, start_pos = fwdkin(test_q, ttype, H, P, n)
    cur_pos = start_pos
    end_pos = start_pos + np.array([0, 0.2, -0.]).reshape(
        (3, 1))  # move 20 cm in the y direction
    # for iq in range(Q_all.shape[0]-1):
    index = 0
    if 1:  # cur_pos != end_pos:
        orien = Quaternion(matrix=my_R)
        iq = Checker.calculateInverseKinematics(end_pos, orien).reshape(6, 1)
        iq_copy = iq
        # Checker.update_joint(iq)

        q_a = q_b
        R_a = R_b
        pos_a = pos_b
        eu_a = eu_b
        #        q_a = obj.params['controls']['q']
        #        R_a, pos_a = fwdkin(q_a,ttype,H,P,n)
        #        eu_a = np.array(euler_from_matrix(R_a))

        q_b = iq  # Q_all[iq+1,:].reshape(6, 1)

        ## TODO: can not get correct pos_b from fwdkin(q_b). The q_b is from bullet, this function is our own function

        R_b, pos_b = fwdkin(iq, ttype, H, P, n)
        eu_b = np.array(euler_from_matrix(R_b))

        #stop, Closest_Pt, Closest_Pt_env = OpenRAVE_obj.CollisionReport(obj.params['controls']['q'][0],obj.params['controls']['q'][1],obj.params['controls']['q'][2],obj.params['controls']['q'][3],obj.params['controls']['q'][4],obj.params['controls']['q'][5])
        #raw_input('pause')
        for ld in Lambda:

            eu = eu_a * (1 - ld) + eu_b * ld
            R_des = euler_matrix(eu[0], eu[1], eu[2])
            R_des = R_des[0:3, 0:3]
            #print R_des[0:3,0:3],x_des, eu

            x_des = pos_a * (1 - ld) + pos_b * ld
            x_des = np.array([x_des[0][0], x_des[1][0], x_des[2][0]])
            if x_des[1] > 0.5:
                print

            obj.params['controls']['q'] = obj.params['controls'][
                'q'] + obj.params['controls']['dq']  #*dt*0.1

            Joint_all.append(np.transpose(obj.params['controls']['q'])[0])
            #print np.transpose(obj.params['controls']['q'])[0]

            pp, RR = fwdkin_alljoints(obj.params['controls']['q'], ttype,
                                      obj.params['defi']['H'],
                                      obj.params['defi']['P'],
                                      obj.params['defi']['n'])

            # parameters for qp
            obj.params['controls']['pos'] = pp[:, -1]

            orien_tmp = Quaternion(matrix=RR[:, :, -1])
            obj.params['controls']['orien'] = np.array(
                [orien_tmp[0], orien_tmp[1], orien_tmp[2],
                 orien_tmp[3]]).reshape(1, 4)

            (Closest_Pt, Closest_Pt_env) = Checker.collision_report()
            # stop, Closest_Pt, Closest_Pt_env = OpenRAVE_obj.CollisionReport(obj.params['controls']['q'][0],obj.params['controls']['q'][1],obj.params['controls']['q'][2],obj.params['controls']['q'][3],obj.params['controls']['q'][4],obj.params['controls']['q'][5])

            # check self-collision
            # if (stop):
            #     print 'robot is about to self-collide.'
            #     #obj.params['controls']['pos_v'] = np.array([0,0,0]).reshape(3, 1)
            #     #obj.params['controls']['ang_v'] = np.array([1,0,0,0]).reshape(1, 4)

            J2C_Joint = Joint2Collision(Closest_Pt, pp)

            J_eef = getJacobian(obj.params['controls']['q'],
                                obj.params['defi']['ttype'],
                                obj.params['defi']['H'],
                                obj.params['defi']['P'],
                                obj.params['defi']['n'])

            v_tmp = Closest_Pt - obj.params['controls']['pos']

            v_tmp2 = (pp[:, -1] - pp[:, -3])
            p_norm2 = norm(v_tmp2)
            v_tmp2 = v_tmp2 / p_norm2

            # determine if the closest point is on the panel
            #print norm(v_tmp),np.arccos(np.inner(v_tmp, v_tmp2)/norm(v_tmp))*180/np.pi

            if (norm(v_tmp) < 2.5
                    and np.arccos(np.inner(v_tmp, v_tmp2) / norm(v_tmp)) *
                    180 / np.pi < 70):
                # print '---the closest point is on the panel---'
                J2C_Joint = 6
                J = getJacobian3(obj.params['controls']['q'],
                                 obj.params['defi']['ttype'],
                                 obj.params['defi']['H'],
                                 obj.params['defi']['P'],
                                 obj.params['defi']['n'], Closest_Pt,
                                 J2C_Joint)
                #J,p_0_tmp = getJacobian2(obj.params['controls']['q'], obj.params['defi']['ttype'], obj.params['defi']['H'], obj.params['defi']['P'], obj.params['defi']['n'],Closest_Pt,J2C_Joint)

            #if (J2C_Joint < 4):
            else:
                J, p_0_tmp = getJacobian2(obj.params['controls']['q'],
                                          obj.params['defi']['ttype'],
                                          obj.params['defi']['H'],
                                          obj.params['defi']['P'],
                                          obj.params['defi']['n'], Closest_Pt,
                                          J2C_Joint)

            #else:
            #   J = getJacobian3(obj.params['controls']['q'], obj.params['defi']['ttype'], obj.params['defi']['H'], obj.params['defi']['P'], obj.params['defi']['n'], Closest_Pt,J2C_Joint)

            # update joint velocities
            #axang = quat2axang(obj.params['controls']['ang_v'])

            # desired rotational velocity
            w_skew = logm(np.dot(RR[:, :, -1], R_des.T))
            w = np.array([w_skew[2, 1], w_skew[0, 2], w_skew[1, 0]])
            vr = -Ke * w
            obj.params['controls']['ang_v'] = vr

            # desired linear velocity
            V_desired = np.reshape(Ke * (x_des - pp[:, -1]), [3, 1])
            obj.params['controls']['pos_v'] = V_desired

            #print x_des
            #print pp[:,-1]
            #print (x_des-np.reshape(pp[:,-1],[3,1]))
            Q = getqp_H(obj.params['controls']['dq'], J_eef, vr.reshape(3, 1),
                        obj.params['controls']['pos_v'],
                        obj.params['opt']['er'], obj.params['opt']['ep'])

            # make sure Q is symmetric
            Q = 0.5 * (Q + Q.T)

            f = getqp_f(obj.params['controls']['dq'], obj.params['opt']['er'],
                        obj.params['opt']['ep'])
            f = f.reshape((8, ))

            # bounds for qp
            if obj.params['opt']['upper_dq_bounds']:
                bound = obj.params['defi']['dq_bounds'][1, :]
            else:
                bound = obj.params['defi']['dq_bounds'][0, :]

            LB = np.vstack((-0.1 * bound.reshape(6, 1), 0, 0))
            UB = np.vstack((0.1 * bound.reshape(6, 1), 1, 1))
            # LB = matrix(LB, tc = 'd')
            # UB = matrix(UB, tc = 'd')

            # inequality constrains A and b
            h[0:6] = obj.params['controls']['q'] - lower_limit.reshape(6, 1)
            h[6:12] = upper_limit.reshape(6, 1) - obj.params['controls']['q']

            dx = Closest_Pt_env[0] - Closest_Pt[0]
            dy = Closest_Pt_env[1] - Closest_Pt[1]
            dz = Closest_Pt_env[2] - Closest_Pt[2]
            """ """
            dist = np.sqrt(dx**2 + dy**2 + dz**2)

            # derivative of dist w.r.t time
            der = np.array([dx / dist, dy / dist, dz / dist])
            """ """
            h[12] = dist - 0.1
            """ """ """ """
            #dhdq[12, 0:6] = np.dot(-der.reshape(1, 3), J_eef2[3:6,:])
            dhdq[12, 0:6] = np.dot(-der[None, :], J[3:6, :])

            sigma[0:12] = inequality_bound(h[0:12], c, eta, epsilon_in, E)
            sigma[12] = inequality_bound(h[12], c, eta, epsilon_in, E)

            A = dhdq
            b = sigma

            A = np.vstack((A, np.eye(8), -np.eye(8)))
            b = np.vstack((b, LB, -UB))
            b = b.reshape((29, ))

            # solve the quadprog problem
            #if not is_pos_def(Q):
            #    print np.linalg.eigvals(Q)
            #    dq_sln = np.zeros((6,1))
            #    raw_input('pause')
            #
            #else:
            dq_sln = quadprog.solve_qp(Q, -f, A.T, b)[0]

            if len(dq_sln) < obj.params['defi']['n']:
                obj.params['controls']['dq'] = np.zeros((6, 1))
                V_scaled = 0
                print('No Solution')
            else:
                obj.params['controls']['dq'] = dq_sln[
                    0:int(obj.params['defi']['n'])]
                obj.params['controls']['dq'] = obj.params['controls'][
                    'dq'].reshape((6, 1))
                V_scaled = dq_sln[-1] * V_desired
                vr_scaled = dq_sln[-2] * vr.reshape(3, 1)

            V_linear = np.dot(J_eef[3:6, :], obj.params['controls']['dq'])
            V_rot = np.dot(J_eef[0:3, :], obj.params['controls']['dq'])

            iq = Checker.calculateInverseKinematics(x_des, orien).reshape(6, 1)
            # Checker.update_joint(obj.params['controls']['q'])
            # print obj.params["controls"]['dq']
            Checker.update_joint(iq)
            # print x_des

            # bullet approach to calculate the jacobian:
            (jac_t, jac_r) = Checker.calculateJacobian(iq_copy)
            print(Checker.multiplyJacobian2(obj.params["controls"]['dq']))

            dq = np.concatenate((iq - iq_copy, np.array([[0], [0], [0]])))
            result = dq.reshape(9) * jac_t
            # print '------Scaled desired linear velocity------'
            # # print V_scaled
            #
            # print '------Real linear velocity by solving quadratic programming------'
            # # print V_linear
            # print '------Position--------'
            # print x_des
            # print end_pos
            if index % 100 == 0:
                print
            index += 1
            time.sleep(0.01)

            # print '------Scaled desired angular velocity------'
            # print vr_scaled
            #
            # print '------Real angular velocity by solving quadratic programming------'
            # print V_rot
    raw_input("pause")
    np.savetxt('Joint_all.out', Joint_all, delimiter=',')
Example #10
    def sampled_kvectors_spherical_coordinates(self, NA_in, NA_out, NumSample,
                                               kd):
        # sample multiple planewaves at different angles to simulate a focused beam
        # return a list of planewave direction vectors Kd
        # and the corresponding scale factor if using uniform sampling
        # NA: numerical aperture of the lens the planewaves are focused from
        # NumSample: number of samples (planewaves)
        # kd: center planewave of the focused beam

        # allocate space for the field and initialize it to zero
        start3 = time.time()
        CenterKd = self.k  # default planewave coming in perpendicular to the surface
        kd = kd / np.linalg.norm(kd)  # normalize the new planewave
        r = np.sqrt(
            CenterKd[0]**2 + CenterKd[1]**2 + CenterKd[2]**2
        )  # radius of the hemisphere from which the k vectors are sampled

        if (
                kd[0] == CenterKd[0] and kd[1] == CenterKd[1]
                and kd[2] == CenterKd[2]
        ):  # if the new planewave points in the same direction as the default plane wave
            rotateAxis = CenterKd  # set rotation axis to the default k vector
            RoAngle = 0  # set rotation angle to 0 degrees
        else:  # if the new plane wave points in a different direction than the default planewave, rotation is needed
            rotateAxis = np.cross(
                CenterKd, kd
            )  # find an axis perpendicular to both vectors to serve as rotation axis
            RoAngle = math.asin(kd[2] / r)  # calculate the rotation angle
        beamRotate = Quaternion(
            axis=rotateAxis, angle=RoAngle)  # create a quaternion for rotation

        Kd = np.zeros((3, NumSample))  #initialize the planewave list
        #        scaleFactor = np.zeros(NumSample)                          #initialize a list of scalefactors which are used to scale down the amplitude of planewaves later on along latitude domain

        #convert the axis from Cartesian to Spherical
        pha = math.acos(CenterKd[2] /
                        r)  #calculate polar angle pha from Z coordinate

        phaM = math.asin(NA_out / np.real(
            self.n))  #calculate sample range of pha from numerical aperture

        inZ = np.cos(pha)  #lower boundary of sampling along Z axis
        outZ = np.cos(phaM)  #upper boundary of sampling along Z axis

        rangeZ = np.abs(inZ) - np.abs(outZ)  #sampling range along Z axis

        #        phaStep = phaM / NumSample                                  #set longitudinal sample resolution as maximal pha divided by number of samples
        #        thetaStep = thetaM / NumSample                              #set latitudinal sample resolution as maximal theta divided by number of samples

        ###following is uniform sampling
        #        for i in range(NumSample):                                  #sample along longitudinal (pha) domain
        #            for j in range(NumSample):                              #sample along latitudinal (theta) domain
        #                KdR = r                                             #sample hemisphere radiance will be all the same as r
        #                KdTheta = theta + thetaStep * j                     #sample theta at each step in the sample range
        #                KdPha = pha + phaStep * i                           #sample theta at each step in the sample range
        #                Kd[0,j,i] = KdR * np.cos(KdTheta) * np.sin(KdPha)   #convert coordinates from spherical to Cartesian
        #                Kd[1,j,i] = KdR * np.sin(KdTheta) * np.sin(KdPha)
        #                Kd[2,j,i] = KdR * np.cos(KdPha)
        #                Kd[:,j,i] = beamRotate.rotate(Kd[:,j,i])            #rotate k vectors by the quaternion generated
        #                scaleFactor[j,i] = np.sin(KdPha)                    #calculate the scalefactors by the current polar angle pha
        #

        ###here comes Monte Carlo Sampling
        for i in range(NumSample):
            KdR = r  #the r coordinate of the vector under spherical system
            KdTheta = random.random(
            ) * 2 * np.pi  #get a random value for theta coordinate under spherical system
            KdZ = random.random(
            ) * rangeZ + inZ  #get a random value for Z coordinate under cartesian system
            KdPha = math.acos(KdZ)  #convert it back to spherical system
            Kd[0, i] = KdR * np.cos(KdTheta) * np.sin(
                KdPha)  #convert coordinates from spherical to Cartesian
            Kd[1, i] = KdR * np.sin(KdTheta) * np.sin(
                KdPha
            )  #the reason why we sample Z at cartesian is that we want the vectors to distribute randomly on that direction
            Kd[2, i] = KdR * np.cos(
                KdPha
            )  #if we sample it on phi domain, they will be denser towards center
            Kd[:, i] = beamRotate.rotate(
                Kd[:, i])  #rotate k vectors by the quaternion generated


#        Kd = np.reshape(Kd, ((3, NumSample ** 2)))
#        scaleFactor = np.reshape(scaleFactor, ((NumSample ** 2)))   #reshape list of k vectors and scalefactors to an one dimentional list

        end3 = time.time()
        print("sample planewaves: " + str(end3 - start3) + " s\n")

        return Kd
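
The Monte Carlo loop above relies on the standard fact that sampling the z coordinate uniformly over [cos(theta_max), 1] (rather than sampling the polar angle directly) gives directions distributed uniformly by area over the spherical cap. A small standalone sketch of that idea, independent of the class above:

import numpy as np

def sample_cap_directions(theta_max, num_samples, seed=None):
    # Uniform random unit vectors inside a cone of half-angle theta_max around +z.
    rng = np.random.default_rng(seed)
    z = rng.uniform(np.cos(theta_max), 1.0, num_samples)   # uniform in z <=> uniform area on the sphere
    phi = rng.uniform(0.0, 2.0 * np.pi, num_samples)       # azimuth is uniform by symmetry
    s = np.sqrt(1.0 - z ** 2)
    return np.stack([s * np.cos(phi), s * np.sin(phi), z], axis=1)

print(sample_cap_directions(np.deg2rad(30.0), 5))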
Example #11
def _fill_trainval_infos(nusc,
                         train_scenes,
                         val_scenes,
                         test=False,
                         max_sweeps=10):
    train_nusc_infos = []
    val_nusc_infos = []
    from pyquaternion import Quaternion
    for sample in prog_bar(nusc.sample):
        lidar_token = sample["data"]["LIDAR_TOP"]
        cam_front_token = sample["data"]["CAM_FRONT"]
        sd_rec = nusc.get('sample_data', sample['data']["LIDAR_TOP"])
        # =================== for lidar ===================
        # {'is_key_frame': True,
        #  'prev': '',
        #  'fileformat': 'bin',
        #  'token': 'ec9950f7b5d4ae85ae48d07786e09cebbf4ee771d054353f1e24a95700b4c4af',
        #  'timestamp': 1557858039302414.8,
        #  'next': 'b2fb6b275352ff1bc8d63cae2ec88561dddb044cae6f8e6ee7ada4ed07d79dc7',
        #  'ego_pose_token': '2d673d4bee560c77788b91e2ee24503538e74a23e7972e3e0099b92015f76dde',
        #  'sample_token': '24b0962e44420e6322de3f25d9e4e5cc3c7a348ec00bfa69db21517e4ca92cc8',
        #  'calibrated_sensor_token': '82130f5d48b806b62fec95989081337218fbf338ebcc95115d8afcebb305630c',
        #  'filename': 'lidar/host-a101_lidar1_1241893239302414726.bin',
        #  'sensor_modality': 'lidar',
        #  'channel': 'LIDAR_TOP'}
        # =================== for camera ===================
        # {'width': 1920,
        #  'height': 1080,
        #  'calibrated_sensor_token': '8e73e320d1fa9e5af96059e6eb1dd7d28e3271dea04de86ead47fa25fd13fd20',
        #  'token': 'fb40b3b5b9d289cd0e763bec34e327d3317a7b416f787feac0d387363b4d00f0',
        #  'sample_token': '24b0962e44420e6322de3f25d9e4e5cc3c7a348ec00bfa69db21517e4ca92cc8',
        #  'is_key_frame': True,
        #  'prev': '',
        #  'fileformat': 'jpeg',
        #  'ego_pose_token': '0c257254dad346c9d90f7970ce2c0b8142f7c6e6a90716f4c0538cd2d2ef77d5',
        #  'timestamp': 1557858039250000.0,
        #  'next': '00ba71deb97524b5d9be9b677962b74bb32f89284ad7838462250218786b903e',
        #  'filename': 'images/host-a101_cam0_1241893239250000006.jpeg',
        #  'sensor_modality': 'camera',
        #  'channel': 'CAM_FRONT'}

        cs_record = nusc.get('calibrated_sensor',
                             sd_rec['calibrated_sensor_token'])
        pose_record = nusc.get('ego_pose', sd_rec['ego_pose_token'])
        lidar_path, boxes, _ = nusc.get_sample_data(lidar_token)
        # PosixPath('/media/zzhou/data/data-lyft-3D-objects/lidar/host-a101_lidar1_1241893239302414726.bin')
        # boxes list[lyft_dataset_sdk.utils.data_classes.Box]
        # boxes[3].corners() returns array with shape (3, 8)
        # (Convention: x points forward, y to the left, z up.)
        # my_boxes[3].name -> "truck"
        # my_boxes[3].orientation -> Quaternion(0.7094216708769627, 0.009708448436296943, -0.006909889242425616, -0.7046835405696343)
        # I assume boxes class is for rendering

        cam_path, _, cam_intrinsic = nusc.get_sample_data(cam_front_token)
        # PosixPath('/media/zzhou/data/data-lyft-3D-objects/images/host-a101_cam0_1241893239250000006.jpeg')
        # 3x3 np.ndarray
        assert Path(lidar_path).exists(), (
            "you must download all trainval data, key-frame only dataset performs far worse than sweeps."
        )
        info = {
            "lidar_path": lidar_path,
            "cam_front_path": cam_path,
            "token": sample["token"],
            "sweeps": [],
            "lidar2ego_translation": cs_record['translation'],
            "lidar2ego_rotation": cs_record['rotation'],
            "ego2global_translation": pose_record['translation'],
            "ego2global_rotation": pose_record['rotation'],
            "timestamp": sample["timestamp"],
        }

        l2e_r = info["lidar2ego_rotation"]
        l2e_t = info["lidar2ego_translation"]
        e2g_r = info["ego2global_rotation"]
        e2g_t = info["ego2global_translation"]
        l2e_r_mat = Quaternion(l2e_r).rotation_matrix
        e2g_r_mat = Quaternion(e2g_r).rotation_matrix

        sd_rec = nusc.get('sample_data', sample['data']["LIDAR_TOP"])
        sweeps = []
        while len(sweeps) < max_sweeps:
            if not sd_rec['prev'] == "":
                sd_rec = nusc.get('sample_data', sd_rec['prev'])
                cs_record = nusc.get('calibrated_sensor',
                                     sd_rec['calibrated_sensor_token'])
                pose_record = nusc.get('ego_pose', sd_rec['ego_pose_token'])
                lidar_path = nusc.get_sample_data_path(sd_rec['token'])
                sweep = {
                    "lidar_path": lidar_path,
                    "sample_data_token": sd_rec['token'],
                    "lidar2ego_translation": cs_record['translation'],
                    "lidar2ego_rotation": cs_record['rotation'],
                    "ego2global_translation": pose_record['translation'],
                    "ego2global_rotation": pose_record['rotation'],
                    "timestamp": sd_rec["timestamp"]
                }
                l2e_r_s = sweep["lidar2ego_rotation"]
                l2e_t_s = sweep["lidar2ego_translation"]
                e2g_r_s = sweep["ego2global_rotation"]
                e2g_t_s = sweep["ego2global_translation"]
                # sweep->ego->global->ego'->lidar
                l2e_r_s_mat = Quaternion(l2e_r_s).rotation_matrix
                e2g_r_s_mat = Quaternion(e2g_r_s).rotation_matrix

                R = (l2e_r_s_mat.T @ e2g_r_s_mat.T) @ (
                    np.linalg.inv(e2g_r_mat).T @ np.linalg.inv(l2e_r_mat).T)
                T = (l2e_t_s @ e2g_r_s_mat.T + e2g_t_s) @ (
                    np.linalg.inv(e2g_r_mat).T @ np.linalg.inv(l2e_r_mat).T)
                T -= e2g_t @ (np.linalg.inv(e2g_r_mat).T @ np.linalg.inv(
                    l2e_r_mat).T) + l2e_t @ np.linalg.inv(l2e_r_mat).T
                sweep["sweep2lidar_rotation"] = R.T  # points @ R.T + T
                sweep["sweep2lidar_translation"] = T
                sweeps.append(sweep)
            else:
                break
        info["sweeps"] = sweeps
        if not test:
            annotations = [
                nusc.get('sample_annotation', token)
                for token in sample['anns']
            ]
            locs = np.array([b.center for b in boxes]).reshape(
                -1, 3)  # w.r.t global coordinates, use lidar coord ?
            dims = np.array([b.wlh for b in boxes]).reshape(-1, 3)
            rots = np.array([b.orientation.yaw_pitch_roll[0]
                             for b in boxes]).reshape(-1, 1)
            velocity = np.array(
                [nusc.box_velocity(token)[:2] for token in sample['anns']])
            # convert velo from global to lidar
            for i in range(len(boxes)):
                velo = np.array([*velocity[i], 0.0])
                velo = velo @ np.linalg.inv(e2g_r_mat).T @ np.linalg.inv(
                    l2e_r_mat).T
                velocity[i] = velo[:2]

            names = [b.name for b in boxes]
            for i in range(len(names)):
                if names[i] in NuScenesDataset.NameMapping:
                    names[i] = NuScenesDataset.NameMapping[names[i]]
            names = np.array(names)
            # we need to convert rot to SECOND format.
            # changing the rot format would break all existing checkpoints, so...
            gt_boxes = np.concatenate([locs, dims, -rots - np.pi / 2], axis=1)
            assert len(gt_boxes) == len(
                annotations), f"{len(gt_boxes)}, {len(annotations)}"
            info["gt_boxes"] = gt_boxes
            info["gt_names"] = names
            info["gt_velocity"] = velocity.reshape(-1, 2)
            info["num_lidar_pts"] = np.array(
                [a["num_lidar_pts"] for a in annotations])
            info["num_radar_pts"] = np.array(
                [a["num_radar_pts"] for a in annotations])
        if sample["scene_token"] in train_scenes:
            train_nusc_infos.append(info)
        else:
            val_nusc_infos.append(info)
    return train_nusc_infos, val_nusc_infos
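
The closed-form R and T above are the sweep->ego->global->ego'->lidar chain written out with row-vector conventions. A standalone sketch (with made-up poses standing in for the calibrated_sensor and ego_pose records) showing the same mapping built from four homogeneous transforms:

import numpy as np
from pyquaternion import Quaternion

def hom(q, t, inverse=False):
    # 4x4 homogeneous transform from a pyquaternion Quaternion and a translation.
    M = np.eye(4)
    R = q.rotation_matrix
    t = np.asarray(t, dtype=float)
    if inverse:
        M[:3, :3], M[:3, 3] = R.T, -R.T @ t
    else:
        M[:3, :3], M[:3, 3] = R, t
    return M

# made-up calibration and ego poses (stand-ins for the cs/pose records above)
l2e_q_s, l2e_t_s = Quaternion(axis=[0, 0, 1], degrees=90), [1.0, 0.0, 1.8]      # sweep lidar -> sweep ego
e2g_q_s, e2g_t_s = Quaternion(axis=[0, 0, 1], degrees=10), [100.0, 50.0, 0.0]   # sweep ego -> global
l2e_q, l2e_t = l2e_q_s, l2e_t_s                                                 # ref lidar -> ref ego
e2g_q, e2g_t = Quaternion(axis=[0, 0, 1], degrees=12), [101.0, 50.5, 0.0]       # ref ego -> global

M = (hom(l2e_q, l2e_t, inverse=True) @ hom(e2g_q, e2g_t, inverse=True)
     @ hom(e2g_q_s, e2g_t_s) @ hom(l2e_q_s, l2e_t_s))

p = np.array([10.0, 2.0, 0.5])        # a point in the sweep's lidar frame
p_ref = M[:3, :3] @ p + M[:3, 3]      # the same point in the reference lidar frame;
# numerically equal to p @ sweep["sweep2lidar_rotation"].T + sweep["sweep2lidar_translation"]
# from the closed-form expressions above.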
Example #12
        The distance between the poses in y-direction

    Returns
    -------
    List[Pose]
        A list of generated poses
    """

    # The point defines the rotation and is a corner of the grid
    rotation = pose.quaternion
    # Calculate the delta of each step in x- and y-direction
    # For that, we scale the unit vector pointing in x-direction/y-direction
    # and then rotate it by the rotation of the pose
    delta_x = rotation.rotate(np.array([xStepSize, 0.0, 0.0]))
    delta_y = rotation.rotate(np.array([0.0, yStepSize, 0.0]))
    poses = []  # type: List[Pose]
    for y_index in range(ySize):
        for x_index in range(xSize):  # xSize assumed to be a parameter in the truncated signature above
            translation = pose.translation + x_index * delta_x + y_index * delta_y
            poses.append(Pose(translation, rotation))
    return poses


if __name__ == '__main__':
    # Called when running this script standalone
    translation = [0.6089578, 0.3406782, 0.208865]
    rotation = Quaternion(w=0.231852, x=0.33222, y=0.746109, z=0.528387)
    pose = Pose(translation, rotation)
    poses = main(pose, 3, 3, 0.05, 0.05)
    print(poses)
Example #13
def _fill_train_infos(lyft, train_scenes, test=False, max_sweeps=10):
    train_lyft_infos = []
    from pyquaternion import Quaternion
    print("sample number:", len(lyft.sample))
    for sample in prog_bar(lyft.sample):
        lidar_token = sample["data"]["LIDAR_TOP"]
        cam_front_token = sample["data"]["CAM_FRONT"]
        sd_rec = lyft.get('sample_data', sample['data']["LIDAR_TOP"])
        cs_record = lyft.get('calibrated_sensor',
                             sd_rec['calibrated_sensor_token'])
        pose_record = lyft.get('ego_pose', sd_rec['ego_pose_token'])
        lidar_path, boxes, _ = lyft.get_sample_data(lidar_token)
        cam_path, _, cam_intrinsic = lyft.get_sample_data(cam_front_token)
        assert Path(lidar_path).exists()

        info = {
            "lidar_path": lidar_path,
            "cam_front_path": cam_path,
            "token": sample["token"],
            "sweeps": [],
            "lidar2ego_translation": cs_record['translation'],
            "lidar2ego_rotation": cs_record['rotation'],
            "ego2global_translation": pose_record['translation'],
            "ego2global_rotation": pose_record['rotation'],
            "timestamp": sample["timestamp"],
        }

        # print("info:", info)

        l2e_r = info["lidar2ego_rotation"]
        l2e_t = info["lidar2ego_translation"]
        e2g_r = info["ego2global_rotation"]
        e2g_t = info["ego2global_translation"]
        l2e_r_mat = Quaternion(l2e_r).rotation_matrix
        e2g_r_mat = Quaternion(e2g_r).rotation_matrix

        sd_rec = lyft.get('sample_data', sample['data']["LIDAR_TOP"])
        sweeps = []
        while len(sweeps) < max_sweeps:
            if not sd_rec['prev'] == "":
                sd_rec = lyft.get('sample_data', sd_rec['prev'])
                cs_record = lyft.get('calibrated_sensor',
                                     sd_rec['calibrated_sensor_token'])
                pose_record = lyft.get('ego_pose', sd_rec['ego_pose_token'])
                lidar_path = lyft.get_sample_data_path(sd_rec['token'])
                sweep = {
                    "lidar_path": lidar_path,
                    "sample_data_token": sd_rec['token'],
                    "lidar2ego_translation": cs_record['translation'],
                    "lidar2ego_rotation": cs_record['rotation'],
                    "ego2global_translation": pose_record['translation'],
                    "ego2global_rotation": pose_record['rotation'],
                    "timestamp": sd_rec["timestamp"]
                }
                l2e_r_s = sweep["lidar2ego_rotation"]
                l2e_t_s = sweep["lidar2ego_translation"]
                e2g_r_s = sweep["ego2global_rotation"]
                e2g_t_s = sweep["ego2global_translation"]
                # sweep->ego->global->ego'->lidar
                l2e_r_s_mat = Quaternion(l2e_r_s).rotation_matrix
                e2g_r_s_mat = Quaternion(e2g_r_s).rotation_matrix

                R = (l2e_r_s_mat.T @ e2g_r_s_mat.T) @ (
                    np.linalg.inv(e2g_r_mat).T @ np.linalg.inv(l2e_r_mat).T)
                T = (l2e_t_s @ e2g_r_s_mat.T + e2g_t_s) @ (
                    np.linalg.inv(e2g_r_mat).T @ np.linalg.inv(l2e_r_mat).T)
                T -= e2g_t @ (np.linalg.inv(e2g_r_mat).T @ np.linalg.inv(
                    l2e_r_mat).T) + l2e_t @ np.linalg.inv(l2e_r_mat).T
                sweep["sweep2lidar_rotation"] = R.T  # points @ R.T + T
                sweep["sweep2lidar_translation"] = T
                sweeps.append(sweep)
            else:
                break
        info["sweeps"] = sweeps
        # print("sweeps:", sweeps)
        if not test:
            annotations = [
                lyft.get('sample_annotation', token)
                for token in sample['anns']
            ]
            locs = np.array([b.center for b in boxes]).reshape(-1, 3)
            dims = np.array([b.wlh for b in boxes]).reshape(-1, 3)
            rots = np.array([b.orientation.yaw_pitch_roll[0]
                             for b in boxes]).reshape(-1, 1)
            velocity = np.array(
                [lyft.box_velocity(token)[:2] for token in sample['anns']])
            # convert velo from global to lidar
            for i in range(len(boxes)):
                velo = np.array([*velocity[i], 0.0])
                velo = velo @ np.linalg.inv(e2g_r_mat).T @ np.linalg.inv(
                    l2e_r_mat).T
                velocity[i] = velo[:2]

            names = [b.name for b in boxes]
            for i in range(len(names)):
                if names[i] in LyftDataset.NameMapping:
                    names[i] = LyftDataset.NameMapping[names[i]]
            names = np.array(names)
            # we need to convert rot to SECOND format.
            # changing the rot format would break all existing checkpoints, so...
            gt_boxes = np.concatenate([locs, dims, -rots - np.pi / 2], axis=1)
            assert len(gt_boxes) == len(
                annotations), f"{len(gt_boxes)}, {len(annotations)}"
            info["gt_boxes"] = gt_boxes
            info["gt_names"] = names
            info["gt_velocity"] = velocity.reshape(-1, 2)
            info["num_lidar_pts"] = np.array(
                [a["num_lidar_pts"] for a in annotations])
            info["num_radar_pts"] = np.array(
                [a["num_radar_pts"] for a in annotations])

        if sample["scene_token"] in train_scenes:
            # print("sample_scene_token:", sample["scene_token"])
            train_lyft_infos.append(info)
        # else:
        # val_nusc_infos.append(info)
    return train_lyft_infos
Example #14
def parse_sample(sample_token, nusc):

    ret_dict = {}
    sample_data = nusc.get('sample', sample_token)
    timestamp = sample_data['timestamp']

    # First Step: Get lidar points in global frame coordinates!
    lidar_token = sample_data['data']['LIDAR_TOP']
    lidar_data = nusc.get('sample_data', lidar_token)
    pcl_path = osp.join(nusc.dataroot, lidar_data['filename'])
    pc = LidarPointCloud.from_file(pcl_path)

    # lidar point in point sensor frame

    cs_record = nusc.get('calibrated_sensor', lidar_data['calibrated_sensor_token'])
    pc.rotate(Quaternion(cs_record['rotation']).rotation_matrix)
    pc.translate(np.array(cs_record['translation']))

    # Second step: transform from ego to the global frame.
    poserecord = nusc.get('ego_pose', lidar_data['ego_pose_token'])
    pc.rotate(Quaternion(poserecord['rotation']).rotation_matrix)
    pc.translate(np.array(poserecord['translation']))

    # First Step finished! pc in global frame

    for cam_name in CamNames:
        cam_token = sample_data['data'][cam_name]
        _, boxes, _ = nusc.get_sample_data(cam_token, box_vis_level=BoxVisibility.ANY)

        all_points_next = []
        all_sf_next = []
        all_points_prev = []
        all_sf_prev = []
        for box in boxes:
            ann_token = box.token
            points_next, sf_next, points_prev, sf_prev, _ = get_sf_ann(ann_token, timestamp, deepcopy(pc.points[:3, ]), nusc)
            if points_next is not None:
                all_points_next.append(points_next)
                all_sf_next.append(sf_next)
            
            if points_prev is not None:
                all_points_prev.append(points_prev)
                all_sf_prev.append(sf_prev)

        if len(all_points_next) > 0:
            points_next = np.concatenate(all_points_next, axis=-1)
            sf_next = np.concatenate(all_sf_next, axis=-1)
        else:
            points_next = None
            sf_next = None
        
        if len(all_points_prev) > 0:
            points_prev = np.concatenate(all_points_prev, axis=-1)
            sf_prev = np.concatenate(all_sf_prev, axis=-1)
        else:
            points_prev = None
            sf_prev = None
        
        # transform points to ego pose; change sf's orientation

        # change from global to ego pose
        ret_points_list = []
        for points, sf in zip([points_next, points_prev], [sf_next, sf_prev]):
            if points is None:
                ret_points_list.append(None)
                continue
            points = points - np.array(poserecord['translation'])[:, np.newaxis]
            points = np.dot(Quaternion(poserecord['rotation']).rotation_matrix.T, points)

            sf = np.dot(Quaternion(poserecord['rotation']).rotation_matrix.T, sf)

            # change from ego to camera
            cam_data = nusc.get('sample_data', cam_token)
            cam_cs = nusc.get('calibrated_sensor', cam_data['calibrated_sensor_token'])

            # transform points for ego pose to camera pose
            cam_points = deepcopy(points) - np.array(cam_cs['translation'])[:, np.newaxis]
            cam_points = np.dot(Quaternion(cam_cs['rotation']).rotation_matrix.T, cam_points)
            cam_sf = np.dot(Quaternion(cam_cs['rotation']).rotation_matrix.T, sf)

            ret_points = np.concatenate([cam_points, cam_sf], axis=0)
            ret_points_list.append(ret_points)
        ret_dict[cam_name] = ret_points_list

    return ret_dict
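
The per-camera block above undoes a rotate-then-translate by subtracting the translation first and then multiplying by the transposed rotation matrix. A tiny round-trip check of that pattern with a made-up pose:

import numpy as np
from pyquaternion import Quaternion

rot = Quaternion(axis=[0, 0, 1], degrees=30)
trans = np.array([2.0, -1.0, 0.5])
p_child = np.array([[1.0], [2.0], [3.0]])                            # column vector, like pc.points[:3]

p_parent = rot.rotation_matrix @ p_child + trans[:, np.newaxis]      # child -> parent (rotate, then translate)
p_back = rot.rotation_matrix.T @ (p_parent - trans[:, np.newaxis])   # parent -> child (as done above)
print(np.allclose(p_back, p_child))  # True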
Example #15
    def test_unequal_quaternions(self):
        q1 = Quaternion(1, 0, 0, 0)
        q2 = Quaternion(0, 1, 0, 0)

        self.assertNotEqual(hash(q1), hash(q2))
Example #16
def transform_vector(vec, axis, angle):
    q = Quaternion(axis=axis, degrees=angle)
    vec_ = np.zeros((4))
    vec_[1:] = vec
    vec = q * vec_ * q.inverse
    return vec.elements[1:].tolist()
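
This is the usual conjugation v' = q v q^-1 with the vector embedded as a pure quaternion; pyquaternion's Quaternion.rotate gives the same result directly. A quick check, assuming transform_vector from the snippet above plus numpy and pyquaternion are in scope (axis and angle values are arbitrary):

import numpy as np
from pyquaternion import Quaternion

v = [1.0, 0.0, 0.0]
print(transform_vector(v, axis=[0, 0, 1], angle=90))                # ~[0.0, 1.0, 0.0]
print(Quaternion(axis=[0, 0, 1], degrees=90).rotate(np.array(v)))   # same rotation via the built-in helper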
Example #17
    def test_init_from_explicit_component(self):
        a, b, c, d = randomElements()

        # Using 'real' & 'imaginary' notation
        q1 = Quaternion(real=a, imaginary=(b, c, d))
        q2 = Quaternion(real=a, imaginary=[b, c, d])
        q3 = Quaternion(real=a, imaginary=np.array([b, c, d]))
        q4 = Quaternion(real=a)
        q5 = Quaternion(imaginary=np.array([b, c, d]))
        q6 = Quaternion(real=None, imaginary=np.array([b, c, d]))
        self.assertIsInstance(q1, Quaternion)
        self.assertIsInstance(q2, Quaternion)
        self.assertIsInstance(q3, Quaternion)
        self.assertIsInstance(q4, Quaternion)
        self.assertIsInstance(q5, Quaternion)
        self.assertIsInstance(q6, Quaternion)
        self.assertEqual(q1, Quaternion(a, b, c, d))
        self.assertEqual(q1, q2)
        self.assertEqual(q2, q3)
        self.assertEqual(q4, Quaternion(a, 0, 0, 0))
        self.assertEqual(q5, Quaternion(0, b, c, d))
        self.assertEqual(q5, q6)
        with self.assertRaises(ValueError):
            q = Quaternion(real=a, imaginary=[b, c])
        with self.assertRaises(ValueError):
            q = Quaternion(real=a, imaginary=(b, c, d, d))

        # Using 'scalar' & 'vector' notation
        q1 = Quaternion(scalar=a, vector=(b, c, d))
        q2 = Quaternion(scalar=a, vector=[b, c, d])
        q3 = Quaternion(scalar=a, vector=np.array([b, c, d]))
        q4 = Quaternion(scalar=a)
        q5 = Quaternion(vector=np.array([b, c, d]))
        q6 = Quaternion(scalar=None, vector=np.array([b, c, d]))
        self.assertIsInstance(q1, Quaternion)
        self.assertIsInstance(q2, Quaternion)
        self.assertIsInstance(q3, Quaternion)
        self.assertIsInstance(q4, Quaternion)
        self.assertIsInstance(q5, Quaternion)
        self.assertIsInstance(q6, Quaternion)
        self.assertEqual(q1, Quaternion(a, b, c, d))
        self.assertEqual(q1, q2)
        self.assertEqual(q2, q3)
        self.assertEqual(q4, Quaternion(a, 0, 0, 0))
        self.assertEqual(q5, Quaternion(0, b, c, d))
        self.assertEqual(q5, q6)
        with self.assertRaises(ValueError):
            q = Quaternion(scalar=a, vector=[b, c])
        with self.assertRaises(ValueError):
            q = Quaternion(scalar=a, vector=(b, c, d, d))
Example #18
0
def calculateOrientation(x, im_width, fov_w):
    angle = float(x - im_width / 2.0) / im_width * fov_w
    q = Quaternion(axis=[0, 0, 1], radians=angle)
    return q, angle
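A quick usage sketch for the helper above (an illustration only), assuming the horizontal field of view fov_w is given in radians, consistent with the radians= keyword used in calculateOrientation:

import numpy as np

# Pixel column 480 in a hypothetical 640 px wide image with a 90 deg FOV.
q, angle = calculateOrientation(x=480, im_width=640, fov_w=np.deg2rad(90))
print(np.rad2deg(angle))  # 22.5 degrees off the optical axis, rotated about +z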
Example #19
0
 def test_str(self):
     a, b, c, d = randomElements()
     q = Quaternion(a, b, c, d)
     string = "{:.3f} {:+.3f}i {:+.3f}j {:+.3f}k".format(a, b, c, d)
     self.assertEqual(string, str(q))
Example #20
0
    br = tf.TransformBroadcaster()
    while not rospy.is_shutdown():
        try:
            (trans_vel_baselink,
             rot_vel_baselink) = t.lookupTransform('/velodyne_sensor',
                                                   '/base_link', rospy.Time(0))
            (trans_world_vel,
             rot_world_vel) = t.lookupTransform('/map', '/velodyne',
                                                rospy.Time(0))

            H_vel_baselink = create_H(trans_vel_baselink, rot_vel_baselink)
            H_world_vel = create_H(trans_world_vel, rot_world_vel)

            H_world_baselink = np.matmul(H_world_vel, H_vel_baselink)

            q = Quaternion(matrix=H_world_baselink).elements
            trans_world_baselink = (H_world_baselink[0, 3],
                                    H_world_baselink[1, 3],
                                    H_world_baselink[2, 3])
            rot_world_baselink = (q[1], q[2], q[3], q[0])

            br.sendTransform(trans_world_baselink, rot_world_baselink,
                             rospy.Time.now(), "/base_link", "/map")
            # br.sendTransform((0,0,0), (1,0,0,0), rospy.Time(0), "/world", "/map")

        except (tf.LookupException, tf.ConnectivityException,
                tf.ExtrapolationException):
            rospy.logerr("TF lookup error")
            continue
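create_H is not shown in this snippet; a plausible sketch (an assumption, not the original helper) builds a 4x4 homogeneous matrix from a tf translation and a tf-style (x, y, z, w) quaternion:

import numpy as np
from pyquaternion import Quaternion

def create_H(trans, rot):
    # Hypothetical implementation of the helper used above.
    # trans: (x, y, z); rot: tf-order quaternion (x, y, z, w).
    H = np.eye(4)
    # pyquaternion expects (w, x, y, z), so reorder the tf quaternion.
    H[:3, :3] = Quaternion(rot[3], rot[0], rot[1], rot[2]).rotation_matrix
    H[:3, 3] = trans
    return H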
Example #21
0
 def test_bool(self):
     self.assertTrue(Quaternion())
     self.assertFalse(Quaternion(scalar=0.0))
     self.assertTrue(~Quaternion(scalar=0.0))
     self.assertFalse(~Quaternion())
Example #22
0
    def _add_gt_annotations_Car3d(self, idx, image_shape, im_scale):
        """Add ground truth annotation metadata to an roidb entry."""

        morph_ellipse_kernel = cv2.getStructuringElement(
            cv2.MORPH_ELLIPSE, (5, 5))

        # initiate the lists
        masks = []
        segms = []
        boxes = []
        car_cat_classes = []
        poses = []
        quaternions = []

        car_pose_file = os.path.join(self.dataset_dir, 'car_poses',
                                     self.img_list_all[idx] + '.json')
        assert os.path.exists(car_pose_file), 'Label \'{}\' not found'.format(
            car_pose_file)
        with open(car_pose_file, 'r') as f:
            data = f.read()

        car_poses = json.loads(data)
        for i, car_pose in enumerate(car_poses):
            car_name = self.car_id2name[car_pose['car_id']].name
            car = self.car_models[car_name]
            pose = np.array(car_pose['pose'])

            # Fix for kaggle pku-autonomous-driving
            yaw, pitch, roll = pose[:3]
            yaw, pitch, roll = -pitch, -yaw, -roll
            pose[:3] = yaw, pitch, roll

            # project 3D points to 2d image plane
            rot_mat = euler_angles_to_rotation_matrix(pose[:3])
            rvect, _ = cv2.Rodrigues(rot_mat)
            imgpts, jac = cv2.projectPoints(np.float32(car['vertices']),
                                            rvect,
                                            pose[3:],
                                            self.intrinsic_mat,
                                            distCoeffs=None)
            imgpts = np.int32(imgpts).reshape(-1, 2)

            # rasterize the projected mesh faces into a binary mask
            mask = np.zeros((image_shape[1], image_shape[0]))
            for face in car['faces'] - 1:
                pts = np.array([[imgpts[idx, 0], imgpts[idx, 1]]
                                for idx in face], np.int32)
                pts = pts.reshape((-1, 1, 2))
                #cv2.polylines(mask, [pts], True, (255, 255, 255))
                cv2.drawContours(mask, [pts], 0, (255, 255, 255), -1)

            # Find mask
            ground_truth_binary_mask = np.zeros(mask.shape, dtype=np.uint8)
            ground_truth_binary_mask[mask == 255] = 1
            if self.cfg['INPUT']['BOTTOM_HALF']:
                ground_truth_binary_mask = ground_truth_binary_mask[
                    int(mask.shape[0] / 2):, :]

            ground_truth_binary_mask = cv2.morphologyEx(
                ground_truth_binary_mask, cv2.MORPH_CLOSE,
                morph_ellipse_kernel)

            fortran_ground_truth_binary_mask = np.asfortranarray(
                ground_truth_binary_mask)
            encoded_ground_truth = maskUtils.encode(
                fortran_ground_truth_binary_mask)

            contours = measure.find_contours(
                np.array(ground_truth_binary_mask), 0.5)
            mask_instance = []
            # if len(contours) > 1:
            #     print("Image with problem: %d, %s" % (idx, self.img_list_all[idx]))
            for contour in contours:
                contour = np.flip(contour, axis=1)
                segmentation = contour.ravel().tolist()
                mask_instance.append(segmentation)

            masks.append(mask_instance)
            segms.append(encoded_ground_truth)
            #cv2.imwrite(os.path.join('/media/SSD_1TB/ApolloScape', self.img_list_all[idx] + '_' + str(i) + '_.jpg'), mask)
            x1, y1 = imgpts[:, 0].min(), imgpts[:, 1].min()
            x2, y2 = imgpts[:, 0].max(), imgpts[:, 1].max()
            if self.cfg['INPUT']['BOTTOM_HALF']:
                y1 -= int(image_shape[1] / 2)
                y2 -= int(image_shape[1] / 2)
            boxes.append([x1, y1, x2, y2])

            #q = euler_angles_to_quaternions(np.array(car_pose['pose'][:3]))[0]
            q = Quaternion(matrix=rot_mat).q
            if self.cfg['MODEL']['ROI_CAR_CLS_ROT_HEAD'][
                    'QUATERNION_HEMISPHERE']:
                q = quaternion_upper_hemispher(q)
            quaternions.append(q)

            car_cat_classes.append(
                np.where(self.unique_car_models == car_pose['car_id'])[0][0])
            poses.append(car_pose['pose'])

        if self.cfg['INPUT']['BOTTOM_HALF']:
            target = BoxList(boxes, (image_shape[0], int(image_shape[1] / 2)),
                             mode="xyxy")
            masks = SegmentationMask(masks,
                                     (image_shape[0], int(image_shape[1] / 2)))
        else:
            target = BoxList(boxes, image_shape, mode="xyxy")
            masks = SegmentationMask(masks, image_shape)

        car_cat_classes = torch.tensor(car_cat_classes)
        target.add_field('car_cat_classes', car_cat_classes)
        target.add_field("masks", masks)

        quaternions = torch.tensor(quaternions)
        target.add_field("quaternions", quaternions)
        poses = torch.tensor(poses)
        target.add_field("poses", poses)

        labels = np.ones(car_cat_classes.shape)
        labels = torch.tensor(labels)
        target.add_field("labels", labels)

        im_scales = im_scale * np.ones(car_cat_classes.shape)
        im_scales = torch.tensor(im_scales)
        target.add_field("im_scales", im_scales)

        target = target.clip_to_image(remove_empty=True)
        # for evaluation purpose
        if not self.training:
            target.add_field("segms", segms)
        return target
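quaternion_upper_hemispher is imported from elsewhere in this codebase; conceptually it picks one representative of the pair q and -q, which encode the same rotation. A minimal sketch of that idea (an assumption, not the project's exact implementation):

import numpy as np

def quaternion_upper_hemisphere_sketch(q):
    # Flip the sign so the first non-zero component is positive; q and -q
    # represent the same rotation, so this picks a canonical representative.
    q = np.asarray(q, dtype=float)
    for component in q:
        if component > 0:
            return q
        if component < 0:
            return -q
    return q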
Example #23
0
 def test_complex(self):
     a, b, c, d = randomElements()
     q = Quaternion(a, b, c, d)
     self.assertEqual(complex(q), complex(a, b))
Example #24
0
from Lib import *
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import axes3d
from pyquaternion import Quaternion

v = [0, 0, 1]
axis = [0, 1, 0]
theta = -np.pi / 2  # radians
vX, vY, vZ = Quaternion(axis=axis, angle=theta).rotate(v)

print("rotated vector: {}".format((vX, vY, vZ)))

fig = plt.figure()
ax = fig.add_subplot(2, 1, 1, projection='3d')

ax.quiver(0, 0, 0, v[0], v[1], v[2], length=1, normalize=True)
ax.quiver(0, 0, 0, vX, vY, vZ, length=1, normalize=True)
ax.set_xlabel("x axis")
ax.set_ylabel("y axis")
ax.set_zlabel("z axis")

ax.set_xlim(-3, 3)
ax.set_ylim(-3, 3)
ax.set_zlim(-3, 3)

plt.show()
# print(np.dot(rotation_matrix(axis,theta), v))
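# A quick, hedged check (not part of the original script): the quaternion
# rotation should match multiplying the vector by q's rotation matrix.
q = Quaternion(axis=axis, angle=theta)
print(np.allclose(q.rotate(v), np.dot(q.rotation_matrix, v)))  # expected: True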
Example #25
0
 def test_unary_minus(self):
     a, b, c, d = randomElements()
     q = Quaternion(a, b, c, d)
     self.assertEqual(-q, Quaternion(-a, -b, -c, -d))
Example #26
0
    def get_relative_position(self):

        if self.tracking_mode == "OSC_direct":
            try:
                angles = self.osc_input_server.get_current_angle()
            except:
                return self.fallback_angle[0], self.fallback_angle[
                    1], self.fallback_angle[2]

            return angles[0], angles[1], angles[2]

        try:
            if self.acoustical_center_pos is not None:
                pose_head, _ = self.get_tracker_data(only_tracker_1=True)
                translation_speaker = self.acoustical_center_pos
            else:
                # if speaker is not calibrated yet, the live tracking speaker pose is used
                pose_head, pose_speaker = self.get_tracker_data()
                translation_speaker = np.array([
                    pose_speaker.m[0][3], pose_speaker.m[1][3],
                    pose_speaker.m[2][3]
                ])

        except:
            return self.fallback_angle[0], self.fallback_angle[
                1], self.fallback_angle[2]

        if pose_head != False:

            # MYSTERIOUS PROBLEM: the Y and Z axes are flipped in the tracker pose from OpenVR most of the time.
            mystery_flag = True  # for testing/debugging

            # STEP1: get the correct translation between head and speaker

            translation_head = np.array(
                [pose_head.m[0][3], pose_head.m[1][3], pose_head.m[2][3]])

            if self.head_dimensions['ear_center'] is not None:
                offset_x = self.head_dimensions['ear_center'][0] * np.array(
                    [pose_head.m[0][0], pose_head.m[1][0], pose_head.m[2][0]])
                offset_y = self.head_dimensions['ear_center'][1] * np.array(
                    [pose_head.m[0][1], pose_head.m[1][1], pose_head.m[2][1]])
                offset_z = self.head_dimensions['ear_center'][2] * np.array(
                    [pose_head.m[0][2], pose_head.m[1][2], pose_head.m[2][2]])

                translation_head = translation_head + offset_x + offset_y + offset_z

            # get vector pointing from head center to speaker center
            transvec = translation_speaker - translation_head

            # STEP2: get the correct orientation of the head

            # get the base tracker rotation in quaternions
            fwd = np.array(
                [pose_head.m[0][2], pose_head.m[1][2], pose_head.m[2][2]])
            up = np.array(
                [pose_head.m[0][1], pose_head.m[1][1], pose_head.m[2][1]])
            side = np.array(
                [pose_head.m[0][0], pose_head.m[1][0], pose_head.m[2][0]])

            r_w = np.sqrt(abs(1 + side[0] + up[1] + fwd[2])) / 2
            r_x = (up[2] - fwd[1]) / (4 * r_w)
            r_y = (fwd[0] - side[2]) / (4 * r_w)
            r_z = (side[1] - up[0]) / (4 * r_w)

            head_rotation = Quaternion(r_w, r_x, r_y, r_z)

            # apply calibrated rotation
            rotation = head_rotation * self.calibrationRotation

            # make "new' direction vectors by rotating a normed set of orthogonal direction vectors

            if mystery_flag:
                side = np.array([1.0, 0.0, 0.0])
                up = np.array([0.0, 0.0, -1.0])  # I don't know why, but the y and z axes are flipped somehow
                fwd = np.array([0.0, 1.0, 0.0])
            else:
                side = np.array([1.0, 0.0, 0.0])
                up = np.array([0.0, 1.0, 0.0])
                fwd = np.array([0.0, 0.0, 1.0])

            side = rotation.rotate(side)
            up = rotation.rotate(up)
            fwd = rotation.rotate(fwd)

            # STEP3: calculate direction vector to speaker relative to head coordinate system
            # (head coordinate system = side, up, fwd)

            direction_vector = np.array([
                np.inner(side, transvec),
                np.inner(up, transvec),
                np.inner(fwd, transvec)
            ])
            radius = np.linalg.norm(direction_vector)
            direction_vector = direction_vector / radius

            # get spherical coordinates from direction vector
            az = np.rad2deg(
                np.arctan2(-direction_vector[0], -direction_vector[2]))
            if az < 0:
                az += 360

            el = np.rad2deg(np.arccos(-direction_vector[1]))
            el = el - 90

            #print(az, el, radius)
            return az, el, radius
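The r_w, r_x, r_y, r_z expressions above are the standard rotation-matrix-to-quaternion conversion. pyquaternion can do the same directly from the assembled matrix, which avoids hand-coding it; a small sketch with placeholder column vectors:

import numpy as np
from pyquaternion import Quaternion

# side, up, fwd as read from pose_head.m above (placeholder values here).
side = np.array([1.0, 0.0, 0.0])
up = np.array([0.0, 1.0, 0.0])
fwd = np.array([0.0, 0.0, 1.0])

R = np.column_stack([side, up, fwd])
head_rotation = Quaternion(matrix=R)  # same result as the manual formula
print(head_rotation)  # ~ (1, 0, 0, 0) for the identity matrix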
Example #27
0
 def test_is_unit(self):
     q1 = Quaternion()
     q2 = Quaternion(1.0, 0, 0, 0.0001)
     self.assertTrue(q1.is_unit())
     self.assertFalse(q2.is_unit())
     self.assertTrue(q2.is_unit(0.001))
Example #28
0
    def __init__(self):

        self.fallback_angle = np.array([0.0, 0.0, 1.0])
        self.trackers_switched = False
        self.counter = 0

        self.head_dimensions = {
            'ear_pos_l': None,
            'ear_pos_r': None,
            'ear_center': None,
            'left_pos': None,
            'right_pos': None,
            'head_width': None,
            'front_pos': None,
            'back_pos': None,
            'head_length': None
        }

        self.tracking_mode = "Vive"
        self.osc_input_server = osc_input.OSCInputServer()
        self.osc_input_server.start_listening()

        self.acoustical_center_pos = None

        self.vr_system_initialized = False

        # initialize open VR
        try:
            self.vr_system = init(VRApplication_Background)
        except:
            return

        self.vr_system_initialized = True
        # get available trackers and controllers

        for deviceID in range(k_unMaxTrackedDeviceCount):
            dev_class = self.vr_system.getTrackedDeviceClass(deviceID)
            if (dev_class == TrackedDeviceClass_GenericTracker):
                if (not hasattr(self, 'tracker1')):
                    if self.vr_system.isTrackedDeviceConnected(deviceID):
                        self.tracker1 = Device(
                            TrackedDeviceClass_GenericTracker, deviceID)
                        self.tracker1.set_availability(True)
                elif (not hasattr(self, 'tracker2')):
                    if self.vr_system.isTrackedDeviceConnected(deviceID):
                        self.tracker2 = Device(
                            TrackedDeviceClass_GenericTracker, deviceID)
                        self.tracker2.set_availability(True)
            elif (dev_class == TrackedDeviceClass_Controller):
                if (not hasattr(self, 'controller1')):
                    self.controller1 = Device(TrackedDeviceClass_Controller,
                                              deviceID)
                    self.controller1.set_availability(
                        self.vr_system.isTrackedDeviceConnected(deviceID))
                elif (not hasattr(self, 'controller2')):
                    self.controller2 = Device(TrackedDeviceClass_Controller,
                                              deviceID)
                    self.controller2.set_availability(
                        self.vr_system.isTrackedDeviceConnected(deviceID))

        # if(hasattr(self, 'tracker1')):
        #     #print("Has tracker one!")
        #     if(self.tracker1.is_available()):
        #         pass
        #         #print("Tracker 1 Available")
        #
        # if (hasattr(self, 'tracker2')):
        #     #print("Has tracker two!")
        #     if (self.tracker2.is_available()):
        #         pass
        #         #print("Tracker 2 Available")
        #
        # if (hasattr(self, 'controller1')):
        #     #print("Has controller 1!")
        #     pass
        #
        # if (hasattr(self, 'controller2')):
        #     #print("Has controller 2!")
        #     pass

        self.calibrationRotation = Quaternion()
        self.calibrate_orientation()
Example #29
0
 def test_exp(self):
     from math import exp
     q = Quaternion(axis=[1,0,0], angle=pi)
     exp_q = Quaternion.exp(q)
     self.assertEqual(exp_q, exp(0) * Quaternion(scalar=cos(1.0), vector=[sin(1.0), 0,0]))
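For context, the quaternion exponential being tested is exp(s + v) = e^s (cos‖v‖ + (v/‖v‖) sin‖v‖); here q has scalar part ~0 and vector part (1, 0, 0), so the expected result is (cos 1, sin 1, 0, 0). A quick numeric check:

import numpy as np
from pyquaternion import Quaternion

q = Quaternion(axis=[1, 0, 0], angle=np.pi)  # elements ~ (0, 1, 0, 0)
expected = [np.cos(1.0), np.sin(1.0), 0.0, 0.0]
print(np.allclose(Quaternion.exp(q).elements, expected))  # True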
Example #30
0
    def plot_road_agents(self,
                         ax,
                         fig,
                         sample,
                         plot_list,
                         text_box,
                         sensor_info,
                         my_patch,
                         plot_traj=False,
                         contour_func=None,
                         animated_agent=False):

        road_agents_in_patch = {'pedestrians': [], 'vehicles': []}
        for ann_token in sample['anns']:
            ann = self.nusc.get('sample_annotation', ann_token)
            category = ann['category_name']
            if len(ann['attribute_tokens']) != 0:
                attribute = self.nusc.get('attribute',
                                          ann['attribute_tokens'][0])['name']
            else:
                attribute = ""

            pos = [ann['translation'][0], ann['translation'][1]]
            instance_token = ann['instance_token']
            sample_token = sample['token']
            #### Plot other agents ####
            valid_agent = False
            if 'other_cars' in plot_list and 'vehicle' in category and 'parked' not in attribute and self.in_my_patch(
                    pos, my_patch):
                valid_agent = True
                agent_yaw = Quaternion(ann['rotation'])
                agent_yaw = quaternion_yaw(agent_yaw)
                agent_yaw = angle_of_rotation(agent_yaw)
                agent_yaw = np.rad2deg(agent_yaw)
                self.plot_elements(pos,
                                   agent_yaw,
                                   'other_cars',
                                   ax,
                                   animated_agent=animated_agent)

                car_info = {
                    'instance_token': ann['instance_token'],
                    'category': category,
                    'attribute': attribute,
                    'translation': pos,
                    'rotation_quat': ann['rotation'],
                    'rotation_deg': agent_yaw
                }
                road_agents_in_patch['vehicles'].append(car_info)

                if text_box:
                    self.plot_text_box(
                        ax, category,
                        [ann['translation'][0] + 1.2, ann['translation'][1]])
                    self.plot_text_box(ax, attribute, [
                        ann['translation'][0] + 1.2,
                        ann['translation'][1] - 1.2
                    ])
                    self.plot_text_box(ax, ann['instance_token'], [
                        ann['translation'][0] + 1.2,
                        ann['translation'][1] + 1.2
                    ])

            #### Plot pedestrians ####
            if 'pedestrian' in plot_list and 'pedestrian' in category and not 'stroller' in category and not 'wheelchair' in category and self.in_my_patch(
                    pos, my_patch):
                valid_agent = True
                agent_yaw = Quaternion(ann['rotation'])
                agent_yaw = quaternion_yaw(agent_yaw)
                agent_yaw = angle_of_rotation(agent_yaw)
                agent_yaw = np.rad2deg(agent_yaw)

                self.plot_elements(pos, agent_yaw, 'pedestrian', ax)
                if text_box:
                    self.plot_text_box(
                        ax, category,
                        [ann['translation'][0] + 1.2, ann['translation'][1]])
                    self.plot_text_box(ax, attribute, [
                        ann['translation'][0] + 1.2,
                        ann['translation'][1] - 1.2
                    ])

            if valid_agent and plot_traj:
                agent_future = self.helper.get_future_for_agent(
                    instance_token,
                    sample_token,
                    self.na_config['pred_horizon'],
                    in_agent_frame=False,
                    just_xy=True)

                agent_past = self.helper.get_past_for_agent(
                    instance_token,
                    sample_token,
                    self.na_config['obs_horizon'],
                    in_agent_frame=False,
                    just_xy=True)

                if len(agent_future) > 0:
                    ax.scatter(agent_future[:, 0],
                               agent_future[:, 1],
                               s=10,
                               c='y',
                               alpha=1.0,
                               zorder=200)
                if len(agent_past) > 0:
                    ax.scatter(agent_past[:, 0],
                               agent_past[:, 1],
                               s=10,
                               c='k',
                               alpha=0.2,
                               zorder=200)

        return road_agents_in_patch
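quaternion_yaw and angle_of_rotation come from the nuScenes prediction utilities; conceptually, yaw is recovered by rotating the x-axis with the quaternion and taking its planar angle. A minimal sketch of that idea (an assumption about the helper, not its exact source):

import numpy as np
from pyquaternion import Quaternion

def quaternion_yaw_sketch(q):
    # Rotate the x-axis and measure its angle in the x-y plane.
    vx, vy, _ = q.rotate(np.array([1.0, 0.0, 0.0]))
    return float(np.arctan2(vy, vx))

print(np.degrees(quaternion_yaw_sketch(Quaternion(axis=[0, 0, 1], degrees=30))))  # ~30.0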