Example #1
def generate_new_cameras_hemisphere(radius,
                                    lookat_point,
                                    pitch=[20, 60, 20],
                                    yaw=[0, 350, 36],
                                    yaw_list=None,
                                    rot_format="quat"):
    """
    pitch: elevation [min_pitch, max_pitch, d_pitch]
    yaw: azimuth [min_yaw, max_yaw, d_yaw]
    """
    min_pitch, max_pitch, d_pitch = pitch
    min_yaw, max_yaw, d_yaw = yaw

    xyz_points = []
    quats = []
    if yaw_list is None:
        yaw_list = range(min_yaw, max_yaw + 1, d_yaw)
    for pitch in range(min_pitch, max_pitch + 1, d_pitch):
        for yaw in yaw_list:
            mat_yaw = transformations.euler_matrix(0, 0, math.radians(yaw),
                                                   'rxyz')
            mat_pitch = transformations.euler_matrix(0, math.radians(-pitch),
                                                     0, 'rxyz')

            # Camera convention: x points right, y points up, z is the inverse
            # look-at direction; the unit axes below are rotated into this frame.
            x_vector = np.zeros((4), dtype=np.float32)
            x_vector[0] = 1

            y_vector = np.zeros((4), dtype=np.float32)
            y_vector[1] = 1

            z_vector = np.zeros((4), dtype=np.float32)
            z_vector[2] = 1

            z_vec_out = mat_yaw.dot(mat_pitch.dot(x_vector))
            x_vec_out = mat_yaw.dot(mat_pitch.dot(y_vector))
            y_vec_out = mat_yaw.dot(mat_pitch.dot(z_vector))

            cam_loc = z_vec_out * radius
            rot_mat = np.eye(4, dtype=np.float32)
            rot_mat[:3, 0] = x_vec_out[:3]
            rot_mat[:3, 1] = y_vec_out[:3]
            rot_mat[:3, 2] = z_vec_out[:3]

            if rot_format == "quat":
                quat = transformations.quaternion_from_matrix(rot_mat)

            else:
                quat = np.reshape(rot_mat[:3, :3], [-1])
            cam_loc = lookat_point + cam_loc[:3]
            xyz_points.append(cam_loc)
            quats.append(quat)

    quats = np.stack(quats, axis=0)
    xyz_points = np.stack(xyz_points, axis=0)

    return xyz_points, quats
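A minimal usage sketch (not from the original source), assuming numpy is imported as np and the same transformations module is on the path:

import numpy as np

# Hypothetical call: cameras on a 2 m hemisphere around the origin, using the
# default elevation/azimuth grid (3 pitch values x 10 yaw values = 30 cameras).
xyz, quats = generate_new_cameras_hemisphere(radius=2.0,
                                             lookat_point=np.zeros(3),
                                             rot_format="quat")
print(xyz.shape, quats.shape)  # (30, 3) (30, 4)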
Example #2
    def handle(self):
        global rx, ry, rz, crx, cry, crz, R, cR, paramInit

        data = self.request[0].strip()
        print(data)
        (rx,ry,rz,crx,cry,crz) = map(lambda x: float(x), data.split(','))

        R = euler_matrix(rx,ry,rz)[0:3,0:3].transpose()
        cR = euler_matrix(crx, cry, crz)[0:3,0:3]
        paramInit = True
Example #3
    def handle(self):
        global rx, ry, rz, crx, cry, crz, R, cR, paramInit

        data = self.request[0].strip()
        print(data)
        (rx, ry, rz, crx, cry, crz) = map(lambda x: float(x), data.split(','))

        R = euler_matrix(rx, ry, rz)[0:3, 0:3].transpose()
        cR = euler_matrix(crx, cry, crz)[0:3, 0:3]
        paramInit = True
Example #4
def GetQ50CameraParams():
    cam = {}
    for i in [601, 604]:
        cam[i] = {}
        cam[i]['R_to_c_from_i'] = np.array([[-1, 0, 0], [0, 0, -1], [0, -1,
                                                                     0]])

        if i == 601:  # right camera
            R_to_c_from_l_in_camera_frame = euler_matrix(
                0.022500, -0.006000, -0.009500)[0:3, 0:3]
            # R_to_c_from_l_in_camera_frame = euler_matrix(0.037000,0.025500,-0.001000)[0:3,0:3]
            cam[i][
                'R_to_c_from_l_in_camera_frame'] = R_to_c_from_l_in_camera_frame
            cam[i]['displacement_from_l_to_c_in_lidar_frame'] = \
                np.array([-0.522, 0.2925, 0.273])
            cam[i]['E'] = np.eye(4)
            cam[i]['width'] = 1280
            cam[i]['height'] = 800
            cam[i]['fx'] = 1.95205250e+03
            cam[i]['fy'] = 1.95063141e+03
            cam[i]['cu'] = 6.54927553e+02
            cam[i]['cv'] = 4.01883041e+02
            cam[i]['distort'] = np.array([
                -3.90035052e-01, 6.85186191e-01, 3.22989088e-03, 1.02017567e-03
            ])
            # cam[i]['fx'] = 2.03350359e+03
            # cam[i]['fy'] = 2.03412303e+03
            # cam[i]['cu'] = 5.90322443e+02
            # cam[i]['cv'] = 4.30538351e+02
            # cam[i]['distort'] = np.array([-4.36806404e-01, 2.16674616e+00, -1.02905742e-02, 1.81030435e-03, -1.05642273e+01])

        elif i == 604:  # left camera
            R_to_c_from_l_in_camera_frame = euler_matrix(
                0.025000, -0.011500, 0.000000)[0:3, 0:3]
            cam[i][
                'R_to_c_from_l_in_camera_frame'] = R_to_c_from_l_in_camera_frame
            cam[i]['displacement_from_l_to_c_in_lidar_frame'] = \
                np.array([-0.522, -0.2925, 0.273])
            cam[i]['E'] = np.eye(4)
            cam[i]['width'] = 1280
            cam[i]['height'] = 800
            cam[i]['fx'] = 1.95205250e+03
            cam[i]['fy'] = 1.95063141e+03
            cam[i]['cu'] = 6.54927553e+02
            cam[i]['cv'] = 4.01883041e+02
            cam[i]['distort'] = np.array([
                -3.90035052e-01, 6.85186191e-01, 3.22989088e-03, 1.02017567e-03
            ])

        cam[i]['KK'] = np.array([[cam[i]['fx'], 0.0, cam[i]['cu']],
                                 [0.0, cam[i]['fy'], cam[i]['cv']],
                                 [0.0, 0.0, 1.0]])
        cam[i]['f'] = (cam[i]['fx'] + cam[i]['fy']) / 2

    return cam
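A hedged illustration, not part of the snippet, of how the intrinsic matrix KK built above is typically used: divide a camera-frame point by its depth, then apply KK to get pixel coordinates (lens distortion ignored here).

import numpy as np

cam = GetQ50CameraParams()[601]       # right camera
p_cam = np.array([1.0, 0.5, 10.0])    # hypothetical point in the camera frame (metres)
uv = cam['KK'].dot(p_cam / p_cam[2])  # perspective divide, then intrinsics
print(uv[:2])                         # pixel coordinates (u, v)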
Example #5
 def test_euler_from_matrix_random_angles_all_axes(self):
     angles = (4*math.pi) * (numpy.random.random(3) - 0.5)
     successes = []
     failures = []
     for axes in transformations._AXES2TUPLE.keys():
         R0 = transformations.euler_matrix(axes=axes, *angles)
         R1 = transformations.euler_matrix(axes=axes, *transformations.euler_from_matrix(R0, axes))
         if numpy.allclose(R0, R1):
             successes.append(axes)
         else:
             failures.append(axes)
     self.assertEquals(0, len(failures), "Failed for:\n%sand succeeded for:\n%s" % (failures, successes))
Example #6
def dead_simple_offset(camera_x, camera_y, yaw, alt):
    x_m, y_m = calculate_pixel_offset(camera_x, camera_y, alt)

    # Sketchy frame switching
    point_mat = numpy.array([[x_m, y_m, 0, 0]])

    rotation_mat = euler_matrix(0, 0, yaw, 'sxyz')
    static_tf_camera_to_world = euler_matrix(0, 0, math.radians(90), 'sxyz')

    return numpy.dot(
        numpy.dot(numpy.dot(point_mat, camera_to_world_tf()), rotation_mat),
        static_tf_camera_to_world)
Example #7
def process_april_tag(pose):
    ## Play around here
    ## pose is the tag with respect to the robot
    ## T_a is the transformation of the april tag with respect to the map
    T_a = tf.translation_matrix([-X * tile_size, -tag_size * 3/4, y*tile_size])
    R_a = tf.euler_matrix(0, angle, 0)
    T_m_a = tf.concatenate_matrices(T_a, R_a)
    ## Flip the robot here, to account for the change in angles
    T_r_a = np.dot(pose, tf.euler_matrix(0, np.pi, 0))
    ##print(T_r_a)
    T_a_r = np.linalg.inv(T_r_a)
    T_m_r = np.dot(T_m_a, T_a_r)
    print(pose @ T_m_r)
Example #8
def create_euler_matrix(angles, order):
    m = np.eye(4, dtype=np.float32)
    for idx, d in enumerate(order):
        a = np.radians(angles[idx])
        d = d[0].lower()
        local_rot = np.eye(4)
        if d =="x":
            local_rot = euler_matrix(a,0,0, AXES)
        elif d =="y":
            local_rot = euler_matrix(0,a,0, AXES)
        elif d =="z":
            local_rot = euler_matrix(0,0,a, AXES)
        m = np.dot(local_rot, m)
    return m
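A usage sketch for the function above (illustrative only); it assumes AXES is a module-level transformations axes string and that order holds BVH-style channel names:

AXES = 'sxyz'  # assumed axes convention
m = create_euler_matrix([90.0, 0.0, 45.0],
                        ['Xrotation', 'Yrotation', 'Zrotation'])
# Each local rotation is left-multiplied in the loop, so the z rotation is applied last.
print(m.round(3))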
Example #9
File: bvh.py Project: eherr/anim_utils
 def convert_motion_rotation_order(self, rotation_order_str):
     new_frames = np.zeros(self.frames.shape)
     for i in range(len(new_frames)):
         for node_name, node in self.node_names.items():
             if 'End' not in node_name:
                 if len(node['channels']) == 6:
                     rot_mat = euler_matrix(*np.deg2rad(self.frames[i, node['channel_indices'][3:]]),
                                            axes=node['rotation_order'])
                     new_frames[i, node['channel_indices'][:3]] = self.frames[i, node['channel_indices'][:3]]
                     new_frames[i, node['channel_indices'][3:]] = np.rad2deg(euler_from_matrix(rot_mat, rotation_order_str))
                 else:
                     rot_mat = euler_matrix(*np.deg2rad(self.frames[i, node['channel_indices']]),
                                            axes=node['rotation_order'])
                     new_frames[i, node['channel_indices']] = np.rad2deg(euler_from_matrix(rot_mat, rotation_order_str))
     self.frames = new_frames
Example #10
def GetQ50CameraParams():
    cam = {}
    for i in [601, 604]:
        cam[i] = {}
        cam[i]['R_to_c_from_i'] = np.array([[-1, 0, 0],
                                            [0, 0, -1],
                                            [0, -1, 0]])

        if i == 601: # right camera
            R_to_c_from_l_in_camera_frame = euler_matrix(0.022500,-0.006000,-0.009500)[0:3,0:3]
            # R_to_c_from_l_in_camera_frame = euler_matrix(0.037000,0.025500,-0.001000)[0:3,0:3]
            cam[i]['R_to_c_from_l_in_camera_frame'] = R_to_c_from_l_in_camera_frame
            cam[i]['displacement_from_l_to_c_in_lidar_frame'] = \
                np.array([-0.522, 0.2925, 0.273])
            cam[i]['E'] = np.eye(4)
            cam[i]['width'] = 1280
            cam[i]['height'] = 800
            cam[i]['fx'] = 1.95205250e+03
            cam[i]['fy'] = 1.95063141e+03
            cam[i]['cu'] = 6.54927553e+02
            cam[i]['cv'] = 4.01883041e+02
            cam[i]['distort'] = np.array([-3.90035052e-01, 6.85186191e-01, 3.22989088e-03, 1.02017567e-03])
            # cam[i]['fx'] = 2.03350359e+03
            # cam[i]['fy'] = 2.03412303e+03
            # cam[i]['cu'] = 5.90322443e+02
            # cam[i]['cv'] = 4.30538351e+02
            # cam[i]['distort'] = np.array([-4.36806404e-01, 2.16674616e+00, -1.02905742e-02, 1.81030435e-03, -1.05642273e+01])

        elif i == 604: # left camera
            R_to_c_from_l_in_camera_frame = euler_matrix(0.025000,-0.011500,0.000000)[0:3,0:3]
            cam[i]['R_to_c_from_l_in_camera_frame'] = R_to_c_from_l_in_camera_frame
            cam[i]['displacement_from_l_to_c_in_lidar_frame'] = \
                np.array([-0.522, -0.2925, 0.273])
            cam[i]['E'] = np.eye(4)
            cam[i]['width'] = 1280
            cam[i]['height'] = 800
            cam[i]['fx'] = 1.95205250e+03
            cam[i]['fy'] = 1.95063141e+03
            cam[i]['cu'] = 6.54927553e+02
            cam[i]['cv'] = 4.01883041e+02
            cam[i]['distort'] = np.array([-3.90035052e-01, 6.85186191e-01, 3.22989088e-03, 1.02017567e-03])

        cam[i]['KK'] = np.array([[cam[i]['fx'], 0.0, cam[i]['cu']],
                                 [0.0, cam[i]['fy'], cam[i]['cv']],
                                 [0.0, 0.0, 1.0]])
        cam[i]['f'] = (cam[i]['fx'] + cam[i]['fy']) / 2

    return cam
Example #11
    def update_local_transformation(self):
        """
        update local transformation w.r.t element's parent

        .. seealso:: VRML transform calculation http://www.web3d.org/x3d/specifications/vrml/ISO-IEC-14772-VRML97/part1/nodesRef.html#Transform
        """
        if self.jointType in ["free", "freeflyer"]:
            self.localTransformation = tf.euler_matrix(self.rpy[0], self.rpy[1], self.rpy[2])

            scale = [1,1,1]
            if self.parent:
                scale = self.cumul_scale()
            scale_translation = [self.translation[i]*scale[i] for i in range(3)]
            self.localTransformation[0:3,3]=numpy.array(scale_translation)+\
                numpy.dot(numpy.eye(3)-self.localTransformation[0:3,:3],numpy.array(self.center))

        elif ( type(self) == Joint and self.jointType in [ "rotate", "rotation", "revolute"]
               and self.jointId >= 0):
            if self.jointAxis in ["x","X"]:
                axis = [1, 0, 0]
            elif self.jointAxis in ["y","Y"]:
                axis = [0, 1, 0]
            elif self.jointAxis in ["z","Z"]:
                axis = [0, 0, 1]
            else:
                raise RuntimeError("Invalid joint axis {0}".format(self.jointAxis))
            self.localR2= tf.rotation_matrix(self.angle, axis)[:3,:3]
            self.localTransformation[0:3,0:3] = numpy.dot(self.localR1, self.localR2)
Example #12
    def __load_imu(self):
        num_frames = self.get_num_frames()

        for i in range(0, num_frames):
            wx = self.data_imu_kitti.oxts[i].packet.wx
            wy = self.data_imu_kitti.oxts[i].packet.wy
            wz = self.data_imu_kitti.oxts[i].packet.wz
            ax = self.data_imu_kitti.oxts[i].packet.ax
            ay = self.data_imu_kitti.oxts[i].packet.ay
            az = self.data_imu_kitti.oxts[i].packet.az

            self.imu_measurements.append(np.array([wx, wy, wz, ax, ay, az, ]))

            # now we need to simulate IMU measurements in the reverse direction
            roll = self.data_imu_kitti.oxts[i].packet.roll
            pitch = self.data_imu_kitti.oxts[i].packet.pitch
            yaw = self.data_imu_kitti.oxts[i].packet.yaw

            # for accelerometer need to remove gravity vector and negate the values
            # first put the acceleration measurement in earth frame
            # Rotation of car imu (c) with respect to the ground truth (g)
            R_gc = transformations.euler_matrix(yaw, pitch, roll, axes="rzyx")[0:3, 0:3]
            R_gc_inv = np.linalg.inv(R_gc)

            g_accel_cc = R_gc.dot(np.array([ax, ay, az]))
            g_accel_cc[2] -= scipy.constants.g  # subtract gravity
            g_accel_cc = -g_accel_cc
            g_accel_cc[2] += scipy.constants.g  # add gravity back in
            c_accel_cc_reversed = R_gc_inv.dot(g_accel_cc)

            imu_reversed = np.array(
                    [-wx, -wy, -wz, c_accel_cc_reversed[0], c_accel_cc_reversed[1], c_accel_cc_reversed[2]])
            self.imu_measurements_reversed.append(imu_reversed)
Example #13
 def setRotationIndex(self, index, angle, useQuat):
     """
     Set the rotation for one of the three rotation channels, either as a
     quaternion or an euler matrix. index should be 1, 2 or 3, representing
     the x, y or z axis respectively.
     """
     if useQuat:
         quat = tm.quaternion_from_matrix(self.matPose)
         log.debug("%s", str(quat))
         quat[index] = angle/1000
         log.debug("%s", str(quat))
         _normalizeQuaternion(quat)
         log.debug("%s", str(quat))
         self.matPose = tm.quaternion_matrix(quat)
         return quat[0]*1000
     else:
         angle = angle*D
         ax,ay,az = tm.euler_from_matrix(self.matPose, axes='sxyz')
         if index == 1:
             ax = angle
         elif index == 2:
             ay = angle
         elif index == 3:
             az = angle
         mat = tm.euler_matrix(ax, ay, az, axes='sxyz')
         self.matPose[:3,:3] = mat[:3,:3]
         return 1000.0
Example #14
def GetQ50LidarParams():
    params = {}
    params['R_from_i_to_l'] = euler_matrix(-0.005, -0.0081, -0.0375)[0:3, 0:3]
    params['T_from_l_to_i'] = np.eye(4)
    params['T_from_l_to_i'][0:3, 0:3] = params['R_from_i_to_l'].transpose()

    return params
Example #15
def GetQ50LidarParams():
    params = {}
    params['R_from_i_to_l'] = euler_matrix(-0.04, -0.0146, -0.0165)[0:3, 0:3]
    params['T_from_l_to_i'] = np.eye(4)
    params['T_from_l_to_i'][0:3, 0:3] = params['R_from_i_to_l'].transpose()
    params['height'] = 2.0
    return params
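A quick check, added for illustration, that the transpose used above really inverts the rotation block (R.dot(R.T) equals the identity for any rotation matrix):

import numpy as np

params = GetQ50LidarParams()
R = params['R_from_i_to_l']
print(np.allclose(R.dot(params['T_from_l_to_i'][0:3, 0:3]), np.eye(3)))  # True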
Example #16
def test_wrist_ik(goal_roll, goal_pitch, goal_yaw, or_obj = None):
    from math import cos, sin, atan2
    from transformations import euler_matrix
    # try:
    #     import tf
    #     R = tf.transformations.euler_matrix(goal_roll, goal_pitch, goal_yaw)
    # except ImportError:
    #     print 'TF module not found, using local euler_matrix()'
    #     R = euler_matrix(goal_roll, goal_pitch, goal_yaw)

    R = euler_matrix(goal_roll, goal_pitch, goal_yaw)

    # do the math
    theta_4 = atan2(-R[1,2], -R[0,2])
    
    s_theta_5 = -R[0,2] * cos(theta_4) - R[1,2] * sin(theta_4)
    c_theta_5 = R[2,2]
    theta_5 = atan2(s_theta_5, c_theta_5)

    s_theta_6 = -R[0,0] * sin(theta_4) + R[1,0] * cos(theta_4)
    c_theta_6 = -R[0,1] * sin(theta_4) + R[1,1] * cos(theta_4)
    theta_6 = atan2(s_theta_6, c_theta_6)

    print('Solution:', (theta_4, theta_5, theta_6))

    # visualize if an OpenRave object is provided
    if or_obj:
        import show_axes

        # show goal
        show_axes.show_axes('goal', R, or_obj, 'small')

        show_wrist_fk(theta_4, theta_5, theta_6, or_obj)

    return 
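A hypothetical invocation of the solver above, with arbitrarily chosen goal angles:

from math import pi

test_wrist_ik(0.1, -0.4, pi / 6)  # prints one (theta_4, theta_5, theta_6) solution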
Example #17
 def transition(self, state, control):
     tf_prev = self.state_to_tfm(state)
     mat = np.matrix(tf.euler_matrix(0, 0, control[1], 'rxyz'))
     mat[0, 3] = control[0]
     # mat[1,3]= control[1];
     # mat[2,3]= control[2];
     return self.tfm_to_state(tf_prev * mat)
Example #18
def get_se3_from_image_transform(theta, trans, pivot, heightmap, bounds,
                                 pixel_size):
    position_center = pixel_to_position(np.flip(np.int32(np.round(pivot))),
                                        heightmap,
                                        bounds,
                                        pixel_size,
                                        skip_height=False)
    new_position_center = pixel_to_position(np.flip(
        np.int32(np.round(pivot + trans))),
                                            heightmap,
                                            bounds,
                                            pixel_size,
                                            skip_height=True)
    # Don't look up the z height, it might get augmented out of frame
    new_position_center = (new_position_center[0], new_position_center[1],
                           position_center[2])

    delta_position = np.array(new_position_center) - np.array(position_center)

    t_world_center = np.eye(4)
    t_world_center[0:3, 3] = np.array(position_center)

    t_centernew_center = np.eye(4)
    euler_zxy = (-theta, 0, 0)
    t_centernew_center[0:3,
                       0:3] = transformations.euler_matrix(*euler_zxy,
                                                           axes='szxy')[0:3,
                                                                        0:3]

    t_centernew_center_Tonly = np.eye(4)
    t_centernew_center_Tonly[0:3, 3] = -delta_position
    t_centernew_center = t_centernew_center @ t_centernew_center_Tonly

    t_world_centernew = t_world_center @ np.linalg.inv(t_centernew_center)
    return t_world_center, t_world_centernew
Example #19
def get_bbox2d_from_bbox3d(cam_proj_matrix, rx, ry, rz, sl, sh, sw, tx, ty,
                           tz):

    bbox_to_velo_matrix = np.matmul(
        transformations.translation_matrix([tx, ty, tz]),
        transformations.euler_matrix(rx, ry, rz, 'sxyz'))
    bbox_proj_matrix = np.matmul(cam_proj_matrix, bbox_to_velo_matrix)

    vertices = np.empty([2, 2, 2, 2])
    for k in [0, 1]:
        for l in [0, 1]:
            for m in [0, 1]:
                v = np.array([(k - 0.5) * sl, -(l) * sh, (m - 0.5) * sw, 1.])
                v = np.matmul(bbox_proj_matrix, v)

                vertices[k, l, m, :] = spherical_project(v[0], v[1], v[2])

    vertices = vertices.astype(np.int32)

    x1 = np.amin(vertices[:, :, :, 0])
    x2 = np.amax(vertices[:, :, :, 0])
    y1 = np.amin(vertices[:, :, :, 1])
    y2 = np.amax(vertices[:, :, :, 1])

    return (x1, y1, x2, y2)
Example #20
def prepare_source_and_target_rigid_3d(source_filename,
                                       noise_amp=0.001,
                                       n_random=500,
                                       orientation=np.deg2rad([0.0, 0.0,
                                                               30.0]),
                                       normals=False):
    source = o3.read_point_cloud(source_filename)
    source = o3.voxel_down_sample(source, voxel_size=0.005)
    print(source)
    target = copy.deepcopy(source)
    tp = np.asarray(target.points)
    rg = 1.5 * (tp.max(axis=0) - tp.min(axis=0))
    rands = (np.random.rand(n_random, 3) - 0.5) * rg + tp.mean(axis=0)
    target.points = o3.Vector3dVector(
        np.r_[tp + noise_amp * np.random.randn(*tp.shape), rands])
    ans = trans.euler_matrix(*orientation)
    target.transform(ans)
    if normals:
        o3.estimate_normals(source,
                            search_param=o3.geometry.KDTreeSearchParamHybrid(
                                radius=0.1, max_nn=50))
        o3.orient_normals_to_align_with_direction(source)
        o3.estimate_normals(target,
                            search_param=o3.geometry.KDTreeSearchParamHybrid(
                                radius=0.1, max_nn=50))
        o3.orient_normals_to_align_with_direction(target)
    return source, target
Example #21
 def setRotationIndex(self, index, angle, useQuat):
     """
     Set the rotation for one of the three rotation channels, either as a
     quaternion or an euler matrix. index should be 1, 2 or 3, representing
     the x, y or z axis respectively.
     """
     if useQuat:
         quat = tm.quaternion_from_matrix(self.matPose)
         log.debug("%s", str(quat))
         quat[index] = angle/1000
         log.debug("%s", str(quat))
         _normalizeQuaternion(quat)
         log.debug("%s", str(quat))
         self.matPose = tm.quaternion_matrix(quat)
         return quat[0]*1000
     else:
         angle = angle*D
         ax,ay,az = tm.euler_from_matrix(self.matPose, axes='sxyz')
         if index == 1:
             ax = angle
         elif index == 2:
             ay = angle
         elif index == 3:
             az = angle
         mat = tm.euler_matrix(ax, ay, az, axes='sxyz')
         self.matPose[:3,:3] = mat[:3,:3]
         return 1000.0
Example #22
def GetQ50LidarParams():
    params = { }
    params['R_from_i_to_l'] = euler_matrix(-0.04, -0.0146, -0.0165)[0:3,0:3]
    params['T_from_l_to_i'] = np.eye(4)
    params['T_from_l_to_i'][0:3,0:3] = params['R_from_i_to_l'].transpose()
    params['height'] = 2.0
    return params
Example #23
def euler_to_quaternion(euler_angles,
                        rotation_order=DEFAULT_ROTATION_ORDER,
                        filter_values=True):
    """Convert euler angles to quaternion vector [qw, qx, qy, qz]
    Parameters
    ----------
    * euler_angles: list of floats
    \tA list of ordered euler angles in degrees
    * rotation_order: Iterable
    \t a list that specifies the rotation axis corresponding to the values in euler_angles
    * filter_values: Bool
    \t enforce a unique rotation representation

    """
    assert len(euler_angles) == 3, ('The length of euler angles should be 3!')
    euler_angles = np.deg2rad(euler_angles)
    rotmat = euler_matrix(*euler_angles,
                          rotation_order_to_string(rotation_order))
    # convert rotation matrix R into quaternion vector (qw, qx, qy, qz)
    quat = quaternion_from_matrix(rotmat)
    # filter the quaternion see
    # http://physicsforgames.blogspot.de/2010/02/quaternions.html
    if filter_values:
        dot = np.sum(quat)
        if dot < 0:
            quat = -quat
    return [quat[0], quat[1], quat[2], quat[3]]
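An illustrative call, assuming DEFAULT_ROTATION_ORDER corresponds to x, y, z: a 90-degree rotation about x should give a quaternion close to [0.707, 0.707, 0.0, 0.0].

qw, qx, qy, qz = euler_to_quaternion([90.0, 0.0, 0.0])
print(round(qw, 3), round(qx, 3), round(qy, 3), round(qz, 3))  # 0.707 0.707 0.0 0.0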
Example #24
    def __init__(self, filename='ipm/camera_info.yaml'):
        self.info = self.load_camera_info(osp.join(PKG_PATH, filename))
        self.frame_width = self.info['width']
        self.frame_height = self.info['height']

        # Intrinsics
        self.K = np.zeros((3, 4))
        self.K[:, :3] = np.asarray(self.info['intrinsics']['K']).reshape(3, 3)
        self.D = np.asarray(self.info['intrinsics']['D'])

        # Extrinsics
        self.extrinsics_euler = np.radians(self.info['extrinsics']['rpy'])
        self.extrinsics_rotation = euler_matrix(self.extrinsics_euler[0],
                                                self.extrinsics_euler[1],
                                                self.extrinsics_euler[2])
        self.extrinsics_translation = translation_matrix(
            self.info['extrinsics']['position'])

        # Load calibration data if available
        if 'calibration' not in self.info:
            print(
                'ERROR: Calibration not performed, run InversePerspectiveMapping.calibrate() first!'
            )
            return
        else:
            print('Loading calibration data from file...')
            self.ipm_matrix = np.asarray(
                self.info['calibration']['ipm_matrix']).reshape(3, 3)
            self.ipm_matrix_inv = np.linalg.inv(self.ipm_matrix)
            self.ipm_image_dims = tuple(
                self.info['calibration']['ipm_image_dims'][::-1])
            self.ipm_px_to_m = self.info['calibration']['ipm_px_to_m']
            self.ipm_m_to_px = self.info['calibration']['ipm_m_to_px']
            self.calibration_ego_y = self.info['calibration'][
                'calibration_ego_y']
Example #25
def R_to_c_from_i(cam):
    R_to_c_from_i = cam['R_to_c_from_i']
    R_camera_pitch = euler_matrix(cam['rot_x'], cam['rot_y'], cam['rot_z'],
                                  'sxyz')[0:3, 0:3]
    R_to_c_from_i = dot(R_camera_pitch, R_to_c_from_i)

    return R_to_c_from_i
Example #26
def part_canon2urdf_from_urdf_dict(urdf_dict):
    """
        part_canon2urdf: from link frame to urdf frame in rest pose
        Assumption: ".obj"s are in the rest pose,
            which means, urdf_dict['link']['xyz'] + link position in rest pose = 0
            (.obj -> put at 'link''xyz' w.r.t. link frame;
             link frame -> put at link position in urdf frame (rest pose))

        Note the "k + 1" -- it's in accordance w/ get_mobility_urdf
    """
    num_parts = urdf_dict['num_links'] - 1  # exclude the base
    parts_canon2urdf = [None] * num_parts
    for k in range(num_parts):
        parts_urdf_pos = -np.array(urdf_dict['link']['xyz'][k + 1])
        parts_urdf_pos = np.reshape(parts_urdf_pos, (-1))

        parts_urdf_orn = np.array(urdf_dict['link']['rpy'][k + 1])
        parts_urdf_orn = np.reshape(parts_urdf_orn, (-1))

        center_joint_orn = parts_urdf_orn
        my_canon2urdf_r = euler_matrix(center_joint_orn[0],
                                       center_joint_orn[1],
                                       center_joint_orn[2])[:4, :4]
        my_canon2urdf_t = parts_urdf_pos
        my_canon2urdf_mat = my_canon2urdf_r
        my_canon2urdf_mat[:3, 3] = my_canon2urdf_t[:3]
        parts_canon2urdf[k] = my_canon2urdf_mat

    return parts_canon2urdf
Example #27
def GPSPos(GPSData, Camera, start_frame):
    roll_start = -deg2rad(start_frame[7])
    pitch_start = deg2rad(start_frame[8])
    yaw_start = -deg2rad(start_frame[9] + 90)

    psi = pitch_start
    cp = cos(psi)
    sp = sin(psi)
    theta = roll_start
    ct = cos(theta)
    st = sin(theta)
    gamma = yaw_start
    cg = cos(gamma)
    sg = sin(gamma)

    R_to_i_from_w = \
            array([[cg*cp-sg*st*sp, -sg*ct, cg*sp+sg*st*cp],
                  [sg*cp+cg*st*sp, cg*ct, sg*sp-cg*st*cp],
                  [-ct*sp, st, ct*cp]]).transpose()

    pts = WGS84toENU(start_frame[1:4], GPSData[:, 1:4])
    world_coordinates = pts
    pos_wrt_imu = dot(R_to_i_from_w, world_coordinates)
    R_to_c_from_i = Camera['R_to_c_from_i']
    R_camera_pitch = euler_matrix(Camera['rot_x'], Camera['rot_y'],\
            Camera['rot_z'], 'sxyz')[0:3,0:3]
    R_to_c_from_i = dot(R_camera_pitch, R_to_c_from_i)

    pos_wrt_camera = dot(R_to_c_from_i, pos_wrt_imu)

    pos_wrt_camera[0, :] += Camera['t_x']  #move to left/right
    pos_wrt_camera[1, :] += Camera['t_y']  #move up/down image
    pos_wrt_camera[2, :] += Camera['t_z']  #move away from cam
    return pos_wrt_camera
Example #28
 def update(self, amt, bone):
     target = amt.bones[self.subtar]
     if self.ownsp == 'WORLD':
         halt
     else:
         if self.map_from != 'ROTATION' or self.map_to != 'ROTATION':
             halt
         brad = tm.euler_from_matrix(target.matrixPose, axes='sxyz')
         arad = []
         for n in range(3):
             if self.from_max[n] == self.from_min[n]:
                 cdeg = self.from_max[n]
             else:
                 bdeg = brad[n] / D
                 if bdeg < self.from_min[n]: bdeg = self.from_min[n]
                 if bdeg > self.from_max[n]: bdeg = self.from_max[n]
                 slope = (self.to_max[n] - self.to_min[n]
                          ) / float(self.from_max[n] - self.from_min[n])
                 adeg = slope * (bdeg - self.from_min[n]) + self.to_min[n]
             arad.append(adeg * D)
         mat = tm.euler_matrix(arad[0], arad[1], arad[2], axes='sxyz')
         bone.matrixPose[:3, :3] = mat[:3, :3]
         bone.updateBone()
     return
     log.debug("Transform %s %s", bone.name, target.name)
     log.debug("Arad %s", arad)
     log.debug("Brad %s", brad)
     log.debug("P %s", bone.matrixPose)
     log.debug("R %s", bone.matrixRest)
     log.debug("G %s", bone.matrixGlobal)
     pass
Example #29
    def process_sensors(self, state):
        """Put sensor data into state vector. """
        # Orientation Sensor
        orientation = state[Sensors.ORIENTATION_SENSOR]

        # Make orientation 4x4 homogenous matrix.
        homo_orientation = np.diag([1., 1., 1., 1.])
        homo_orientation[0:3, 0:3] = orientation

        # Convert homo_orientation to roll, pitch, yaw as we know.
        rpy = tf.euler_from_matrix(homo_orientation, axes='rxyz')

        # Position Sensor
        position = state[Sensors.LOCATION_SENSOR]

        # Velocity Sensor
        vel = state[Sensors.VELOCITY_SENSOR]

        # Rotate velocities to get vx, vy
        psi_rot = tf.euler_matrix(0, 0, rpy[2])
        rotated_vel = (psi_rot[0:3,0:3].dot(vel))/100.

        # IMU Sensor
        imu = state[Sensors.IMU_SENSOR]

        # Save off states in array.
        self.states[0:3] = np.resize(position, (3,))
        self.states[3:6] = np.resize(rpy, (3,))
        self.states[6:9] = np.resize(rotated_vel, (3,))
        self.states[9:12] = np.resize(imu[3:6], (3,))
Example #30
	def get_transformed_model(self, transforms):
		t = transforms

		# Pure homogeneous scale: only the diagonal carries the scale factors;
		# the fourth column must stay (0, 0, 0, 1) or a spurious translation is added.
		scaling_matrix = np.matrix([
			[t['sx']/100, 0, 0, 0],
			[0, t['sy']/100, 0, 0],
			[0, 0, t['sz']/100, 0],
			[0, 0, 0, 1]
		])

		translation_matrix = np.matrix([
			[1, 0, 0, t['tx']],
			[0, 1, 0, t['ty']],
			[0, 0, 1, t['tz']],
			[0, 0, 0, 1   ]
		])

		rotation_matrix = np.matrix(euler_matrix(
			rad(t['rx']),
			rad(t['ry']),
			rad(t['rz'])
		))

		matrix = scaling_matrix * translation_matrix * rotation_matrix

		leds_ = leds.copy()
		leds_ = np.pad(leds_, (0,1), 'constant', constant_values=1)[:-1]
		leds_ = np.rot90(leds_, 3)
		leds_ = np.dot(matrix, leds_)
		leds_ = np.rot90(leds_)
		leds_ = np.array(leds_)

		return leds_
Example #31
 def _unpack(x):
     trans = x[:3]
     eulxyz = x[3:]
     T = tf.concatenate_matrices(tf.translation_matrix(trans),
                                 tf.euler_matrix(eulxyz[0], eulxyz[1],
                                                 eulxyz[2], 'sxyz'))
     return T
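An illustrative round trip for the helper above (it takes only x, so it can be called directly), assuming tf is the transformations module: a 6-vector [tx, ty, tz, rx, ry, rz] becomes a homogeneous transform.

import numpy as np

x = np.array([1.0, 2.0, 3.0, 0.0, 0.0, np.pi / 2])
T = _unpack(x)   # rotation Rz(pi/2), translation (1, 2, 3)
print(T[:3, 3])  # [1. 2. 3.]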
Example #32
 def update(self, amt, bone):
     target = amt.bones[self.subtar]
     if self.ownsp == "WORLD":
         halt
     else:
         if self.map_from != "ROTATION" or self.map_to != "ROTATION":
             halt
         brad = tm.euler_from_matrix(target.matrixPose, axes="sxyz")
         arad = []
         for n in range(3):
             if self.from_max[n] == self.from_min[n]:
                 cdeg = self.from_max[n]
             else:
                 bdeg = brad[n] / D
                 if bdeg < self.from_min[n]:
                     bdeg = self.from_min[n]
                 if bdeg > self.from_max[n]:
                     bdeg = self.from_max[n]
                 slope = (self.to_max[n] - self.to_min[n]) / float(self.from_max[n] - self.from_min[n])
                 adeg = slope * (bdeg - self.from_min[n]) + self.to_min[n]
             arad.append(adeg * D)
         mat = tm.euler_matrix(arad[0], arad[1], arad[2], axes="sxyz")
         bone.matrixPose[:3, :3] = mat[:3, :3]
         bone.updateBone()
     return
     log.debug("Transform %s %s", bone.name, target.name)
     log.debug("Arad %s", arad)
     log.debug("Brad %s", brad)
     log.debug("P %s", bone.matrixPose)
     log.debug("R %s", bone.matrixRest)
     log.debug("G %s", bone.matrixGlobal)
     pass
Example #33
 def update(self, amt, bone):
     target = amt.bones[self.subtar]
     if self.ownsp == 'WORLD':
         mat = target.matrixGlobal
         bone.matrixGlobal[0, :3] += self.influence * (
             mat[:3, :3] - bone.matrixGlobal[:3, :3])
     else:
         ay, ax, az = tm.euler_from_matrix(bone.matrixPose, axes='syxz')
         by, bx, bz = tm.euler_from_matrix(target.matrixPose, axes='syxz')
         if self.usex:
             ax += self.influence * (bx - ax)
         if self.usey:
             ay += self.influence * (by - ay)
         if self.usez:
             az += self.influence * (bz - az)
         testbones = [
             "DfmUpArm1_L", "DfmUpArm2_L", "DfmLoArm1_L", "DfmLoArm2_L",
             "DfmLoArm3_L"
         ]
         if bone.name in testbones:
             log.debug("%s %s", bone.name, target.name)
             log.debug("%s", str(bone.matrixPose))
             log.debug("%s", str(target.matrixPose))
         bone.matrixPose = tm.euler_matrix(ay, ax, az, axes='syxz')
         if bone.name in testbones:
             log.debug("%s", str(bone.matrixPose))
         bone.updateBone()
Example #34
 def setUp(self):
     pcd = o3.io.read_point_cloud('data/horse.ply')
     pcd = pcd.voxel_down_sample(voxel_size=0.01)
     self._source = np.asarray(pcd.points)
     rot = trans.euler_matrix(*np.random.uniform(0.0, np.pi / 4, 3))
     self._tf = tf.RigidTransformation(rot[:3, :3], np.zeros(3))
     self._target = self._tf.transform(self._source)
Example #35
def GPSPos(GPSData, Camera, start_frame):
    roll_start = -deg2rad(start_frame[7]);
    pitch_start = deg2rad(start_frame[8]);
    yaw_start = -deg2rad(start_frame[9]+90);

    psi = pitch_start; 
    cp = cos(psi);
    sp = sin(psi);
    theta = roll_start;
    ct = cos(theta);
    st = sin(theta);
    gamma = yaw_start;
    cg = cos(gamma);
    sg = sin(gamma);

    R_to_i_from_w = \
            array([[cg*cp-sg*st*sp, -sg*ct, cg*sp+sg*st*cp],
                  [sg*cp+cg*st*sp, cg*ct, sg*sp-cg*st*cp],
                  [-ct*sp, st, ct*cp]]).transpose()


    pts = WGS84toENU(start_frame[1:4], GPSData[:, 1:4])
    world_coordinates = pts;
    pos_wrt_imu = dot(R_to_i_from_w, world_coordinates);
    R_to_c_from_i = Camera['R_to_c_from_i']
    R_camera_pitch = euler_matrix(Camera['rot_x'], Camera['rot_y'],\
            Camera['rot_z'], 'sxyz')[0:3,0:3]
    R_to_c_from_i = dot(R_camera_pitch, R_to_c_from_i) 

    pos_wrt_camera = dot(R_to_c_from_i, pos_wrt_imu);

    pos_wrt_camera[0,:] += Camera['t_x'] #move to left/right
    pos_wrt_camera[1,:] += Camera['t_y'] #move up/down image
    pos_wrt_camera[2,:] += Camera['t_z'] #move away from cam
    return pos_wrt_camera
Example #36
def GetQ50LidarParams():
    params = { } 
    params['R_from_i_to_l'] = euler_matrix(-0.005,-0.0081,-0.0375)[0:3,0:3]
    params['T_from_l_to_i'] = np.eye(4) 
    params['T_from_l_to_i'][0:3,0:3] = params['R_from_i_to_l'].transpose()

    return params
Example #37
def prepare_source_and_target_rigid_3d(source_filename,
                                       noise_amp=0.001,
                                       n_random=500,
                                       orientation=np.deg2rad([0.0, 0.0,
                                                               30.0]),
                                       translation=np.zeros(3),
                                       normals=False):
    source = o3.io.read_point_cloud(source_filename)
    source = o3.geometry.PointCloud.voxel_down_sample(source, 0.005)
    print(source)
    target = copy.deepcopy(source)
    tp = np.asarray(target.points)
    np.random.shuffle(tp)
    rg = 1.5 * (tp.max(axis=0) - tp.min(axis=0))
    rands = (np.random.rand(n_random, 3) - 0.5) * rg + tp.mean(axis=0)
    target.points = o3.utility.Vector3dVector(
        np.r_[tp + noise_amp * np.random.randn(*tp.shape), rands])
    ans = trans.euler_matrix(*orientation)
    ans[:3, 3] = translation
    target.transform(ans)
    if normals:
        estimate_normals(
            source, o3.geometry.KDTreeSearchParamHybrid(radius=0.3, max_nn=50))
        estimate_normals(
            target, o3.geometry.KDTreeSearchParamHybrid(radius=0.3, max_nn=50))
    return source, target
Example #38
    def frame(self, i):

        rnds = self.rnds
        roll = math.sin(i * .01 * rnds[0] + rnds[1])
        pitch = math.sin(i * .01 * rnds[2] + rnds[3])
        yaw = math.pi * math.sin(i * .01 * rnds[4] + rnds[5])
        x = math.sin(i * 0.01 * rnds[6])
        y = math.sin(i * 0.01 * rnds[7])

        x,y,z = -0.5,0.5,1
        roll,pitch,yaw = (0,0,0)

        z = 4 + 3 * math.sin(i * 0.1 * rnds[8])
        print(z)

        rz = transformations.euler_matrix(roll, pitch, yaw)
        p = Plane(Vec3(x, y, z), under(Vec3(0,0,-1), rz), under(Vec3(1, 0, 0), rz))

        acc = 0
        for r in self.r:
            (pred, h, norm) = p.hit(r)
            l = numpy.where(pred, texture(p.localxy(r.project(h))), 0.0)
            acc += l
        acc *= (1.0 / len(self.r))

        # print "took", time.time() - st

        img = cv.CreateMat(self.h, self.w, cv.CV_8UC1)
        cv.SetData(img, (clamp(0, acc, 1) * 255).astype(numpy.uint8).tostring(), self.w)
        return img
Example #39
    def frame(self, i):

        rnds = self.rnds
        roll = math.sin(i * .01 * rnds[0] + rnds[1])
        pitch = math.sin(i * .01 * rnds[2] + rnds[3])
        yaw = math.pi * math.sin(i * .01 * rnds[4] + rnds[5])
        x = math.sin(i * 0.01 * rnds[6])
        y = math.sin(i * 0.01 * rnds[7])

        x, y, z = -0.5, 0.5, 1
        roll, pitch, yaw = (0, 0, 0)

        z = 4 + 3 * math.sin(i * 0.1 * rnds[8])
        print(z)

        rz = transformations.euler_matrix(roll, pitch, yaw)
        p = Plane(Vec3(x, y, z), under(Vec3(0, 0, -1), rz),
                  under(Vec3(1, 0, 0), rz))

        acc = 0
        for r in self.r:
            (pred, h, norm) = p.hit(r)
            l = numpy.where(pred, texture(p.localxy(r.project(h))), 0.0)
            acc += l
        acc *= (1.0 / len(self.r))

        # print "took", time.time() - st

        img = cv.CreateMat(self.h, self.w, cv.CV_8UC1)
        cv.SetData(img,
                   (clamp(0, acc, 1) * 255).astype(numpy.uint8).tostring(),
                   self.w)
        return img
Example #40
def GetQ50CameraParams():
    cam = [{}, {}]
    for i in [1, 0]:
        cam[i]['R_to_c_from_i'] = np.array([[-1, 0, 0], [0, 0, -1], [0, -1,
                                                                     0]])

        if i == 0:
            cam[i]['R_to_c_from_l_in_camera_frame'] = cam[1][
                'R_to_c_from_l_in_camera_frame']
            cam[i]['displacement_from_l_to_c_in_lidar_frame'] = cam[1][
                'displacement_from_l_to_c_in_lidar_frame']

            # extrinsics parameters for transforming points in right camera frame to this camera
            T = np.array([
                -0.5873763710461054, 0.00012196510170337307,
                0.08922401781210791
            ])
            R = np.array([
                0.9999355343485463, -0.00932576944123699, 0.006477435558612815,
                0.009223923954826548, 0.9998360945238545, 0.015578938158992275,
                -0.006621659456863392, -0.01551818647957998, 0.9998576596268203
            ])
            R = R.reshape((3, 3))
            T = T.reshape((3, 1))
            E = np.hstack((R.transpose(), np.dot(-R.transpose(), T)))
            E = np.vstack((E, np.array([0, 0, 0, 1])))
            cam[i]['E'] = E
            cam[i]['fx'] = 2254.76
            cam[i]['fy'] = 2266.30
            cam[i]['cu'] = 655.55
            cam[i]['cv'] = 488.85
            cam[i]['distort'] = np.array([
                -0.22146000368016028, 0.7987879799679538,
                -6.542034918087567e-05, 2.8680581938024014e-05, 0.0
            ])

        elif i == 1:
            R_to_c_from_l_in_camera_frame = euler_matrix(
                0.046, 0.0071, 0.0065)[0:3, 0:3]
            cam[i][
                'R_to_c_from_l_in_camera_frame'] = R_to_c_from_l_in_camera_frame
            cam[i]['displacement_from_l_to_c_in_lidar_frame'] = np.array(
                [-0.07, 0.325, 0.16])
            cam[i]['fx'] = 2250.72
            cam[i]['fy'] = 2263.75
            cam[i]['cu'] = 648.95
            cam[i]['cv'] = 450.24
            cam[i]['distort'] = np.array([
                -0.16879238412882028, 0.11971166628565273,
                -0.0017457365846050555, 0.0001853749033525837, 0.0
            ])
            cam[i]['E'] = np.eye(4)

        cam[i]['KK'] = np.array([[cam[i]['fx'], 0.0, cam[i]['cu']],
                                 [0.0, cam[i]['fy'], cam[i]['cv']],
                                 [0.0, 0.0, 1.0]])
        cam[i]['f'] = (cam[i]['fx'] + cam[i]['fy']) / 2

    return cam
Example #41
def global_pose(matrix,x_ob,y_ob,angle): # matrix is the apriltag pose; x_ob and y_ob are the apriltag's x and y
    tag_size = 0.18
    tile_size = 0.585

    T_a = tf.translation_matrix([
        -x_ob, -tag_size*3/4, y_ob]) # this already comes multiplied by the tile_size
    R_a = tf.euler_matrix(0,angle,0)

    T_m_a = tf.concatenate_matrices(T_a, R_a)

    # tag pose with respect to the robot
    T_r_a = np.dot(matrix, tf.euler_matrix(0, np.pi, 0))
    # tag pose with respect to the map
    T_a_r = np.linalg.inv(T_r_a) # T_r_a-1
    T_m_r = np.dot(T_m_a, T_a_r)

    return T_m_r
Example #42
 def setRotation(self, angles):
     """
     Sets rotation of this bone (in local space) as Euler rotation
     angles x,y and z.
     """
     ax,ay,az = angles
     mat = tm.euler_matrix(ax, ay, az, axes='szyx')
     self.matPose[:3,:3] = mat[:3,:3]
Example #43
File: eulers.py Project: jkotur/queuler
	def __modify_euler( self , a , d ) :
		glPushMatrix()
		glLoadMatrixf( tr.euler_matrix( *a ) )
		glRotatef( d[0] , 0 , 1 , 0 )
		glRotatef( d[1] , 1 , 0 , 0 )
		a = np.array( tr.euler_from_matrix( glGetDoublev(GL_MODELVIEW_MATRIX) ) )
		glPopMatrix()
		return a
Example #44
def coil_transform_pos(pos):
    pos[1] = -pos[1]
    a, b, g = np.radians(pos[3:])
    r_ref = tf.euler_matrix(a, b, g, 'sxyz')
    t_ref = tf.translation_matrix(pos[:3])
    m_img = tf.concatenate_matrices(t_ref, r_ref)

    return m_img
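A hypothetical call (units assumed from context: position plus alpha, beta, gamma in degrees); note the function negates pos[1] in place.

import numpy as np

m_img = coil_transform_pos([10.0, 20.0, 30.0, 0.0, 0.0, 90.0])
print(np.round(m_img, 3))  # Rz(90 deg) rotation with translation (10, -20, 30)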
Example #45
def transform_euler_frame(euler_frame,
                          angles,
                          offset,
                          rotation_order=None,
                          global_rotation=True):
    """
    Calls transform_point for the root parameters and adds theta to the y rotation
    channel of the frame.

    The offset of root is transformed by transform_point
    The orientation of root is rotated by Rotation matrix

    Parameters
    ---------
    *euler_frame: np.ndarray
    \t the parameters of a single frame
    *angles: list of floats
    \tRotation angles in degrees
    *offset: np.ndarray
    \tTranslation
    """
    if rotation_order is None:
        rotation_order = DEFAULT_ROTATION_ORDER
    transformed_frame = deepcopy(euler_frame)
    if global_rotation:
        transformed_frame[:3] = transform_point(euler_frame[:3],
                                                angles,
                                                offset,
                                                rotation_order=rotation_order)
    else:
        transformed_frame[:3] = transform_point(euler_frame[:3],
                                                np.zeros(3),
                                                offset,
                                                rotation_order=rotation_order)
    R = euler_matrix(np.deg2rad(angles[0]),
                     np.deg2rad(angles[1]),
                     np.deg2rad(angles[2]),
                     axes='rxyz')

    src_e = np.deg2rad(euler_frame[3:6])
    rot_string = rotation_order_to_string(rotation_order)
    OR = euler_matrix(*src_e, axes=rot_string)
    rotmat = np.dot(R, OR)
    eul_angles = np.rad2deg(euler_from_matrix(rotmat, rot_string))
    transformed_frame[3:6] = eul_angles
    return transformed_frame
Example #46
 def _x2transform(self, x):
     trans, eulxyz, scale = self._unpack(x)
     origin = [0, 0, 0]
     T = tf.concatenate_matrices(tf.translation_matrix(trans),
                                 tf.euler_matrix(eulxyz[0], eulxyz[1],
                                                 eulxyz[2], 'sxyz'),
                                 tf.scale_matrix(scale, origin))
     return T
Example #47
File: eulers.py Project: jkotur/queuler
	def _draw_scene( self ) :
		glPushMatrix()
		glTranslatef( *self.b )
		glPushMatrix()
		glMultMatrixd( tr.euler_matrix( *self.ab ) )
		self.frame.draw()
		glPopMatrix()

		glTranslatef( *self.c )
		glMultMatrixd( tr.euler_matrix( *self.ac ) )
		self.frame.draw()
		glPopMatrix()

		glPushMatrix()
		glTranslatef( *self.e )
		glMultMatrixd( tr.euler_matrix( *self.ae ) )
		self.frame.draw()
		glPopMatrix()
Example #48
def channel_from_euler(roll, pitch, yaw, dodeca_array=ar, verbosity = 0):
    """ Get dodecahedron side from Euler angle """
    rr = euler_matrix(roll, pitch, yaw, 'sxyz')
    rr3 = rr[0:3, 0:3]
    #distance,index = spatial.KDTree(dodeca_array).query(rr3[2])
    index = get_closest(dodeca_array, rr3[2])
    if verbosity > 0: 
        print "{}".format(index)
    return index
Example #49
def computeReprojection(C, raw_pts, cam):
    pts = raw_pts[:, 0:3].copy()
    pts[:, 0] += C[0]
    pts[:, 1] += C[1]
    pts[:, 2] += C[2]
    R = euler_matrix(C[3], C[4], C[5])[0:3,0:3]
    pts_wrt_cam = np.dot(R, np.dot(R_to_c_from_l_old(cam), pts.transpose()))
    pix = np.around(np.dot(cam['KK'], np.divide(pts_wrt_cam[0:3,:], pts_wrt_cam[2, :])))
    pix = pix.astype(np.int32)
    return (pix, pts_wrt_cam)
Example #50
    def rotatePoint(self, point, theta, phi):

        R = euler_matrix(0, np.deg2rad(theta), np.deg2rad(phi))

        if len(point) == 3:
            point = np.append(point, 1)

        new_point = np.dot(R, point)[0:3]

        return new_point
Example #51
def setPersp(scale_factor = 1, distort_location = '/afs/cs.stanford.edu/u/twangcat/scratch/sail-car-log/process/'):    
    distort = { }
    distort['M_1'] = [np.eye(3)] # perspective distortion in cam1 image space
    distort['M_2'] = [np.eye(3)] # perspective distortion in cam2 image space
    distort['T'] = [np.eye(4)] # corresponding real-world transformation


    distort['T'].insert(0,translation_matrix(np.array([1.0,0,0])))
    distort['T'].insert(0,translation_matrix(np.array([-1.0,0,0])))
    distort['T'].insert(0,euler_matrix(0, 0.06, 0))
    distort['T'].insert(0,euler_matrix(0, -0.06, 0))
    distort['T'].insert(0,euler_matrix(0, 0.12, 0))
    distort['T'].insert(0,euler_matrix(0, -0.12, 0))


    distort['M_1'].insert(0,pickle.load(open(distort_location+'M_shift1.0_cam1.pickle'))) # shift 1.0
    distort['M_1'].insert(0,pickle.load(open(distort_location+'M_shift-1.0_cam1.pickle'))) # shift -1.0
    distort['M_1'].insert(0,pickle.load(open(distort_location+'M_rot0.06_cam1.pickle','r'))) # rotate 0.06
    distort['M_1'].insert(0,pickle.load(open(distort_location+'M_rot-0.06_cam1.pickle','r'))) # rotate -0.06
    distort['M_1'].insert(0,pickle.load(open(distort_location+'M_rot0.12_cam1.pickle','r'))) # rotate 0.12
    distort['M_1'].insert(0,pickle.load(open(distort_location+'M_rot-0.12_cam1.pickle','r'))) # rotate -0.12




    distort['M_2'].insert(0,pickle.load(open(distort_location+'M_shift1.0_cam2.pickle'))) # shift 1.0
    distort['M_2'].insert(0,pickle.load(open(distort_location+'M_shift-1.0_cam2.pickle'))) # shift -1.0
    distort['M_2'].insert(0,pickle.load(open(distort_location+'M_rot0.06_cam2.pickle','r'))) # rotate 0.06
    distort['M_2'].insert(0,pickle.load(open(distort_location+'M_rot-0.06_cam2.pickle','r'))) # rotate -0.06
    distort['M_2'].insert(0,pickle.load(open(distort_location+'M_rot0.12_cam2.pickle','r'))) # rotate 0.12
    distort['M_2'].insert(0,pickle.load(open(distort_location+'M_rot-0.12_cam2.pickle','r'))) # rotate -0.12

    assert len(distort['M_1']) == len(distort['M_2']) and len(distort['M_1'])==len(distort['T'])
    if scale_factor != 1:
      for i in range(len(distort['M_1'])-1): # the last one is always identity, no need to change it.
        distort['M_1'][i][:,0:2]/=scale_factor
        distort['M_1'][i][2,:]/=scale_factor
        distort['M_2'][i][:,0:2]/=scale_factor
        distort['M_2'][i][2,:]/=scale_factor


    return distort
Example #52
def GPSPosCamera(pos_wrt_imu, Camera):
    R_to_c_from_i = Camera['R_to_c_from_i']
    R_camera_pitch = euler_matrix(Camera['rot_x'], Camera['rot_y'],Camera['rot_z'], 'sxyz')[0:3,0:3]
    R_to_c_from_i = dot(R_camera_pitch, R_to_c_from_i) 

    pos_wrt_camera = dot(R_to_c_from_i, pos_wrt_imu);

    pos_wrt_camera[0,:] += Camera['t_x'] #move to left/right
    pos_wrt_camera[1,:] += Camera['t_y'] #move up/down image
    pos_wrt_camera[2,:] += Camera['t_z'] #move away from cam
    return pos_wrt_camera
Example #53
def state_to_hmat(Xs):
    """
    Converts a list of 12 dof state vectors (used in the Kalman filter) to a list of transforms.
    """
    Ts = []
    for x in Xs:
        trans = x[0:3]
        rot   = x[6:9]
        T = tfms.euler_matrix(rot[0], rot[1], rot[2])
        T[0:3,3] = np.reshape(trans, 3)
        Ts.append(T)
    return Ts
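An illustrative call, assuming tfms is the transformations module and a state layout of [x, y, z, vx, vy, vz, roll, pitch, yaw, wx, wy, wz]:

import numpy as np

x = np.zeros(12)
x[0:3] = [1.0, 2.0, 3.0]
x[6:9] = [0.0, 0.0, np.pi / 2]
T = state_to_hmat([x])[0]
print(T[0:3, 3])  # [1. 2. 3.]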
Example #54
    def update_config(self,config):
        self.count += 1
        if not config:
            raise Exception("Failed to update_config for {0} with {1}".format(self.name, config))

        self.translation = config[:3]
        self.rpy = config[3:6]
        rot_mat = tf.euler_matrix(self.rpy[0], self.rpy[1], self.rpy[2])
        angle, direction, point = tf.rotation_from_matrix(rot_mat)
        self.rotation = [ direction[0],direction[1],direction[2], angle ]
        self.init_local_transformation()
        self.origin.update()
Example #55
def fast_icp(range_image, point_model, rate=0.001, dist=0.01):
    """Returns ICP between a range image and a point model.
    The range_image is held still, an altered 'point_model' is returned
    that's optimally aligned to the range image.

    The normals for point_model are used in the computation, normals
    for the range_image are ignored.

    Points are selected randomly from point_model and projected into the
    image. This is a terrible choice if:
       - only a portion of the point model is visible to the camera,
         e.g., very large object, narrow zoomed-in camera

    Args:
        range_image: a RangeImage
        point_model: a PointModel

    Returns:
        pnew: a PointModel identical to point_model except that
            pnew.RT has been modified so that pnew is aligned to
            range_image.
    """
    global A, B, uv, mask, npairs, matKKBtoA, matB


    # The computation must actually take place in the coordinate system
    # of range_image.camera.RT
    camRTinv = np.linalg.inv(range_image.camera.RT)
    matBtoA = np.dot(camRTinv, point_model.RT)
    matBtoA = np.ascontiguousarray(matBtoA)

    matKKBtoA = np.dot(range_image.camera.KK, matBtoA)

    mask = (range_image.depth < np.inf).astype('u1')
    err, npairs, uv, A, B = _fasticp(range_image.xyz,
                                     range_image.normals,
                                     mask,
                                     point_model.xyz,
                                     matBtoA,
                                     matKKBtoA,
                                     rate, dist*dist)

    xvec = np.linalg.solve(A, B)
    RT = np.eye(4, dtype='f')
    RT[:3,:3] = transformations.euler_matrix(*-xvec[:3])[:3,:3]
    RT[:3,3] = -xvec[3:]

    # camera.RT * RT * camRTinv * point_model.RT
    RT = np.dot(range_image.camera.RT, np.dot(RT, np.dot(camRTinv, point_model.RT)))
    pnew = pointmodel.PointModel(point_model.xyz, point_model.normals, np.ascontiguousarray(RT))
    pnew.mask = mask

    return pnew, err, npairs, uv
Example #56
def compute_state_space_trajectory_and_derivatives(p,psi,dt):

    num_timesteps = len(p)

    psi = trigutils.compute_continuous_angle_array(psi)

    p_dot     = c_[ gradient(p[:,0],dt),     gradient(p[:,1],dt),     gradient(p[:,2],dt)]
    p_dot_dot = c_[ gradient(p_dot[:,0],dt), gradient(p_dot[:,1],dt), gradient(p_dot[:,2],dt)]

    f_thrust_world            = m*p_dot_dot - f_external_world.T.A
    f_thrust_world_normalized = sklearn.preprocessing.normalize(f_thrust_world)

    z_axis_intermediate = c_[ cos(psi), zeros_like(psi), -sin(psi) ]
    y_axis              = f_thrust_world_normalized
    x_axis              = sklearn.preprocessing.normalize(cross(z_axis_intermediate, y_axis))
    z_axis              = sklearn.preprocessing.normalize(cross(y_axis, x_axis))

    theta          = zeros(num_timesteps)
    psi_recomputed = zeros(num_timesteps)
    phi            = zeros(num_timesteps)

    for ti in range(num_timesteps):

        z_axis_ti = c_[matrix(z_axis[ti]),0].T
        y_axis_ti = c_[matrix(y_axis[ti]),0].T
        x_axis_ti = c_[matrix(x_axis[ti]),0].T

        R_world_from_body_ti              = c_[z_axis_ti,y_axis_ti,x_axis_ti,[0,0,0,1]]
        psi_recomputed_ti,theta_ti,phi_ti = transformations.euler_from_matrix(R_world_from_body_ti,"ryxz")

        assert allclose(R_world_from_body_ti, transformations.euler_matrix(psi_recomputed_ti,theta_ti,phi_ti,"ryxz"))

        theta[ti]          = theta_ti
        psi_recomputed[ti] = psi_recomputed_ti
        phi[ti]            = phi_ti

    theta          = trigutils.compute_continuous_angle_array(theta)
    psi_recomputed = trigutils.compute_continuous_angle_array(psi_recomputed)
    phi            = trigutils.compute_continuous_angle_array(phi)

    assert allclose(psi_recomputed, psi)

    psi = psi_recomputed

    theta_dot     = gradient(theta,dt)
    psi_dot       = gradient(psi,dt)
    phi_dot       = gradient(phi,dt)

    theta_dot_dot = gradient(theta_dot,dt)
    psi_dot_dot   = gradient(psi_dot,dt)
    phi_dot_dot   = gradient(phi_dot,dt)

    return p, p_dot, p_dot_dot, theta, theta_dot, theta_dot_dot, psi, psi_dot, psi_dot_dot, phi, phi_dot, phi_dot_dot
Example #57
def pixelTo3d(pixels, cam):
  height = 1.105 # constants based on camera setup 
  R_camera_pitch = euler_matrix(cam['rot_x'], cam['rot_y'], cam['rot_z'], 'sxyz')[0:3,0:3]
  KK = cam['KK']
  N = pixels.shape[0]
  assert(pixels.shape[1] == 2)
  # compute X/Z and Y/Z using inv(KK*R)*pixels
  Pos_S = np.linalg.solve(np.dot(KK,R_camera_pitch), np.concatenate((pixels,np.ones([N,1])), axis=1).transpose()).transpose()
  Y = np.ones((N))*height
  Z = Y/Pos_S[:,1]
  X = Pos_S[:,0]*Z
  return np.vstack((X,Y,Z)).transpose()
Example #58
    def get_projection_matrix(self, t, r):

        # proj = tf.rotation(r[0], r[1], r[2])
        # t.shape = (3, 1)
        # proj = np.append(proj, t, 1)
        # proj = np.vstack([proj, np.array([0, 0, 0, 1])])
        proj = tf.euler_matrix(r[0], r[1], r[2])
        proj[0,3] = t[0]
        proj[1,3] = t[1]
        proj[2,3] = t[2]

        return proj
Example #59
def apply_pose_error(tf1s, errors, euler_representation=True):
    tf0s = []
    for (tf1, error) in zip(tf1s, errors):
        tf0 = np.eye(4)
        tf0[:3,3] = tf1[:3,3] + error[:3]
        #tf0[:3,:3] = rave.rotationMatrixFromAxisAngle(error[3:]).dot(tf1[:3,:3])
        if euler_representation:
            tf0[:3,:3] = euler_matrix(*error[3:])[:3,:3].dot(tf1[:3,:3])
        else:
            tf0[:3,:3] = quaternion_matrix(error[3:])[:3,:3].dot(tf1[:3,:3])
        tf0s.append(tf0)
    return tf0s
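A minimal sketch of the helper above with hypothetical values: perturb an identity pose by 1 cm in x and 0.1 rad about z.

import numpy as np

tf1s = [np.eye(4)]
errors = [np.array([0.01, 0.0, 0.0, 0.0, 0.0, 0.1])]
tf0 = apply_pose_error(tf1s, errors)[0]
print(np.round(tf0, 3))  # Rz(0.1) rotation, translation (0.01, 0, 0)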
Example #60
def show_wrist_fk(theta_4, theta_5, theta_6, or_obj):
        import show_axes
        from numpy import identity, matrix, array, pi
        from transformations import euler_matrix

        origin_pose = identity(4)

        # show ground plane
        #show_axes.show_ground_plane(or_obj)

        # show origin
        show_axes.show_axes('origin', origin_pose, or_obj, 'small')
        
        # show the solution
        # theta 4
        input("<Enter> to show theta_4")
        joint_xform = euler_matrix(0.0, 0.0, theta_4)
        theta_4_pose = matrix(origin_pose) * matrix(joint_xform)
        show_axes.show_axes('theta_4', array(theta_4_pose), or_obj, 'colored')
        
        # theta 5
        input("<Enter> to show theta_5")
        show_axes.hide_axes('theta_4', or_obj)  # hide previous axes (comment this out to keep it)
        offset_xform = euler_matrix(pi/2, 0.0, 0.0)
        joint_xform = euler_matrix(0.0, 0.0, theta_5)
        theta_5_pose = theta_4_pose * matrix(offset_xform) * matrix(joint_xform)
        show_axes.show_axes('theta_5', array(theta_5_pose), or_obj, 'colored')

        # theta 6
        input("<Enter> to show theta_6")
        show_axes.hide_axes('theta_5', or_obj)  # hide previous axes (comment this out to keep it)
        offset_xform = euler_matrix(-pi/2, 0.0, 0.0)
        joint_xform = euler_matrix(0.0, 0.0, theta_6)
        theta_6_pose = theta_5_pose * matrix(offset_xform) * matrix(joint_xform)
        show_axes.show_axes('theta_6', array(theta_6_pose), or_obj, 'colored')

        input("<Enter> to hide solution")
        show_axes.hide_axes('theta_6', or_obj)
        
        return