Code example #1
def test_as_rotation_matrix(Rs):
    def quat_mat(quat):
        return np.array([(quat * v * quat.inverse()).vec for v in [quaternion.x, quaternion.y, quaternion.z]]).T

    def quat_mat_vec(quats):
        mat_vec = np.array([quaternion.as_float_array(quats * v * np.invert(quats))[..., 1:]
                            for v in [quaternion.x, quaternion.y, quaternion.z]])
        return np.transpose(mat_vec, tuple(range(mat_vec.ndim))[1:-1]+(-1, 0))

    with pytest.raises(ZeroDivisionError):
        quaternion.as_rotation_matrix(quaternion.zero)

    for R in Rs:
        # Test correctly normalized rotors:
        assert allclose(quat_mat(R), quaternion.as_rotation_matrix(R), atol=2*eps)
        # Test incorrectly normalized rotors:
        assert allclose(quat_mat(R), quaternion.as_rotation_matrix(1.1*R), atol=2*eps)

    Rs0 = Rs.copy()
    Rs0[Rs.shape[0]//2] = quaternion.zero
    with pytest.raises(ZeroDivisionError):
        quaternion.as_rotation_matrix(Rs0)

    # Test correctly normalized rotors:
    assert allclose(quat_mat_vec(Rs), quaternion.as_rotation_matrix(Rs), atol=2*eps)
    # Test incorrectly normalized rotors:
    assert allclose(quat_mat_vec(Rs), quaternion.as_rotation_matrix(1.1*Rs), atol=2*eps)
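
For readers skimming past the test machinery: the property being verified is that as_rotation_matrix(q) acts on vectors exactly like conjugation by the rotor, R v = (q v q⁻¹).vec, and that the input is normalized first. A minimal standalone check (assumes only numpy and numpy-quaternion are installed):

import numpy as np
import quaternion

q = np.quaternion(1, 2, 3, 4).normalized()   # unit rotor
R = quaternion.as_rotation_matrix(q)

v = np.array([0.3, -1.0, 2.0])
qv = np.quaternion(0, *v)                    # embed v as a pure quaternion
rotated = (q * qv * q.inverse()).vec         # conjugation q v q^-1
assert np.allclose(R @ v, rotated)
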
Code example #2
def test_from_rotation_matrix(Rs):
    rot_mat_eps = 10*eps
    try:
        from scipy import linalg
    except ImportError:
        rot_mat_eps = 400*eps

    for i, R1 in enumerate(Rs):
        R2 = quaternion.from_rotation_matrix(quaternion.as_rotation_matrix(R1))
        d = quaternion.rotation_intrinsic_distance(R1, R2)
        assert d < rot_mat_eps, (i, R1, R2, d)  # Can't use allclose here; we don't care about rotor sign

    Rs2 = quaternion.from_rotation_matrix(quaternion.as_rotation_matrix(Rs))
    for R1, R2 in zip(Rs, Rs2):
        d = quaternion.rotation_intrinsic_distance(R1, R2)
        assert d < rot_mat_eps, (R1, R2, d)  # Can't use allclose here; we don't care about rotor sign
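
Why these tests use rotation_intrinsic_distance instead of allclose: q and -q encode the same rotation, and from_rotation_matrix is free to return either sign. A small illustration of the round trip:

import numpy as np
import quaternion

q = quaternion.from_rotation_vector([0.1, 0.2, 0.3])
R = quaternion.as_rotation_matrix(q)
q2 = quaternion.from_rotation_matrix(R)
# q2 may equal -q, so compare rotations rather than components:
assert np.allclose(quaternion.as_rotation_matrix(q2), R)
print(quaternion.rotation_intrinsic_distance(q, q2))  # ~0
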
Code example #3
File: Quad.py Project: DocSohl/NeuroEvolve
 def _apply_acceleration(self, orientation, props):
     thrust = 0
     self._acceleration = np.array([0, 0, 0], dtype='float32').T
     accel_rotation = np.array([0,0,0], dtype='float32').T
     for i, p in enumerate(props):
         response = Quad._accels[i]
         accel_rotation += p * response
         thrust += p
     rotate = quaternion.as_rotation_matrix(orientation)
     thrust_vector = rotate.dot(np.array([0, 1, 0], dtype='float32').T)
     self._acceleration += thrust * thrust_vector
     gravity_vector = np.array([0, -10, 0], dtype='float32').T
     self._acceleration += gravity_vector
Code example #4
File: render_plane.py Project: jinlinyi/SparsePlanes
def get_plane_params_in_local(planes, camera_info):
    """
    input: 
    @planes: plane params
    @camera_info: plane params from camera info, type = dict, must contain 'position' and 'rotation' as keys
    output:
    plane parameters in global frame.
    """
    tran = camera_info['position']
    rot = camera_info['rotation']
    b = planes
    a = np.ones((len(planes),3))*tran
    planes_world = a + b - ((a*b).sum(axis=1) / np.linalg.norm(b, axis=1)**2).reshape(-1,1)*b
    end = (quaternion.as_rotation_matrix(rot.inverse())@(planes_world - tran).T).T #world2cam
    planes_local = end*np.array([1, -1, -1])# habitat2suncg
    return planes_local
Code example #5
    def calc_rotation_matrix(self):  # DONE
        """Calculates the Euler rotation matrix.

        R(phi,theta) = Ry(theta)Rx(phi)

        See https://en.wikipedia.org/wiki/Davenport_chained_rotations.
        """
        ## Previous Euler-angle implementation, kept for reference:
        # sp,cp = self.sin_phi,self.cos_phi
        # st,ct = self.sin_theta,self.cos_theta
        # return np.array([[ct,sp*st,-st*cp],
        #                  [0,cp,sp],
        #                  [st,-sp*ct,cp*ct]])

        ## BEGIN CHANGE
        q = np.quaternion(self.q0, self.q1, self.q2, self.q3)
        return quaternion.as_rotation_matrix(q)
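
One reason the quaternion route is convenient here: as the "incorrectly normalized rotors" assertions in example #1 show, as_rotation_matrix normalizes its input, so raw q0..q3 state that has drifted off unit length still yields a proper rotation matrix. A quick sanity sketch:

import numpy as np
import quaternion

q = np.quaternion(0.9, 0.1, 0.2, 0.3)   # deliberately not unit length
R = quaternion.as_rotation_matrix(q)
assert np.allclose(R @ R.T, np.eye(3))  # orthonormal
assert np.isclose(np.linalg.det(R), 1.0)
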
Code example #6
    def publish_bearings(self):
        #print("calculating bearings")
        self._msg.header.stamp = rospy.Time.now()
        self._msg.drones = self._drones
        self._msg.links = []
        for i in range(len(self._drones)):  # loop through list of drones
            self._msg.links.append(FormationLink())
            self._msg.links[i].drone_name = self._drones[i]
            for j in range(len(
                    self._from)):  # loop through list of drones observing
                if self._from[j] == self._drones[i]:
                    self._msg.links[i].targets.append(self._to[j])
                    target_index = self._drones.index(self._to[j])
                    from_index = self._drones.index(self._from[j])
                    bearing = Vector3()

                    dx = self._poses[target_index].position.x - self._poses[
                        from_index].position.x
                    dy = self._poses[target_index].position.y - self._poses[
                        from_index].position.y
                    dz = self._poses[target_index].position.z - self._poses[
                        from_index].position.z
                    vect = np.array([[dx], [dy], [dz]])
                    distance = Float64()
                    distance.data = np.linalg.norm(vect)
                    self._msg.links[i].distances.append(distance)
                    vect = vect / np.linalg.norm(vect)

                    # find world frame bearing vector
                    w = self._poses[from_index].orientation.w
                    x = self._poses[from_index].orientation.x
                    y = self._poses[from_index].orientation.y
                    z = self._poses[from_index].orientation.z

                    # find local frame bearing vector
                    q = np.array([quaternion.quaternion(w, x, y, z)])
                    R = quaternion.as_rotation_matrix(q)
                    RT = np.transpose(R).reshape(1, 3, 3)
                    vect_rotated = np.matmul(RT, vect)

                    bearing.x = vect_rotated[0][0]
                    bearing.y = vect_rotated[0][1]
                    bearing.z = vect_rotated[0][2]
                    self._msg.links[i].bearings.append(bearing)

        self._pub.publish(self._msg)
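
The reshaped RT above is just the transpose (and hence inverse) of each rotation: R maps body-frame vectors into the world frame, so R.T maps the world-frame bearing into the observer's body frame. The same step for a single pose, without the array wrapping (values here are made up):

import numpy as np
import quaternion

q = quaternion.quaternion(0.9238795, 0.0, 0.0, 0.3826834)  # ~45 deg about z
R = quaternion.as_rotation_matrix(q)
bearing_world = np.array([1.0, 0.0, 0.0])
bearing_body = R.T @ bearing_world  # world frame -> body frame
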
Code example #7
    def check_box_collision(self, joints, box):
        '''
        Arguments: joints represents the current configuration of the robot arm
                   box contains the position of the center of the box [x, y, z, r, p, y] and the length, width, and height [l, w, h]
        Returns: True if the box is in collision with the arm, False otherwise.
        '''
        box_pos, box_rpy, box_hsizes = box[:3], box[3:6], box[6:]/2
        box_q = quaternion.from_euler_angles(box_rpy)
        box_axes = quaternion.as_rotation_matrix(box_q)

        self._box_vertices_offset[:,:] = self._vertex_offset_signs * box_hsizes
        box_vertices = (box_axes.dot(self._box_vertices_offset.T) + np.expand_dims(box_pos, 1)).T

        box_hdiag = np.linalg.norm(box_hsizes)
        min_col_dists = box_hdiag + self._collision_box_hdiags

        franka_box_poses = self.get_collision_boxes_poses(joints)
        for i, franka_box_pose in enumerate(franka_box_poses):
            fbox_pos = franka_box_pose[:3, 3]
            fbox_axes = franka_box_pose[:3, :3]

            # coarse collision check
            if np.linalg.norm(fbox_pos - box_pos) > min_col_dists[i]:
                continue

            fbox_vertex_offsets = self._collision_box_vertices_offset[i]
            fbox_vertices = fbox_vertex_offsets.dot(fbox_axes.T) + fbox_pos

            # construct axes
            cross_product_pairs = np.array(list(product(box_axes.T, fbox_axes.T)))
            cross_axes = np.cross(cross_product_pairs[:,0], cross_product_pairs[:,1]).T
            self._collision_proj_axes[:, :3] = box_axes
            self._collision_proj_axes[:, 3:6] = fbox_axes
            self._collision_proj_axes[:, 6:] = cross_axes

            # projection
            box_projs = box_vertices.dot(self._collision_proj_axes)
            fbox_projs = fbox_vertices.dot(self._collision_proj_axes)
            min_box_projs, max_box_projs = box_projs.min(axis=0), box_projs.max(axis=0)
            min_fbox_projs, max_fbox_projs = fbox_projs.min(axis=0), fbox_projs.max(axis=0)

            # check if no separating planes exist
            if np.all([min_box_projs <= max_fbox_projs, max_box_projs >= min_fbox_projs]):
                return True
        
        return False
Code example #8
File: linemod_sixd2017.py Project: CNchence/cp-net
 def generate_pose(self, min_z=0.3, max_z=1.5, edge_offset=3):
     z = min_z + np.random.rand() * (max_z - min_z)
     ## determine x and y by z value
     min_x = (edge_offset - self.K[0, 2]) * z / self.K[0, 0]
     max_x = (
         (self.img_width - edge_offset) - self.K[0, 2]) * z / self.K[0, 0]
     min_y = (edge_offset - self.K[1, 2]) * z / self.K[1, 1]
     max_y = (
         (self.img_height - edge_offset) - self.K[1, 2]) * z / self.K[1, 1]
     x = min_x + np.random.rand() * (max_x - min_x)
     y = min_y + np.random.rand() * (max_y - min_y)
     pos = np.array([x, y, z])
     quat = quaternion.from_euler_angles(np.random.rand() * 360,
                                         np.random.rand() * 360,
                                         np.random.rand() * 360)
     rot = quaternion.as_rotation_matrix(quat)
     return pos, rot
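
One caveat worth flagging: the numpy-quaternion documentation describes from_euler_angles as taking its angles in radians (z-y-z convention), so the np.random.rand() * 360 values above act as large radian angles rather than degrees. They still produce valid rotations, but a radian-based version of the same idea would look like:

import numpy as np
import quaternion

quat = quaternion.from_euler_angles(np.random.rand() * 2 * np.pi,
                                    np.random.rand() * np.pi,
                                    np.random.rand() * 2 * np.pi)
rot = quaternion.as_rotation_matrix(quat)
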
Code example #9
File: PoseTransform.py Project: zsqiang001/kapture
    def transform_points(self, points3d: np.ndarray) -> np.ndarray:
        """
        Applies the self transformation to the given 3d points.

        :param points3d: input 3d points, stored row wise (one point per row)
        :return: another array with the 3d points transformed using the PoseTransform.
        """
        assert self._r is not None
        assert self._t is not None
        assert isinstance(points3d, np.ndarray)

        if points3d.shape[1] == 6:  # expunge RGB
            points3d = points3d[:, 0:3]
        points3d = points3d.transpose()
        rotation_matrix = quaternion.as_rotation_matrix(self.r)
        points3d = np.add(np.matmul(rotation_matrix, points3d), self.t)
        return points3d.transpose()
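
The core operation is the familiar p' = R p + t applied to row-wise points. A self-contained sketch of that step, without the PoseTransform class (the function name here is hypothetical):

import numpy as np
import quaternion

def apply_pose(r, t, points3d):
    """Apply rotation r (a quaternion) and translation t to Nx3 points."""
    R = quaternion.as_rotation_matrix(r)
    return (R @ points3d.T + np.asarray(t, dtype=float).reshape(3, 1)).T
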
Code example #10
    def generate_image(self, pose, grid=5):
        if self._pts4d is None:
            x, y, z = pose.loc
            if 0:
                xx, yy = np.meshgrid(
                    np.linspace(x - z * 3, x + z * 3,
                                math.ceil(z * 3 / grid) * 2 + 1),
                    np.linspace(y - z * 3, y + z * 3,
                                math.ceil(z * 3 / grid) * 2 + 1))
            else:
                xx = np.random.uniform(x - z * 3, x + z * 3,
                                       2 * (math.ceil(z * 3 / grid) * 2 + 1, ))
                yy = np.random.uniform(y - z * 3, y + z * 3,
                                       2 * (math.ceil(z * 3 / grid) * 2 + 1, ))

            if 0:
                zz = np.zeros_like(xx)
            else:
                zz = np.random.uniform(0, -40, xx.shape)

            pts3d = np.stack((xx.flatten(), yy.flatten(), zz.flatten()),
                             axis=1).reshape((-1, 3))
            self._pts4d = np.hstack((pts3d, np.ones((len(pts3d), 1))))

        T = np.hstack(
            (quaternion.as_rotation_matrix(pose.quat), pose.loc.reshape(
                (-1, 1))))
        proj_pts2d = self.real_cam.cam_mx.dot(T).dot(self._pts4d.T).T
        uvp = proj_pts2d[:, :2] / proj_pts2d[:, 2:]
        I = np.logical_and.reduce((
            uvp[:, 0] >= 0,
            uvp[:, 1] >= 0,
            uvp[:, 0] < self.real_cam.width,
            uvp[:, 1] < self.real_cam.height,
        ))
        uvp = self.real_cam.distort(uvp[I, :])
        uvp = (uvp + 0.5).astype(int)

        img = np.ones(
            (self.real_cam.height, self.real_cam.width), dtype=np.uint8) * 64
        for i in range(5):
            for j in range(5):
                img[np.clip(uvp[:, 1] + i - 2, 0, self.real_cam.height - 1),
                    np.clip(uvp[:, 0] + j - 2, 0, self.real_cam.width -
                            1)] = 224
        return img
Code example #11
def getGradientRespectToTranslation(agent, targetPositionW, NBV):

    R_w_s = quaternion.as_rotation_matrix(
        agent.get_state().sensor_states["left_sensor"].rotation)

    t_w_s = agent.state.sensor_states["left_sensor"].position

    R = R_w_s.dot(R_s_c)

    sum_delta = np.zeros(3)

    delta = 2 * (R.dot(NBV) - targetPositionW + t_w_s)

    while (np.linalg.norm(sum_delta) < 0.25):
        sum_delta += delta

    return sum_delta
Code example #12
def compute_rotation(ground_point):
    x_plus = np.array([1, 0, 0])
    z_plus = np.array([0, 0, 1])
    look_vector = -ground_point / np.linalg.norm(ground_point)

    # First compute the rotation that takes +Z to the look vector
    look_rotation = rotation_between(z_plus, look_vector)

    # Next compute the rotation that aligns North up: the rotation that takes
    # the rotated +X to the component of +Z that is orthogonal to the look vector.
    rotated_x = quaternion.as_rotation_matrix(look_rotation) @ x_plus
    north_up = z_plus - np.dot(z_plus, look_vector) * look_vector
    # If the rotated +X and the north-up vector are exact opposites, use a
    # 180-degree rotation about the look vector so the result stays well defined.
    north_rotation = rotation_between(rotated_x, north_up, look_vector)
    return north_rotation * look_rotation
Code example #13
    def updateEyeX(self):
        global gEye, gAt, gUp
        wp = gEye - gAt
        w = wp / np.sqrt(np.dot(wp, wp))
        upt = np.cross(gUp, w)
        u = upt / np.sqrt(np.dot(upt, upt))
        v = np.cross(w, u)

        print("vvvvv", v)
        print("xsubrad", self.xsubrad)

        quat = quaternion.quaternion(np.cos(self.xsubrad),
                                     np.sin(self.xsubrad) * gUp[0],
                                     np.sin(self.xsubrad) * gUp[1],
                                     np.sin(self.xsubrad) * gUp[2])
        qrm = quaternion.as_rotation_matrix(quat)

        gEye = qrm @ wp + gAt
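
A caution on the construction above: a unit rotor cos(a) + sin(a)(ux i + uy j + uz k) rotates vectors by 2a about the axis u, so self.xsubrad is presumably already a half-angle. If you have the full angle instead, the package's from_rotation_vector takes an axis scaled by the full angle directly (a hedged equivalent sketch):

import numpy as np
import quaternion

angle = np.pi / 3                    # full rotation angle
axis = np.array([0.0, 1.0, 0.0])     # unit rotation axis (e.g. gUp)
quat = quaternion.from_rotation_vector(angle * axis)
qrm = quaternion.as_rotation_matrix(quat)
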
Code example #14
 def drawCoord():
     rot = quaternion.as_rotation_matrix(quaternion.one)
     arrow_prop_dict = dict(mutation_scale=20,
                            arrowstyle='->',
                            shrinkA=0,
                            shrinkB=0)
     a = Arrow3D(rot[:, 0], **arrow_prop_dict, color='r')
     ax.add_artist(a)
     b = Arrow3D(rot[:, 1], **arrow_prop_dict, color='g')
     ax.add_artist(b)
     c = Arrow3D(rot[:, 2], **arrow_prop_dict, color='b')
     ax.add_artist(c)
     cap1 = ax.text(0.0, 0.0, -0.15, r'$o$')
     cap2 = ax.text(rot[:, 0][0], rot[:, 0][1], rot[:, 0][2] + 0.1, r'$x$')
     cap3 = ax.text(rot[:, 1][0], rot[:, 1][1], rot[:, 1][2] + 0.1, r'$y$')
     cap4 = ax.text(rot[:, 2][0], rot[:, 2][1], rot[:, 2][2] + 0.1, r'$z$')
     title0 = ax.set_title('Current Step: 0.0')
     return a, b, c, cap1, cap2, cap3, cap4, title0
Code example #15
def get_plane_params_in_global(planes, camera_info):
    """
    input:
    @planes: plane params
    @camera_info: plane params from camera info, type = dict, must contain 'position' and 'rotation' as keys
    output:
    plane parameters in global frame.
    """
    tran = camera_info["position"]
    rot = camera_info["rotation"]
    start = np.ones((len(planes), 3)) * tran
    end = planes * np.array([1, -1, -1])  # suncg2habitat
    end = (quaternion.as_rotation_matrix(rot) @ (end).T).T + tran  # cam2world
    a = end
    b = end - start
    planes_world = (
        (a * b).sum(axis=1) / np.linalg.norm(b, axis=1)**2).reshape(-1, 1) * b
    return planes_world
Code example #16
def computeObsCovariance(agent, depth_pair, targetPositionW):

    pixels = targetPixelInCurrentCamera(agent, targetPositionW)

    pixels_withoutOffset = np.array(
        [pixels[0] - cx, pixels[1] - cx, pixels[2] - cy])

    J = makeJacobian(pixels_withoutOffset)
    Q = makeQ(depth_pair[int(pixels[2]), int(pixels[0])])

    R_w_s = quaternion.as_rotation_matrix(
        agent.get_state().sensor_states["left_sensor"].rotation)

    R = R_w_s.dot(R_s_c)

    U_obs = np.linalg.multi_dot([R, J, Q, J.T, R.T])

    return U_obs
Code example #17
def setAgentPosition(agent, nextBestCameraPositionIncy):

    R_w_s = quaternion.as_rotation_matrix(
        agent.get_state().sensor_states["left_sensor"].rotation)
    R_w_c = R_w_s.dot(R_s_c)  # R_w_c

    nextBestCameraPosition = nextBestCameraPositionIncy + R_w_c.dot(
        tcyclopean_leftcamera)

    agent_state = agent.get_state()
    y = agent_state.position[1]
    # y = nextBestCameraPositionIncy[1]-1.5
    agent_state.position = np.array([
        nextBestCameraPosition[0] + cam_baseline / 2, y,
        nextBestCameraPosition[2]
    ])
    agent.set_state(agent_state)
    return agent.state  # agent.scene_node.transformation_matrix()
Code example #18
def IMU_double_integration(t,
                           rotation,
                           acceleration,
                           no_transform=False,
                           only_xy=False):
    """
    Compute position and orientation by integrating angular velocity and double integrating acceleration
    Expect the drift to be as large as hell
    :param t: time sequence, Nx1 array
    :param rotation: device orientation as quaternion, Nx4 array
    :param acceleration: acceleration data, Nx3 array
    :param no_transform: if set to True, assume acceleration vectors are already in the global frame
    :param only_xy: if set to True, zero out the z component of the integrated position
    :return: position: Nx3 array
    """
    # Sanity check
    assert t.shape[0] == rotation.shape[0], "{}, {}".format(
        t.shape[0], rotation.shape[0])
    assert t.shape[0] == acceleration.shape[0]
    if not no_transform:
        assert rotation.shape[1] == 4

    # quats = quaternion.as_quat_array(rotation)
    # convert the acceleration vector to world coordinate frame

    if no_transform:
        result = acceleration
    else:
        result = np.empty([acceleration.shape[0], 3], dtype=float)
        for i in range(acceleration.shape[0]):
            q = quaternion.quaternion(*rotation[i])
            result[i, :] = np.dot(quaternion.as_rotation_matrix(q),
                                  acceleration[i, :].reshape([3,
                                                              1])).flatten()
    # double integration with trapz rule
    position = integrate.cumtrapz(integrate.cumtrapz(result,
                                                     t,
                                                     axis=0,
                                                     initial=0),
                                  t,
                                  axis=0,
                                  initial=0)
    if only_xy:
        position[:, 2] = 0
    return position
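
The per-sample loop above can be vectorized, since as_rotation_matrix accepts quaternion arrays and returns an Nx3x3 stack (as exercised in example #1). A hedged sketch, assuming rotation rows are ordered (w, x, y, z) as the loop implies:

import numpy as np
import quaternion

def rotate_to_world(rotation, acceleration):
    quats = quaternion.as_quat_array(rotation)       # N quaternions
    R = quaternion.as_rotation_matrix(quats)         # N x 3 x 3 stack
    return np.einsum('nij,nj->ni', R, acceleration)  # rotate each sample
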
Code example #19
def project(pts_3d, camera_params, mobile_captures, pts_2d_3d_indices, pts_2d_cam_indices, viz=0):

    PTS_2D = np.empty((0, 2))
    n_cams = len(mobile_captures.keys())

    for c in range(n_cams):

        fx, fy = camera_params[c, :2]
        px, py = camera_params[c, 2:4]
        R = quat.as_rotation_matrix(quat.quaternion(*camera_params[c, 4:8]))
        C = camera_params[c, 8:]

        # select the indices of only the 3d points triangulated from the current camera
        pts_3d_idx = pts_2d_3d_indices[pts_2d_cam_indices == c]

        _pts_3d = pts_3d[:, pts_3d_idx]

        K = np.asarray([[fx, 0, px],
                        [0, fy, py],
                        [0, 0, 1]])

        P = np.dot(K, np.c_[R.T, - np.dot(R.T, C)])

        pts_2d = np.dot(P, np.vstack((_pts_3d, np.ones((1, _pts_3d.shape[1])))))

        pts_2d = pts_2d[:2, :] / pts_2d[2, :]

        PTS_2D = np.vstack((PTS_2D, pts_2d.T))

        cam_key = c + 1  # todo: substitute this by taking the key directly from the key list
        if viz == cam_key or viz == -1:
            plt.figure()
            _img = copy.deepcopy(mobile_captures[cam_key]['im'])
            _img[:, :, 0] = mobile_captures[cam_key]['im'][:, :, 2]
            _img[:, :, 2] = mobile_captures[cam_key]['im'][:, :, 0]
            plt.imshow(_img)
            plt.plot(mobile_captures[cam_key]['person_pose'][::3], mobile_captures[cam_key]['person_pose'][1::3],
                     'o', color='green', markersize=4)
            plt.plot(pts_2d[0, :], pts_2d[1, :],
                     'o', color='red', markersize=4)
            plt.draw()
            plt.pause(.001)

    return PTS_2D.T
Code example #20
def get_camera_pose(tf_buffer,
                    map_frame_id,
                    camera_frame_id,
                    target_time=rospy.Time()):
    trans = get_tf_pose(tf_buffer, map_frame_id, camera_frame_id, target_time)

    if trans is None:
        return None, None

    translation = np.asarray([
        trans.transform.translation.x, trans.transform.translation.y,
        trans.transform.translation.z
    ]).reshape(-1, 1)
    rotation = quaternion.as_rotation_matrix(
        quaternion.quaternion(trans.transform.rotation.w,
                              trans.transform.rotation.x,
                              trans.transform.rotation.y,
                              trans.transform.rotation.z))
    return translation, rotation
Code example #21
  def __init__(self,x,y,z,vx,vy,vz,phi,theta,gamma,
               phidot,thetadot,gammadot,debug=False):
    """
    Constructor

    x,y,z: euclidean positions
    vx,vy,vz: velocities
    phi,theta,gamma: Euler angles
    phidot,thetadot,gammadot: Euler angle time derivatives
    debug: default False; for printing

    Note: all kinematic variables are collectively
    referred to as 'coordinates' since they are the
    variables that change in the equations of motion (EOM).

    Calls update_data_fields to calculate quantities 
    that depend only on positions.
    """
    self.x=x 
    self.y=y
    self.z=z
    self.vx=vx
    self.vy=vy
    self.vz=vz
    self.phi=phi
    self.theta=theta
    self.gamma=gamma
    q = toQuaternion.euler_to_quaternion(phi,theta,gamma) # CHANGE
    self.q0, self.q1, self.q2, self.q3 = q.w, q.x, q.y, q.z

    # Convert from angle_dots to wxl,wyl,wzl
    self.rotation_matrix = quaternion.as_rotation_matrix(q)
    self.calc_trig_functions()
    st,ct = self.sin_theta,self.cos_theta
    angular_velocity_B = np.array([phidot*ct,
                     thetadot,
                     phidot*st + gammadot])
    self.wxl,self.wyl,self.wzl = np.dot(angular_velocity_B,self.rotation_matrix)
    
    # CHANGE
    self.debug=debug
    self.has_model=False
    self.update_data_fields()
Code example #22
    def as_matrix(self) -> np.ndarray:
        """Return the transform as a 4x4 transform matrix.

        Returns:
            A 4x4 numpy array represents the transform matrix.

        Examples:
            >>> transform = Transform3D([1, 2, 3], [0, 1, 0, 0])
            >>> transform.as_matrix()
            array([[ 1.,  0.,  0.,  1.],
                   [ 0., -1.,  0.,  2.],
                   [ 0.,  0., -1.,  3.],
                   [ 0.,  0.,  0.,  1.]])

        """
        matrix: np.ndarray = np.eye(4)
        matrix[:3, 3] = self._translation
        matrix[:3, :3] = as_rotation_matrix(self._rotation)
        return matrix
Code example #23
File: Annotation2Mesh.py Project: johnjsb/Scan2CAD
def calc_Mbbox(model):
    trs_obj = model["trs"]
    bbox_obj = np.asarray(model["bbox"], dtype=np.float64)
    center_obj = np.asarray(model["center"], dtype=np.float64)
    trans_obj = np.asarray(trs_obj["translation"], dtype=np.float64)
    rot_obj = np.asarray(trs_obj["rotation"], dtype=np.float64)
    q_obj = np.quaternion(rot_obj[0], rot_obj[1], rot_obj[2], rot_obj[3])
    scale_obj = np.asarray(trs_obj["scale"], dtype=np.float64)

    tcenter1 = np.eye(4)
    tcenter1[0:3, 3] = center_obj
    trans1 = np.eye(4)
    trans1[0:3, 3] = trans_obj
    rot1 = np.eye(4)
    rot1[0:3, 0:3] = quaternion.as_rotation_matrix(q_obj)
    scale1 = np.eye(4)
    scale1[0:3, 0:3] = np.diag(scale_obj) * np.diag(bbox_obj)
    M = trans1.dot(rot1).dot(scale1).dot(tcenter1)
    return M
Code example #24
File: PoseTransform.py Project: zsqiang001/kapture
    def compose(pose_list: List['PoseTransform']) -> 'PoseTransform':
        """
        Merges multiple pose transformation into a single one.

        :param pose_list: the list of pose transformation to be merged. They are merged from right to left.
        :return: the pose transformation being the composition of the given ones.
        """
        assert isinstance(pose_list, list)
        pose_composed = pose_list[0]
        for pose_current in pose_list[1:]:
            # shrink the poses from the left
            assert pose_current._r is not None
            assert pose_current._t is not None
            new_r = pose_composed.r * pose_current.r
            new_t = np.add(
                np.matmul(quaternion.as_rotation_matrix(pose_composed.r),
                          pose_current.t), pose_composed.t)
            pose_composed = PoseTransform(new_r, new_t)
        return pose_composed
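
The loop encodes the usual rigid-transform composition: for T1 composed with T2 (T2 applied first), the rotation is r1 * r2 and the translation is R(r1) t2 + t1. As a standalone sketch (the helper name is hypothetical, not part of kapture):

import numpy as np
import quaternion

def compose_two(r1, t1, r2, t2):
    """Compose (r1, t1) after (r2, t2); r1, r2 are quaternions."""
    r = r1 * r2
    t = quaternion.as_rotation_matrix(r1) @ np.asarray(t2) + np.asarray(t1)
    return r, t
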
Code example #25
File: DataLoader.py Project: KAIST-NMAIL/DFMNET
    def forwardKinematics(self, y_data, tag):
        hip_data = self.dataset[tag]['test_hip']
        hip_quart = quaternion.as_quat_array(hip_data[:, :4].copy())
        hip_position = hip_data[:, 4:].copy()

        y_data = deepcopy(y_data)
        outs = np.zeros_like(y_data)

        marker_idxs = np.array(range(y_data.shape[1])).reshape(-1, 3)

        for idx in range(y_data.shape[0]):
            rtMat = quaternion.as_rotation_matrix(hip_quart[idx])
            pos = hip_position[idx]
            for marker_idx in marker_idxs:
                outs[idx,
                     marker_idx] = np.matmul(rtMat.T,
                                             (y_data[idx, marker_idx])) + pos

        return outs
Code example #26
    def _get_transform_mtrx(self, idx):
        c_idx = idx
        n_idx = idx+1

        c_pose = self.poses[c_idx]
        n_pose = self.poses[n_idx]

        local_dtrans = np.linalg.inv(c_pose) @ n_pose[:, 3]

        quat_c = quat.from_rotation_matrix(c_pose[:3,:3])
        quat_n = quat.from_rotation_matrix(n_pose[:3,:3])
        quat_t = quat_c.inverse() * quat_n

        transform = np.eye(4)

        transform[:3, :3] = quat.as_rotation_matrix(quat_t)
        transform[:3, 3] = local_dtrans[:3]

        return transform
Code example #27
    def set_K_frame_to_link(self, frame_name, timeout=5.0):
        """
        Set new K frame to the same frame as the link frame given by 'frame_name'
        Motion controllers are stopped for switching

        @type frame_name: str 
        @param frame_name: desired tf frame name in the tf tree
        @rtype: [bool, str]
        @return: [success status of service request, error msg if any]
        """

        trans = False
        listener = tf.TransformListener()
        err = "FrankaFramesInterface: Error while looking up transform from EE frame to link frame %s" % frame_name

        def body():
            try:
                listener.lookupTransform('/panda_EE', frame_name,
                                         rospy.Time(0))
            except (tf.LookupException, tf.ConnectivityException,
                    tf.ExtrapolationException) as e:
                err = e
                return False
            return True

        franka_dataflow.wait_for(lambda: body(),
                                 timeout=timeout,
                                 raise_on_error=True,
                                 timeout_msg=err)

        t, rot = listener.lookupTransform('/panda_EE', frame_name,
                                          rospy.Time(0))

        rot = np.quaternion(rot[3], rot[0], rot[1], rot[2])

        rot = quaternion.as_rotation_matrix(rot)

        trans_mat = np.eye(4)

        trans_mat[:3, :3] = rot
        trans_mat[:3, 3] = np.array(t)

        return self.set_K_frame(trans_mat)
Code example #28
def save_parts(cube_params, output_file, level='1'):
  n_part = np.shape(cube_params)[0]
  mtl_filename = output_file.replace('.obj', '.mtl')
  with open(mtl_filename, 'w') as f:
    palette = color_palette['L{}'.format(level)]
    n_color = len(palette)
    for i in range(n_part):
      color_index = i
      if level == '1':
        step = n_color/float(n_part)
        color_index = int(i*step)
      part_color = palette[color_index]
      f.write('newmtl m{}\nKd {} {} {}\nKa 0 0 0\n'.format(i, float(part_color[0])/256, float(part_color[1])/256, float(part_color[2])/256))

  with open(output_file, 'w') as f:
    vert_offset = 0
    n_vert = np.shape(cube_vert)[0]
    f.write('mtllib {}\n'.format(mtl_filename.split('/')[-1]))
    for i in range(n_part):
      verts = cube_vert
      scale = cube_params[i][0:3]
      rot = cube_params[i][3:7]
      translation = cube_params[i][7:10]
      verts = verts * scale
      quat = np.quaternion(rot[0], rot[1], rot[2], rot[3])
      rotation = quaternion.as_rotation_matrix(quat)
      verts = np.transpose(np.matmul(rotation, np.transpose(verts)))
      verts = verts + translation

      faces = cube_face + vert_offset
      f.write('# {} {} {} {} {} {} {} {} {} {}\n'.format(
          scale[0], scale[1], scale[2],
          rot[0], rot[1], rot[2], rot[3],
          translation[0], translation[1], translation[2])
      )
      f.write('usemtl m{}\n'.format(i))
      for j in range(n_vert):
        sigma = 0.1
        f.write('v {} {} {}\n'.format(verts[j][0], verts[j][1], verts[j][2]))
      for j in range(np.shape(cube_face)[0]):
        f.write('f {} {} {} {}\n'.format(faces[j][0], faces[j][1], faces[j][2], faces[j][3]))
      vert_offset += n_vert
Code example #29
def nextBestCameraPosition(agent, delta):

    #need Rwc twc=tws
    R_c_s = R_s_c.T

    R_w_s = quaternion.as_rotation_matrix(
        agent.get_state().sensor_states["left_sensor"].rotation)
    R_w_c = R_w_s.dot(R_s_c)  # R_w_c
    R_s_w = R_w_s.T
    t_w_s = agent.state.sensor_states["left_sensor"].position

    t_s_w = -R_s_w.dot(t_w_s)

    t_c_w = R_c_s.dot(t_s_w)

    sum_delta = 0.25 * (delta / np.linalg.norm(delta))
    print("sum_delta {0}".format(sum_delta))
    NBCameraPosition = -R_w_c.dot(t_c_w - sum_delta)

    return NBCameraPosition
Code example #30
def computeObsCovariance(agent, depth_pair, targetPositionW):

    pixels = targetPixelInCurrentCamera(agent, targetPositionW)

    isValid = isObserved(pixels)

    if isValid:
        J = makeJacobian(pixels, cam_baseline)
        Q = makeQ(depth_pair[pixels[2], pixels[0]])

        R_w_s = quaternion.as_rotation_matrix(
            agent.get_state().sensor_states["left_sensor"].rotation)

        R = R_w_s.dot(R_s_c)

        U_obs = np.linalg.multi_dot([R, J, Q, J.T, R.T])

        return True, U_obs
    else:
        return False, np.identity(3)
Code example #31
def computeCameraPostion(agent, targetPositionNext_, targetPositionW_):

    R_w_s = quaternion.as_rotation_matrix(
        agent.state.sensor_states["left_sensor"].rotation)

    tws = agent.state.sensor_states["left_sensor"].position

    targetPositionNextS_ = R_s_c.dot(targetPositionNext_)

    delta_tws = 2 * (R_w_s.dot(
        (targetPositionNextS_)) + tws - targetPositionW_)

    delta_tws = delta_tws / np.linalg.norm(delta_tws)

    nextBestCameraPosition = tws - 0.25 * delta_tws

    print("delta_tws: {0}, norm of delta_tws: {1}".format(
        delta_tws, np.linalg.norm(delta_tws)))
    print("nextBestCameraPostion: {0}".format(nextBestCameraPosition))
    return nextBestCameraPosition
Code example #32
        def _imuCallback(data):
            self.prev_time = time.time()
            self.func_data[theme_name].setdefault('value', [])
            self.func_data[theme_name].setdefault('ros_value', [])
            try:
                frame_id = data.header.frame_id
                value = data.linear_acceleration
                self.func_data[theme_name]['value'].append(value)

                if (self.tfBuffer.can_transform('camera_link', frame_id, rclpy.time.Time(), rclpy.time.Duration(nanoseconds=3e6))):
                    msg = self.tfBuffer.lookup_transform('camera_link', frame_id, rclpy.time.Time(), rclpy.time.Duration(nanoseconds=3e6)).transform
                    # np.quaternion takes (w, x, y, z)
                    quat = np.quaternion(msg.rotation.w, msg.rotation.x,
                                         msg.rotation.y, msg.rotation.z)
                    point = np.matrix([value.x, value.y, value.z], dtype='float32')
                    point.resize((3, 1))
                    rotated = quaternion.as_rotation_matrix(quat) * point
                    rotated.resize(1,3)
                    self.func_data[theme_name]['ros_value'].append(rotated)
            except Exception as e:
                print(e)
                return
Code example #33
File: simulator.py Project: cameronus/control-sim
    def state_dot(self, t, state):
        velocity = state[3:6]
        orientation = quaternion.from_float_array(state[6:10])
        omega = state[10:]

        drag = 0.5 * self.FLUID_DENSITY * self.controller.DRAG_COEFF * self.controller.AREA * (velocity - self.WIND) * np.absolute(velocity - self.WIND)

        force = np.array([0, 0, -self.GRAVITY]) - drag + self.control_force
        torque = self.control_torque

        state_dot = np.empty((13)) # [x, y, z, vx, vy, vz, qw, qx, qy, qz, ox, oy, oz]
        state_dot[0:3] = velocity
        state_dot[3:6] = force / self.controller.MASS

        state_dot[6:10] = quaternion.as_float_array(0.5 * np.quaternion(0, *omega) * orientation)
        rotation_matrix = quaternion.as_rotation_matrix(orientation)
        rot_inertia_tensor = np.asarray(rotation_matrix * self.controller.INERTIA_TENSOR * np.transpose(rotation_matrix))
        state_dot[10:] = np.matmul(np.linalg.inv(rot_inertia_tensor), (torque - np.cross(omega, np.matmul(rot_inertia_tensor, omega))))

        return state_dot
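
A note on the inertia-tensor line above: with plain ndarrays, * is elementwise, so the similarity transform only comes out right if INERTIA_TENSOR is an np.matrix. With ordinary arrays the same step, I_world = R I_body Rᵀ, would be written as follows (the inertia values here are made up):

import numpy as np
import quaternion

orientation = np.quaternion(1, 0, 0, 0)
inertia_body = np.diag([0.1, 0.1, 0.2])
R = quaternion.as_rotation_matrix(orientation)
inertia_world = R @ inertia_body @ R.T
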
Code example #34
File: Simulate.py Project: DocSohl/NeuroEvolve
    def run(self):
        t = threading.Thread(target=runQuad, args=(self.quad,))
        t.start()
        while 1:
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    self.quad.quit()
                    pygame.quit()
                    sys.exit()

            self.clock.tick(50)
            self.screen.fill((0, 32, 0))

            position, orientation = self.quad.params
            rotation = quaternion.as_rotation_matrix(orientation)
            t = self.vertices.dot(rotation) + position

            factors = 256 / (4 + t[:,2]) # fov / viewer_distance + z
            t[:, 0] =  t[:, 0] * factors + self.screen.get_width() / 2
            t[:, 1] = -t[:, 1] * factors + self.screen.get_height() / 2

            avg_z = []
            for i, f in enumerate(self.faces):
                z = (t[f[0],2] + t[f[1],2] + t[f[2],2] + t[f[3],2]) / 4.0
                avg_z.append([i, z])

            for tmp in sorted(avg_z, key=itemgetter(1), reverse=True):
                face_index = tmp[0]
                f = self.faces[face_index]
                pointlist = [(t[f[0],0], t[f[0],1]), (t[f[1],0], t[f[1],1]),
                             (t[f[1],0], t[f[1],1]), (t[f[2],0], t[f[2],1]),
                             (t[f[2],0], t[f[2],1]), (t[f[3],0], t[f[3],1]),
                             (t[f[3],0], t[f[3],1]), (t[f[0],0], t[f[0],1])]
                pygame.draw.polygon(self.screen, self.colors[face_index], pointlist)
            self.angle += 1

            pygame.display.flip()
Code example #35
File: script.py Project: jflamant/quaternion
#!/usr/bin/env python

from __future__ import print_function, division, absolute_import
import sys
import math
import numpy as np
import quaternion
from numpy import *


eps = np.finfo(float).eps


rot_mat_eps = 200*eps


R1 = np.array([quaternion.quaternion(0, -math.sqrt(0.5), 0, -math.sqrt(0.5))])
print(R1)
print(quaternion.as_rotation_matrix(R1))
R2 = quaternion.from_rotation_matrix(quaternion.as_rotation_matrix(R1))
print(R2)
d = quaternion.rotation_intrinsic_distance(R1[0], R2[0])
print(d)
print()
sys.stdout.flush()
sys.stderr.flush()
assert d < rot_mat_eps, (R1, R2, d)  # Can't use allclose here; we don't care about rotor sign