Example #1
 def get_pose(self, key):
     obj_id = self._key_to_id[key]
     obj_t, obj_q_xyzw = pybullet.getBasePositionAndOrientation(obj_id, physicsClientId=self._physics_client)
     obj_q_wxyz = np.roll(obj_q_xyzw, 1)
     pose = RigidTransform(rotation=obj_q_wxyz,
                           translation=obj_t,
                           from_frame='obj',
                           to_frame='world')
     pose = self._deconvert_pose(pose, self._key_to_com[key])
     return pose
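A quick illustration of the quaternion reordering above: PyBullet returns quaternions as (x, y, z, w), while autolab_core's RigidTransform expects (w, x, y, z), so np.roll(q, 1) moves w to the front. The value below is a hypothetical 90-degree yaw.

import numpy as np

q_xyzw = np.array([0.0, 0.0, 0.7071068, 0.7071068])  # PyBullet order
q_wxyz = np.roll(q_xyzw, 1)                          # autolab_core order
print(q_wxyz)  # [0.7071068 0.        0.        0.7071068]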
Example #2
 def get_camera_external_param(self, pos_data):
     rotation_quaternion = np.asarray([pos_data.pose.pose.orientation.w, pos_data.pose.pose.orientation.x,
                                       pos_data.pose.pose.orientation.y, pos_data.pose.pose.orientation.z])
     translation = np.asarray([pos_data.pose.pose.position.x, pos_data.pose.pose.position.y,
                               pos_data.pose.pose.position.z])
     T_qua2rota = RigidTransform(rotation_quaternion, translation)
     self.external_mat = np.zeros((3, 4))
     self.external_mat[:, :3] = T_qua2rota.rotation
     self.rotation_mat = T_qua2rota.rotation
     self.external_mat[:, 3] = T_qua2rota.translation
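A minimal sketch of how a 3x4 extrinsic matrix like external_mat is typically consumed: combined with an intrinsics matrix K to project a homogeneous world point to pixels. K, the pose, and the point are all assumed values for illustration.

import numpy as np

K = np.array([[500.0, 0.0, 320.0],
              [0.0, 500.0, 240.0],
              [0.0, 0.0, 1.0]])                            # hypothetical intrinsics
external_mat = np.hstack([np.eye(3),
                          np.array([[0.1], [0.0], [0.5]])])  # [R | t]

point_world_h = np.array([0.2, 0.0, 1.0, 1.0])             # homogeneous world point
uvw = K.dot(external_mat.dot(point_world_h))
pixel = uvw[:2] / uvw[2]                                   # perspective divide
print(pixel)  # [420. 240.]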
Example #3
def only_catch_full(catching_speed):
    y.set_v(catching_speed)
    write_log.write_log(1, 2, catching_speed, 1, time_diff)
    y.right.goto_pose(
        RigidTransform(translation=[0.3, 0, 0.2],
                       rotation=robot_action.rpy_to_wxyz(90, 0, 90)))
    robot_action.plate_catch(y)
    time.sleep(5)
    y.reset_home()
    y.right.open_gripper()
Example #4
def only_give_empty(giving_speed):
    y.set_v(giving_speed)
    write_log.write_log(2, 1, giving_speed, 1, time_diff)
    y.right.goto_pose(
        RigidTransform(translation=[0.3, 0, 0.2],
                       rotation=robot_action.rpy_to_wxyz(90, 0, 90)))
    robot_action.plate_drop(y)
    time.sleep(5)
    y.reset_home()
    y.right.open_gripper()
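robot_action.rpy_to_wxyz is not shown in these snippets; a plausible equivalent built on SciPy is sketched below. The 'xyz' Euler order and the degrees convention are assumptions, not confirmed by the source.

import numpy as np
from scipy.spatial.transform import Rotation

def rpy_to_wxyz(roll, pitch, yaw):
    # SciPy returns quaternions as (x, y, z, w); roll w to the front.
    q_xyzw = Rotation.from_euler('xyz', [roll, pitch, yaw], degrees=True).as_quat()
    return np.roll(q_xyzw, 1)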
Example #5
def matrixToPoseStamped(mat):
    rot_mat = mat[:3, :3]
    trans = mat[:3, 3]

    rigid_transform = RigidTransform(rot_mat, trans, from_frame='from', to_frame='to')

    pose = RigidTransformToPoseStamped(rigid_transform)

    return pose
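RigidTransformToPoseStamped is defined elsewhere in the source project; a plausible sketch using geometry_msgs is below. The function body and the omission of the header frame_id are simplifying assumptions.

from geometry_msgs.msg import PoseStamped

def RigidTransformToPoseStamped(rt):
    # autolab_core quaternions are ordered (w, x, y, z).
    w, x, y, z = rt.quaternion
    pose = PoseStamped()
    pose.pose.orientation.w = w
    pose.pose.orientation.x = x
    pose.pose.orientation.y = y
    pose.pose.orientation.z = z
    (pose.pose.position.x,
     pose.pose.position.y,
     pose.pose.position.z) = rt.translation
    return pose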
Example #6
def lookup_transform(to_frame, from_frame='base'):
    """
    Returns the AR tag position in world coordinates

    Parameters
    ----------
    to_frame : string
        examples are: ar_marker_7, gearbox, pawn, ar_marker_3, etc
    from_frame : string
        let's be real, you're probably only going to use 'base'

    Returns
    -------
    :obj:`autolab_core.RigidTransform` AR tag position or object in world coordinates
    """
    tag_rot = None
    tag_pos = None

    print('CALLING lookup_transform')
    print('to_frame: {}, from_frame: {}'.format(to_frame, from_frame))
    if not ros_enabled:
        print('I am the lookup transform function!  '
              "You're not using ROS, so I'm returning the Identity Matrix.")
        return RigidTransform(to_frame=from_frame, from_frame=to_frame)
    print('initializing transformlistener')
    listener = tf.TransformListener()
    attempts, max_attempts, rate = 0, 10, rospy.Rate(1.0)
    while attempts < max_attempts:
        print('attempt {}'.format(attempts))
        try:
            t = listener.getLatestCommonTime(from_frame, to_frame)
            tag_pos, tag_rot = listener.lookupTransform(
                from_frame, to_frame, t)
            break  # stop retrying once the lookup succeeds
        except Exception as e:
            print(e)
            rate.sleep()
        attempts += 1
    tag_rot = np.array([tag_rot[3], tag_rot[0], tag_rot[1], tag_rot[2]])
    rot = RigidTransform.rotation_from_quaternion(tag_rot)
    return RigidTransform(rot,
                          tag_pos,
                          to_frame=to_frame,
                          from_frame=from_frame)
Example #7
def visualize_test():
    I = RigidTransform()
    g_ab = RigidTransform()
    g_ab.translation = np.array([0.05, 0, 0])
    g_ab.rotation = np.array([[0, -1, 0], [1, 0, 0], [0, 0, 1]])

    q_a = np.array([0., 0., 0.])

    p_b = np.array([0., 0., 0.])
    p_a = apply_transform(p_b, g_ab)

    print('g_ab = \n{}'.format(g_ab.matrix))

    vis3d.pose(I, alpha=0.01, tube_radius=0.001, center_scale=0.001)
    vis3d.points(q_a, color=(1, 0, 0), scale=0.005)

    vis3d.pose(g_ab, alpha=0.01, tube_radius=0.001, center_scale=0.001)
    vis3d.points(p_a, color=(0, 1, 0), scale=0.005)
    vis3d.show()
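apply_transform is not defined in this snippet; a minimal sketch of its presumed behavior, rotating and then translating a bare 3-vector by a RigidTransform:

import numpy as np

def apply_transform(point, g):
    # p_a = R_ab @ p_b + t_ab (assumed semantics of the helper).
    return g.rotation.dot(point) + g.translation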
Example #8
    def _compute_scene_bounds(self):
        """The axis aligned bounds of the scene.

        Returns
        -------
        (2,3) float
            The bounding box with [min, max] coordinates.
        """
        lb = np.array([np.inf, np.inf, np.inf])
        ub = -1.0 * np.array([np.inf, np.inf, np.inf])
        for on in self.scene.objects:
            o = self.scene.objects[on]
            poses = [
                RigidTransform(from_frame=o.T_obj_world.from_frame,
                               to_frame=o.T_obj_world.to_frame)
            ]
            if isinstance(o, InstancedSceneObject):
                # Cheat for instanced objects -- just find the min/max translations and create poses from those
                # Compile translations
                translations = np.array([p.translation for p in o.poses])
                min_trans = np.min(translations, axis=0)
                max_trans = np.max(translations, axis=0)
                poses = [
                    RigidTransform(translation=min_trans,
                                   from_frame=o.poses[0].from_frame,
                                   to_frame=o.poses[0].to_frame),
                    RigidTransform(translation=max_trans,
                                   from_frame=o.poses[0].from_frame,
                                   to_frame=o.poses[0].to_frame)
                ]
            for pose in poses:
                tf_verts = pose.matrix[:3, :3].dot(
                    o.mesh.vertices.T).T + pose.matrix[:3, 3]
                tf_verts = o.T_obj_world.matrix[:3, :3].dot(
                    tf_verts.T).T + o.T_obj_world.matrix[:3, 3]
                lb_mesh = np.min(tf_verts, axis=0)
                ub_mesh = np.max(tf_verts, axis=0)
                lb = np.minimum(lb, lb_mesh)
                ub = np.maximum(ub, ub_mesh)
        if np.any(lb > ub):
            return np.array([[-1.0, -1.0, -1.0], [1.0, 1.0, 1.0]])
        return np.array([lb, ub])
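The vertex transform above uses the row-vector pattern (R @ V.T).T + t, which is equivalent to V @ R.T + t for vertices stored as rows. A small numerical check:

import numpy as np

theta = np.pi / 4
R = np.array([[np.cos(theta), -np.sin(theta), 0.0],
              [np.sin(theta), np.cos(theta), 0.0],
              [0.0, 0.0, 1.0]])
t = np.array([1.0, 2.0, 3.0])
V = np.random.rand(10, 3)  # vertices as rows
assert np.allclose(R.dot(V.T).T + t, V.dot(R.T) + t)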
Example #9
    def test_inverse(self):
        R_a_b = RigidTransform.random_rotation()
        t_a_b = RigidTransform.random_translation()
        T_a_b = RigidTransform(R_a_b, t_a_b, 'a', 'b')
        T_b_a = T_a_b.inverse()

        # multiply with numpy arrays
        M_a_b = np.r_[np.c_[R_a_b, t_a_b], [[0, 0, 0, 1]]]
        M_b_a = np.linalg.inv(M_a_b)

        self.assertTrue(np.sum(np.abs(T_b_a.matrix - M_b_a)) < 1e-5,
                        msg='Inverse gave incorrect transformation')

        # check frames
        self.assertEqual(T_b_a.from_frame,
                         'b',
                         msg='Inverse has incorrect input frame')
        self.assertEqual(T_b_a.to_frame,
                         'a',
                         msg='Inverse has incorrect output frame')
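The same property can be exercised through composition: a transform multiplied by its inverse is the identity. A minimal sketch, assuming autolab_core is installed:

import numpy as np
from autolab_core import RigidTransform

T_a_b = RigidTransform(RigidTransform.random_rotation(),
                       RigidTransform.random_translation(), 'a', 'b')
T_a_a = T_a_b.inverse() * T_a_b  # T_b_a * T_a_b maps 'a' back onto itself
assert np.allclose(T_a_a.matrix, np.eye(4), atol=1e-8)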
Example #10
 def __init__(self,
              name,
              ar_marker,
              R_ar_obj=np.eye(3),
              t_ar_obj=np.zeros(3)):
     self.name = name
     self.ar_marker = ar_marker
     self.T_ar_obj = RigidTransform(rotation=R_ar_obj,
                                    translation=t_ar_obj,
                                    from_frame=name,
                                    to_frame=ar_marker)
Example #11
    def set_graspable_object(self, graspable, T_obj_world=RigidTransform(from_frame='obj',
                                                                         to_frame='world')):
        """ Adds and sets the target object in the environment.

        Parameters
        ----------
        graspable : :obj:`GraspableObject3D`
            the object to grasp
        T_obj_world : :obj:`autolab_core.RigidTransform`
            the transformation from obj to world frame
        """
        self.set_object(graspable.key, graspable.model_name, T_obj_world)
        self.set_target_object(graspable.key)
Example #12
    def pose(self, grasp_approach_dir=None):
        """Computes the 3D pose of the grasp relative to the camera.

        If an approach direction is not specified then the camera
        optical axis is used.

        Parameters
        ----------
        grasp_approach_dir : :obj:`numpy.ndarray`
            Approach direction for the grasp in camera basis (e.g. opposite to
            table normal).

        Returns
        -------
        :obj:`autolab_core.RigidTransform`
            The transformation from the grasp to the camera frame of reference.
        """
        # Check intrinsics.
        if self.camera_intr is None:
            raise ValueError(
                "Must specify camera intrinsics to compute 3D grasp pose")

        # Compute 3D grasp center in camera basis.
        grasp_center_im = self.center.data
        center_px_im = Point(grasp_center_im, frame=self.camera_intr.frame)
        grasp_center_camera = self.camera_intr.deproject_pixel(
            self.depth, center_px_im)
        grasp_center_camera = grasp_center_camera.data

        # Compute 3D grasp axis in camera basis.
        grasp_axis_im = self.axis
        grasp_axis_im = grasp_axis_im / np.linalg.norm(grasp_axis_im)
        grasp_axis_camera = np.array([grasp_axis_im[0], grasp_axis_im[1], 0])
        grasp_axis_camera = grasp_axis_camera / np.linalg.norm(
            grasp_axis_camera)

        # Convert to 3D pose.
        grasp_x_camera = grasp_approach_dir
        if grasp_approach_dir is None:
            grasp_x_camera = np.array([0, 0, 1])  # Align with camera Z axis.
        grasp_y_camera = grasp_axis_camera
        grasp_z_camera = np.cross(grasp_x_camera, grasp_y_camera)
        grasp_z_camera = grasp_z_camera / np.linalg.norm(grasp_z_camera)
        grasp_y_camera = np.cross(grasp_z_camera, grasp_x_camera)
        grasp_rot_camera = np.array(
            [grasp_x_camera, grasp_y_camera, grasp_z_camera]).T
        if np.linalg.det(grasp_rot_camera) < 0:  # Fix reflections to keep a right-handed frame.
            grasp_rot_camera[:, 0] = -grasp_rot_camera[:, 0]
        T_grasp_camera = RigidTransform(rotation=grasp_rot_camera,
                                        translation=grasp_center_camera,
                                        from_frame="grasp",
                                        to_frame=self.camera_intr.frame)
        return T_grasp_camera
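A quick property check on the rotation construction above: the axes built from normalized cross products form a right-handed orthonormal basis. The grasp axes below are assumed values.

import numpy as np

grasp_x = np.array([0.0, 0.0, 1.0])  # approach along the camera optical axis
grasp_y = np.array([1.0, 0.0, 0.0])  # grasp axis in the image plane
grasp_z = np.cross(grasp_x, grasp_y)
grasp_z = grasp_z / np.linalg.norm(grasp_z)
grasp_y = np.cross(grasp_z, grasp_x)
R = np.array([grasp_x, grasp_y, grasp_z]).T
assert np.allclose(R.dot(R.T), np.eye(3))  # orthonormal
assert np.isclose(np.linalg.det(R), 1.0)   # right-handed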
Example #13
    def __init__(self,
                 rosnode_name='franka_arm_client',
                 ros_log_level=rospy.INFO,
                 robot_num=1):

        self._execute_skill_action_server_name = \
                '/execute_skill_action_server_node_'+str(robot_num)+'/execute_skill'
        self._robot_state_server_name = \
                '/get_current_robot_state_server_node_'+str(robot_num)+'/get_current_robot_state_server'
        self._robolib_status_server_name = \
                '/get_current_robolib_status_server_node_'+str(robot_num)+'/get_current_robolib_status_server'

        self._connected = False
        self._in_skill = False

        # set signal handler to handle ctrl+c and kill sigs
        signal.signal(signal.SIGINT, self._sigint_handler_gen())

        # init ROS
        rospy.init_node(rosnode_name,
                        disable_signals=True,
                        log_level=ros_log_level)

        rospy.wait_for_service(self._robolib_status_server_name)
        self._get_current_robolib_status = rospy.ServiceProxy(
            self._robolib_status_server_name, GetCurrentRobolibStatusCmd)

        self._state_client = FrankaArmStateClient(
            new_ros_node=False,
            robot_state_server_name=self._robot_state_server_name)
        self._client = actionlib.SimpleActionClient(
            self._execute_skill_action_server_name, ExecuteSkillAction)
        self._client.wait_for_server()
        self.wait_for_robolib()

        # done init ROS
        self._connected = True

        # set default identity tool delta pose
        self._tool_delta_pose = RigidTransform(from_frame='franka_tool',
                                               to_frame='franka_tool_base')
Example #14
    def get_hand_pose(self, start, end):
        def get_hand_rot():
            """
            Computes the pose of the hand to be perpendicular to the direction of the push

            Parameters
            ----------
            start : 3x' :obj:`numpy.ndarray`
            end : 3x' :obj:`numpy.ndarray`

            Returns
            -------
            3x3 :obj:`numpy.ndarray`
                3D Rotation Matrix
            """
            # z = normalize(end - start)
            # y = normalize(np.cross(z, -up))
            # x = normalize(np.cross(z, -y))
            # y = normalize(start - end)
            # z = normalize(np.cross(y, up))
            # x = normalize(np.cross(y, z))
            # x = normalize(start - end)
            x = normalize(end - start)
            y = normalize(np.cross(x, up))
            z = normalize(np.cross(x, y))
            return np.hstack((x.reshape((-1,1)), y.reshape((-1,1)), z.reshape((-1,1))))

        R_push = get_hand_rot()
        start_pose = RigidTransform(
            rotation=R_push,
            translation=start,
            from_frame='grasp',
            to_frame='world'
        )
        end_pose = RigidTransform(
            rotation=R_push,
            translation=end,
            from_frame='grasp',
            to_frame='world'
        )
        return start_pose, end_pose
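normalize and up come from the enclosing module and are not shown; plausible definitions (assumptions, not confirmed by the source):

import numpy as np

up = np.array([0.0, 0.0, 1.0])  # assumed world up axis

def normalize(v):
    # Unit vector in the direction of v.
    return v / np.linalg.norm(v)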
Example #15
 def sample_scene_objs(self):
     model_pose = RigidTransform(
         rotation=np.eye(3),
         translation=np.array([0.0, 0.0, 0.0]),
         from_frame='obj',
         to_frame='world')  # model is already transformed
     for mesh, model_name in self.sample_models_set():
         model_material = self.sample_model_material()
         model_obj = SceneObject(mesh, model_pose, model_material)
         grip_obj = SceneObject(get_top_surface(mesh), model_pose,
                                model_material)
         yield model_obj, grip_obj, model_name
Example #16
    def T_grasp_obj(self):
        """ Rigid transformation from grasp frame to object frame.
        Rotation matrix is X-axis along approach direction, Y axis pointing between the jaws, and Z-axis orthogonal.
        Translation vector is the grasp center.

        Returns
        -------
        :obj:`RigidTransform`
            transformation from grasp to object coordinates
        """
        T_grasp_obj = RigidTransform(self.rotated_full_axis, self.center, from_frame='grasp', to_frame='obj')
        return T_grasp_obj
Example #17
    def add_graspable_object(self, graspable, T_obj_world=RigidTransform(from_frame='obj',
                                                                         to_frame='world')):
        """ Adds the target object to the environment.

        Parameters
        ----------
        graspable : :obj:`GraspableObject3D`
            the object to add
        T_obj_world : :obj:`autolab_core.RigidTransform`
            the transformation from obj to world frame
        """
        self.set_object(graspable.key, graspable.model_name, T_obj_world)
Example #18
def camera_pose():
    CAMERA_ROT = np.array([[0, -1, 0], [-1, 0, 0], [0, 0, -1]])
    theta = np.pi / 3
    c = np.cos(theta)
    s = np.sin(theta)
    CAMERA_ROT = np.array([[c, 0, -s], [0, 1, 0], [s, 0, c]]).dot(CAMERA_ROT)
    CAMERA_TRANS = 1.5 * np.array([-.4, 0, .3])
    return RigidTransform(CAMERA_ROT,
                          CAMERA_TRANS,
                          from_frame='camera',
                          to_frame='world')
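Products of rotation matrices remain orthonormal, so the composed CAMERA_ROT is still a valid rotation. A sanity check, assuming numpy and autolab_core are imported as in the surrounding module:

T_camera_world = camera_pose()
R = T_camera_world.rotation
assert np.allclose(R.T.dot(R), np.eye(3), atol=1e-8)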
Example #19
    def scroll(self, clicks):
        """Zoom using a mouse scroll wheel motion.

        Parameters
        ----------
        clicks : int
            The number of clicks. Positive numbers indicate forward wheel movement.
        """
        target = self._target
        ratio = 0.90

        z_axis = self._n_T_camera_world.matrix[:3, 2].flatten()
        eye = self._n_T_camera_world.matrix[:3, 3].flatten()
        radius = np.linalg.norm(eye - target)
        translation = clicks * (1 - ratio) * radius * z_axis
        t_tf = RigidTransform(translation=translation,
                              from_frame='world',
                              to_frame='world')
        self._n_T_camera_world = t_tf.dot(self._n_T_camera_world)

        z_axis = self._T_camera_world.matrix[:3, 2].flatten()
        eye = self._T_camera_world.matrix[:3, 3].flatten()
        radius = np.linalg.norm(eye - target)
        translation = clicks * (1 - ratio) * radius * z_axis
        t_tf = RigidTransform(translation=translation,
                              from_frame='world',
                              to_frame='world')
        self._T_camera_world = t_tf.dot(self._T_camera_world)
Example #20
    def test_composition(self):
        R_a_b = RigidTransform.random_rotation()
        t_a_b = RigidTransform.random_translation()
        R_b_c = RigidTransform.random_rotation()
        t_b_c = RigidTransform.random_translation()
        T_a_b = RigidTransform(R_a_b, t_a_b, "a", "b")
        T_b_c = RigidTransform(R_b_c, t_b_c, "b", "c")

        # multiply with numpy arrays
        M_a_b = np.r_[np.c_[R_a_b, t_a_b], [[0, 0, 0, 1]]]
        M_b_c = np.r_[np.c_[R_b_c, t_b_c], [[0, 0, 0, 1]]]
        M_a_c = M_b_c.dot(M_a_b)

        # use multiplication operator
        T_a_c = T_b_c * T_a_b

        self.assertTrue(
            np.sum(np.abs(T_a_c.matrix - M_a_c)) < 1e-5,
            msg="Composition gave incorrect transformation",
        )

        # check frames
        self.assertEqual(
            T_a_c.from_frame, "a", msg="Composition has incorrect input frame"
        )
        self.assertEqual(
            T_a_c.to_frame, "c", msg="Composition has incorrect output frame"
        )
Example #21
    def rotate(self, azimuth, axis=None):
        """Rotate the trackball about the "Up" axis by azimuth radians.

        Parameters
        ----------
        azimuth : float
            The number of radians to rotate.
        axis : (3,) float, optional
            The axis to rotate about; defaults to the camera's "Up" axis.
        """
        target = self._target

        y_axis = self._n_T_camera_world.matrix[:3, 1].flatten()
        if axis is not None:
            y_axis = axis
        x_rot_mat = transformations.rotation_matrix(-azimuth, y_axis, target)
        x_rot_tf = RigidTransform(x_rot_mat[:3, :3],
                                  x_rot_mat[:3, 3],
                                  from_frame='world',
                                  to_frame='world')
        self._n_T_camera_world = x_rot_tf.dot(self._n_T_camera_world)

        y_axis = self._T_camera_world.matrix[:3, 1].flatten()
        if axis is not None:
            y_axis = axis
        x_rot_mat = transformations.rotation_matrix(-azimuth, y_axis, target)
        x_rot_tf = RigidTransform(x_rot_mat[:3, :3],
                                  x_rot_mat[:3, 3],
                                  from_frame='world',
                                  to_frame='world')
        self._T_camera_world = x_rot_tf.dot(self._T_camera_world)
Example #22
def go2start(fa, joint_gains=None, cart_gains=None):
    """
    Resets joints to home and goes to starting position near door

    Inputs:
        fa: franka arm object
        joint_gains (list): list of 7 joint impedances to use for
            initial movements
        cart_gains (list): list of 6 cartesian impedances to use for
            rest of movements to door, [x,y,z,roll,pitch,yaw]

    Notes:
        Start position:
        start joints: [ 2.28650215 -1.36063288 -1.4431576  -1.93011263
                        0.23962597  2.6992652  0.82820212]
        start pose:
            Tra: [0.60516914 0.33243171 0.45531707]
            Rot: [[ 0.99917643 -0.02115786  0.03434495]
                  [-0.03350342  0.03890612  0.99868102]
                  [-0.02246619 -0.99900921  0.03816595]]
    """
    if cart_gains is None:
        cart_gains = [1000,1000,1000,50,50,50]
    if joint_gains is None:
        joint_gains = [500,500,500,500,500,500,500]

    reset_from_door(fa)

    #pose1
    joints1 = [1.25932568, -0.90343739, -1.24127123, -2.00878656,
               0.1644398, 1.77009426, 0.7167884]
    fa.goto_joints(joints1, duration=5, joint_impedances=joint_gains)

    #pose2
    joints2 = [1.77866878, -1.45379609, -1.45567126, -1.91807283,
               0.11080599, 2.21234057, 0.91823287]
    fa.goto_joints(joints2, duration=5, joint_impedances=joint_gains)

    #go to start pose
    print("Going to start position...")
    desired_tool_pose = RigidTransform(
        rotation=np.array([[0.99917643, -0.02115786, 0.03434495],
                           [-0.03350342, 0.03890612, 0.99868102],
                           [-0.02246619, -0.99900921, 0.03816595]]),
        translation=np.array([0.60516914, 0.33243171, 0.45531707]),
        from_frame='franka_tool', to_frame='world')
    fa.goto_pose_with_cartesian_control(desired_tool_pose,
                                        duration=10.0,
                                        cartesian_impedances=cart_gains)
    joints3 = [2.28650215, -1.36063288, -1.4431576, -1.93011263,
               0.23962597, 2.6992652, 0.82820212]
    fa.goto_joints(joints3, duration=5, joint_impedances=joint_gains)
Example #23
def find_stable_poses_mesh(mesh):
    stable_poses, probs = mesh.compute_stable_poses()
    assert len(stable_poses) == len(probs)
    # Find the most probable stable pose
    i, value = max(enumerate(probs), key=operator.itemgetter(1))
    best_pose = stable_poses[i]
    # Visualize the mesh in the most probable stable state
    rotation = np.asarray(best_pose[:3, :3])
    translation = np.asarray(best_pose[:3, 3])
    rt = RigidTransform(rotation, translation, from_frame='obj', to_frame='world')
    # vis3d.mesh(mesh, rt)
    # vis3d.show()
    return best_pose, rt
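A hedged usage sketch: trimesh meshes expose compute_stable_poses(), returning candidate 4x4 poses with probabilities, which is what this helper expects. The box dimensions are arbitrary, and numpy, operator, and autolab_core are assumed to be imported as in the surrounding module.

import trimesh

mesh = trimesh.creation.box(extents=[0.1, 0.05, 0.02])
best_pose, rt = find_stable_poses_mesh(mesh)
print(rt.translation, rt.quaternion)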
Example #24
 def gripper_on_object(gripper, grasp, obj, stable_pose=None,
                       T_table_world=RigidTransform(from_frame='table', to_frame='world'),
                       gripper_color=(0.5, 0.5, 0.5), object_color=(0.5, 0.5, 0.5),
                       style='surface', plot_table=True, table_dim=0.15):
     """ Visualize a gripper on an object.
     
     Parameters
     ----------
     gripper : :obj:`dexnet.grasping.RobotGripper`
         gripper to plot
     grasp : :obj:`dexnet.grasping.Grasp`
         grasp to plot the gripper in
     obj : :obj:`dexnet.grasping.GraspableObject3D`
         3D object to plot the gripper on
     stable_pose : :obj:`autolab_core.RigidTransform`
         stable pose of the object on a planar worksurface
     T_table_world : :obj:`autolab_core.RigidTransform`
         pose of table, specified as a transformation from mesh frame to world frame
     gripper_color : 3-tuple
         color of the gripper mesh
     object_color : 3-tuple
         color of the object mesh
      style : :obj:`str`
          rendering style of the object mesh (e.g. 'surface' or 'wireframe')
     plot_table : bool
         whether or not to plot the table
     table_dim : float
         dimension of the table
     """
     if stable_pose is None:
         Visualizer3D.mesh(obj.mesh.trimesh, color=object_color, style=style)
         T_obj_world = RigidTransform(from_frame='obj',
                                      to_frame='world')
     else:
         T_obj_world = Visualizer3D.mesh_stable_pose(obj.mesh.trimesh, stable_pose,
                                                     T_table_world=T_table_world,
                                                     color=object_color, style=style,
                                                     plot_table=plot_table, dim=table_dim)
     DexNetVisualizer3D.gripper(gripper, grasp, T_obj_world, color=gripper_color)
Example #25
def get_friction_cone_basis(vertex, normal, num_facets, mu):
    axes_rot_3d = look_at_general(vertex, normal)
    axes_rt = RigidTransform(rotation=axes_rot_3d, translation=vertex)
    y_axis = axes_rt.y_axis
    angle = np.arctan(mu)
    # quaternion_matrix returns a 4x4 homogeneous matrix; take the 3x3 rotation block.
    rot = quaternion_matrix(quaternion_about_axis(angle, y_axis))[:3, :3]
    cone_vector = rot.dot(normal)
    cone_basis_vecs = []
    for angle in np.linspace(0, 2. * np.pi - 1e-8, num_facets):
        cone_basis_vec = quaternion_matrix(quaternion_about_axis(
            angle, normal))[:3, :3].dot(cone_vector)
        cone_basis_vecs.append(cone_basis_vec)
    # Transpose (num_facets, 3) -> (3, num_facets); a reshape would scramble the data.
    return np.array(cone_basis_vecs).T
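A quick check of the cone construction: rotating the normal about a perpendicular axis by arctan(mu) yields a vector at exactly that angle from the normal. This assumes tf.transformations (or Gohlke's transformations module) is available.

import numpy as np
from tf.transformations import quaternion_about_axis, quaternion_matrix

normal = np.array([0.0, 0.0, 1.0])
y_axis = np.array([0.0, 1.0, 0.0])
mu = 0.5
rot = quaternion_matrix(quaternion_about_axis(np.arctan(mu), y_axis))[:3, :3]
cone_vec = rot.dot(normal)
assert np.isclose(np.arccos(cone_vec.dot(normal)), np.arctan(mu))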
Example #26
def process_action_data(action_data):
    """
    returns in x,y,z, quat form
    """
    fourbyfour = action_data.reshape((action_data.shape[0], 4, 4))
    trans = fourbyfour[:, -1, 0:3]
    quats = []
    for i in range(action_data.shape[0]):
        rot = RigidTransform(rotation=fourbyfour[i, :3, :3])
        quat = rot.quaternion
        quats.append(quat)
    quat = np.vstack(quats)
    return np.hstack([trans, quat])
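A minimal usage sketch with hypothetical data: two flattened identity transforms, shape (N, 16), which the function reshapes to (N, 4, 4). Assumes numpy and autolab_core imports from the surrounding module.

action_data = np.tile(np.eye(4).flatten(), (2, 1))  # shape (2, 16)
out = process_action_data(action_data)
print(out.shape)  # (2, 7): x, y, z followed by a wxyz quaternion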
Example #27
def object_pose_from_json(scene_object_json):
    # type: (dict) -> (np.array, np.array)
    """
    Parses object pose from "location" and "quaternion_xyzw".

    :param scene_object_json: JSON fragment of a single scene object
    :return: (translation in meters, rotation as a 3x3 matrix)
    """
    quaternion_xyzw = np.array(scene_object_json['quaternion_xyzw'])
    quaternion_wxyz = np.roll(quaternion_xyzw, 1)
    rotation = RigidTransform.rotation_from_quaternion(quaternion_wxyz)
    translation = np.array(scene_object_json['location'])
    return translation, rotation
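A short usage sketch with a hypothetical JSON fragment; the identity quaternion maps to the identity rotation matrix:

import numpy as np

scene_object_json = {
    'location': [0.1, 0.2, 0.3],              # hypothetical values
    'quaternion_xyzw': [0.0, 0.0, 0.0, 1.0],  # identity rotation
}
translation, rotation = object_pose_from_json(scene_object_json)
assert np.allclose(rotation, np.eye(3))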
Example #28
    def test_bad_transformation(self, num_points=10):
        R_a_b = RigidTransform.random_rotation()
        t_a_b = RigidTransform.random_translation()
        T_a_b = RigidTransform(R_a_b, t_a_b, "a", "b")

        # bad point frame
        caught_bad_frame = False
        try:
            x_c = np.random.rand(3)
            p_c = Point(x_c, "c")
            T_a_b * p_c
        except ValueError:
            caught_bad_frame = True
        self.assertTrue(
            caught_bad_frame, msg="Failed to catch bad point frame"
        )

        # bad point cloud frame
        caught_bad_frame = False
        try:
            x_c = np.random.rand(3, num_points)
            pc_c = PointCloud(x_c, "c")
            T_a_b * pc_c
        except ValueError:
            caught_bad_frame = True
        self.assertTrue(
            caught_bad_frame, msg="Failed to catch bad point cloud frame"
        )

        # illegal input
        caught_bad_input = False
        try:
            x_a = np.random.rand(3, num_points)
            T_a_b * x_a
        except ValueError:
            caught_bad_input = True
        self.assertTrue(
            caught_bad_input, msg="Failed to catch numpy array input"
        )
Example #29
    def object_to_camera_pose(self, radius, elev, az, roll, x, y):
        """ Convert spherical coords to an object-camera pose. """
        # generate camera center from spherical coords
        delta_t = np.array([x, y, 0])
        camera_center_obj = np.array([sph2cart(radius, az, elev)
                                      ]).squeeze() + delta_t
        camera_z_obj = -np.array([sph2cart(radius, az, elev)]).squeeze()
        camera_z_obj = camera_z_obj / np.linalg.norm(camera_z_obj)

        # find the canonical camera x and y axes
        camera_x_par_obj = np.array([camera_z_obj[1], -camera_z_obj[0], 0])
        if np.linalg.norm(camera_x_par_obj) == 0:
            camera_x_par_obj = np.array([1, 0, 0])
        camera_x_par_obj = camera_x_par_obj / np.linalg.norm(camera_x_par_obj)
        camera_y_par_obj = np.cross(camera_z_obj, camera_x_par_obj)
        camera_y_par_obj = camera_y_par_obj / np.linalg.norm(camera_y_par_obj)
        if camera_y_par_obj[2] > 0:
            camera_x_par_obj = -camera_x_par_obj
            camera_y_par_obj = np.cross(camera_z_obj, camera_x_par_obj)
            camera_y_par_obj = camera_y_par_obj / np.linalg.norm(
                camera_y_par_obj)

        # rotate by the roll
        R_obj_camera_par = np.c_[camera_x_par_obj, camera_y_par_obj,
                                 camera_z_obj]
        R_camera_par_camera = np.array([[np.cos(roll), -np.sin(roll), 0],
                                        [np.sin(roll),
                                         np.cos(roll), 0], [0, 0, 1]])
        R_obj_camera = R_obj_camera_par.dot(R_camera_par_camera)
        t_obj_camera = camera_center_obj

        # create final transform
        T_obj_camera = RigidTransform(R_obj_camera,
                                      t_obj_camera,
                                      from_frame=self.frame,
                                      to_frame='obj')

        return T_obj_camera.inverse()
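sph2cart is imported from elsewhere; a common definition is sketched below, but the convention (here, elevation measured from the +z axis) is an assumption:

import numpy as np

def sph2cart(r, az, elev):
    return np.array([r * np.sin(elev) * np.cos(az),
                     r * np.sin(elev) * np.sin(az),
                     r * np.cos(elev)])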
Example #30
    def grasp(grasp,
              T_obj_world=RigidTransform(from_frame='obj', to_frame='world'),
              tube_radius=0.002,
              endpoint_color=(0, 1, 0),
              endpoint_scale=0.004,
              grasp_axis_color=(0, 1, 0)):
        """ Plots a grasp as an axis and center.

        Parameters
        ----------
        grasp : :obj:`dexnet.grasping.Grasp`
            the grasp to plot
        T_obj_world : :obj:`autolab_core.RigidTransform`
            the pose of the object that the grasp is referencing in world frame
        tube_radius : float
            radius of the plotted grasp axis
        endpoint_color : 3-tuple
            color of the endpoints of the grasp axis
        endpoint_scale : 3-tuple
            scale of the plotted endpoints
        grasp_axis_color : 3-tuple
            color of the grasp axis
        """
        g1, g2 = grasp.endpoints
        center = grasp.center
        g1 = Point(g1, 'obj')
        g2 = Point(g2, 'obj')
        center = Point(center, 'obj')

        g1_tf = T_obj_world.apply(g1)
        g2_tf = T_obj_world.apply(g2)
        center_tf = T_obj_world.apply(center)
        grasp_axis_tf = np.array([g1_tf.data, g2_tf.data])

        mlab.points3d(g1_tf.data[0],
                      g1_tf.data[1],
                      g1_tf.data[2],
                      color=endpoint_color,
                      scale_factor=endpoint_scale)
        mlab.points3d(g2_tf.data[0],
                      g2_tf.data[1],
                      g2_tf.data[2],
                      color=endpoint_color,
                      scale_factor=endpoint_scale)

        mlab.plot3d(grasp_axis_tf[:, 0],
                    grasp_axis_tf[:, 1],
                    grasp_axis_tf[:, 2],
                    color=grasp_axis_color,
                    tube_radius=tube_radius)