Example #1
 def gripper_on_object(gripper,
                       grasp,
                       obj,
                       stable_pose=None,
                       T_table_world=RigidTransform(from_frame='table',
                                                    to_frame='world'),
                       gripper_color=(0.5, 0.5, 0.5),
                       object_color=(0.5, 0.5, 0.5),
                       style='surface',
                       plot_table=True,
                       table_dim=0.15):
     """ Visualize a gripper on an object.
     
     Parameters
     ----------
     gripper : :obj:`dexnet.grasping.RobotGripper`
         gripper to plot
     grasp : :obj:`dexnet.grasping.Grasp`
         grasp to plot the gripper in
     obj : :obj:`dexnet.grasping.GraspableObject3D`
         3D object to plot the gripper on
     stable_pose : :obj:`meshpy.StablePose` or :obj:`autolab_core.RigidTransform`
         stable pose of the object on a planar worksurface
     T_table_world : :obj:`autolab_core.RigidTransform`
         pose of table, specified as a transformation from mesh frame to world frame
     gripper_color : 3-tuple
         color of the gripper mesh
     object_color : 3-tuple
         color of the object mesh
     style : :obj:`str`
         rendering style for the object mesh, e.g. 'surface' or 'wireframe'
     plot_table : bool
         whether or not to plot the table
     table_dim : float
         dimension of the table
     """
     if stable_pose is None:
         Visualizer3D.mesh(obj.mesh, color=object_color, style=style)
         T_obj_world = RigidTransform(from_frame='obj', to_frame='world')
     elif isinstance(stable_pose, StablePose):
         T_obj_world = Visualizer3D.mesh_stable_pose(
             obj.mesh,
             stable_pose,
             T_table_world=T_table_world,
             color=object_color,
             style=style,
             plot_table=plot_table,
             dim=table_dim)
     else:
         T_obj_world = Visualizer3D.mesh_table(obj.mesh,
                                               stable_pose,
                                               T_table_world=T_table_world,
                                               color=object_color,
                                               style=style,
                                               plot_table=plot_table,
                                               dim=table_dim)
     DexNetVisualizer3D.gripper(gripper,
                                grasp,
                                T_obj_world,
                                color=gripper_color)
Example #2
    def test_composition(self):
        R_a_b = RigidTransform.random_rotation()
        t_a_b = RigidTransform.random_translation()
        R_b_c = RigidTransform.random_rotation()
        t_b_c = RigidTransform.random_translation()
        T_a_b = RigidTransform(R_a_b, t_a_b, "a", "b")
        T_b_c = RigidTransform(R_b_c, t_b_c, "b", "c")

        # multiply with numpy arrays
        M_a_b = np.r_[np.c_[R_a_b, t_a_b], [[0, 0, 0, 1]]]
        M_b_c = np.r_[np.c_[R_b_c, t_b_c], [[0, 0, 0, 1]]]
        M_a_c = M_b_c.dot(M_a_b)

        # use multiplication operator
        T_a_c = T_b_c * T_a_b

        self.assertTrue(
            np.sum(np.abs(T_a_c.matrix - M_a_c)) < 1e-5,
            msg="Composition gave incorrect transformation",
        )

        # check frames
        self.assertEqual(
            T_a_c.from_frame, "a", msg="Composition has incorrect input frame"
        )
        self.assertEqual(
            T_a_c.to_frame, "c", msg="Composition has incorrect output frame"
        )
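The test above leans on RigidTransform's frame bookkeeping: a transform named T_a_b maps points from frame a into frame b, and the `*` operator only composes transforms whose inner frames match. A minimal sketch of that convention (identity rotations, for illustration only):

import numpy as np
from autolab_core import RigidTransform

T_a_b = RigidTransform(np.eye(3), np.zeros(3), from_frame='a', to_frame='b')
T_b_c = RigidTransform(np.eye(3), np.zeros(3), from_frame='b', to_frame='c')
T_a_c = T_b_c * T_a_b  # frames chain a -> b -> c; T_a_b * T_b_c should be rejected
assert T_a_c.from_frame == 'a' and T_a_c.to_frame == 'c'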
Example #3
def visualize_scene(mesh, T_world_ar, T_ar_cam, T_cam_obj):
    T_cam_cam = RigidTransform()  # identity pose for the camera itself
    T_cam_ar = T_ar_cam.inverse()

    Twc = np.matmul(T_world_ar.matrix, T_ar_cam.matrix)
    T_world_cam = RigidTransform(Twc[:3, :3], Twc[:3, 3])
    T_cam_world = T_world_cam.inverse()

    o = np.array([0., 0., 0.])
    centroid_cam = apply_transform(o, T_cam_obj)
    tag_cam = apply_transform(o, T_cam_ar)
    robot_cam = apply_transform(o, T_cam_world)

    # camera
    vis3d.points(o, color=(0, 1, 0), scale=0.01)
    vis3d.pose(T_cam_cam, alpha=0.05, tube_radius=0.005, center_scale=0.002)

    # object
    vis3d.mesh(mesh)
    vis3d.points(centroid_cam, color=(0, 0, 0), scale=0.01)
    vis3d.pose(T_cam_obj, alpha=0.05, tube_radius=0.005, center_scale=0.002)

    # AR tag
    vis3d.points(tag_cam, color=(1, 0, 1), scale=0.01)
    vis3d.pose(T_cam_ar, alpha=0.05, tube_radius=0.005, center_scale=0.002)
    vis3d.table(T_cam_ar, dim=0.074)

    # robot
    vis3d.points(robot_cam, color=(1, 0, 0), scale=0.01)
    vis3d.pose(T_cam_world, alpha=0.05, tube_radius=0.005, center_scale=0.002)

    vis3d.show()
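`apply_transform` above is an external helper; a plausible stand-in (an assumption, not the original) maps a 3-vector through a RigidTransform using autolab_core's frame-checked Point type:

import numpy as np
from autolab_core import Point, RigidTransform

def apply_transform(vec, T):
    # The Point's frame must equal T.from_frame for the multiplication to pass
    # autolab_core's frame check; the result is expressed in T.to_frame.
    return (T * Point(np.asarray(vec), frame=T.from_frame)).data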
Example #4
def lookup_transform(to_frame, from_frame='base'):
    """
    Returns the AR tag position in world coordinates 

    Parameters
    ----------
    to_frame : string
        examples are: ar_marker_7, gearbox, pawn, ar_marker_3, etc
    from_frame : string
        let's be real, you're probably only going to use 'base'

    Returns
    -------
    :obj:`autolab_core.RigidTransform` AR tag position or object in world coordinates
    """
    if not ros_enabled:
        print('I am the lookup transform function!  '
              'You\'re not using ROS, so I\'m returning the Identity Matrix.')
        return RigidTransform(to_frame=from_frame, from_frame=to_frame)
    listener = tf.TransformListener()
    attempts, max_attempts, rate = 0, 10, rospy.Rate(1.0)
    tag_pos, tag_rot = None, None
    while attempts < max_attempts:
        try:
            t = listener.getLatestCommonTime(from_frame, to_frame)
            tag_pos, tag_rot = listener.lookupTransform(from_frame, to_frame, t)
            break  # success: stop polling
        except (tf.LookupException, tf.ConnectivityException,
                tf.ExtrapolationException):
            rate.sleep()
            attempts += 1
    if tag_rot is None:
        raise RuntimeError('Could not look up transform from {} to {}'.format(
            from_frame, to_frame))
    rot = RigidTransform.rotation_from_quaternion(tag_rot)
    return RigidTransform(rot, tag_pos, to_frame=from_frame, from_frame=to_frame)
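One caveat for this and the later lookup_transform variants: ROS tf returns quaternions in (x, y, z, w) order, while autolab_core's rotation_from_quaternion expects (w, x, y, z). Examples #12, #16, and #22 below reorder accordingly; a sketch of the one-line fix this version would likely need as well:

import numpy as np

tag_rot_wxyz = np.roll(tag_rot, 1)  # (x, y, z, w) -> (w, x, y, z)
rot = RigidTransform.rotation_from_quaternion(tag_rot_wxyz)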
Example #5
def render_depth_maps(mesh_filename, intrinsic, extrinsics, mesh_scale=1.0):
    scene = mr.Scene()

    # setup camera
    ci = CameraIntrinsics(frame='camera',
                          fx=intrinsic.get_focal_length()[0],
                          fy=intrinsic.get_focal_length()[1],
                          cx=intrinsic.get_principal_point()[0],
                          cy=intrinsic.get_principal_point()[1],
                          skew=0,
                          height=intrinsic.height,
                          width=intrinsic.width)
    cp = RigidTransform(from_frame='camera', to_frame='world')
    camera = mr.VirtualCamera(ci, cp)
    scene.camera = camera

    obj = trimesh.load_mesh(mesh_filename)
    obj.apply_scale(mesh_scale)

    dmaps = []
    for T in extrinsics:
        obj_pose = RigidTransform(rotation=T[:3, :3],
                                  translation=T[:3, 3],
                                  from_frame='obj',
                                  to_frame='camera')
        sobj = mr.SceneObject(obj, obj_pose)
        scene.add_object('obj', sobj)
        dmap = scene.render(render_color=False)
        dmap = np.uint16(dmap * 1000.0)  # convert meters to uint16 millimeters
        dmaps.append(dmap)
        scene.remove_object('obj')

    return np.stack(dmaps)
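A usage sketch, assuming `intrinsic` follows the Open3D PinholeCameraIntrinsic API implied by get_focal_length()/get_principal_point(), and that each extrinsic is a 4x4 object-to-camera matrix; 'mesh.obj' is a placeholder path:

import numpy as np

extrinsics = [np.eye(4)]  # a single view with the object at the camera origin
depth_stack = render_depth_maps('mesh.obj', intrinsic, extrinsics)
print(depth_stack.shape, depth_stack.dtype)  # (1, H, W), uint16 millimeters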
Example #6
    def scroll(self, clicks):
        """Zoom using a mouse scroll wheel motion.

        Parameters
        ----------
        clicks : int
            The number of clicks. Positive numbers indicate forward wheel movement.
        """
        target = self._target
        ratio = 0.90

        z_axis = self._n_T_camera_world.matrix[:3, 2].flatten()
        eye = self._n_T_camera_world.matrix[:3, 3].flatten()
        radius = np.linalg.norm(eye - target)
        translation = clicks * (1 - ratio) * radius * z_axis
        t_tf = RigidTransform(translation=translation,
                              from_frame='world',
                              to_frame='world')
        self._n_T_camera_world = t_tf.dot(self._n_T_camera_world)

        z_axis = self._T_camera_world.matrix[:3, 2].flatten()
        eye = self._T_camera_world.matrix[:3, 3].flatten()
        radius = np.linalg.norm(eye - target)
        translation = clicks * (1 - ratio) * radius * z_axis
        t_tf = RigidTransform(translation=translation,
                              from_frame='world',
                              to_frame='world')
        self._T_camera_world = t_tf.dot(self._T_camera_world)
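The two halves of scroll differ only in which camera pose they update; a possible refactor (a sketch, not the viewer's actual code) factors the shared zoom step into a helper:

def _zoom_step(T_camera_world, target, clicks, ratio=0.90):
    # Translate the eye along the camera z axis by a fraction of its
    # distance to the target, exactly as the two blocks above do.
    z_axis = T_camera_world.matrix[:3, 2].flatten()
    eye = T_camera_world.matrix[:3, 3].flatten()
    radius = np.linalg.norm(eye - target)
    step = RigidTransform(translation=clicks * (1 - ratio) * radius * z_axis,
                          from_frame='world', to_frame='world')
    return step.dot(T_camera_world)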
Example #7
    def rotate(self, azimuth, axis=None):
        """Rotate the trackball about the "Up" axis by azimuth radians.

        Parameters
        ----------
        azimuth : float
            The number of radians to rotate.
        axis : (3,) :obj:`numpy.ndarray`, optional
            If provided, rotate about this axis instead of the camera's
            "Up" (y) axis.
        """
        target = self._target

        y_axis = self._n_T_camera_world.matrix[:3, 1].flatten()
        if axis is not None:
            y_axis = axis
        x_rot_mat = transformations.rotation_matrix(-azimuth, y_axis, target)
        x_rot_tf = RigidTransform(x_rot_mat[:3, :3],
                                  x_rot_mat[:3, 3],
                                  from_frame='world',
                                  to_frame='world')
        self._n_T_camera_world = x_rot_tf.dot(self._n_T_camera_world)

        y_axis = self._T_camera_world.matrix[:3, 1].flatten()
        if axis is not None:
            y_axis = axis
        x_rot_mat = transformations.rotation_matrix(-azimuth, y_axis, target)
        x_rot_tf = RigidTransform(x_rot_mat[:3, :3],
                                  x_rot_mat[:3, 3],
                                  from_frame='world',
                                  to_frame='world')
        self._T_camera_world = x_rot_tf.dot(self._T_camera_world)
Example #8
def lookup_tag(tag_number):
	""" Returns the AR tag position in world coordinates 

	Parameters
	----------
	tag_number : int
		AR tag number

	Returns
	-------
	:obj:`autolab_core.RigidTransform` AR tag position in world coordinates
	"""
	to_frame = 'ar_marker_{}'.format(tag_number)

	tag_pos = None
	while tag_pos is None:
		try:
			t = listener.getLatestCommonTime(from_frame, to_frame)
			tag_pos, tag_rot = listener.lookupTransform(from_frame, to_frame, t)
		except (tf.LookupException, tf.ConnectivityException,
				tf.ExtrapolationException):
			continue
	# Build the rotation matrix from the tf quaternion via its euler angles.
	angles = tfs.euler_from_quaternion(tag_rot)
	trans = tfs.euler_matrix(*angles)
	return RigidTransform(trans[:3, :3], tag_pos)
Example #9
    def predict(self):
        rot, trans = RigidTransform.rotation_and_translation_from_matrix(
            self.datapoint['obj_pose'])
        dataset_pose = RigidTransform(rot, trans, 'obj', 'world')
        dataset_final_poses = self.datapoint['final_poses']
        num_vertices = self.datapoint['num_vertices']
        final_poses = []
        for i in range(20):
            final_pose_mat = dataset_final_poses[i * 4:(i + 1) * 4]
            if np.array_equal(final_pose_mat, np.zeros((4, 4))):
                break
            rot, trans = RigidTransform.rotation_and_translation_from_matrix(
                final_pose_mat)
            final_poses.append(RigidTransform(rot, trans, 'obj', 'world'))
        vertices = self.datapoint['vertices'][:num_vertices]
        normals = self.datapoint['normals'][:num_vertices]
        vertices = (self.obj.T_obj_world * dataset_pose.inverse() *
                    PointCloud(vertices.T, 'world')).data.T
        normals = (self.obj.T_obj_world * dataset_pose.inverse() *
                   PointCloud(normals.T, 'world')).data.T

        vertex_probs = self.datapoint[
            'vertex_probs'][:num_vertices, :len(final_poses)]
        required_forces = self.datapoint['min_required_forces'][:num_vertices]

        return vertices, normals, final_poses, vertex_probs, required_forces
Example #10
    def generate_images(self, salient_edge_set, n_samples=1):
        """Generate depth image, normal image, and binary edge mask tuples.

        Parameters
        ----------
        salient_edge_set : SalientEdgeSet
            A salient edge set to generate images of.
        n_samples : int
            The number of samples to generate.

        Returns
        -------
        depth_ims : (n,) list of perception.DepthImage
            Randomly-rendered depth images of object.
        normal_ims : (n,) list of perception.PointCloudImage
            Normals for the given image
        edge_masks : (n,) list of perception.BinaryImage
            Masks for pixels on the salient edges of the object.
        """
        # Compute stable poses of mesh
        mesh = salient_edge_set.mesh

        stp_pose_tfs, probs = mesh.compute_stable_poses()
        probs = probs / sum(probs)

        # Generate n renders
        depth_ims, normal_ims, edge_masks = [], [], []
        scene = Scene()
        so = SceneObject(mesh, RigidTransform(from_frame='obj', to_frame='world'))
        scene.add_object('object', so)

        for i in range(n_samples):
            # Sample random stable pose.
            tf_id = np.random.choice(np.arange(len(probs)), p=probs)
            tf = stp_pose_tfs[tf_id]
            T_obj_world = RigidTransform(tf[:3,:3], tf[:3,3], from_frame='obj', to_frame='world')
            so.T_obj_world = T_obj_world

            # Create the random uniform workspace sampler
            ws_cfg = self._config['worksurface_rv_config']
            uvs = UniformPlanarWorksurfaceImageRandomVariable('object', scene, [RenderMode.DEPTH], frame='camera', config=ws_cfg)

            # Sample and extract the depth image, camera intrinsics, and T_obj_camera
            sample = uvs.sample()
            depth_im = sample.renders[RenderMode.DEPTH]
            cs = sample.camera
            ci = CameraIntrinsics(frame='camera', fx=cs.focal, fy=cs.focal, cx=cs.cx, cy=cs.cy,
                                  skew=0.0, height=ws_cfg['im_height'], width=ws_cfg['im_width'])
            T_obj_camera = cs.T_camera_world.inverse().dot(T_obj_world)
            edge_mask = self._compute_edge_mask(salient_edge_set, depth_im, ci, T_obj_camera)
            point_cloud_im = ci.deproject_to_image(depth_im)
            normal_im = point_cloud_im.normal_cloud_im()

            depth_ims.append(depth_im)
            normal_ims.append(normal_im)
            edge_masks.append(edge_mask)

        return depth_ims, normal_ims, edge_masks
Example #11
def figure_0():
    action = policy.action(env.state)
    env.render_3d_scene()
    bottom_points = policy.toppling_model.bottom_points
    vis3d.plot3d(bottom_points[:2], color=[0, 0, 0], tube_radius=.001)

    mesh = env.state.mesh.copy().apply_transform(env.state.T_obj_world.matrix)
    mesh.fix_normals()
    direction = normalize([0, -.04, 0])
    origin = mesh.center_mass + np.array([0, .04, .09])
    intersect, _, face_ind = mesh.ray.intersects_location([origin],
                                                          [direction],
                                                          multiple_hits=False)
    normal = mesh.face_normals[face_ind[0]]
    start_point = intersect[0] + .06 * normal
    end_point = intersect[0]
    shaft_points = [start_point, end_point]
    h1 = np.array([[1, 0, 0], [0, 0.7071, -0.7071], [0, 0.7071,
                                                     0.7071]]).dot(-normal)
    h2 = np.array([[1, 0, 0], [0, 0.7071, 0.7071], [0, -0.7071,
                                                    0.7071]]).dot(-normal)
    head_points = [end_point - 0.02 * h2, end_point, end_point - 0.02 * h1]
    vis3d.plot3d(shaft_points, color=[1, 0, 0], tube_radius=.002)
    vis3d.plot3d(head_points, color=[1, 0, 0], tube_radius=.002)
    vis3d.points(Point(end_point), scale=.004, color=[0, 0, 0])
    hand_pose = RigidTransform(rotation=policy.get_hand_pose(
        start_point, end_point)[0].rotation,
                               translation=start_point,
                               from_frame='grasp',
                               to_frame='world')
    orig_pose = env.state.obj.T_obj_world.copy()
    env.state.obj.T_obj_world = policy.toppling_model.final_poses[1]
    action = policy.grasping_policy.action(env.state)
    env.state.obj.T_obj_world = orig_pose

    gripper = env.gripper(action)
    # Re-orient the gripper mesh before placing it at the hand pose.
    rot = np.array([[0, -1, 0], [1, 0, 0],
                    [0, 0, 1]]).dot(np.array([[0, 0, 1], [0, 1, 0],
                                              [-1, 0, 0]]))
    T = RigidTransform(rotation=rot,
                       translation=np.array([0, -.09, .1]),
                       to_frame='mesh',
                       from_frame='mesh')
    vis3d.mesh(gripper.mesh,
               hand_pose * gripper.T_mesh_grasp * T,
               color=light_blue)
    vis3d.show(starting_camera_pose=CAMERA_POSE)

    env.state.obj.T_obj_world = policy.toppling_model.final_poses[1]
    action = policy.grasping_policy.action(env.state)
    print('q:', action.q_value)
    env.render_3d_scene()
    vis3d.gripper(env.gripper(action),
                  action.grasp(env.gripper(action)),
                  color=light_blue)
    vis3d.show(starting_camera_pose=CAMERA_POSE)
Example #12
    def processing(self):
        """
        Transformation: Link8 -> Grasp -> Camera -> Link0
	"""
        # Transforms using Listener
        listener = tf.TransformListener()
        while True:
            try:
                (tcr, rcr) = listener.lookupTransform('/panda_link0', '/color',
                                                      rospy.Time(0))
                (tref,
                 rref) = listener.lookupTransform('/panda_link0',
                                                  '/panda_link8',
                                                  rospy.Time(0))
            except (tf.LookupException, tf.ConnectivityException,
                    tf.ExtrapolationException):
                continue
            break

        # Ready Position: reorder tf's (x, y, z, w) quaternion to (w, x, y, z)
        qref = [rref[3], rref[0], rref[1], rref[2]]
        self.ready_pose = RigidTransform(rotation=qref,
                                         translation=tref,
                                         from_frame='link8',
                                         to_frame='link0').pose_msg

        # Transformation from Camera to Robot link 0 (base) Frame
        qcr = [rcr[3], rcr[0], rcr[1], rcr[2]]  # (x, y, z, w) -> (w, x, y, z)
        T_camera_robot = RigidTransform(rotation=qcr,
                                        translation=tcr,
                                        from_frame='camera',
                                        to_frame='link0')

        # Transformation from Approach Grasp Frame to Camera Frame
        T_gripper_camera = RigidTransform(rotation=self.orientation,
                                          translation=self.position,
                                          from_frame='grasp',
                                          to_frame='camera')

        # Transformation from Grasp Frame to Camera Frame
        T_gripper_camera1 = RigidTransform(rotation=self.orientation,
                                           translation=self.position1,
                                           from_frame='grasp',
                                           to_frame='camera')

        # Transformation from End-Effector/Tool Frame (Panda Link 8) to Grasp Frame
        ### Rotation: YZX = [90,45,0]
        eef_rot = [0.6532815, 0.2705981, 0.6532815, 0.2705981]
        eef_pos = [-0.050, -0.030, -0.01]
        T_eef_gripper = RigidTransform(rotation=eef_rot,
                                       translation=eef_pos,
                                       from_frame='eef',
                                       to_frame='grasp')

        # Transformation from End-Effector/Tool Frame (Panda Link 8) to Robot Base Frame
        # Approach
        self.T_gripper_robot = T_camera_robot * T_gripper_camera * T_eef_gripper
        # Grasp
        self.T_gripper_robot1 = T_camera_robot * T_gripper_camera1 * T_eef_gripper
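Because every transform above carries explicit frames, the final compositions can be sanity-checked; a minimal sketch using the same variable names as the method:

# Chain: eef -> grasp -> camera -> link0
T = T_camera_robot * T_gripper_camera * T_eef_gripper
assert T.from_frame == 'eef' and T.to_frame == 'link0'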
Example #13
def plate_push_catch(self):
    self.right.goto_pose(
        RigidTransform(translation=[0.3, 0, 0.2],
                       rotation=right_front_rotation))
    self.right.goto_pose(
        RigidTransform(translation=[0.3, 0, 0.07],
                       rotation=right_front_rotation))
    time.sleep(3)
Example #14
def plate_slide(self):
    self.right.goto_pose(
        RigidTransform(translation=[0.72, 0, 0.35],
                       rotation=right_front_rotation))
    time.sleep(3)
    self.right.goto_pose(
        RigidTransform(translation=[0.75, 0, 0.35],
                       rotation=right_front_give_rotation))
Example #15
def plate_drop(self):
    self.right.goto_pose(
        RigidTransform(translation=[0.72, 0, 0.35],
                       rotation=right_front_rotation))
    time.sleep(3)
    self.right.open_gripper()
    self.right.goto_pose(
        RigidTransform(translation=[0.4, 0, 0.3],
                       rotation=right_front_rotation))
Example #16
def lookup_transform(to_frame, from_frame='base'):
    """
    Returns the AR tag position in world coordinates 

    Parameters
    ----------
    to_frame : string
        examples are: ar_marker_7, gearbox, pawn, ar_marker_3, etc
    from_frame : string
        let's be real, you're probably only going to use 'base'

    Returns
    -------
    :obj:`autolab_core.RigidTransform` AR tag position or object in world coordinates
    """
    if not ros_enabled:
        print('I am the lookup transform function!  '
              'You\'re not using ROS, so I\'m returning the Identity Matrix.')
        return RigidTransform(to_frame=from_frame, from_frame=to_frame)
    listener = tf.TransformListener()
    attempts, max_attempts, rate = 0, 10, rospy.Rate(1.0)
    tag_pos, real_rot = None, None
    while attempts < max_attempts:
        try:
            t = listener.getLatestCommonTime(from_frame, to_frame)
            tag_pos, tag_rot = listener.lookupTransform(
                from_frame, to_frame, t)
            # Reorder tf's (x, y, z, w) quaternion to autolab_core's
            # (w, x, y, z) convention.
            real_rot = np.array(
                [tag_rot[3], tag_rot[0], tag_rot[1], tag_rot[2]])
            break
        except (tf.LookupException, tf.ConnectivityException,
                tf.ExtrapolationException):
            rate.sleep()
            attempts += 1
    if real_rot is None:
        raise RuntimeError('Could not look up transform from {} to {}'.format(
            from_frame, to_frame))
    rot = RigidTransform.rotation_from_quaternion(real_rot)
    return RigidTransform(rot,
                          tag_pos,
                          to_frame=from_frame,
                          from_frame=to_frame)
Example #17
    def test_bad_inits(self):
        # test bad rotation dim
        R = np.random.rand(3)
        caught_bad_rotation = False
        try:
            RigidTransform(R)
        except ValueError:
            caught_bad_rotation = True
        self.assertTrue(
            caught_bad_rotation, msg="Failed to catch 3x1 rotation matrix"
        )

        # test bad rotation dim
        R = np.random.rand(3, 3, 3)
        caught_bad_rotation = False
        try:
            RigidTransform(R)
        except ValueError:
            caught_bad_rotation = True
        self.assertTrue(
            caught_bad_rotation, msg="Failed to catch 3x3x3 rotation matrix"
        )

        # determinant not equal to one
        R = np.random.rand(3, 3)
        caught_bad_rotation = False
        try:
            RigidTransform(R)
        except ValueError:
            caught_bad_rotation = True
        self.assertTrue(
            caught_bad_rotation, msg="Failed to catch rotation with det != 1"
        )

        # translation with illegal dimensions
        t = np.random.rand(3, 3)
        caught_bad_translation = False
        try:
            RigidTransform(translation=t)
        except ValueError:
            caught_bad_translation = True
        self.assertTrue(
            caught_bad_translation, msg="Failed to catch 3x3 translation"
        )

        # translation with illegal dimensions
        t = np.random.rand(2)
        caught_bad_translation = False
        try:
            RigidTransform(translation=t)
        except ValueError:
            caught_bad_translation = True
        self.assertTrue(
            caught_bad_translation, msg="Failed to catch 2x1 translation"
        )
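For contrast with the failure cases above, a well-formed input constructs cleanly; a minimal sketch using the same helpers as Example #2:

R = RigidTransform.random_rotation()     # 3x3 orthonormal with det = +1
t = RigidTransform.random_translation()  # length-3 vector
T = RigidTransform(R, t, from_frame='a', to_frame='b')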
Example #18
    def vertices_to_baxter_hand_pose(self,
                                     grasp_vertices,
                                     approach_direction,
                                     Rot,
                                     obj_name='gearbox'):
        """
        takes the contacts positions in the object frame and returns the hand pose T_obj_gripper
        BE CAREFUL ABOUT THE FROM FRAME AND TO FRAME.  the RigidTransform class' frames are 
        weird.
        
        Parameters
        ----------
        grasp_vertices : 2x3 :obj:`numpy.ndarray`
            position of the fingers in object frame
        approach_direction : 3x' :obj:`numpy.ndarray`
            there are multiple grasps that go through contact1 and contact2.  This describes which 
            orientation the hand should be in

        Returns
        -------
        :obj:`autolab_core:RigidTransform` Hand pose in the object frame Note: in object frame
        """
        # YOUR CODE HERE

        v1, v2 = grasp_vertices
        center = (v1 + v2) / 2
        #print("v1,v2,center", v1, v2, center)
        #print("App_dir:", approach_direction)
        rot = look_at_general(center, approach_direction)
        wrist_position = center  #- approach_direction*GRIPPER_LENGTH # note that this only works b/c I made sure approach_direction is normalized
        # print("wrist_position ", wrist_position)
        print("approach direction: ", approach_direction)
        print("vertices", v1, v2)
        # time.sleep(10)

        # wrist_position assumes is the position of the wrist (duh) and not the grippers

        #print("rotation", rot)
        # rot = rot[:3, :3]
        modified_identity = np.array([[1, 0, 0], [0, -1, 0], [0, 0, -1]])
        rot = np.dot(modified_identity, np.asarray(Rot[:3, :3]))
        rpy = tf.transformations.euler_from_matrix(rot)
        print("!!!!!!!!!!RPY!!!!!!!!!!")
        print("rot in rpy", rpy)
        g = RigidTransform(rot,
                           wrist_position,
                           from_frame='grasp',
                           to_frame=obj_name)
        print(" ## g ##### ", g)
        # I quite not sure here
        return RigidTransform(rot,
                              wrist_position,
                              from_frame='grasp',
                              to_frame=obj_name)
Example #19
    def test_linear_trajectory(self):
        R_a = RigidTransform.random_rotation()
        t_a = RigidTransform.random_translation()
        R_b = RigidTransform.random_rotation()
        t_b = RigidTransform.random_translation()
        T_a = RigidTransform(R_a, t_a, "w", "a")
        T_b = RigidTransform(R_b, t_b, "w", "b")

        for i in range(10):
            traj = T_a.linear_trajectory_to(T_b, i)
            self.assertEqual(len(traj), i, "Trajectory has incorrect length")
Example #20
    def test_registration(self):
        np.random.seed(101)

        source_points = np.random.rand(3, NUM_POINTS).astype(np.float32)
        source_normals = np.random.rand(3, NUM_POINTS).astype(np.float32)
        source_normals = source_normals / np.tile(
            np.linalg.norm(source_normals, axis=0)[np.newaxis, :], [3, 1])

        source_point_cloud = PointCloud(source_points, frame='world')
        source_normal_cloud = NormalCloud(source_normals, frame='world')

        matcher = PointToPlaneFeatureMatcher()
        solver = PointToPlaneICPSolver(sample_size=NUM_POINTS)

        # 3d registration
        tf = RigidTransform(rotation=RigidTransform.random_rotation(),
                            translation=RigidTransform.random_translation(),
                            from_frame='world',
                            to_frame='world')
        tf = RigidTransform(from_frame='world',
                            to_frame='world').interpolate_with(tf, 0.01)
        target_point_cloud = tf * source_point_cloud
        target_normal_cloud = tf * source_normal_cloud

        result = solver.register(source_point_cloud,
                                 target_point_cloud,
                                 source_normal_cloud,
                                 target_normal_cloud,
                                 matcher,
                                 num_iterations=NUM_ITERS)

        self.assertTrue(
            np.allclose(tf.matrix, result.T_source_target.matrix, atol=1e-3))

        # 2d registration
        theta = 0.1 * np.random.rand()
        t = 0.005 * np.random.rand(3, 1)
        t[2] = 0
        R = np.array([[np.cos(theta), -np.sin(theta), 0],
                      [np.sin(theta), np.cos(theta), 0], [0, 0, 1]])
        tf = RigidTransform(R, t, from_frame='world', to_frame='world')
        target_point_cloud = tf * source_point_cloud
        target_normal_cloud = tf * source_normal_cloud

        result = solver.register_2d(source_point_cloud,
                                    target_point_cloud,
                                    source_normal_cloud,
                                    target_normal_cloud,
                                    matcher,
                                    num_iterations=NUM_ITERS)

        self.assertTrue(
            np.allclose(tf.matrix, result.T_source_target.matrix, atol=1e-3))
Example #21
def plate_slide_2(self):
    self.right.goto_pose(
        RigidTransform(translation=[0.3, 0, 0.25],
                       rotation=right_front_rotation))
    self.right.goto_pose(
        RigidTransform(translation=[0.3, 0, 0.07],
                       rotation=right_front_rotation))
    time.sleep(3)
    self.right.open_gripper()
    self.right.goto_pose(
        RigidTransform(translation=[0.33, 0, 0.2],
                       rotation=right_front_rotation))
Example #22
def lookup_transform(to_frame, from_frame='base'):
    """
    Returns the AR tag position in world coordinates 

    Parameters
    ----------
    to_frame : string
        examples are: ar_marker_7, gearbox, pawn, ar_marker_3, etc
    from_frame : string
        let's be real, you're probably only going to use 'base'

    Returns
    -------
    :obj:`autolab_core.RigidTransform` AR tag position or object in world coordinates
    """
    if not ros_enabled:
        print('I am the lookup transform function!  '
              'You\'re not using ROS, so I\'m returning the Identity Matrix.')
        return RigidTransform(to_frame=from_frame, from_frame=to_frame)
    listener = tf.TransformListener()
    attempts, max_attempts, rate = 0, 10, rospy.Rate(1.0)

    # Sleep for a bit before looking up the transformation
    rate.sleep()

    tag_pos, tag_rot = None, None
    while attempts < max_attempts:
        try:
            t = listener.getLatestCommonTime(from_frame, to_frame)
            # Argument order swapped relative to the other examples
            # (a course-forum fix).
            tag_pos, tag_rot = listener.lookupTransform(
                to_frame, from_frame, t)
            # Reorder tf's (x, y, z, w) quaternion to (w, x, y, z).
            tag_rot = np.roll(tag_rot, 1)
            break
        except (tf.LookupException, tf.ConnectivityException,
                tf.ExtrapolationException):
            rate.sleep()
            attempts += 1
            print('Attempt {0} to look up transformation from {1} to {2} '
                  'unsuccessful.'.format(attempts, from_frame, to_frame))
    if tag_rot is None:
        raise RuntimeError('Could not look up transform from {} to {}'.format(
            from_frame, to_frame))
    print('Successfully found transformation from {0} to {1}.'.format(
        from_frame, to_frame))
    rot = RigidTransform.rotation_from_quaternion(tag_rot)
    return RigidTransform(rot,
                          tag_pos,
                          to_frame=from_frame,
                          from_frame=to_frame)
Example #23
def straighten_transform(rt):
    angles = rt.euler_angles
    roll_off = angles[0]
    pitch_off = angles[1]
    roll_fix = RigidTransform(
        rotation=RigidTransform.x_axis_rotation(np.pi - roll_off),
        from_frame=rt.from_frame,
        to_frame=rt.from_frame)
    pitch_fix = RigidTransform(
        rotation=RigidTransform.y_axis_rotation(pitch_off),
        from_frame=rt.from_frame,
        to_frame=rt.from_frame)
    new_rt = rt * roll_fix * pitch_fix
    return new_rt
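Note that roll_fix and pitch_fix both map rt.from_frame to itself, so the right-multiplication preserves rt's frames and passes autolab_core's frame checks. A usage sketch, with rt_measured standing in for any measured pose:

rt_flat = straighten_transform(rt_measured)  # rt_measured is hypothetical
assert rt_flat.from_frame == rt_measured.from_frame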
Example #24
def lookup_transform(to_frame, baxter, from_frame='base'):
    """
    Returns the AR tag position in world coordinates

    Parameters
    ----------
    to_frame : string
        examples are: ar_marker_7, gearbox, pawn, ar_marker_3, etc
    from_frame : string
        lets be real, you're probably only going to use 'base'

    Returns
    -------
    :obj:`autolab_core.RigidTransform` AR tag position or object in world coordinates
    """
    if not baxter:
        print('I am the lookup transform function!  '
              'You\'re not using ROS, so I\'m returning a hard-coded pose.')
        # Hard-coded fallback pose for offline testing.
        tag_pos = [0.642, -0.132, -0.023]
        tag_rot = [-0.001, -0.030, 0.024, 0.999]

        rot = RigidTransform.rotation_from_quaternion(tag_rot)
        return RigidTransform(rot,
                              tag_pos,
                              to_frame=from_frame,
                              from_frame=to_frame)

    listener = tf.TransformListener()
    attempts, max_attempts, rate = 0, 10, rospy.Rate(1.0)
    while attempts < max_attempts:
        try:
            t = listener.getLatestCommonTime(from_frame, to_frame)
            tag_pos, tag_rot = listener.lookupTransform(
                from_frame, to_frame, t)
            break
        except (tf.LookupException, tf.ConnectivityException,
                tf.ExtrapolationException):
            rate.sleep()
            attempts += 1

    rot = RigidTransform.rotation_from_quaternion(tag_rot)
    return RigidTransform(rot,
                          tag_pos,
                          to_frame=from_frame,
                          from_frame=to_frame)
Example #25
    def grasp_angles_from_stp_z(self, stable_pose):
        """ Get angles of the the grasp from the table plane:
        1) the angle between the grasp axis and table normal
        2) the angle between the grasp approach axis and the table normal
        
        Parameters
        ----------
        stable_pose : :obj:`StablePose` or :obj:`RigidTransform`
            the stable pose to compute the angles for

        Returns
        -------
        grasp_axis_angle : float
            angle between the grasp y axis and the table normal
        grasp_approach_angle : float
            angle between the grasp approach (x) axis and the table normal
        nu : float
            dot product of the grasp z axis with the table normal
        """
        T_grasp_obj = self.T_grasp_obj

        if isinstance(stable_pose, StablePose):
            R_stp_obj = stable_pose.r
        else:
            R_stp_obj = stable_pose.rotation
        T_stp_obj = RigidTransform(R_stp_obj, from_frame='obj', to_frame='stp')

        T_stp_grasp = T_stp_obj * T_grasp_obj

        stp_z = np.array([0, 0, 1])
        grasp_axis_angle = np.arccos(stp_z.dot(T_stp_grasp.y_axis))
        grasp_approach_angle = np.arccos(abs(stp_z.dot(T_stp_grasp.x_axis)))
        nu = stp_z.dot(T_stp_grasp.z_axis)

        return grasp_axis_angle, grasp_approach_angle, nu
Example #26
    def __init__(self,
                 name,
                 ar_marker,
                 R_ar_obj=np.eye(3),
                 t_ar_obj=np.zeros(3)):
        """
        Struct for specifying object templates

        Parameters
        ----------
        name : string
            name of object
        ar_marker : string
            name of ar marker on object template
        R_ar_obj : 3x3 :obj:`numpy.ndarray`
            rotation between AR marker and object
        t_ar_obj : (3,) :obj:`numpy.ndarray`
            translation between AR marker and object
        """
        self.name = name
        self.ar_marker = ar_marker
        self.T_ar_obj = RigidTransform(rotation=R_ar_obj,
                                       translation=t_ar_obj,
                                       from_frame=name,
                                       to_frame=ar_marker)
Example #27
def lookup_tag(tag_number):
    """ Returns the AR tag position in world coordinates 

    Parameters
    ----------
    tag_number : int
        AR tag number

    Returns
    -------
    :obj:`autolab_core.RigidTransform` AR tag position in world coordinates
    """
    listener = tf.TransformListener()
    to_frame = 'ar_marker_{}'.format(tag_number)
    from_frame = 'base'
    while not rospy.is_shutdown():
        try:
            t = listener.getLatestCommonTime(from_frame, to_frame)
            tag_pos, tag_rot = listener.lookupTransform(
                from_frame, to_frame, t)
            break
        except (tf.LookupException, tf.ConnectivityException,
                tf.ExtrapolationException):
            continue
    tag_rot = np.array(tfs.quaternion_matrix(tag_rot)[0:3, 0:3])
    return RigidTransform(tag_rot, tag_pos)
Example #28
def contacts_to_baxter_hand_pose(contact1, contact2, approach_direction):
    """ takes the contacts positions in the object frame and returns the hand pose T_obj_gripper
    
    Parameters
    ----------
    contact1 : :obj:`numpy.ndarray`
        position of finger1 in object frame
    contact2 : :obj:`numpy.ndarray`
        position of finger2 in object frame
    approach_direction : :obj:`numpy.ndarray`
        there are multiple grasps that go through contact1 and contact2.  This describes which 
        orientation the hand should be in

    Returns
    -------
    :obj:`autolab_core.RigidTransform` Hand pose in the object frame
    """
    # translation = average of contact points
    translation = (contact1 + contact2) / 2

    # x axis = vector pointing between contact points
    x_axis = contact1 - contact2

    # approach direction = z axis = always vertical... some grasps will fail, that's just how it is
    # Chris recommended hardcoding the approach direction like this
    z_axis = approach_direction

    # find the transform from object frame to grasp frame
    transform = utils.look_at_general(translation, x_axis, z_axis)
    T_obj_gripper = RigidTransform(transform[0:3, 0:3], transform[0:3, 3])
    return T_obj_gripper
Example #29
    def __init__(self,
                 mesh,
                 poses,
                 T_obj_world=RigidTransform(from_frame='obj',
                                            to_frame='world'),
                 material=MaterialProperties(),
                 enabled=True):
        """Initialize a scene object with the given mesh, pose, and material.

        Parameters
        ----------
        mesh : trimesh.Trimesh
            A mesh representing the object's geometry.
        poses : list of autolab_core.RigidTransform
            A set of poses, one for each instance of the scene object,
            relative to the full object's origin.
        T_obj_world : autolab_core.RigidTransform
            A rigid transformation from the object's frame to the world frame.
        material : MaterialProperties
            A set of material properties for the object.
        enabled : bool
            If False, the object will not be rendered.
        """

        super(InstancedSceneObject, self).__init__(mesh, T_obj_world, material,
                                                   enabled)
        self._poses = poses
Example #30
    def __init__(self,
                 mesh,
                 T_obj_world=RigidTransform(from_frame='obj',
                                            to_frame='world'),
                 material=MaterialProperties(),
                 enabled=True):
        """Initialize a scene object with the given mesh, pose, and material.

        Parameters
        ----------
        mesh : trimesh.Trimesh
            A mesh representing the object's geometry.
        T_obj_world : autolab_core.RigidTransform
            A rigid transformation from the object's frame to the world frame.
        material : MaterialProperties
            A set of material properties for the object.
        enabled : bool
            If False, the object will not be rendered.
        """
        if not isinstance(mesh, Trimesh):
            raise ValueError('mesh must be an object of type Trimesh')
        if not isinstance(material, MaterialProperties):
            raise ValueError(
                'material must be an object of type MaterialProperties')

        if material.smooth:
            mesh = mesh.smoothed()

        self._mesh = mesh
        self._material = material
        self.T_obj_world = T_obj_world
        self._enabled = enabled