Example #1
    def compute_tissue_pose(self, nw, ne, sw):
        x = -0.01
        y = 0.1
        z = 0.01
        roll = 1.5  # Tait-Bryan angles in degrees
        yaw = 2
        pitch = 3.5

        nw_position = np.hstack(np.array(nw.position))
        ne_position = np.hstack(np.array(ne.position))
        sw_position = np.hstack(np.array(sw.position))
        u = sw_position - nw_position
        v = ne_position - nw_position
        self.tissue_length = norm(u)
        self.tissue_width = norm(v)
        u /= self.tissue_length
        v /= self.tissue_width
        origin = nw_position  # origin of palpation board is the NW corner
        w = np.cross(u, v)
        u = np.cross(v, w)  # to ensure that all axes are orthogonal
        rotation_matrix = np.array([u,v,w]).transpose()
        # offset = np.dot(rotation_matrix, np.array([-0.009, 0.100, 0.01]))
        pose = tfx.pose(origin, rotation_matrix, frame=nw.frame)
        pose = (pose.as_tf()
                * tfx.transform([x, y, z])
                * tfx.transform(tfx.rotation_tb(0, 0, roll))
                * tfx.transform(tfx.rotation_tb(0, pitch, 0))
                * tfx.transform(tfx.rotation_tb(yaw, 0, 0)))
        self.tissue_pose = pose
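        # NOTE: the hard-coded dimensions below override the values measured from the corner positions above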
        self.tissue_width = 0.05
        self.tissue_length = 0.025
        return pose
Example #2
 def broadcast(self):
     f = open(
         "/home/davinci2/catkin_ws/src/davinci_vision/launch/BC_registration/camera_frame.p",
         "rb")
     self.camera_transform = pickle.load(f)
     f.close()
     #SOMETIMES NEED TO INVERT X & Y AXES; just change from 1 to -1 and vice versa
     offset = tfx.transform([SKETCH_OFFSET, 0, CAP_OFFSET],
                            [[1, 0, 0], [0, 1, 0], [0, 0, 1]])
     self.camera_transform = tfx.transform(
         self.camera_transform).as_transform() * offset
     transform = tfx.inverse_tf(self.camera_transform)
     pt = transform.translation
     rot = transform.rotation
     msg = TransformStamped()
     msg.header.stamp = rospy.Time.now()
     msg.transform.rotation.x = rot.x
     msg.transform.rotation.y = rot.y
     msg.transform.rotation.z = rot.z
     msg.transform.rotation.w = rot.w
     msg.child_frame_id = "left_BC"
     msg.transform.translation.x = pt.x
     msg.transform.translation.y = pt.y
     msg.transform.translation.z = pt.z
     msg.header.frame_id = "registration_brick_right"
     msg.header.stamp = rospy.Time.now()
     print('boo')
     while not rospy.is_shutdown():
         msg.header.stamp = rospy.Time.now()
         self.pub.publish([msg])
         rospy.sleep(0.1)
Example #3
    def gripper_pose(self, R_gripper_center = np.eye(3), t_gripper_center = PR2_GRASP_OFFSET):
        """
        Convert a grasp to a gripper pose in SE(3) using PR2 gripper_l_tool_frame as default
        Params:
           R_gripper_center: (numpy 3x3 array) rotation matrix from grasp basis to gripper basis
           t_gripper_center: (numpy 3 array) translation from grasp basis to gripper basis
        Returns:
           pose_gripper: (tfx transform) pose of gripper in the grasp frame
        """
        # convert gripper orientation to rotation matrix
        grasp_axis_y = self.axis_
        grasp_axis_x = np.array([grasp_axis_y[1], -grasp_axis_y[0], 0])
        grasp_axis_x = grasp_axis_x / np.linalg.norm(grasp_axis_x)
        grasp_axis_z = np.cross(grasp_axis_x, grasp_axis_y)

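        # stack the grasp axes as columns: rotation from the grasp center frame to the reference frame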
        R_center_ref = np.c_[grasp_axis_x, np.c_[grasp_axis_y, grasp_axis_z]]
        pose_center_ref = tfx.transform(R_center_ref, self.center_) # pose of grasp center in its reference frame

        # rotate along grasp approach angle
        R_center_rot_center = np.array([[ np.cos(self.approach_angle_), 0, np.sin(self.approach_angle_)],
                                        [                            0, 1,                            0],
                                        [-np.sin(self.approach_angle_), 0, np.cos(self.approach_angle_)]])
        pose_center_rot_center = tfx.transform(R_center_rot_center, np.zeros(3))
        pose_gripper_center_rot = tfx.transform(R_gripper_center, t_gripper_center)

        pose_gripper_ref = pose_center_ref.apply(pose_center_rot_center).apply(pose_gripper_center_rot)
        return pose_gripper_ref

        """
 def broadcast(self):
     f = open("/home/davinci2/catkin_ws/src/davinci_vision/launch/BC_registration/camera_frame.p", "rb")
     self.camera_transform = pickle.load(f)
     f.close()
     #SOMETIMES NEED TO INVERT X & Y AXES; just change from 1 to -1 and vice versa
     offset = tfx.transform([SKETCH_OFFSET, 0, CAP_OFFSET], [[1, 0, 0], [0, 1, 0], [0, 0, 1]])
     self.camera_transform = tfx.transform(self.camera_transform).as_transform() * offset
     transform = tfx.inverse_tf(self.camera_transform)
     pt = transform.translation
     rot = transform.rotation
     msg = TransformStamped()
     msg.header.stamp = rospy.Time.now()
     msg.transform.rotation.x = rot.x
     msg.transform.rotation.y = rot.y
     msg.transform.rotation.z = rot.z
     msg.transform.rotation.w = rot.w
     msg.child_frame_id = "left_BC"   
     msg.transform.translation.x = pt.x
     msg.transform.translation.y = pt.y
     msg.transform.translation.z = pt.z
     msg.header.frame_id = "registration_brick_right"
     msg.header.stamp = rospy.Time.now()
     print('boo')
     while not rospy.is_shutdown():
         msg.header.stamp = rospy.Time.now()
         self.pub.publish([msg])
         rospy.sleep(0.1)
Example #5
    def load_tissue_pose_from_registration_brick_pose(self):
        # tissue pose offsets:
        x = -0.048
        y = 0.07
        z = 0.006
        rotation = 0.0

        tissue_frame = pickle.load(open("registration_brick_pose.p", "rb"))
        tissue_frame = tissue_frame.as_tf()*tfx.transform([x,y,z])*tfx.transform(tfx.rotation_tb(rotation, 0, 0))
        self.tissue_pose = tfx.pose(tissue_frame)
        self.tissue_width = 0.056
        self.tissue_length = 0.028
Example #6
 def __init__(self, arms = ['L','R'], calcPosePostAdjustment=None, adjustmentInterpolation=True, errorCorrectionMode='NONE'):
     self.arms = arms
     self.errorCorrectionMode = errorCorrectionMode
     self.truthPose = {}
     self.calcPose = {}
     self.calcPoseAtTruth = {}
     self.sys_error = {}
     self.est_pose_pub = {}
     self.pose_error_pub = {}
     self.estimateFromImage = dict()
     self.estimatedPose = defaultdict(lambda: (None,None))
     if self.errorCorrectionMode == 'SYS':
         trained_data_R = pickle.load(open(os.path.join(roslib.packages.get_pkg_subdir('raven_2_params','data/right_arm'),'right_train_data.pkl')))
         trained_data_L =  pickle.load(open(os.path.join(roslib.packages.get_pkg_subdir('raven_2_params','data/left_arm'),'left_train_data.pkl')))
         self.sys_error['R'] = tfx.transform(trained_data_R["sys_robot_tf"]['R'])
         self.sys_error['L'] = tfx.transform(trained_data_L["sys_robot_tf"]['L'])
         print self.sys_error['R']
         print self.sys_error['L']
     elif self.errorCorrectionMode == 'PRE_POST':    
         self.pre_adjustment = dict((arm,tfx.identity_tf()) for arm in self.arms)
         self.post_adjustment = dict((arm,tfx.identity_tf()) for arm in self.arms)
         self.adjustment_side = dict((arm,'post') for arm in self.arms)
         self.switch_adjustment_update = True
         self.adjustmentInterpolation = {'pre': 1, 'post': 1}
         if adjustmentInterpolation is True:
             self.adjustmentInterpolation['pre'] = self.adjustmentInterpolation['post'] = 0.5
         elif isinstance(adjustmentInterpolation,dict):
             self.adjustmentInterpolation['pre'] = adjustmentInterpolation.get('pre',0.5)
             self.adjustmentInterpolation['post'] = adjustmentInterpolation.get('post',0.5)
         elif isinstance(adjustmentInterpolation,(list,tuple)):
             self.adjustmentInterpolation['pre'] = adjustmentInterpolation[0]
             self.adjustmentInterpolation['post'] = adjustmentInterpolation[1]
         else:
             self.adjustmentInterpolation['pre'] = self.adjustmentInterpolation['post'] = adjustmentInterpolation
     
         self.calcPosePostAdjustment = dict((arm,tfx.identity_tf()) for arm in self.arms)
         if calcPosePostAdjustment:
             for k,v in calcPosePostAdjustment.items():
                 self.calcPosePostAdjustment[k] = tfx.transform(v,copy=True)
     
         self.pre_adj_pub = {}
         self.post_adj_pub = {}
         for arm in self.arms:
             rospy.Subscriber(raven_constants.GripperTape.Topic+'_'+arm, PoseStamped, partial(self._truthCallback,arm))
             self.pre_adj_pub[arm] = rospy.Publisher('estimated_gripper_pose_pre_adjument_%s'%arm, TransformStamped)
             self.post_adj_pub[arm] = rospy.Publisher('estimated_gripper_pose_post_adjument_%s'%arm, TransformStamped)
             
     for arm in self.arms:
         self.est_pose_pub[arm] = rospy.Publisher('estimated_gripper_pose_%s'%arm, PoseStamped)
         self.pose_error_pub[arm] = rospy.Publisher('estimated_gripper_pose_error_%s'%arm, PoseStamped)    
     rospy.Subscriber(raven_constants.RavenTopics.RavenState, RavenState, self._ravenStateCallback)
Example #7
    def __init__(self,
                 arm,
                 sim,
                 tool_to_camera=None,
                 height=480.,
                 width=640.,
                 focal_length=.01,
                 fx=525.,
                 fy=525.,
                 cx=319.5,
                 cy=239.5,
                 min_range=.3,
                 max_range=1.5):
        #fx=640.*2., fy=480.*2., cx=640./2.+0.5, cy=480./2.+0.5, max_range=1.5):
        self.arm = arm
        self.sim = sim
        self.tool_to_camera = tool_to_camera if tool_to_camera is not None else tfx.transform(
            wrist_to_hand)

        self.height = height
        self.width = width

        self.fx, self.fy, self.cx, self.cy = fx, fy, cx, cy

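        # pinhole camera intrinsics matrix assembled from fx, fy, cx, cy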
        self.P = np.array([[fx, 0, cx], [0, fy, cy], [0, 0, 1]])

        self.focal_length = focal_length  # TODO: not sure if needed
        self.min_range = min_range
        self.max_range = max_range
        self.height_m = focal_length * (height / fy)
        self.width_m = focal_length * (width / fx)
Example #8
def test_2d_transform():
    sdf_2d_file_name = 'data/test/sdf/medium_black_spring_clamp_optimized_poisson_texture_mapped_mesh_clean_0.csv'
    sf2 = sf.SdfFile(sdf_2d_file_name)
    sdf_2d = sf2.read()

    # transform
    pose_mat = np.matrix([[0, 1, 0, 0], [-1, 0, 0, 0], [0, 0, 1, 0],
                          [0, 0, 0, 1]])
    tf = tfx.transform(pose_mat, from_frame='world')

    start_t = time.clock()
    sdf_tf = sdf_2d.transform(tf, scale=1.5)
    end_t = time.clock()
    duration = end_t - start_t
    logging.info('2D Transform took %f sec' % (duration))

    # display
    plt.figure()
    plt.subplot(1, 2, 1)
    sdf_2d.imshow()
    plt.title('Original')

    plt.subplot(1, 2, 2)
    sdf_tf.imshow()
    plt.title('Transformed')

    plt.show()
Example #9
File: sdf.py Project: brianhou/GPIS
def test_2d_transform():
    sdf_2d_file_name = 'data/test/sdf/medium_black_spring_clamp_optimized_poisson_texture_mapped_mesh_clean_0.csv'
    sf2 = sf.SdfFile(sdf_2d_file_name)
    sdf_2d = sf2.read()

    # transform
    pose_mat = np.matrix([[0, 1, 0, 0],[-1, 0, 0, 0],[0, 0, 1, 0],[0, 0,0,1]])
    tf = tfx.transform(pose_mat, from_frame='world')

    start_t = time.clock()
    sdf_tf = sdf_2d.transform(tf, scale = 1.5)
    end_t = time.clock()
    duration = end_t - start_t
    logging.info('2D Transform took %f sec' %(duration))

    # display
    plt.figure()
    plt.subplot(1,2,1)
    sdf_2d.imshow()
    plt.title('Original')

    plt.subplot(1,2,2)
    sdf_tf.imshow()
    plt.title('Transformed')

    plt.show()
Example #10
def Interpolate(robotname):
    r = robot.robot(robotname)

    # Open and close gripper 
    print("Starting gripper opening and closing")
    r.open_gripper(80)
    time.sleep(0.1)
    r.close_gripper()

    # Using move_cartesian_linear_interpolate
    # Move to a different cartesian and rotation pose
    start_pose = r.get_current_cartesian_position()

    pose1 = r.get_current_cartesian_position()
    pose1.position.y -= 0.03
    end_pose = tfx.pose(pose1.as_tf()*tfx.transform(tfx.rotation_tb(30, -60, 30)))

    print("Starting move_cartesian_frame_linear_interpolation")
    r.move_cartesian_frame_linear_interpolation(start_pose, 0.01)
    time.sleep(2)
    r.move_cartesian_frame_linear_interpolation(end_pose, 0.01)
    time.sleep(2)
    r.move_cartesian_frame_linear_interpolation(start_pose, 0.01)

    # Using move_cartesian_frame
    print("Starting move_cartesian_frame")
    r.move_cartesian_frame(start_pose, interpolate=True)
    time.sleep(2)
    r.move_cartesian_frame(end_pose, interpolate=True)
    time.sleep(2)
    r.move_cartesian_frame(start_pose, interpolate=True)

    print(start_pose)
    print(end_pose)
Example #11
    def tool_poses(self, msg=False, tf=None):
        if 'tool_pose' not in self.arms.columns.levels[1]:
            return None
        poses = {}
        pose_data = self.arms.xs('tool_pose', axis=1, level='field')
        arm_names = pose_data.columns.levels[0]

        if tf:
            tf = tfx.transform(tf, pose=False)

        for arm in arm_names:
            arm_pose_data = pose_data[arm]
            arm_poses = []
            stamps = []
            for i in xrange(len(arm_pose_data)):
                if np.isnan(arm_pose_data.ix[i, 'x']):
                    continue
                translation = arm_pose_data.ix[i, ['x', 'y', 'z']]
                rotation = arm_pose_data.ix[i, ['qx', 'qy', 'qz', 'qw']]

                if msg:
                    pose = PoseStamped()
                    pose.header.frame_id = self.robot.ix[i, 'frame']
                    pose.header.stamp = tfx.stamp(arm_pose_data.index[i]).ros
                    pose.pose.position.x = translation[0]
                    pose.pose.position.y = translation[1]
                    pose.pose.position.z = translation[2]
                    pose.pose.orientation.x = rotation[0]
                    pose.pose.orientation.y = rotation[1]
                    pose.pose.orientation.z = rotation[2]
                    pose.pose.orientation.w = rotation[3]
                else:
                    pose = tfx.pose(translation,
                                    rotation,
                                    stamp=arm_pose_data.index[i],
                                    frame=self.robot.ix[i, 'frame'],
                                    name=arm)
                    if tf:
                        pose = tf * pose
                arm_poses.append(pose)
                stamps.append(arm_pose_data.index[i])
            if not msg:
                arm_poses = pd.Series(arm_poses, index=stamps, name=arm)
            poses[arm] = arm_poses
        if not msg:
            return pd.DataFrame(poses)
        else:
            return poses
Example #12
    def write_mesh_stable_poses(self, mesh, filename, min_prob=0, vis=False):
        prob_mapping, cv_hull = st.compute_stable_poses(mesh), mesh.convex_hull()

        R_list = []
        for face, p in prob_mapping.items():
            if p >= min_prob:
                x0 = np.array(cv_hull.vertices()[face[0]])
                R_list.append([p, st.compute_basis([cv_hull.vertices()[i] for i in face], cv_hull), x0])

        if vis:
            print 'P', R_list[0][0]
            mv.figure()
            mesh.visualize()
            mv.axes()

            mv.figure()
            cv_hull_tf = cv_hull.transform(stf.SimilarityTransform3D(tfx.transform(R_list[0][1], np.zeros(3))))
            cv_hull_tf.visualize()
            mv.axes()
            mv.show()

        f = open(filename[:-4] + ".stp", "w")
        f.write("#############################################################\n")
        f.write("# STP file generated by UC Berkeley Automation Sciences Lab #\n")
        f.write("#                                                           #\n")
        f.write("# Num Poses: %d" %len(R_list))
        for _ in range(46 - len(str(len(R_list)))):
            f.write(" ")
        f.write(" #\n")
        f.write("# Min Probability: %s" %str(min_prob))
        for _ in range(40 - len(str(min_prob))):
            f.write(" ")
        f.write(" #\n")
        f.write("#                                                           #\n")
        f.write("#############################################################\n")
        f.write("\n")

        # adding R matrices to .stp file
        pose_index = 1
        for i in range(len(R_list)):
            f.write("p %f\n" %R_list[i][0])
            f.write("r %f %f %f\n" %(R_list[i][1][0][0], R_list[i][1][0][1], R_list[i][1][0][2]))
            f.write("  %f %f %f\n" %(R_list[i][1][1][0], R_list[i][1][1][1], R_list[i][1][1][2]))
            f.write("  %f %f %f\n" %(R_list[i][1][2][0], R_list[i][1][2][1], R_list[i][1][2][2]))
            f.write("x0 %f %f %f\n" %(R_list[i][2][0], R_list[i][2][1], R_list[i][2][2]))
        f.write("\n\n")
Example #13
	def tool_poses(self,msg=False,tf=None):
		if 'tool_pose' not in self.arms.columns.levels[1]:
			return None
		poses = {}
		pose_data = self.arms.xs('tool_pose',axis=1,level='field')
		arm_names = pose_data.columns.levels[0]
		
		if tf:
			tf = tfx.transform(tf,pose=False)
		
		for arm in arm_names:
			arm_pose_data = pose_data[arm]
			arm_poses = []
			stamps = []
			for i in xrange(len(arm_pose_data)):
				if np.isnan(arm_pose_data.ix[i,'x']):
					continue
				translation = arm_pose_data.ix[i,['x','y','z']]
				rotation = arm_pose_data.ix[i,['qx','qy','qz','qw']]
				
				if msg:
					pose = PoseStamped()
					pose.header.frame_id = self.robot.ix[i,'frame']
					pose.header.stamp = tfx.stamp(arm_pose_data.index[i]).ros
					pose.pose.position.x = translation[0]
					pose.pose.position.y = translation[1]
					pose.pose.position.z = translation[2]
					pose.pose.orientation.x = rotation[0]
					pose.pose.orientation.y = rotation[1]
					pose.pose.orientation.z = rotation[2]
					pose.pose.orientation.w = rotation[3]
				else:
					pose = tfx.pose(translation,rotation,stamp=arm_pose_data.index[i],frame=self.robot.ix[i,'frame'],name=arm)
					if tf:
						pose = tf * pose
				arm_poses.append(pose)
				stamps.append(arm_pose_data.index[i])
			if not msg:
				arm_poses = pd.Series(arm_poses,index=stamps,name=arm)
			poses[arm] = arm_poses
		if not msg:
			return pd.DataFrame(poses)
		else:
			return poses
Example #14
 def __init__(self, arms = ['L','R'], calcPosePostAdjustment=None, adjustmentInterpolation=True):
     self.arms = arms
     
     self.truthPose = {}
     self.calcPose = {}
     self.calcPoseAtTruth = {}
     self.estimatedPose = defaultdict(lambda: (None,None))
     
     self.pre_adjustment = dict((arm,tfx.identity_tf()) for arm in self.arms)
     self.post_adjustment = dict((arm,tfx.identity_tf()) for arm in self.arms)
     self.adjustment_side = dict((arm,'post') for arm in self.arms)
     self.switch_adjustment_update = True
     self.adjustmentInterpolation = {'pre': 1, 'post': 1}
     if adjustmentInterpolation is True:
         self.adjustmentInterpolation['pre'] = self.adjustmentInterpolation['post'] = 0.5
     elif isinstance(adjustmentInterpolation,dict):
         self.adjustmentInterpolation['pre'] = adjustmentInterpolation.get('pre',0.5)
         self.adjustmentInterpolation['post'] = adjustmentInterpolation.get('post',0.5)
     elif isinstance(adjustmentInterpolation,(list,tuple)):
         self.adjustmentInterpolation['pre'] = adjustmentInterpolation[0]
         self.adjustmentInterpolation['post'] = adjustmentInterpolation[1]
     else:
         self.adjustmentInterpolation['pre'] = self.adjustmentInterpolation['post'] = adjustmentInterpolation
     
     self.estimateFromImage = dict()
     
     self.calcPosePostAdjustment = dict((arm,tfx.identity_tf()) for arm in self.arms)
     if calcPosePostAdjustment:
         for k,v in calcPosePostAdjustment.items():
             self.calcPosePostAdjustment[k] = tfx.transform(v,copy=True)
     
     self.est_pose_pub = {}
     self.pose_error_pub = {}
     self.pre_adj_pub = {}
     self.post_adj_pub = {}
     for arm in self.arms:
         rospy.Subscriber(Constants.GripperTape.Topic+'_'+arm, PoseStamped, partial(self._truthCallback,arm))
         self.est_pose_pub[arm] = rospy.Publisher('estimated_gripper_pose_%s'%arm, PoseStamped)
         self.pose_error_pub[arm] = rospy.Publisher('estimated_gripper_pose_error_%s'%arm, PoseStamped)
         self.pre_adj_pub[arm] = rospy.Publisher('estimated_gripper_pose_pre_adjument_%s'%arm, TransformStamped)
         self.post_adj_pub[arm] = rospy.Publisher('estimated_gripper_pose_post_adjument_%s'%arm, TransformStamped)
         
     rospy.Subscriber(Constants.RavenTopics.RavenState, RavenState, self._ravenStateCallback)
Example #15
File: stp_file.py Project: brianhou/GPIS
    def write_mesh_stable_poses(self, mesh, filename, min_prob=0, vis=False):
        prob_mapping, cv_hull = st.compute_stable_poses(mesh), mesh.convex_hull()
        R_list = []
        for face, p in prob_mapping.items():
            if p >= min_prob:
                R_list.append([p, st.compute_basis([cv_hull.vertices()[i] for i in face])])

        if vis:
            print 'P', R_list[0][0]
            mv.figure()
            mesh.visualize()
            mv.axes()

            mv.figure()
            cv_hull_tf = cv_hull.transform(stf.SimilarityTransform3D(tfx.transform(R_list[0][1], np.zeros(3))))
            cv_hull_tf.visualize()
            mv.axes()
            mv.show()

        f = open(filename[:-4] + ".stp", "w")
        f.write("#############################################################\n")
        f.write("# STP file generated by UC Berkeley Automation Sciences Lab #\n")
        f.write("#                                                           #\n")
        f.write("# Num Poses: %d" %len(R_list))
        for _ in range(46 - len(str(len(R_list)))):
            f.write(" ")
        f.write(" #\n")
        f.write("# Min Probability: %s" %str(min_prob))
        for _ in range(40 - len(str(min_prob))):
            f.write(" ")
        f.write(" #\n")
        f.write("#                                                           #\n")
        f.write("#############################################################\n")
        f.write("\n")

        # adding R matrices to .stp file
        pose_index = 1
        for i in range(len(R_list)):
            f.write("p %f\n" %R_list[i][0])
            f.write("r %f %f %f\n" %(R_list[i][1][0][0], R_list[i][1][0][1], R_list[i][1][0][2]))
            f.write("  %f %f %f\n" %(R_list[i][1][1][0], R_list[i][1][1][1], R_list[i][1][1][2]))
            f.write("  %f %f %f\n" %(R_list[i][1][2][0], R_list[i][1][2][1], R_list[i][1][2][2]))
        f.write("\n\n")
Example #16
File: pfc.py Project: jeffmahler/GPIS
    def sample(self, size=1):
        """ Sample |size| random variables from the model """
        samples = []
        for i in range(size):
            # sample random pose
            xi = self.r_xi_rv_.rvs(size=1)
            S_xi = skew(xi)
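            # matrix exponential of the skew-symmetric matrix gives the sampled rotation (exponential map)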
            R = scipy.linalg.expm(S_xi).dot(self.obj_.tf.rotation)
            s = self.s_rv_.rvs(size=1)[0]
            t = self.t_rv_.rvs(size=1)
            sample_tf = stf.SimilarityTransform3D(tfx.transform(R.T, t), s)

            # transform object by pose
            obj_sample = self.obj_.transform(sample_tf)
            samples.append(obj_sample)

        # not a list if only 1 sample
        if size == 1:
            return samples[0]
        return samples
Example #17
File: pfc.py Project: puneetp/GPIS
    def sample(self, size=1):
        """ Sample |size| random variables from the model """
        samples = []
        for i in range(size):
            # sample random pose
            xi = self.r_xi_rv_.rvs(size=1)
            S_xi = skew(xi)
            R = scipy.linalg.expm(S_xi).dot(self.obj_.tf.rotation)
            s = self.s_rv_.rvs(size=1)[0]
            t = self.t_rv_.rvs(size=1)
            sample_tf = stf.SimilarityTransform3D(tfx.transform(R.T, t), s)

            # transform object by pose
            obj_sample = self.obj_.transform(sample_tf)
            samples.append(obj_sample)

        # not a list if only 1 sample
        if size == 1:
            return samples[0]
        return samples
Example #18
def test_3d_transform():
    np.random.seed(100)
    sdf_3d_file_name = 'data/test/sdf/Co_clean_dim_25.sdf'
    s = sf.SdfFile(sdf_3d_file_name)
    sdf_3d = s.read()

    # transform
    pose_mat = np.matrix([[0, 1, 0, 0], [-1, 0, 0, 0], [0, 0, 1, 0],
                          [0, 0, 0, 1]])
    tf = tfx.transform(pose_mat, from_frame='world')
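    # NOTE: the fixed transform above is immediately discarded in favor of a random one below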
    tf = tfx.random_tf()
    tf.position = 0.01 * np.random.rand(3)

    start_t = time.clock()
    s_tf = stf.SimilarityTransform3D(tf, scale=1.2)
    sdf_tf = sdf_3d.transform(s_tf)
    end_t = time.clock()
    duration = end_t - start_t
    logging.info('3D Transform took %f sec' % (duration))
    logging.info('Transformed resolution %f' % (sdf_tf.resolution))

    start_t = time.clock()
    sdf_tf_d = sdf_3d.transform(s_tf, detailed=True)
    end_t = time.clock()
    duration = end_t - start_t
    logging.info('Detailed 3D Transform took %f sec' % (duration))
    logging.info('Transformed detailed resolution %f' % (sdf_tf_d.resolution))

    # display
    plt.figure()
    sdf_3d.scatter()
    plt.title('Original')

    plt.figure()
    sdf_tf.scatter()
    plt.title('Transformed')

    plt.figure()
    sdf_tf_d.scatter()
    plt.title('Detailed Transformed')
    plt.show()
Example #19
File: sdf.py Project: brianhou/GPIS
def test_3d_transform():
    np.random.seed(100)
    sdf_3d_file_name = 'data/test/sdf/Co_clean_dim_25.sdf'
    s = sf.SdfFile(sdf_3d_file_name)
    sdf_3d = s.read()

    # transform
    pose_mat = np.matrix([[0, 1, 0, 0],[-1, 0, 0, 0],[0, 0, 1, 0],[0, 0,0,1]])
    tf = tfx.transform(pose_mat, from_frame='world')
    tf = tfx.random_tf()
    tf.position = 0.01 * np.random.rand(3)

    start_t = time.clock()
    s_tf = stf.SimilarityTransform3D(tf, scale = 1.2)
    sdf_tf = sdf_3d.transform(s_tf)
    end_t = time.clock()
    duration = end_t - start_t
    logging.info('3D Transform took %f sec' %(duration))
    logging.info('Transformed resolution %f' %(sdf_tf.resolution))

    start_t = time.clock()
    sdf_tf_d = sdf_3d.transform(s_tf, detailed = True)
    end_t = time.clock()
    duration = end_t - start_t
    logging.info('Detailed 3D Transform took %f sec' %(duration))
    logging.info('Transformed detailed resolution %f' %(sdf_tf_d.resolution))

    # display
    plt.figure()
    sdf_3d.scatter()
    plt.title('Original')

    plt.figure()
    sdf_tf.scatter()
    plt.title('Transformed')

    plt.figure()
    sdf_tf_d.scatter()
    plt.title('Detailed Transformed')
    plt.show()
Example #20
File: camera.py Project: gkahn13/pr2
 def __init__(self, arm, sim, tool_to_camera=None,
              height=480., width=640., focal_length=.01,
              fx=525., fy=525., cx=319.5, cy=239.5, min_range=.3, max_range=1.5):
              #fx=640.*2., fy=480.*2., cx=640./2.+0.5, cy=480./2.+0.5, max_range=1.5):
     self.arm = arm
     self.sim = sim
     self.tool_to_camera = tool_to_camera if tool_to_camera is not None else tfx.transform(wrist_to_hand)
     
     self.height = height
     self.width = width
     
     self.fx, self.fy, self.cx, self.cy = fx, fy, cx, cy
     
     self.P = np.array([[fx,  0, cx],
                        [ 0, fy, cy],
                        [ 0,  0,  1]])
     
     self.focal_length = focal_length # TODO: not sure if needed
     self.min_range = min_range
     self.max_range = max_range
     self.height_m = focal_length*(height/fy)
     self.width_m = focal_length*(width/fx)
Example #21
import roslib
roslib.load_manifest('tfx')
import tfx

import numpy as np
from matplotlib import pyplot as plt
from matplotlib import delaunay

import time
import random

from geometry import geometry2d, geometry3d
from pr2_sim import simulator, arm
from utils import utils

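# constant wrist-to-hand calibration: translation (x, y, z) and quaternion (x, y, z, w)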
wrist_to_hand = tfx.transform(
    (-0.111015481946, -0.0160376173344, -0.0735991062825),
    (0.531327739419, 0.463547417, 0.524778479206, 0.476888009158))


class RelativePyramid:
    def __init__(self, points, point_frames):
        assert (len(points) == 3 and len(point_frames) == 3)
        self.points = points
        self.point_frames = point_frames

    def construct_pyramid(self, cam):
        cam_pose = np.array(cam.get_pose().matrix)
        cam_rot, cam_trans = cam_pose[:3, :3], cam_pose[:3, 3]

        abs_points3d = list()
        for pt, frame in zip(self.points, self.point_frames):
Example #22
def kinect_transform_publisher():
    global head_pose, hand_pose
    head_pose = None
    hand_pose = None
    dummy = tfx.transform([0, 0, 0])
    #pub = rospy.Publisher('kinect_transform', )
    pub = tfx.TransformBroadcaster()
    rospy.init_node('kinect_transform_publisher', anonymous=True)

    rospy.Subscriber("/hand_kinect_ar_kinect_pose", ARMarkers, hand_callback)
    rospy.Subscriber("/head_kinect_ar_kinect_pose", ARMarkers, head_callback)
    transformer = tfx.TransformListener()
    r = rospy.Rate(5)

    #head_pose = True # REMOVE THIS
    #head_pose = False # REMOVE THIS
    i = 0
    transforms = []
    while not rospy.is_shutdown() and i < 500:
        if head_pose and hand_pose:
            head_transform = tfx.transform(head_pose,
                                           parent='camera_rgb_optical_frame',
                                           child='ar_frame')  #.as_transform()
            hand_transform = tfx.transform(hand_pose,
                                           parent='hand_kinect_optical_frame',
                                           child='ar_frame')  #.as_transform()
            #head_to_ar = tfx.transform(transformer.lookupTransform('ar_frame', 'camera_rgb_optical_frame', rospy.Time()), parent='camera_rgb_optical_frame', child='ar_frame')
            #ar_to_hand = tfx.transform(transformer.lookupTransform('hand_kinect_optical_frame', 'ar_frame', rospy.Time()), parent='ar_frame', child='hand_kinect_optical_frame')
            head_to_hand = tfx.transform(head_transform.matrix *
                                         hand_transform.inverse().matrix,
                                         parent='camera_rgb_optical_frame',
                                         child='hand_kinect_optical_frame')
            #head_to_hand = tfx.transform(head_to_ar.inverse().matrix * ar_to_hand.matrix, parent='camera_rgb_optical_frame', child='hand_kinect_optical_frame')
            #rospy.loginfo(head_transform)
            #rospy.loginfo(hand_transform.inverse())
            #rospy.loginfo(head_to_ar)
            #rospy.loginfo(ar_to_hand)
            #print head_to_hand
            wrist_to_head = tfx.lookupTransform('r_gripper_tool_frame',
                                                'camera_rgb_optical_frame')
            #wrist_to_head = tfx.transform(transformer.lookupTransform('r_gripper_tool_frame', 'camera_rgb_optical_frame', rospy.Time()), child='camera_rgb_optical_frame', parent='r_gripper_tool_frame')
            wrist_to_hand = tfx.transform(wrist_to_head.matrix *
                                          head_to_hand.matrix,
                                          parent='r_gripper_tool_frame',
                                          child='hand_kinect_optical_frame')
            #print wrist_to_head
            print wrist_to_hand
            print
            #rospy.loginfo(wrist_to_head)
            #rospy.loginfo(wrist_to_hand)
            #pub.sendTransform(wrist_to_hand.position, wrist_to_hand.rotation, rospy.Time(), wrist_to_hand.child, wrist_to_hand.parent)
            #pub.sendTransform(head_to_hand.position, head_to_hand.rotation, rospy.Time(), head_to_hand.child, head_to_hand.parent)
            transforms.append(wrist_to_hand)
            i += 1
        else:
            if head_pose is None:
                print('Head pose is None')
            if hand_pose is None:
                print('Hand pose is None')
            print('')
        r.sleep()
    transform = transforms[0]
    for i in range(1, len(transforms)):
        transform = transform.interpolate(transforms[i], 1.0 / len(transforms))  # float fraction; in Python 2, 1/len(...) truncates to 0
    transform.child = "camera_rgb_optical_frame"
    print transform
    rospack = rospkg.RosPack()
    os.chdir(rospack.get_path("kinect_transform"))
    s = '<launch>\n'
    s += '<node pkg="tf" type="static_transform_publisher" name="static3" args="'
    for value in transform.position.list + transform.quaternion.list:
        s += str(value) + ' '
    s += transform.parent + ' ' + transform.child + ' '
    s += '100" />\n'
    s += '</launch>'
    try:
        f = open("launch/wrist_to_hand.launch", "w")
        f.write(s)
    finally:
        f.close()
Example #23
def kinect_transform_publisher():
    global head_pose, hand_pose
    head_pose = None
    hand_pose = None
    dummy = tfx.transform([0,0,0])
    #pub = rospy.Publisher('kinect_transform', )
    pub = tfx.TransformBroadcaster()
    rospy.init_node('kinect_transform_publisher', anonymous=True)

    rospy.Subscriber("/hand_kinect_ar_kinect_pose", ARMarkers, hand_callback)
    rospy.Subscriber("/head_kinect_ar_kinect_pose", ARMarkers, head_callback)
    transformer = tfx.TransformListener()
    r = rospy.Rate(5)

    #head_pose = True # REMOVE THIS
    #head_pose = False # REMOVE THIS
    i = 0
    transforms = []
    while not rospy.is_shutdown() and i < 500:
        if head_pose and hand_pose:
            head_transform = tfx.transform(head_pose, parent='camera_rgb_optical_frame', child='ar_frame')#.as_transform()
            hand_transform = tfx.transform(hand_pose, parent='hand_kinect_optical_frame', child='ar_frame')#.as_transform()
            #head_to_ar = tfx.transform(transformer.lookupTransform('ar_frame', 'camera_rgb_optical_frame', rospy.Time()), parent='camera_rgb_optical_frame', child='ar_frame')
            #ar_to_hand = tfx.transform(transformer.lookupTransform('hand_kinect_optical_frame', 'ar_frame', rospy.Time()), parent='ar_frame', child='hand_kinect_optical_frame')
            head_to_hand = tfx.transform(head_transform.matrix * hand_transform.inverse().matrix, parent='camera_rgb_optical_frame', child='hand_kinect_optical_frame')
            #head_to_hand = tfx.transform(head_to_ar.inverse().matrix * ar_to_hand.matrix, parent='camera_rgb_optical_frame', child='hand_kinect_optical_frame')
            #rospy.loginfo(head_transform)
            #rospy.loginfo(hand_transform.inverse())
            #rospy.loginfo(head_to_ar)
            #rospy.loginfo(ar_to_hand)
            #print head_to_hand
            wrist_to_head = tfx.lookupTransform('r_gripper_tool_frame', 'camera_rgb_optical_frame')
            #wrist_to_head = tfx.transform(transformer.lookupTransform('r_gripper_tool_frame', 'camera_rgb_optical_frame', rospy.Time()), child='camera_rgb_optical_frame', parent='r_gripper_tool_frame')
            wrist_to_hand = tfx.transform(wrist_to_head.matrix * head_to_hand.matrix, parent='r_gripper_tool_frame', child='hand_kinect_optical_frame')
            #print wrist_to_head
            print wrist_to_hand
            print
            #rospy.loginfo(wrist_to_head)
            #rospy.loginfo(wrist_to_hand)
            #pub.sendTransform(wrist_to_hand.position, wrist_to_hand.rotation, rospy.Time(), wrist_to_hand.child, wrist_to_hand.parent)
            #pub.sendTransform(head_to_hand.position, head_to_hand.rotation, rospy.Time(), head_to_hand.child, head_to_hand.parent)
            transforms.append(wrist_to_hand)
            i += 1
        else:
            if head_pose is None:
                print('Head pose is None')
            if hand_pose is None:
                print('Hand pose is None')
            print('')
        r.sleep()
    transform = transforms[0]
    for i in range(1, len(transforms)):
        transform = transform.interpolate(transforms[i], 1.0 / len(transforms))  # float fraction; in Python 2, 1/len(...) truncates to 0
    transform.child = "camera_rgb_optical_frame"
    print transform
    rospack = rospkg.RosPack()
    os.chdir(rospack.get_path("kinect_transform"))
    s = '<launch>\n'
    s += '<node pkg="tf" type="static_transform_publisher" name="static3" args="'
    for value in transform.position.list + transform.quaternion.list:
        s += str(value) + ' '
    s += transform.parent + ' ' + transform.child + ' '
    s += '100" />\n'
    s += '</launch>'
    try:
        f = open("launch/wrist_to_hand.launch", "w")
        f.write(s)
    finally:
        f.close()
Example #24
File: camera.py Project: gkahn13/pr2
import roslib
roslib.load_manifest('tfx')
import tfx

import numpy as np
from matplotlib import pyplot as plt
from matplotlib import delaunay

import time
import random

from geometry import geometry2d, geometry3d
from pr2_sim import simulator, arm
from utils import utils

wrist_to_hand = tfx.transform((-0.111015481946, -0.0160376173344, -0.0735991062825),
                              (0.531327739419, 0.463547417, 0.524778479206, 0.476888009158))

class RelativePyramid:
    def __init__(self, points, point_frames):
        assert(len(points) == 3 and len(point_frames) == 3)
        self.points = points
        self.point_frames = point_frames
        
    def construct_pyramid(self, cam):
        cam_pose = np.array(cam.get_pose().matrix)
        cam_rot, cam_trans = cam_pose[:3,:3], cam_pose[:3,3]
        
        abs_points3d = list()
        for pt, frame in zip(self.points, self.point_frames):
            assert(frame.count('base_link') > 0 or frame.count('camera') > 0)
            
Example #25
def actual_world_to_ik_world(armId):
    if armId == GOLD_ARM_ID:
        return tfx.transform([[0, 1, 0], [-1, 0, 0], [0, 0, 1]])
    else:
        return tfx.transform([[0, -1, 0], [1, 0, 0], [0, 0, 1]],
                             (-0.149, 0.005, -0.006))
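A minimal usage sketch (not part of the original example; assumes tfx and GOLD_ARM_ID from the surrounding module): the returned transform maps a pose from the actual world frame into the IK world frame.

T = actual_world_to_ik_world(GOLD_ARM_ID)
p_ik = T * tfx.pose((0.1, 0.0, 0.05))  # hypothetical point; tfx applies transforms with *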
Example #26
GRASP1 = 6
GRASP2 = 7
YAW = 8
GRASP = 9

A12 = 1.30899694  # Link1 - 75deg in RAD
A23 = 0.907571211  # Link2 - 52deg in RAD - was set to 60

GOLD_ARM_ID = 'L'
GREEN_ARM_ID = 'R'

RAD2DEG = 180. / pi
DEG2RAD = pi / 180.

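# 180-degree rotation about the x axis (flips the y and z axes)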
TOOL_POSE_AXES_TRANSFORM = tfx.transform((1, 0, 0, 0, -1, 0, 0, 0, -1))

SHOULDER_OFFSET_GOLD = atan(0.3471 / 0.9014)  #from original URDF
TOOL_ROT_OFFSET_GOLD = -pi / 2
WRIST_OFFSET_GOLD = 0

SHOULDER_OFFSET_GREEN = atan(0.3471 / 0.9014)  #from original URDF
TOOL_ROT_OFFSET_GREEN = -pi / 2
WRIST_OFFSET_GREEN = 0

THETA_12 = -A12
THETA_23 = -A23
DW = 0.012

Z = lambda theta, d: tfx.transform([[cos(float(theta)), -sin(float(theta)), 0],
                                    [sin(float(theta)), cos(float(theta)), 0],
                                    [0, 0, 1]], (0, 0, d))
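A small sketch (hypothetical joint values, not from the source) of how the Z helper chains DH-style link transforms; tfx transforms compose with *.

T_01 = Z(THETA_12, 0.0)  # rotation about z by THETA_12
T_12 = Z(THETA_23, DW)   # rotation about z by THETA_23 with offset DW along z
T_02 = T_01 * T_12       # composed two-link transform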
Example #27
    def listener(self):
        rospy.init_node('robo_registration', anonymous=True)
        # dvrk_psm1/joint_position_cartesian is right arm
        pose_topic = ""
        if self.arm == "right":
            pose_topic = "dvrk_psm1/joint_position_cartesian"
        else:
            pose_topic = "dvrk_psm2/joint_position_cartesian"
        rospy.Subscriber(pose_topic, PoseStamped, self.record_pose_callback)   
        rospy.Subscriber("/BC/chessboard_pose", PoseStamped, self.get_transform_callback)

        

        save_camera_transform = raw_input("Do you want to save a new camera transform? (yes/no) ")

        if save_camera_transform == "yes":
            while not self.have_camera_transform and not rospy.is_shutdown():
                print("Waiting for camera transform")
                rospy.sleep(0.5)
            print("Saving camera transform in file")
            f = open("/home/davinci2/catkin_ws/src/davinci_vision/launch/BC_registration/camera_frame.p", "wb")
            pickle.dump(self.camera_transform, f)
            f.close()
        else:
            f = open("/home/davinci2/catkin_ws/src/davinci_vision/launch/BC_registration/camera_frame.p", "rb")
            self.camera_transform = pickle.load(f)
            f.close()

        raw_input("Remove chessboard; press any key when done")
        cols = int(raw_input("Enter number of columns on registration block: "))
        rows = int(raw_input("Enter number of rows on registration block: "))
        dimensions = [cols, rows]
        #will have to do both grippers
        print("Please start at top left corner and go row by row, moving from left to right. Use " + self.arm + \
            " gripper")
        print("First coordinate is x, second is y; (0, 0) is top left corner")
        # cols_even = cols % 2 == 0
        # rows_even = rows % 2 == 0
        
        # wrist_poses = []

        for j in range(rows):
            for i in range(cols):
                command = raw_input("Press r to record current pose (cell " + str(i) + ", " + str(j) + "): ")
                if command == "r":
                    self.recording_pose = True
                    print("Pose recorded!")
                    # wrist_poses.append(tfx.lookupTransform("two_tool_wrist_sca_shaft_link", "two_remote_center_link").msg.PoseStamped())
                

                # b = tfx.pose(transform)
                # b.name = None
                # b.as_tf() * self.pose_data[0]
                
        #LOLMAGICNUMBER        
        offset = tfx.transform([0, 0, -0.00483], [[1, 0, 0], [0, 1, 0], [0, 0, 1]])
        trans_data = [tfx.pose(tfx.pose(pose).as_tf() * offset).msg.PoseStamped() for pose in self.pose_data]
        self.pose_data = trans_data
        # while not self.done_recording:
        #     command = raw_input("Press r to record current pose or q to finish: ")
        #     if command == "r":
        #         self.recording_pose = True
        #         print("Pose recorded!")
        #     elif command == "q":
        #         self.done_recording = True
        
        # b = tfx.pose(transform)
        # b.name = None
        # b = b.as_tf()

        # transformed_pose_data = [b * pose for pose in self.pose_data]

        # transform = tfx.lookupTransform("two_tool_base_link", "two_tool_middle_link")
        # b = tfx.pose(transform)
        # b.name = None
        # b = b.as_tf()

        # transformed_pose_data = [(b * pose) for pose in self.pose_data]

        f = open('pose_data_' + self.arm + '.p', 'wb')
        pickle.dump([dimensions, self.pose_data], f)
        f.close()
        # f = open('pose_data_wrist_' + self.arm + '.p', 'wb')
        # pickle.dump([dimensions, wrist_poses], f)
        # f.close()
        
        self.calculate(dimensions)
        # self.pose_data = wrist_poses
        # self.calculate(dimensions)

        rospy.spin()
Example #28
    def execute_raster_tilted(self, theta, direction):
        """ Direction should be 1 for L2R and -1 for R2L"""
        """ Linearly interpolates through a series of palpation points """
        
        print('hi')
        steps = 12
        poses = []

        origin = np.hstack(np.array(self.tissue_pose.position))
        frame = np.array(self.tissue_pose.orientation.matrix)

        u, v, w = frame.T[0], frame.T[1], frame.T[2]
        rotation_matrix = np.array([v, u, -w]).transpose()
        

        # frame = tfx.pose(tfx.pose(frame_og).as_tf()*tfx.transform(tfx.tb_angles(roll=10, pitch=0, yaw=0))).rotation.matrix
        # u, v, w = frame.T[0], frame.T[1], frame.T[2]
        # rot1 = np.array([v, u, -w]).transpose()
        # frame = tfx.pose(tfx.pose(frame_og).as_tf()*tfx.transform(tfx.tb_angles(roll=0, pitch=10, yaw=0))).rotation.matrix
        # u, v, w = frame.T[0], frame.T[1], frame.T[2]
        # rot2 = np.array([v, u, -w]).transpose()
        # frame = tfx.pose(tfx.pose(frame_og).as_tf()*tfx.transform(tfx.tb_angles(roll=0, pitch=0, yaw=10))).rotation.matrix
        # u, v, w = frame.T[0], frame.T[1], frame.T[2]
        # rot3 = np.array([v, u, -w]).transpose()

        # import IPython; IPython.embed()
        dy = self.tissue_length/(steps)
        z = self.probe_offset

        print('what"s')
        # pick up tool
        # self.pick_up_tool()
        print('up')

        self.probe_stop_reset()

        pose = tfx.pose(origin, rotation_matrix, frame=self.tissue_pose.frame)
        # # pose1 = tfx.pose(pose.as_tf()*tfx.transform(tfx.tb_angles(roll=15, pitch=0, yaw=0))) #apparently this tilts head of probe toward base of arms. wait no apparently this tilts head of probe toward monitors? (to the right) this is probably the direction that the probe constrains rotation in
        rotation_matrix = tfx.pose(pose.as_tf()*tfx.transform(tfx.tb_angles(roll=0, pitch=theta*direction, yaw=0))).rotation.matrix

        RASTER_SPEED = 0.005
        INAIR_SPEED = 0.03
        TEST_OFFSET = 0
        TEST_OFFSET2 = 0.3
        # steps = 6
        for i in range(steps):
            if i == 0:
                continue
            # offset = np.dot(frame, np.array([i*dy, 0.0, z+0.03]))
            # pose = tfx.pose(origin+offset, rotation_matrix, frame=self.tissue_pose.frame)
            # # # pose1 = tfx.pose(pose.as_tf()*tfx.transform(tfx.tb_angles(roll=15, pitch=0, yaw=0))) #apparently this tilts head of probe toward base of arms. wait no apparently this tilts head of probe toward monitors? (to the right) this is probably the direction that the probe constrains rotation in
            # rotation_matrix = tfx.pose(pose.as_tf()*tfx.transform(tfx.tb_angles(roll=0, pitch=theta*direction, yaw=0))).rotation.matrix #tilted even more toward monitors? what? also a bit towards base. why are roll & pitch the same o.o joint limits? moves more than pose1. 
            # should be done before for loop? lol
            # pose1 = tfx.pose(pose.as_tf()*tfx.transform(tfx.tb_angles(roll=10, pitch=0, yaw=0))) #away from arms and to the left
            # pose2 = tfx.pose(pose.as_tf()*tfx.transform(tfx.tb_angles(roll=0, pitch=10, yaw=0))) #away from arms and to the left
            # pose3 = tfx.pose(pose.as_tf()*tfx.transform(tfx.tb_angles(roll=0, pitch=0, yaw=10))) #away from arms and to the left

            # pose4 = tfx.pose(origin+offset, rot1) #so apparently these don't work
            # pose5 = tfx.pose(origin+offset, rot2)
            # pose6 = tfx.pose(origin+offset, rot3)
            # print('og pose')
            # self.psm1.move_cartesian_frame_linear_interpolation(pose, SPEED, False)
            # import IPython; IPython.embed()
            # print('pose1')
            # self.psm1.move_cartesian_frame_linear_interpolation(pose1, SPEED, False)
            # import IPython; IPython.embed()
            # print('pose2')
            # self.psm1.move_cartesian_frame_linear_interpolation(pose2, SPEED, False)
            # import IPython; IPython.embed()
            # print('pose3')
            # self.psm1.move_cartesian_frame_linear_interpolation(pose3, SPEED, False)
            # import IPython; IPython.embed()
            if direction == 1:
                offset = np.dot(frame, np.array([i*dy, 0.0, z+0.02+TEST_OFFSET]))
                pose = tfx.pose(origin+offset, rotation_matrix, frame=self.tissue_pose.frame)
                self.psm1.move_cartesian_frame_linear_interpolation(pose, INAIR_SPEED, False)
                
                a = str(self.psm1.get_current_joint_position())
                print(a)
                # import IPython; IPython.embed()
                # print('og pose')
                # self.psm1.move_cartesian_frame_linear_interpolation(pose, 0.01, False)
                # import IPython; IPython.embed()
                # print('pose 1')
                # self.psm1.move_cartesian_frame_linear_interpolation(pose1, 0.005, False)
                # IPython.embed()
                # print('pose 2')
                # self.psm1.move_cartesian_frame_linear_interpolation(pose2, 0.005, False)
                # IPython.embed()
                # print('pose 3')
                # self.psm1.move_cartesian_frame_linear_interpolation(pose3, 0.005, False)
                # IPython.embed()
                # print('pose 4')
                # self.psm1.move_cartesian_frame_linear_interpolation(pose4, 0.005, False)
                # IPython.embed()
                # print('pose 5')
                # self.psm1.move_cartesian_frame_linear_interpolation(pose5, 0.005, False)
                # IPython.embed()
                # print('pose 6')
                # self.psm1.move_cartesian_frame_linear_interpolation(pose6, 0.005, False)
                # IPython.embed()


                offset = np.dot(frame, np.array([i*dy, 0.0, z+TEST_OFFSET]))
                pose = tfx.pose(origin+offset, rotation_matrix, frame=self.tissue_pose.frame)
                self.psm1.move_cartesian_frame_linear_interpolation(pose, INAIR_SPEED, False)
                
                b = str(self.psm1.get_current_joint_position())
                print(b)

                # start recording data
                rospy.sleep(0.2)
                self.probe_start()

                offset = np.dot(frame, np.array([i*dy, self.tissue_width*(0.95+TEST_OFFSET2), z+TEST_OFFSET]))
                pose = tfx.pose(origin+offset, rotation_matrix, frame=self.tissue_pose.frame)
                self.psm1.move_cartesian_frame_linear_interpolation(pose, RASTER_SPEED, False)
                
                c = str(self.psm1.get_current_joint_position())
                print(c)

                # pause recording data
                rospy.sleep(0.2)
                self.probe_pause()
                print(self.probe_data)

                offset = np.dot(frame, np.array([i*dy, self.tissue_width*(0.95+TEST_OFFSET2), z+0.02+TEST_OFFSET]))
                pose = tfx.pose(origin+offset, rotation_matrix, frame=self.tissue_pose.frame)
                self.psm1.move_cartesian_frame_linear_interpolation(pose, INAIR_SPEED, False)
                
                d = str(self.psm1.get_current_joint_position())
                print(d)

                # offset = np.dot(frame, np.array([i*dy+0.04, 0, z+0.03]))
                # pose = tfx.pose(origin+offset, rotation_matrix, frame=self.tissue_pose.frame)
                # self.psm1.move_cartesian_frame_linear_interpolation(pose, SPEED, False)
                # e = str(self.psm1.get_current_joint_position())
                # print(e)

                # import IPython; IPython.embed()
            else:
                offset = np.dot(frame, np.array([i*dy, self.tissue_width*0.95, z+0.02]))
                pose = tfx.pose(origin+offset, rotation_matrix, frame=self.tissue_pose.frame)
                self.psm1.move_cartesian_frame_linear_interpolation(pose, INAIR_SPEED, False)

                offset = np.dot(frame, np.array([i*dy, self.tissue_width*0.95, z]))
                pose = tfx.pose(origin+offset, rotation_matrix, frame=self.tissue_pose.frame)
                self.psm1.move_cartesian_frame_linear_interpolation(pose, INAIR_SPEED, False)

                # start recording data
                rospy.sleep(0.2)
                self.probe_start()

                offset = np.dot(frame, np.array([i*dy, 0.0, z]))
                pose = tfx.pose(origin+offset, rotation_matrix, frame=self.tissue_pose.frame)
                self.psm1.move_cartesian_frame_linear_interpolation(pose, RASTER_SPEED, False)

                # pause recording data
                rospy.sleep(0.2)
                self.probe_pause()
                print(self.probe_data)

                offset = np.dot(frame, np.array([i*dy, 0.0, z+0.02]))
                pose = tfx.pose(origin+offset, rotation_matrix, frame=self.tissue_pose.frame)
                self.psm1.move_cartesian_frame_linear_interpolation(pose, INAIR_SPEED, False)

        print(self.probe_data)
        self.probe_save("probe_data_newdvrk_po" + str(self.probe_offset) + "_s" + str(RASTER_SPEED) + "_t" + str(theta) + "_d" + str(direction) + ".p")
Example #29
def actual_world_to_ik_world(armId):
    if armId == GOLD_ARM_ID:
        return tfx.transform([[0,1,0],[ -1,0,0],[ 0,0,1]])
    else:
        return tfx.transform([[0,-1,0],[1,0,0], [0,0,1]],(-0.149,0.005,-0.006))
Example #30
WRIST      =5
GRASP1     =6
GRASP2     =7
YAW        =8
GRASP      =9

A12 = 1.30899694  # Link1 - 75deg in RAD
A23 = 0.907571211  #Link2 - 52deg in RAD - was set to 60

GOLD_ARM_ID = 'L'
GREEN_ARM_ID = 'R'

RAD2DEG = 180. / pi
DEG2RAD = pi / 180.

TOOL_POSE_AXES_TRANSFORM = tfx.transform((1,0,0,  0,-1,0,  0,0,-1))

SHOULDER_OFFSET_GOLD = atan(0.3471/0.9014) #from original URDF
TOOL_ROT_OFFSET_GOLD = -pi/2
WRIST_OFFSET_GOLD = 0

SHOULDER_OFFSET_GREEN = atan(0.3471/0.9014)#from original URDF
TOOL_ROT_OFFSET_GREEN = -pi/2
WRIST_OFFSET_GREEN = 0

THETA_12 = -A12
THETA_23 = -A23
DW = 0.012

Z = lambda theta,d: tfx.transform([[cos(float(theta)),-sin(float(theta)),0], [sin(theta),cos(float(theta)),0], [0,0,1]],(0,0,d))
X = lambda alpha,a: tfx.transform([[1,0,0], [0,cos(float(alpha)),-sin(float(alpha))], [0,sin(float(alpha)),cos(float(alpha))]],(a,0,0))
Example #31
    def listener(self):
        rospy.init_node('robo_registration', anonymous=True)
        # dvrk_psm1/joint_position_cartesian is right arm
        pose_topic = ""
        if self.arm == "right":
            pose_topic = "dvrk_psm1/joint_position_cartesian"
        else:
            pose_topic = "dvrk_psm2/joint_position_cartesian"
        rospy.Subscriber(pose_topic, PoseStamped, self.record_pose_callback)
        rospy.Subscriber("/BC/chessboard_pose", PoseStamped,
                         self.get_transform_callback)

        save_camera_transform = raw_input(
            "Do you want to save a new camera transform? (yes/no) ")

        if save_camera_transform == "yes":
            while not self.have_camera_transform and not rospy.is_shutdown():
                print("Waiting for camera transform")
                rospy.sleep(0.5)
            print("Saving camera transform in file")
            f = open(
                "/home/davinci2/catkin_ws/src/davinci_vision/launch/BC_registration/camera_frame.p",
                "wb")
            pickle.dump(self.camera_transform, f)
            f.close()
        else:
            f = open(
                "/home/davinci2/catkin_ws/src/davinci_vision/launch/BC_registration/camera_frame.p",
                "rb")
            self.camera_transform = pickle.load(f)
            f.close()

        raw_input("Remove chessboard; press any key when done")
        cols = int(
            raw_input("Enter number of columns on registration block: "))
        rows = int(raw_input("Enter number of rows on registration block: "))
        dimensions = [cols, rows]
        #will have to do both grippers
        print("Please start at top left corner and go row by row, moving from left to right. Use " + self.arm + \
            " gripper")
        print("First coordinate is x, second is y; (0, 0) is top left corner")
        # cols_even = cols % 2 == 0
        # rows_even = rows % 2 == 0

        # wrist_poses = []

        for j in range(rows):
            for i in range(cols):
                command = raw_input("Press r to record current pose (cell " +
                                    str(i) + ", " + str(j) + "): ")
                if command == "r":
                    self.recording_pose = True
                    print("Pose recorded!")
                    # wrist_poses.append(tfx.lookupTransform("two_tool_wrist_sca_shaft_link", "two_remote_center_link").msg.PoseStamped())

                # b = tfx.pose(transform)
                # b.name = None
                # b.as_tf() * self.pose_data[0]

        #LOLMAGICNUMBER
        offset = tfx.transform([0, 0, -0.00483],
                               [[1, 0, 0], [0, 1, 0], [0, 0, 1]])
        trans_data = [
            tfx.pose(tfx.pose(pose).as_tf() * offset).msg.PoseStamped()
            for pose in self.pose_data
        ]
        self.pose_data = trans_data
        # while not self.done_recording:
        #     command = raw_input("Press r to record current pose or q to finish: ")
        #     if command == "r":
        #         self.recording_pose = True
        #         print("Pose recorded!")
        #     elif command == "q":
        #         self.done_recording = True

        # b = tfx.pose(transform)
        # b.name = None
        # b = b.as_tf()

        # transformed_pose_data = [b * pose for pose in self.pose_data]

        # transform = tfx.lookupTransform("two_tool_base_link", "two_tool_middle_link")
        # b = tfx.pose(transform)
        # b.name = None
        # b = b.as_tf()

        # transformed_pose_data = [(b * pose) for pose in self.pose_data]

        f = open('pose_data_' + self.arm + '.p', 'wb')
        pickle.dump([dimensions, self.pose_data], f)
        f.close()
        # f = open('pose_data_wrist_' + self.arm + '.p', 'wb')
        # pickle.dump([dimensions, wrist_poses], f)
        # f.close()

        self.calculate(dimensions)
        # self.pose_data = wrist_poses
        # self.calculate(dimensions)

        rospy.spin()