def head_pose_cb(self, msg): pos = [msg.pose.position.x, msg.pose.position.y, msg.pose.position.z] ori = [ msg.pose.orientation.x, msg.pose.orientation.y, msg.pose.orientation.z, msg.pose.orientation.w ] if self.head_pose == [pos, ori]: now = rospy.Time.now() self.listener.waitForTransform('/base_link', '/torso_lift_link', now, rospy.Duration(20)) (trans, rot) = self.listener.lookupTransform('/base_link', '/torso_lift_link', now) pos_out, ori_out = Bmat_to_pos_quat( createBMatrix(trans, rot) * createBMatrix(pos, ori)) # pos_out, ori_out = pos, ori record_file_gt = open( ''.join([ self.pkg_path, '/data/goals/', self.task, '/', self.task, '_', self.reference, '_pose_', str(self.file_number), '.txt' ]), 'w') record_file_gt.write(''.join([ '[%f, %f, %f, %f, %f, %f, %f]\n' % (pos_out[0], pos_out[1], pos_out[2], ori_out[0], ori_out[1], ori_out[2], ori_out[3]) ])) record_file_gt.close() print 'Just saved file # ', self.file_number, 'for task ', self.task, ' using reference ', self.reference self.file_number += 1 self.head_pose = [pos, ori]
def update_relations(self):
    """Publish the head pose expressed in the PR2 base frame.

    Combines the latest raw world-frame base and head poses into
    pr2_B_head = (world_B_pr2)^-1 * world_B_head and publishes the result
    as a PoseStamped in /base_link.
    """
    pr2_trans = [self.raw_base_pose.pose.position.x,
                 self.raw_base_pose.pose.position.y,
                 self.raw_base_pose.pose.position.z]
    pr2_rot = [self.raw_base_pose.pose.orientation.x,
               self.raw_base_pose.pose.orientation.y,
               self.raw_base_pose.pose.orientation.z,
               self.raw_base_pose.pose.orientation.w]
    world_B_pr2 = createBMatrix(pr2_trans, pr2_rot)
    head_trans = [self.raw_head_pose.pose.position.x,
                  self.raw_head_pose.pose.position.y,
                  self.raw_head_pose.pose.position.z]
    head_rot = [self.raw_head_pose.pose.orientation.x,
                self.raw_head_pose.pose.orientation.y,
                self.raw_head_pose.pose.orientation.z,
                self.raw_head_pose.pose.orientation.w]
    world_B_head = createBMatrix(head_trans, head_rot)
    pr2_B_head = world_B_pr2.I * world_B_head
    pos, ori = Bmat_to_pos_quat(pr2_B_head)
    psm = PoseStamped()
    psm.header.frame_id = '/base_link'
    psm.pose.position.x = pos[0]
    psm.pose.position.y = pos[1]
    psm.pose.position.z = pos[2]
    psm.pose.orientation.x = ori[0]
    psm.pose.orientation.y = ori[1]
    psm.pose.orientation.z = ori[2]
    psm.pose.orientation.w = ori[3]
    # Bug fix: rospy Publisher objects are not callable; the original
    # `self.head_pub(psm)` would raise TypeError.  Every sibling callback
    # in this file uses `.publish(...)`.
    self.head_pub.publish(psm)
def head_pose_cb(self, msg):
    # Save a ground-truth head pose, expressed in the Kinect depth optical
    # frame, to disk, then trigger an image save (and optionally video
    # capture).  Saving only happens when the pose equals the previously
    # received one, i.e. two consecutive identical messages.
    pos = [msg.pose.position.x, msg.pose.position.y, msg.pose.position.z]
    ori = [
        msg.pose.orientation.x, msg.pose.orientation.y,
        msg.pose.orientation.z, msg.pose.orientation.w
    ]
    if self.head_pose == [pos, ori]:
        now = rospy.Time.now()
        self.listener.waitForTransform(
            '/torso_lift_link', '/head_mount_kinect_depth_optical_frame',
            now, rospy.Duration(20))
        (trans_d, rot_d) = self.listener.lookupTransform(
            '/torso_lift_link', '/head_mount_kinect_depth_optical_frame',
            now)
        # now = rospy.Time.now()
        # self.listener.waitForTransform('/torso_lift_link', '/head_mount_kinect_rgb_optical_frame', now,
        #                                rospy.Duration(20))
        # (trans_r, rot_r) = self.listener.lookupTransform('/torso_lift_link',
        #                                                  '/head_mount_kinect_rgb_optical_frame', now)
        # camera_B_head = (torso_B_camera)^-1 * torso_B_head: the incoming
        # pose re-expressed in the depth camera frame.
        pos_out_d, ori_out_d = Bmat_to_pos_quat(
            createBMatrix(trans_d, rot_d).I * createBMatrix(pos, ori))
        # pos_out_d, ori_out_d = Bmat_to_pos_quat(createBMatrix(pos, ori))
        record_file_gt_d = open(
            ''.join([
                self.pkg_path, '/data/', 'subj_', str(self.subject_number),
                '_img_', str(self.file_number), '_gt_depth', '.txt'
            ]), 'w')
        record_file_gt_d.write(''.join([
            ' %f %f %f %f %f %f %f \n' %
            (pos_out_d[0], pos_out_d[1], pos_out_d[2], ori_out_d[0],
             ori_out_d[1], ori_out_d[2], ori_out_d[3])
        ]))
        record_file_gt_d.close()
        # pos_out_r, ori_out_r = Bmat_to_pos_quat(createBMatrix(trans_r, rot_r).I*createBMatrix(pos, ori))
        # record_file_gt_r = open(''.join([self.pkg_path, '/data/', 'subj_', str(self.subject_number),
        #                                  '_img_', str(self.file_number), '_gt_rgb', '.txt']), 'w')
        # record_file_gt_r.write(''.join([' %f %f %f %f %f %f %f \n' % (pos_out_r[0], pos_out_r[1], pos_out_r[2],
        #                                                               ori_out_r[0], ori_out_r[1], ori_out_r[2],
        #                                                               ori_out_r[3])]))
        # record_file_gt_r.close()
        print 'Did all the tf things successfully'
        self.img_save()
        print 'Just saved file # ', self.file_number, 'for subject ', self.subject_number
        if self.video:
            print 'Starting to collect video!'
            start_time = time.time()
            # Grab one video frame every 0.5 s.
            # NOTE(review): self.count is never modified inside this loop;
            # presumably vid_save() increments it -- confirm, otherwise the
            # loop never terminates.
            while self.count < 9000:
                # print time.time()-start_time
                if time.time() - start_time >= .5:
                    self.file_number += 1
                    self.vid_save()
                    start_time = time.time()
            print 'Done collecting video data!'
        self.file_number += 1
    self.head_pose = [pos, ori]
def head_pose_cb(self, data):
    """Cache the transform from the PR2 base frame to the tracked head."""
    position = [data.pose.position.x,
                data.pose.position.y,
                data.pose.position.z]
    orientation = [data.pose.orientation.x,
                   data.pose.orientation.y,
                   data.pose.orientation.z,
                   data.pose.orientation.w]
    self.pr2_B_head = createBMatrix(position, orientation)
    if self.model == 'chair':
        # For the chair model, also keep an AR frame that shares x/y with
        # the head but sits at z = 0.
        flattened = [data.pose.position.x, data.pose.position.y, 0.]
        self.pr2_B_ar = createBMatrix(flattened, orientation)
def robot_cb(self, data):
    # Motion-capture callback for the marker on the robot's back.  Builds a
    # ground-plane-aligned robot-center frame from the marker pose,
    # broadcasts /optitrak as a child of /base_link, and publishes the
    # robot-center pose in the /optitrak frame.
    with self.lock:
        trans = [
            data.transform.translation.x, data.transform.translation.y,
            data.transform.translation.z
        ]
        rot = [
            data.transform.rotation.x, data.transform.rotation.y,
            data.transform.rotation.z, data.transform.rotation.w
        ]
        world_B_robot_back = createBMatrix(trans, rot)
        # Fixed offset along the marker x-axis from the back marker to the
        # robot center.
        robot_back_B_robot_center = np.matrix(
            [[1., 0., 0., (.665 / 2 + .02)], [0., 1., 0., 0.],
             [0., 0., 1., 0.], [0., 0., 0., 1.]])
        #world_B_robot = world_B_robot_back  # *robot_back_B_base_link
        # Project the marker's x-axis onto the horizontal plane to build an
        # orientation whose z-axis is exactly vertical.
        z_origin = np.array([0, 0, 1])
        x_robot = np.array([
            world_B_robot_back[0, 0], world_B_robot_back[1, 0],
            world_B_robot_back[2, 0]
        ])
        y_robot_project = np.cross(z_origin, x_robot)
        y_robot_project = y_robot_project / np.linalg.norm(y_robot_project)
        x_robot_project = np.cross(y_robot_project, z_origin)
        x_robot_project = x_robot_project / np.linalg.norm(x_robot_project)
        world_B_robot_back_project = np.eye(4)
        for i in xrange(3):
            world_B_robot_back_project[i, 0] = x_robot_project[i]
            world_B_robot_back_project[i, 1] = y_robot_project[i]
            world_B_robot_back_project[i, 3] = world_B_robot_back[i, 3]
        world_B_robot = np.matrix(
            world_B_robot_back_project) * robot_back_B_robot_center
        # Pin the robot center to the floor plane.
        world_B_robot[2, 3] = 0.
        pos, ori = Bmat_to_pos_quat(world_B_robot.I)
        pos[2] = 0.
        self.tf_broadcaster.sendTransform(
            (pos[0], pos[1], 0.), (ori[0], ori[1], ori[2], ori[3]),
            rospy.Time.now(), '/optitrak', '/base_link')
        # Cache robot_B_world for the other mocap callbacks.
        self.robot_B_world = createBMatrix(pos, ori)
        pos, ori = Bmat_to_pos_quat(world_B_robot)
        psm = PoseStamped()
        psm.header.frame_id = '/optitrak'
        psm.pose.position.x = pos[0]
        psm.pose.position.y = pos[1]
        psm.pose.position.z = pos[2]
        psm.pose.orientation.x = ori[0]
        psm.pose.orientation.y = ori[1]
        psm.pose.orientation.z = ori[2]
        psm.pose.orientation.w = ori[3]
        self.robot_center_pub.publish(psm)
def head_pose_cb(self, data):
    """Update the cached base->head transform from an incoming pose message."""
    p = data.pose.position
    q = data.pose.orientation
    quat = [q.x, q.y, q.z, q.w]
    self.pr2_B_head = createBMatrix([p.x, p.y, p.z], quat)
    # With the chair model, additionally track an AR frame at floor height.
    if self.model == 'chair':
        self.pr2_B_ar = createBMatrix([p.x, p.y, 0.], quat)
def head_cb(self, data):
    # Motion-capture callback for the marker on the back of the head.
    # Applies a fixed marker->head-center offset, broadcasts /head_frame as
    # a child of /optitrak, and publishes the head-center pose in
    # /base_link.
    with self.lock:
        trans = [data.transform.translation.x, data.transform.translation.y, data.transform.translation.z]
        rot = [data.transform.rotation.x, data.transform.rotation.y, data.transform.rotation.z, data.transform.rotation.w]
        world_B_head_back = createBMatrix(trans, rot)
        # Fixed offset from the back-of-head marker to the head center
        # (-0.07 m in marker x, -0.075 m in marker z).
        head_back_B_head_center = np.matrix([[1., 0., 0., -0.07],
                                             [0., 1., 0., 0.],
                                             [0., 0., 1., -0.075],
                                             [0., 0., 0., 1.]])
        pos, ori = Bmat_to_pos_quat(world_B_head_back*head_back_B_head_center)
        self.tf_broadcaster.sendTransform((pos[0], pos[1], pos[2]),
                                          (ori[0], ori[1], ori[2], ori[3]),
                                          rospy.Time.now(), '/head_frame', "/optitrak")
        # robot_B_world is maintained by robot_cb.
        robot_B_head = self.robot_B_world*world_B_head_back*head_back_B_head_center
        pos, ori = Bmat_to_pos_quat(robot_B_head)
        psm = PoseStamped()
        psm.header.frame_id = '/base_link'
        psm.pose.position.x = pos[0]
        psm.pose.position.y = pos[1]
        psm.pose.position.z = pos[2]
        psm.pose.orientation.x = ori[0]
        psm.pose.orientation.y = ori[1]
        psm.pose.orientation.z = ori[2]
        psm.pose.orientation.w = ori[3]
        self.head_center_pub.publish(psm)
def head_cb(self, data):
    """Re-publish the mocap head-back pose as a head-center pose in /base_link."""
    with self.lock:
        t = data.transform.translation
        q = data.transform.rotation
        world_B_head_back = createBMatrix([t.x, t.y, t.z],
                                          [q.x, q.y, q.z, q.w])
        # Fixed offset from the back-of-head marker to the head center.
        head_back_B_head_center = np.matrix([[1., 0., 0., -0.07],
                                             [0., 1., 0., 0.],
                                             [0., 0., 1., -0.075],
                                             [0., 0., 0., 1.]])
        world_B_head = world_B_head_back * head_back_B_head_center
        pos, ori = Bmat_to_pos_quat(world_B_head)
        # Broadcast the head-center frame relative to the optitrak world.
        self.tf_broadcaster.sendTransform((pos[0], pos[1], pos[2]),
                                          (ori[0], ori[1], ori[2], ori[3]),
                                          rospy.Time.now(),
                                          '/head_frame', "/optitrak")
        # Express the head center in the robot base frame and publish it.
        pos, ori = Bmat_to_pos_quat(self.robot_B_world * world_B_head)
        psm = PoseStamped()
        psm.header.frame_id = '/base_link'
        psm.pose.position.x = pos[0]
        psm.pose.position.y = pos[1]
        psm.pose.position.z = pos[2]
        psm.pose.orientation.x = ori[0]
        psm.pose.orientation.y = ori[1]
        psm.pose.orientation.z = ori[2]
        psm.pose.orientation.w = ori[3]
        self.head_center_pub.publish(psm)
def robot_cb(self, data):
    # Motion-capture callback for the marker on the robot's back.  Builds a
    # ground-plane-aligned robot-center frame from the marker pose,
    # broadcasts /optitrak as a child of /base_link, and publishes the
    # robot-center pose in the /optitrak frame.
    with self.lock:
        trans = [data.transform.translation.x, data.transform.translation.y, data.transform.translation.z]
        rot = [data.transform.rotation.x, data.transform.rotation.y, data.transform.rotation.z, data.transform.rotation.w]
        world_B_robot_back = createBMatrix(trans, rot)
        # Fixed offset along the marker x-axis from the back marker to the
        # robot center.
        robot_back_B_robot_center = np.matrix([[1., 0., 0., (.665/2+.02)],
                                               [0., 1., 0., 0.],
                                               [0., 0., 1., 0.],
                                               [0., 0., 0., 1.]])
        #world_B_robot = world_B_robot_back  # *robot_back_B_base_link
        # Project the marker's x-axis onto the horizontal plane to build an
        # orientation whose z-axis is exactly vertical.
        z_origin = np.array([0, 0, 1])
        x_robot = np.array([world_B_robot_back[0, 0], world_B_robot_back[1, 0], world_B_robot_back[2, 0]])
        y_robot_project = np.cross(z_origin, x_robot)
        y_robot_project = y_robot_project/np.linalg.norm(y_robot_project)
        x_robot_project = np.cross(y_robot_project, z_origin)
        x_robot_project = x_robot_project/np.linalg.norm(x_robot_project)
        world_B_robot_back_project = np.eye(4)
        for i in xrange(3):
            world_B_robot_back_project[i, 0] = x_robot_project[i]
            world_B_robot_back_project[i, 1] = y_robot_project[i]
            world_B_robot_back_project[i, 3] = world_B_robot_back[i, 3]
        world_B_robot = np.matrix(world_B_robot_back_project)*robot_back_B_robot_center
        # Pin the robot center to the floor plane.
        world_B_robot[2, 3] = 0.
        pos, ori = Bmat_to_pos_quat(world_B_robot.I)
        pos[2] = 0.
        self.tf_broadcaster.sendTransform((pos[0], pos[1], 0.),
                                          (ori[0], ori[1], ori[2], ori[3]),
                                          rospy.Time.now(), '/optitrak', '/base_link')
        # Cache robot_B_world for the other mocap callbacks.
        self.robot_B_world = createBMatrix(pos, ori)
        pos, ori = Bmat_to_pos_quat(world_B_robot)
        psm = PoseStamped()
        psm.header.frame_id = '/optitrak'
        psm.pose.position.x = pos[0]
        psm.pose.position.y = pos[1]
        psm.pose.position.z = pos[2]
        psm.pose.orientation.x = ori[0]
        psm.pose.orientation.y = ori[1]
        psm.pose.orientation.z = ori[2]
        psm.pose.orientation.w = ori[3]
        self.robot_center_pub.publish(psm)
def reset_goals(self):
    """Recompute self.goals: the recorded goal poses re-expressed in the head frame.

    Returns the resulting numpy array of 4x4 transform matrices.
    """
    liftlink_B_head = createBMatrix([1.249848, -0.013344, 0.1121597],
                                    [0.044735, -0.010481, 0.998626, -0.025188])
    # Recorded goals as [x, y, z, qx, qy, qz, qw] in the lift-link frame.
    liftlink_B_goal = [[1.107086, -0.019988, 0.014680, 0.011758, 0.014403, 0.031744, 0.999323],
                       [1.089931, -0.023529, 0.115044, 0.008146, 0.125716, 0.032856, 0.991489],
                       [1.123504, -0.124174, 0.060517, 0.065528, -0.078776, 0.322874, 0.940879],
                       [1.192543, -0.178014, 0.093005, -0.032482, 0.012642, 0.569130, 0.821509],
                       [1.194537, -0.180350, 0.024144, 0.055151, -0.113447, 0.567382, 0.813736],
                       [1.103003, 0.066879, 0.047237, 0.023224, -0.083593, -0.247144, 0.965087],
                       [1.180539, 0.155222, 0.061160, -0.048171, -0.076155, -0.513218, 0.853515],
                       [1.181696, 0.153536, 0.118200, 0.022272, 0.045203, -0.551630, 0.832565]]
    # head_B_goal = (liftlink_B_head)^-1 * liftlink_B_goal for every goal.
    self.goals = np.array([liftlink_B_head.I * createBMatrix(g[0:3], g[3:])
                           for g in liftlink_B_goal])
    return self.goals
def reference_cb(self, data):
    # Motion-capture callback for the reference marker.  Projects the
    # marker frame onto the ground plane, applies a fixed offset to the
    # reference center, then publishes the result as a PoseStamped, a
    # /reference tf frame, and an ARMarker message.
    with self.lock:
        trans = [data.transform.translation.x, data.transform.translation.y, data.transform.translation.z]
        rot = [data.transform.rotation.x, data.transform.rotation.y, data.transform.rotation.z, data.transform.rotation.w]
        world_B_reference_back = createBMatrix(trans, rot)
        # Fixed offset from the marker to the reference center
        # (+0.06 m in marker x, -0.74 m in marker z).
        reference_back_B_reference_center = np.matrix([[1., 0., 0., 0.06],
                                                       [0., 1., 0., 0.],
                                                       [0., 0., 1., -0.74],
                                                       [0., 0., 0., 1.]])
        # Project the marker's x-axis onto the horizontal plane to build an
        # orientation whose z-axis is exactly vertical.
        z_origin = np.array([0, 0, 1])
        x_ref = np.array([world_B_reference_back[0, 0], world_B_reference_back[1, 0], world_B_reference_back[2, 0]])
        y_ref_project = np.cross(z_origin, x_ref)
        y_ref_project = y_ref_project/np.linalg.norm(y_ref_project)
        x_ref_project = np.cross(y_ref_project, z_origin)
        x_ref_project = x_ref_project/np.linalg.norm(x_ref_project)
        world_B_ref_back_project = np.eye(4)
        for i in xrange(3):
            world_B_ref_back_project[i, 0] = x_ref_project[i]
            world_B_ref_back_project[i, 1] = y_ref_project[i]
            world_B_ref_back_project[i, 3] = world_B_reference_back[i, 3]
        world_B_ref = np.matrix(world_B_ref_back_project)*reference_back_B_reference_center
        # Pin the reference frame to the floor plane.
        world_B_ref[2, 3] = 0.
        # world_B_reference = world_B_reference_back*reference_back_B_reference
        # camera_B_reference = ((self.robot_B_world.I)*self.base_link_B_camera).I*world_B_reference
        pos, ori = Bmat_to_pos_quat(world_B_ref)
        psm = PoseStamped()
        psm.header.frame_id = '/optitrak'
        psm.pose.position.x = pos[0]
        psm.pose.position.y = pos[1]
        psm.pose.position.z = pos[2]
        psm.pose.orientation.x = ori[0]
        psm.pose.orientation.y = ori[1]
        psm.pose.orientation.z = ori[2]
        psm.pose.orientation.w = ori[3]
        self.reference_pub.publish(psm)
        self.tf_broadcaster.sendTransform((pos[0], pos[1], pos[2]),
                                          (ori[0], ori[1], ori[2], ori[3]),
                                          rospy.Time.now(), '/reference', "/optitrak")
        # NOTE(review): the ARMarker is stamped with the forearm camera
        # frame but filled with /optitrak-frame coordinates -- confirm this
        # spoofing is intentional.
        ar_m = ARMarker()
        ar_m.header.frame_id = '/r_forearm_cam_optical_frame'
        ar_m.pose.pose.position.x = pos[0]
        ar_m.pose.pose.position.y = pos[1]
        ar_m.pose.pose.position.z = pos[2]
        ar_m.pose.pose.orientation.x = ori[0]
        ar_m.pose.pose.orientation.y = ori[1]
        ar_m.pose.pose.orientation.z = ori[2]
        ar_m.pose.pose.orientation.w = ori[3]
        self.ar_pub.publish(ar_m)
def reset_goals(self):
    """Rebuild self.goals as goal transforms expressed relative to the head.

    Returns the numpy array of 4x4 matrices that is also stored on self.
    """
    liftlink_B_head = createBMatrix(
        [1.249848, -0.013344, 0.1121597],
        [0.044735, -0.010481, 0.998626, -0.025188])
    head_B_liftlink = liftlink_B_head.I
    # Recorded goals, each [x, y, z, qx, qy, qz, qw] in the lift-link frame.
    recorded = [
        [1.107086, -0.019988, 0.014680, 0.011758, 0.014403, 0.031744, 0.999323],
        [1.089931, -0.023529, 0.115044, 0.008146, 0.125716, 0.032856, 0.991489],
        [1.123504, -0.124174, 0.060517, 0.065528, -0.078776, 0.322874, 0.940879],
        [1.192543, -0.178014, 0.093005, -0.032482, 0.012642, 0.569130, 0.821509],
        [1.194537, -0.180350, 0.024144, 0.055151, -0.113447, 0.567382, 0.813736],
        [1.103003, 0.066879, 0.047237, 0.023224, -0.083593, -0.247144, 0.965087],
        [1.180539, 0.155222, 0.061160, -0.048171, -0.076155, -0.513218, 0.853515],
        [1.181696, 0.153536, 0.118200, 0.022272, 0.045203, -0.551630, 0.832565],
    ]
    converted = []
    for entry in recorded:
        # Re-express each goal in the head frame.
        converted.append(head_B_liftlink * createBMatrix(entry[0:3], entry[3:]))
    self.goals = np.array(converted)
    return self.goals
def bed_pose_cb(self, data):
    """Cache the bed AR-tag pose relative to the PR2 as a 4x4 transform."""
    p = data.pose.position
    q = data.pose.orientation
    self.pr2_B_ar = createBMatrix([p.x, p.y, p.z], [q.x, q.y, q.z, q.w])
def bed_pose_cb(self, data):
    """Store the latest bed AR-tag pose as a homogeneous transform matrix."""
    translation = [data.pose.position.x,
                   data.pose.position.y,
                   data.pose.position.z]
    rotation = [data.pose.orientation.x,
                data.pose.orientation.y,
                data.pose.orientation.z,
                data.pose.orientation.w]
    self.pr2_B_ar = createBMatrix(translation, rotation)
def reference_cb(self, data):
    """Store the reference model's world-frame pose as a 4x4 transform."""
    p = data.pose.position
    q = data.pose.orientation
    self.world_B_ref_model = createBMatrix([p.x, p.y, p.z],
                                           [q.x, q.y, q.z, q.w])
def robot_frame_cb(self, data):
    """Store the robot's world-frame pose as a 4x4 transform."""
    p = data.pose.position
    q = data.pose.orientation
    self.world_B_robot = createBMatrix([p.x, p.y, p.z],
                                       [q.x, q.y, q.z, q.w])
def base_goal_cb(self, data):
    # Compose the base goal pose with the robot pose to get the goal in the
    # world frame.
    goal_trans = [data.pose.position.x, data.pose.position.y, data.pose.position.z]
    goal_rot = [data.pose.orientation.x, data.pose.orientation.y, data.pose.orientation.z, data.pose.orientation.w]
    pr2_B_goal = createBMatrix(goal_trans, goal_rot)
    # NOTE(review): the "PR2 pose" below is read from the *same* message as
    # the goal, so world_B_pr2 is identical to pr2_B_goal here.  It likely
    # should come from a separate robot-pose source -- confirm.
    pr2_trans = [data.pose.position.x, data.pose.position.y, data.pose.position.z]
    pr2_rot = [data.pose.orientation.x, data.pose.orientation.y, data.pose.orientation.z, data.pose.orientation.w]
    world_B_pr2 = createBMatrix(pr2_trans, pr2_rot)
    # NOTE(review): world_B_goal is computed but neither stored on self nor
    # published; this callback looks unfinished.
    world_B_goal = world_B_pr2*pr2_B_goal
def new_head(self, msg):
    """Cache the newest head location as a homogeneous transform."""
    rospy.loginfo("I have got a head location!")
    p = msg.pose.position
    q = msg.pose.orientation
    self.head = createBMatrix([p.x, p.y, p.z], [q.x, q.y, q.z, q.w])
def head_pose_cb(self, msg): pos = [msg.pose.position.x, msg.pose.position.y, msg.pose.position.z] ori = [msg.pose.orientation.x, msg.pose.orientation.y, msg.pose.orientation.z, msg.pose.orientation.w] if self.head_pose == [pos, ori]: now = rospy.Time.now() self.listener.waitForTransform('/base_link', '/torso_lift_link', now, rospy.Duration(20)) (trans, rot) = self.listener.lookupTransform('/base_link', '/torso_lift_link', now) pos_out, ori_out = Bmat_to_pos_quat(createBMatrix(trans, rot)*createBMatrix(pos, ori)) # pos_out, ori_out = pos, ori record_file_gt = open(''.join([self.pkg_path, '/data/goals/', self.task, '/', self.task, '_', self.reference, '_pose_', str(self.file_number), '.txt']), 'w') record_file_gt.write(''.join(['[%f, %f, %f, %f, %f, %f, %f]\n' % (pos_out[0], pos_out[1], pos_out[2], ori_out[0], ori_out[1], ori_out[2], ori_out[3])])) record_file_gt.close() print 'Just saved file # ', self.file_number, 'for task ', self.task, ' using reference ', self.reference self.file_number += 1 self.head_pose = [pos, ori]
def main():
    # Convert logged optitrak /tf data from a shaving-task bag file into
    # plain-text tool and head trajectory logs, one row per sample:
    # "<time> x y z roll pitch yaw".
    rospack = rospkg.RosPack()
    pkg_path = rospack.get_path('hrl_base_selection')
    bag_path = '/home/ari/svn/robot1_data/2011_ijrr_phri/log_data/bag/'
    #bag_name = 'sub1_shaver'
    #bag_name = 'sub3_shaver'
    bag_name = 'sub6_shaver'
    # Static offsets: /adl2 marker -> tool frame -> shaver -> PR2 goal.
    adl2_B_toolframe = createBMatrix([0.234, 0.041, -0.015], [0, 0, 0, 1])
    toolframe_B_shaver = createBMatrix([0.070, 0., 0.], [0, 0, 0, 1])
    shaver_B_pr2goal = createBMatrix([-0.050, 0., 0.], [0, 0, 0, 1])
    adl2_B_pr2goal = adl2_B_toolframe*toolframe_B_shaver*shaver_B_pr2goal
    # Static offset from the /head marker to the head center.
    head_B_headc = createBMatrix([0.21, 0.02, -0.087], [0, 0, 0, 1])
    print 'Starting on the tool log file! \n'
    # First pass: extract the tool (shaver goal) trajectory.
    bag = rosbag.Bag(''.join([bag_path,bag_name,'.bag']), 'r')
    tool = open(''.join([pkg_path,'/data/',bag_name,'_tool.log']), 'w')
    for topic, tf_msg, t in bag.read_messages():
        if topic == "/tf":
            if len(tf_msg.transforms) > 0 and tf_msg.transforms[0].child_frame_id == "/adl2":
                tx = copy.copy(tf_msg.transforms[0].transform.translation.x)
                ty = copy.copy(tf_msg.transforms[0].transform.translation.y)
                tz = copy.copy(tf_msg.transforms[0].transform.translation.z)
                rx = copy.copy(tf_msg.transforms[0].transform.rotation.x)
                ry = copy.copy(tf_msg.transforms[0].transform.rotation.y)
                rz = copy.copy(tf_msg.transforms[0].transform.rotation.z)
                rw = copy.copy(tf_msg.transforms[0].transform.rotation.w)
                world_B_shaver = createBMatrix([tx,ty,tz],[rx,ry,rz,rw])*adl2_B_pr2goal
                # Orientation written as intrinsic x-y-z Euler angles.
                eul = tft.euler_from_matrix(world_B_shaver,'rxyz')
                #print world_B_shaver,'\n'
                #print eul,'\n'
                #print 'time: \n',t
                tool.write(''.join([str(t),' %f %f %f %f %f %f \n' % (world_B_shaver[0,3],world_B_shaver[1,3],world_B_shaver[2,3],eul[0],eul[1],eul[2])]))
                #tool.write(''.join([world_B_shaver[0,3],' ',world_B_shaver[1,3],' ',world_B_shaver[2,3],' ',eul[0],' ',eul[1],' ',eul[2],'\n']))
    bag.close()
    tool.close()
    print 'Starting on the head log file! \n'
    # Second pass: extract the head-center trajectory from the same bag.
    bag = rosbag.Bag(''.join([bag_path,bag_name,'.bag']), 'r')
    head = open(''.join([pkg_path,'/data/',bag_name,'_head.log']), 'w')
    for topic, tf_msg, t in bag.read_messages():
        if topic == "/tf":
            if len(tf_msg.transforms) > 0 and tf_msg.transforms[0].child_frame_id == "/head":
                tx = copy.copy(tf_msg.transforms[0].transform.translation.x)
                ty = copy.copy(tf_msg.transforms[0].transform.translation.y)
                tz = copy.copy(tf_msg.transforms[0].transform.translation.z)
                rx = copy.copy(tf_msg.transforms[0].transform.rotation.x)
                ry = copy.copy(tf_msg.transforms[0].transform.rotation.y)
                rz = copy.copy(tf_msg.transforms[0].transform.rotation.z)
                rw = copy.copy(tf_msg.transforms[0].transform.rotation.w)
                world_B_headc = createBMatrix([tx,ty,tz],[rx,ry,rz,rw])*head_B_headc
                eul = copy.copy(tft.euler_from_matrix(world_B_headc,'rxyz'))
                head.write(''.join([str(t),' %f %f %f %f %f %f \n' % (world_B_headc[0,3],world_B_headc[1,3],world_B_headc[2,3],eul[0],eul[1],eul[2])]))
                #head.write(''.join([t,' ',world_B_head[0,3],' ',world_B_head[1,3],' ',world_B_head[2,3],' ',eul[0],' ',eul[1],' ',eul[2],' ',eul[3],'\n']))
    bag.close()
    head.close()
    print "Saved files!"
def __init__(self):
    # tf_spoofer node setup: cache the base_link -> forearm-camera
    # transform, create latched publishers for the spoofed frames, and
    # subscribe to the raw motion-capture poses.
    self.tf_listener = tf.TransformListener()
    self.tf_broadcaster = tf.TransformBroadcaster()
    self.lock = Lock()  # guards shared state across the mocap callbacks
    self.robot_pose = None
    self.target_pose = None
    self.robot_B_world = np.matrix(np.eye(4))  # updated by robot_cb
    self.world_B_reference = None
    # NOTE(review): 'now' is unused below; the transform lookup uses
    # rospy.Time() (latest available) instead.
    now = rospy.Time.now()
    self.tf_listener.waitForTransform('/base_link', '/r_forearm_cam_optical_frame', rospy.Time(),
                                      rospy.Duration(15.0))
    (trans, rot) = self.tf_listener.lookupTransform('/base_link', '/r_forearm_cam_optical_frame', rospy.Time())
    self.base_link_B_camera = createBMatrix(trans, rot)
    self.robot_center_pub = rospy.Publisher('/robot_frame', PoseStamped, latch=True)
    self.head_center_pub = rospy.Publisher('/head_frame', PoseStamped, latch=True)
    self.reference_pub = rospy.Publisher('/reference', PoseStamped, latch=True)
    self.ar_pub = rospy.Publisher("/l_pr2_ar_pose_marker", ARMarker, latch=True)
    self.robot_sub = rospy.Subscriber('/robot_back/pose', TransformStamped, self.robot_cb)
    self.head_sub = rospy.Subscriber('/head_back/pose', TransformStamped, self.head_cb)
    self.reference_sub = rospy.Subscriber('/reference_back/pose', TransformStamped, self.reference_cb)
    print 'The tf_spoofer has initialized without a problem, as far as I can tell!'
def __init__(self, visualize=False, subject='any_subject', task='yogurt', model='chair', tf_listener=None):
    # Driver that feeds a fixed set of recorded goal poses into DataReader
    # and runs base-selection scoring for the given subject/task/model.
    self.model = model
    self.task = task
    self.subject = subject
    # Static base_link -> lift-link offset (from the variable name --
    # presumably the torso lift link; confirm).
    baselink_B_liftlink = createBMatrix([-0.05, 0.0, 0.8897], [0, 0, 0, 1])
    # Recorded goal poses as [x, y, z, qx, qy, qz, qw].  Per the comments
    # below, the first two are relative to the lift link and the last two
    # relative to the head.
    goals = [[0.301033944729, 0.461276517595, 0.196885866571,
              0.553557277528, 0.336724229346, -0.075691681684, 0.757932650828],
             [0.377839595079, 0.11569018662, 0.0419789999723,
              0.66106069088, 0.337429642677, -0.519856214523, 0.422953367233],
             [0.2741387011303321, 0.005522571699560719, -0.011919598309888757,
              -0.023580897114171894, 0.7483633417869068, 0.662774596931439, 0.011228696415565394],
             [0.13608632401364894, 0.003540318703608347, 0.00607600258150498,
              -0.015224467044577382, 0.7345761465214938, 0.6783020152473445, -0.008513323454022942]]
    # NOTE(review): liftlink_B_goal is computed but never used below.
    liftlink_B_goal = createBMatrix([0.5309877259429142, 0.4976163448816489, 0.16719537682372823],
                                    [0.7765742993649133, -0.37100605554316285, -0.27784851903166524,
                                     0.42671660945891])
    data = np.array([baselink_B_liftlink*createBMatrix(goals[0][0:3], goals[0][3:]),  # In reference to base link
                     baselink_B_liftlink*createBMatrix(goals[1][0:3], goals[1][3:]),  # In reference to base link
                     createBMatrix(goals[2][0:3], goals[2][3:]),
                     createBMatrix(goals[3][0:3], goals[3][3:])])  # This one is in reference to the head
    for item in data:
        print Bmat_to_pos_quat(item)
    # For my information, these are the [xyz] and quaternion [x,y,z,w] for the PoseStamped messages for the goal
    # positions. The first two have parent tf /base_link. The third has parent link /head
    # (array([ 0.48098773,  0.49761634,  0.91837238]), array([ 0.7765743 , -0.37100606, -0.27784852,  0.42671661]))
    # (array([ 0.4598544 ,  0.8806009 ,  0.65371782]), array([ 0.45253993,  0.53399713, -0.17283745,  0.69295158]))
    # (array([ 0.2741387 ,  0.05522572, -0.0119196 ]), array([-0.0235809 ,  0.74836334,  0.6627746 ,  0.0112287 ]))
    num = np.ones([len(data), 1])
    reference_options = ['head', 'base_link']
    # reference[i] indexes into reference_options for goal i
    # (1 = base_link for the first two, 0 = head for the last two).
    reference = np.array([[1], [1], [0], [0]])
    print 'Starting to convert data!'
    runData = DataReader(subject=self.subject, model=self.model, task=self.task)
    runData.receive_input_data(data, num, reference_options, reference)
    runData.generate_output_goals()
    runData.generate_score(viz_rviz=True, visualize=False, plot=False)
def __init__(self):
    # tf_spoofer node setup: cache the base_link -> forearm-camera
    # transform, create latched publishers for the spoofed frames, and
    # subscribe to the raw motion-capture poses.
    self.tf_listener = tf.TransformListener()
    self.tf_broadcaster = tf.TransformBroadcaster()
    self.lock = Lock()  # guards shared state across the mocap callbacks
    self.robot_pose = None
    self.target_pose = None
    self.robot_B_world = np.matrix(np.eye(4))  # updated by robot_cb
    self.world_B_reference = None
    # NOTE(review): 'now' is unused below; the transform lookup uses
    # rospy.Time() (latest available) instead.
    now = rospy.Time.now()
    self.tf_listener.waitForTransform('/base_link', '/r_forearm_cam_optical_frame', rospy.Time(),
                                      rospy.Duration(15.0))
    (trans, rot) = self.tf_listener.lookupTransform(
        '/base_link', '/r_forearm_cam_optical_frame', rospy.Time())
    self.base_link_B_camera = createBMatrix(trans, rot)
    self.robot_center_pub = rospy.Publisher('/robot_frame', PoseStamped, latch=True)
    self.head_center_pub = rospy.Publisher('/head_frame', PoseStamped, latch=True)
    self.reference_pub = rospy.Publisher('/reference', PoseStamped, latch=True)
    self.ar_pub = rospy.Publisher("/l_pr2_ar_pose_marker", ARMarker, latch=True)
    self.robot_sub = rospy.Subscriber('/robot_back/pose', TransformStamped, self.robot_cb)
    self.head_sub = rospy.Subscriber('/head_back/pose', TransformStamped, self.head_cb)
    self.reference_sub = rospy.Subscriber('/reference_back/pose', TransformStamped, self.reference_cb)
    print 'The tf_spoofer has initialized without a problem, as far as I can tell!'
def reference_cb(self, data):
    # Motion-capture callback for the reference marker.  Projects the
    # marker frame onto the ground plane, applies a fixed offset to the
    # reference center, then publishes the result as a PoseStamped, a
    # /reference tf frame, and an ARMarker message.
    with self.lock:
        trans = [
            data.transform.translation.x, data.transform.translation.y,
            data.transform.translation.z
        ]
        rot = [
            data.transform.rotation.x, data.transform.rotation.y,
            data.transform.rotation.z, data.transform.rotation.w
        ]
        world_B_reference_back = createBMatrix(trans, rot)
        # Fixed offset from the marker to the reference center
        # (+0.06 m in marker x, -0.74 m in marker z).
        reference_back_B_reference_center = np.matrix([[1., 0., 0., 0.06],
                                                       [0., 1., 0., 0.],
                                                       [0., 0., 1., -0.74],
                                                       [0., 0., 0., 1.]])
        # Project the marker's x-axis onto the horizontal plane to build an
        # orientation whose z-axis is exactly vertical.
        z_origin = np.array([0, 0, 1])
        x_ref = np.array([
            world_B_reference_back[0, 0], world_B_reference_back[1, 0],
            world_B_reference_back[2, 0]
        ])
        y_ref_project = np.cross(z_origin, x_ref)
        y_ref_project = y_ref_project / np.linalg.norm(y_ref_project)
        x_ref_project = np.cross(y_ref_project, z_origin)
        x_ref_project = x_ref_project / np.linalg.norm(x_ref_project)
        world_B_ref_back_project = np.eye(4)
        for i in xrange(3):
            world_B_ref_back_project[i, 0] = x_ref_project[i]
            world_B_ref_back_project[i, 1] = y_ref_project[i]
            world_B_ref_back_project[i, 3] = world_B_reference_back[i, 3]
        world_B_ref = np.matrix(
            world_B_ref_back_project) * reference_back_B_reference_center
        # Pin the reference frame to the floor plane.
        world_B_ref[2, 3] = 0.
        # world_B_reference = world_B_reference_back*reference_back_B_reference
        # camera_B_reference = ((self.robot_B_world.I)*self.base_link_B_camera).I*world_B_reference
        pos, ori = Bmat_to_pos_quat(world_B_ref)
        psm = PoseStamped()
        psm.header.frame_id = '/optitrak'
        psm.pose.position.x = pos[0]
        psm.pose.position.y = pos[1]
        psm.pose.position.z = pos[2]
        psm.pose.orientation.x = ori[0]
        psm.pose.orientation.y = ori[1]
        psm.pose.orientation.z = ori[2]
        psm.pose.orientation.w = ori[3]
        self.reference_pub.publish(psm)
        self.tf_broadcaster.sendTransform(
            (pos[0], pos[1], pos[2]), (ori[0], ori[1], ori[2], ori[3]),
            rospy.Time.now(), '/reference', "/optitrak")
        # NOTE(review): the ARMarker is stamped with the forearm camera
        # frame but filled with /optitrak-frame coordinates -- confirm this
        # spoofing is intentional.
        ar_m = ARMarker()
        ar_m.header.frame_id = '/r_forearm_cam_optical_frame'
        ar_m.pose.pose.position.x = pos[0]
        ar_m.pose.pose.position.y = pos[1]
        ar_m.pose.pose.position.z = pos[2]
        ar_m.pose.pose.orientation.x = ori[0]
        ar_m.pose.pose.orientation.y = ori[1]
        ar_m.pose.pose.orientation.z = ori[2]
        ar_m.pose.pose.orientation.w = ori[3]
        self.ar_pub.publish(ar_m)
def new_goal(self, psm):
    # Plan and publish an arm trajectory to reach a newly received goal
    # pose: place the wheelchair model, visualize it, sync the OpenRAVE
    # robot's joints, solve IK for the goal, plan a path, and publish the
    # resulting JointTrajectory.
    rospy.loginfo("[%s] I just got a goal location. I will now start reaching!" %rospy.get_name())
    psm.header.stamp = rospy.Time.now()
    #self.goal_pose_pub.publish(psm)
    self.pub_feedback("Reaching toward goal location")
    #return
    # This is to use tf to get head location.
    # Otherwise, there is a subscriber to get a head location.
    #Comment out if there is no tf to use.
    #now = rospy.Time.now() + rospy.Duration(0.5)
    #self.listener.waitForTransform('/base_link', '/head_frame', now, rospy.Duration(10))
    #pos_temp, ori_temp = self.listener.lookupTransform('/base_link', '/head_frame', now)
    #self.head = createBMatrix(pos_temp,ori_temp)
    #self.pr2_B_wc = np.matrix([[ self.head[0,0], self.head[0,1], 0., self.head[0,3]],
    #                           [ self.head[1,0], self.head[1,1], 0., self.head[1,3]],
    #                           [ 0., 0., 1., 0.],
    #                           [ 0., 0., 0., 1]])
    #self.pr2_B_wc =
    # Place the wheelchair model in the environment at the pose implied by
    # the cached head transform.
    wheelchair_location = self.pr2_B_wc * self.corner_B_head.I
    self.wheelchair.SetTransform(np.array(wheelchair_location))
    pos_goal = wheelchair_location[:3,3]
    ori_goal = tr.matrix_to_quaternion(wheelchair_location[0:3,0:3])
    # Publish an RViz mesh marker showing the wheelchair model.
    marker = Marker()
    marker.header.frame_id = "base_link"
    marker.header.stamp = rospy.Time()
    marker.ns = "arm_reacher_wc_model"
    marker.id = 0
    marker.type = Marker.MESH_RESOURCE;
    marker.action = Marker.ADD
    marker.pose.position.x = pos_goal[0]
    marker.pose.position.y = pos_goal[1]
    marker.pose.position.z = pos_goal[2]
    marker.pose.orientation.x = ori_goal[0]
    marker.pose.orientation.y = ori_goal[1]
    marker.pose.orientation.z = ori_goal[2]
    marker.pose.orientation.w = ori_goal[3]
    marker.scale.x = 0.0254
    marker.scale.y = 0.0254
    marker.scale.z = 0.0254
    marker.color.a = 1.0
    marker.color.r = 1.0
    marker.color.g = 0.0
    marker.color.b = 0.0
    #only if using a MESH_RESOURCE marker type:
    marker.mesh_resource = "package://hrl_base_selection/models/ADA_Wheelchair.dae"
    self.vis_pub.publish( marker )
    # Sync the OpenRAVE robot's joint state with the latest joint angles.
    v = self.robot.GetActiveDOFValues()
    for name in self.joint_names:
        v[self.robot.GetJoint(name).GetDOFIndex()] = self.joint_angles[self.joint_names.index(name)]
    self.robot.SetActiveDOFValues(v)
    pos_temp = [psm.pose.position.x, psm.pose.position.y, psm.pose.position.z]
    ori_temp = [psm.pose.orientation.x, psm.pose.orientation.y, psm.pose.orientation.z, psm.pose.orientation.w]
    self.goal = createBMatrix(pos_temp,ori_temp)
    # Fixed offset from the goal frame to the gripper frame (0.1 m along
    # the goal x-axis, with a 90-degree axis change).
    self.goal_B_gripper = np.matrix([[ 0., 0., 1., 0.1],
                                     [ 0., 1., 0., 0.],
                                     [ -1., 0., 0., 0.],
                                     [ 0., 0., 0., 1.]])
    self.pr2_B_goal = self.goal*self.goal_B_gripper
    # Collision-checked IK for the gripper goal pose.
    self.sol = self.manip.FindIKSolution(np.array(self.pr2_B_goal), op.IkFilterOptions.CheckEnvCollisions)
    if self.sol is None:
        self.pub_feedback("Failed to find a good arm configuration for reaching.")
        return None
    rospy.loginfo("[%s] Got an IK solution: %s" % (rospy.get_name(), self.sol))
    self.pub_feedback("Found a good arm configuration for reaching.")
    self.pub_feedback("Looking for path for arm to goal.")
    traj = None
    # NOTE(review): bare 'except:' below swallows every error (including
    # KeyboardInterrupt) as "no path found" -- consider narrowing.
    try:
        #self.res = self.manipprob.MoveToHandPosition(matrices=[np.array(self.pr2_B_goal)],seedik=10)  # call motion planner with goal joint angles
        self.traj=self.manipprob.MoveManipulator(goal=self.sol,outputtrajobj=True)
        self.pub_feedback("Found a path to reach to the goal.")
    except:
        self.traj = None
        self.pub_feedback("Could not find a path to reach to the goal")
        return None
    # Convert the planned OpenRAVE trajectory into a ROS JointTrajectory.
    tmp_traj = tmp_vel = tmp_acc = []  #np.zeros([self.traj.GetNumWaypoints(),7])
    trajectory = JointTrajectory()
    for i in xrange(self.traj.GetNumWaypoints()):
        point = JointTrajectoryPoint()
        temp = []
        for j in xrange(7):
            temp.append(float(self.traj.GetWaypoint(i)[j]))
        point.positions = temp
        #point.positions.append(temp)
        #point.accelerations.append([0.,0.,0.,0.,0.,0.,0.])
        #point.velocities.append([0.,0.,0.,0.,0.,0.,0.])
        trajectory.points.append(point)
        #tmp_traj.append(temp)
        #tmp_traj.append(list(self.traj.GetWaypoint(i)))
        #tmp_vel.append([0.,0.,0.,0.,0.,0.,0.])
        #tmp_acc.append([0.,0.,0.,0.,0.,0.,0.])
        #print 'tmp_traj is: \n', tmp_traj
        #for j in xrange(7):
        #    tmp_traj[i,j] = float(self.traj.GetWaypoint(i)[j])
    #trajectory = JointTrajectory()
    #point = JointTrajectoryPoint()
    #point.positions.append(tmp_traj)
    #point.velocities.append(tmp_vel)
    #point.accelerations.append(tmp_acc)
    #point.velocities=[[0.,0.,0.,0.,0.,0.,0.]]
    #point.accelerations=[[0.,0.,0.,0.,0.,0.,0.]]
    trajectory.joint_names = ['l_upper_arm_roll_joint', 'l_shoulder_pan_joint', 'l_shoulder_lift_joint',
                              'l_forearm_roll_joint', 'l_elbow_flex_joint', 'l_wrist_flex_joint',
                              'l_wrist_roll_joint']
    #trajectory.points.append(point)
    #self.mpc_weights_pub.publish(self.weights)
    self.goal_traj_pub.publish(trajectory)
    self.pub_feedback("Reaching to location")
def __init__(self, visualize=False, subject='any_subject', task='yogurt', model='chair', tf_listener=None):
    """Run a one-shot base-selection scoring pass over a small set of hard-coded goal poses.

    Feeds four recorded goal poses (two w.r.t. /base_link, two w.r.t. the head)
    through a DataReader and generates output goals and a score, visualizing in rviz.

    Parameters:
        visualize   -- unused in this constructor body; kept for API compatibility.
        subject     -- subject label passed through to DataReader.
        task        -- task label passed through to DataReader.
        model       -- environment model ('chair'/'autobed') passed through to DataReader.
        tf_listener -- unused in this constructor body; kept for API compatibility.
    """
    self.model = model
    self.task = task
    self.subject = subject
    # Fixed transform from /base_link to /torso_lift_link used to re-express the
    # first two goals (which were recorded relative to the lift link).
    baselink_B_liftlink = createBMatrix([-0.05, 0.0, 0.8897], [0, 0, 0, 1])
    # Recorded goal poses: [x, y, z, qx, qy, qz, qw].
    goals = [[0.301033944729, 0.461276517595, 0.196885866571,
              0.553557277528, 0.336724229346, -0.075691681684, 0.757932650828],
             [0.377839595079, 0.11569018662, 0.0419789999723,
              0.66106069088, 0.337429642677, -0.519856214523, 0.422953367233],
             [0.2741387011303321, 0.005522571699560719, -0.011919598309888757,
              -0.023580897114171894, 0.7483633417869068, 0.662774596931439, 0.011228696415565394],
             [0.13608632401364894, 0.003540318703608347, 0.00607600258150498,
              -0.015224467044577382, 0.7345761465214938, 0.6783020152473445, -0.008513323454022942]]
    # NOTE(review): liftlink_B_goal is computed but never used below — confirm it is dead code.
    liftlink_B_goal = createBMatrix([0.5309877259429142, 0.4976163448816489, 0.16719537682372823],
                                    [0.7765742993649133, -0.37100605554316285, -0.27784851903166524,
                                     0.42671660945891])
    data = np.array([
        baselink_B_liftlink * createBMatrix(goals[0][0:3], goals[0][3:]),  # In reference to base link
        baselink_B_liftlink * createBMatrix(goals[1][0:3], goals[1][3:]),  # In reference to base link
        createBMatrix(goals[2][0:3], goals[2][3:]),
        createBMatrix(goals[3][0:3], goals[3][3:])])  # This one is in reference to the head
    for item in data:
        print Bmat_to_pos_quat(item)
    # For my information, these are the [xyz] and quaternion [x,y,z,w] for the PoseStamped messages for the goal
    # positions. The first two have parent tf /base_link. The third has parent link /head
    # (array([ 0.48098773, 0.49761634, 0.91837238]), array([ 0.7765743 , -0.37100606, -0.27784852, 0.42671661]))
    # (array([ 0.4598544 , 0.8806009 , 0.65371782]), array([ 0.45253993, 0.53399713, -0.17283745, 0.69295158]))
    # (array([ 0.2741387 , 0.05522572, -0.0119196 ]), array([-0.0235809 , 0.74836334, 0.6627746 , 0.0112287 ]))
    num = np.ones([len(data), 1])
    reference_options = ['head', 'base_link']
    # reference[i] indexes into reference_options: 1 = base_link, 0 = head.
    reference = np.array([[1], [1], [0], [0]])
    print 'Starting to convert data!'
    runData = DataReader(subject=self.subject, model=self.model, task=self.task)
    runData.receive_input_data(data, num, reference_options, reference)
    runData.generate_output_goals()
    runData.generate_score(viz_rviz=True, visualize=False, plot=False)
def handle_select_base(self, req):
    """Service handler: pick a good PR2 base configuration for the requested task.

    Determines the head (and, for autobed/chair, the model/AR-tag) pose according
    to self.mode ('normal' via TF, 'demo' via motion capture, 'sim' via published
    topics), projects the head pose onto the ground plane, loads the precomputed
    reachability scores for (model, task), re-weights them by distance/rotation
    from the current base pose, and returns self.handle_returning_base_goals().

    Parameters:
        req -- service request carrying req.model and req.task.
    Returns:
        handle_returning_base_goals() output on success, or (None, None) on
        TF failure, AR tag too far away, missing score data, or no score > 0.
    """
    service_initial_time = time.time()
    start_time = time.time()
    print 'The initial configuration selection service has been called!'
    model = req.model
    task = req.task
    self.task = task
    # Check if we have previously loaded this task/model (assuming the data file exists).
    if self.model != model:
        print 'The model in the service request differs from what was given to the service on initialization. As' \
              'a result, data for a user in that location (autobed/chair) has not been loaded!'
    if self.load != 'all':
        if self.load != task:
            print 'The task asked of the service request differs from what was given to the service on ' \
                  'initialization. As a result, data for that task has not been loaded!'
    # Subscribe to the autobed state if we are using autobed
    if model == 'autobed':
        self.autobed_sub = rospy.Subscriber('/abdout0', FloatArrayBare, self.bed_state_cb)
    # In normal mode, gets locations of things from tf. Intended to use head registration for head pose, AR tag
    # detection, and servoing.
    if self.task == 'shaving_test':
        # NOTE(review): 'test' mode skips all pose acquisition below; this relies on
        # self.pr2_B_head (and pr2_B_ar/model_B_ar) having been set earlier — confirm.
        self.mode = 'test'
    elif self.mode == 'normal':
        try:
            now = rospy.Time.now()
            self.listener.waitForTransform('/base_link', '/head_frame', now, rospy.Duration(15))
            (trans, rot) = self.listener.lookupTransform('/base_link', '/head_frame', now)
            self.pr2_B_head = createBMatrix(trans, rot)
            if model == 'chair':
                now = rospy.Time.now()
                self.listener.waitForTransform('/base_link', '/ar_marker', now, rospy.Duration(15))
                (trans, rot) = self.listener.lookupTransform('/base_link', '/ar_marker', now)
                self.pr2_B_ar = createBMatrix(trans, rot)
            elif model == 'autobed':
                now = rospy.Time.now()
                self.listener.waitForTransform('/base_link', '/ar_marker', now, rospy.Duration(15))
                (trans, rot) = self.listener.lookupTransform('/base_link', '/ar_marker', now)
                self.pr2_B_ar = createBMatrix(trans, rot)
                # Here I do some manual conversion to covert between the coordinate frame of the bed, which should
                # be located in the center of the headboard of the bed on the floor, to the AR tag's coordinate
                # frame. To make the manual transformation calculation easier, I split it into 3 homogeneous
                # transforms, one translation, one rotation about Z, one rotation about x. This should be adjusted
                # depending on the actual location of the AR tag.
                ar_trans_B = np.eye(4)
                # -.445 if right side of body. .445 if left side.
                # This is the translational transform from bed origin to the ar tag tf.
                # ar_trans_B[0:3,3] = np.array([0.625, -.445, .275+(self.bed_state_z-9)/100])
                ar_trans_B[0:3,3] = np.array([-.04, 0., .74])
                ar_rotz_B = np.eye(4)
                # If left side of body should be np.array([[-1,0],[0,-1]])
                # If right side of body should be np.array([[1,0],[0,1]])
                # ar_rotz_B [0:2,0:2] = np.array([[-1, 0],[0, -1]])  # ar_rotz_B
                ar_rotx_B = np.eye(4)
                # ar_rotx_B[1:3,1:3] = np.array([[0,1],[-1,0]])
                # With the rotation parts commented out this reduces to a pure translation.
                self.model_B_ar = np.matrix(ar_trans_B)*np.matrix(ar_rotz_B)*np.matrix(ar_rotx_B)
                # now = rospy.Time.now()
                # self.listener.waitForTransform('/ar_marker', '/bed_frame', now, rospy.Duration(3))
                # (trans, rot) = self.listener.lookupTransform('/ar_marker', '/bed_frame', now)
                # self.ar_B_model = createBMatrix(trans, rot)
            # Probably for the best to not try to do things from too far away. Also, if the AR tag is more than 4m
            # away, it is likely that an error is occurring with its detection.
            if np.linalg.norm(trans) > 4:
                rospy.loginfo('AR tag is too far away. Use the \'Testing\' button to move PR2 to 1 meter from AR '
                              'tag. Or just move it closer via other means. Alternatively, the PR2 may have lost '
                              'sight of the AR tag or it is having silly issues recognizing it. ')
                return None, None
        except Exception as e:
            rospy.loginfo("TF Exception. Could not get the AR_tag location, bed location, or "
                          "head location:\r\n%s" % e)
            return None, None
    # Demo mode is to use motion capture to get locations. In this case, some things simplify out.
    elif self.mode == 'demo':
        try:
            now = rospy.Time.now()
            self.listener.waitForTransform('/base_link', '/head_frame', now, rospy.Duration(15))
            (trans, rot) = self.listener.lookupTransform('/base_link', '/head_frame', now)
            self.pr2_B_head = createBMatrix(trans, rot)
            if model == 'chair':
                now = rospy.Time.now()
                self.listener.waitForTransform('/base_link', '/ar_marker', now, rospy.Duration(15))
                (trans, rot) = self.listener.lookupTransform('/base_link', '/ar_marker', now)
                self.pr2_B_ar = createBMatrix(trans, rot)
            elif model == 'autobed':
                now = rospy.Time.now()
                self.listener.waitForTransform('/base_link', '/reference', now, rospy.Duration(15))
                (trans, rot) = self.listener.lookupTransform('/base_link', '/reference', now)
                self.pr2_B_ar = createBMatrix(trans, rot)
                # NOTE(review): ar_trans_B_model is assigned but unused — its only use is commented out.
                ar_trans_B_model = np.eye(4)
                # -.445 if right side of body. .445 if left side.
                # This is the translational transform from reference markers to the bed origin.
                # ar_trans_B[0:3,3] = np.array([0.625, -.445, .275+(self.bed_state_z-9)/100])
                # ar_trans_B_model[0:3,3] = np.array([.06, 0., -.74])
                # ar_rotz_B = np.eye(4)
                # If left side of body should be np.array([[-1,0],[0,-1]])
                # If right side of body should be np.array([[1,0],[0,1]])
                # ar_rotz_B [0:2,0:2] = np.array([[-1, 0],[0, -1]])  # ar_rotz_B
                # ar_rotx_B = np.eye(4)
                # ar_rotx_B[1:3,1:3] = np.array([[0,1],[-1,0]])
                # self.model_B_ar = np.matrix(ar_trans_B_model).I  # *np.matrix(ar_rotz_B)*np.matrix(ar_rotx_B)
                # In demo mode the mocap reference frame is taken to coincide with the model frame.
                self.model_B_ar = np.matrix(np.eye(4))
                # now = rospy.Time.now()
                # self.listener.waitForTransform('/ar_marker', '/bed_frame', now, rospy.Duration(3))
                # (trans, rot) = self.listener.lookupTransform('/ar_marker', '/bed_frame', now)
                # self.ar_B_model = createBMatrix(trans, rot)
            if np.linalg.norm(trans) > 4:
                rospy.loginfo('AR tag is too far away. Use the \'Testing\' button to move PR2 to 1 meter from AR '
                              'tag. Or just move it closer via other means. Alternatively, the PR2 may have lost '
                              'sight of the AR tag or it is having silly issues recognizing it. ')
                return None, None
        except Exception as e:
            rospy.loginfo("TF Exception. Could not get the AR_tag location, bed location, or "
                          "head location:\r\n%s" % e)
            return None, None
    # In sim mode, we expect that the head and bed pose (if using autobed) are being published.
    elif self.mode == 'sim':
        # NOTE(review): this only subscribes; self.pr2_B_head used below must already have been
        # populated by head_pose_cb (or earlier) for the projection to work — confirm.
        self.head_pose_sub = rospy.Subscriber('/haptic_mpc/head_pose', PoseStamped, self.head_pose_cb)
        if self.model == 'autobed':
            self.bed_pose_sub = rospy.Subscriber('/haptic_mpc/bed_pose', PoseStamped, self.bed_pose_cb)
        self.model_B_ar = np.eye(4)
    # print 'The homogeneous transform from PR2 base link to head: \n', self.pr2_B_head
    # I now project the head pose onto the ground plane to mitigate potential problems with poorly registered head
    # pose.
    z_origin = np.array([0, 0, 1])
    x_head = np.array([self.pr2_B_head[0, 0], self.pr2_B_head[1, 0], self.pr2_B_head[2, 0]])
    y_head_project = np.cross(z_origin, x_head)
    y_head_project = y_head_project/np.linalg.norm(y_head_project)
    x_head_project = np.cross(y_head_project, z_origin)
    x_head_project = x_head_project/np.linalg.norm(x_head_project)
    # Rebuild a rotation whose x/y axes lie in the ground plane, keeping the head translation.
    self.pr2_B_head_project = np.eye(4)
    for i in xrange(3):
        self.pr2_B_head_project[i, 0] = x_head_project[i]
        self.pr2_B_head_project[i, 1] = y_head_project[i]
        self.pr2_B_head_project[i, 3] = self.pr2_B_head[i, 3]
    self.pr2_B_headfloor = copy.copy(np.matrix(self.pr2_B_head_project))
    self.pr2_B_headfloor[2, 3] = 0.
    # print 'The homogeneous transform from PR2 base link to the head location projected onto the ground plane: \n', \
    #     self.pr2_B_headfloor
    headx = 0
    heady = 0
    # Sets the location of the robot with respect to the person based using a few homogeneous transforms.
    if model == 'chair':
        self.origin_B_pr2 = copy.copy(self.pr2_B_headfloor.I)
    # Regular bed is now deprecated. Do not use this unless you fix it first.
    elif model == 'bed':
        an = -m.pi/2
        self.headfloor_B_head = np.matrix([[ m.cos(an), 0., m.sin(an), 0.],  #.45 #.438
                                           [ 0., 1., 0., 0.],  #0.34 #.42
                                           [ -m.sin(an), 0., m.cos(an), 1.1546],
                                           [ 0., 0., 0., 1.]])
        an2 = 0
        # NOTE(review): originsubject_B_headfloor is computed but unused below — likely leftover.
        originsubject_B_headfloor = np.matrix([[ m.cos(an2), 0., m.sin(an2), .2954],  #.45 #.438
                                               [ 0., 1., 0., 0.],  #0.34 #.42
                                               [-m.sin(an2), 0., m.cos(an2), 0.],
                                               [ 0., 0., 0., 1.]])
        self.origin_B_pr2 = self.headfloor_B_head * self.pr2_B_head.I
    # Slightly more complicated for autobed because the person can move around on the bed.
    elif model == 'autobed':
        self.model_B_pr2 = self.model_B_ar * self.pr2_B_ar.I
        self.origin_B_pr2 = copy.copy(self.model_B_pr2)
        model_B_head = self.model_B_pr2 * self.pr2_B_headfloor
        ## This next bit selects what entry in the dictionary of scores to use based on the location of the head
        # with respect to the bed model. Currently it just selects the dictionary entry with the closest relative
        # head location. Ultimately it should use a gaussian to get scores from the dictionary based on the actual
        # head location.
        # NOTE(review): every x branch assigns headx = 0. and the thresholds mix .75 and .075 —
        # this looks like leftover/buggy binning (compare the y binning below); confirm intent.
        if model_B_head[0, 3] > -.025 and model_B_head[0, 3] < .025:
            headx = 0.
        elif model_B_head[0, 3] >= .025 and model_B_head[0, 3] < .75:
            headx = 0.
        elif model_B_head[0, 3] <= -.025 and model_B_head[0, 3] > -.75:
            headx = 0.
        elif model_B_head[0, 3] >= .075:
            headx = 0.
        elif model_B_head[0, 3] <= -.075:
            headx = 0.
        # Quantize the lateral head position into {-.1, -.05, 0, .05, .1} bins.
        if model_B_head[1, 3] > -.025 and model_B_head[1, 3] < .025:
            heady = 0
        elif model_B_head[1, 3] >= .025 and model_B_head[1, 3] < .075:
            heady = .05
        elif model_B_head[1, 3] > -.075 and model_B_head[1, 3] <= -.025:
            heady = -.05
        elif model_B_head[1, 3] >= .075:
            heady = .1
        elif model_B_head[1, 3] <= -.075:
            heady = -.1
    # subject_location is the transform from the robot to the origin of the model that was used in creating the
    # databases for base selection
    subject_location = self.origin_B_pr2.I
    print 'Now finished getting poses of the head, etc. Proceeding!'
    print 'Time to receive locations and start things off: %fs' % (time.time()-start_time)
    # Now check that we have the data loaded for the desired task. Load the data if it has not yet been loaded.
    start_time = time.time()
    if self.scores_dict[model, task] is None:
        print 'For whatever reason, the data for this task was not previously loaded. I will now try to load it.'
        all_scores = self.load_task(task, model)
        if all_scores is None:
            print 'Failed to load precomputed reachability data. That is a problem. Abort!'
            return None, None
    else:
        all_scores = self.scores_dict[model, task]
    scores = all_scores[headx, heady]
    ## Set the weights for the different scores.
    alpha = 0.0001  # Weight on base's closeness to goal
    beta = 1.  # Weight on number of reachable goals
    gamma = 1.  # Weight on manipulability of arm at each reachable goal
    zeta = .0007  # Weight on distance to move to get to that goal location
    # For the optimization, we currently only care about x-y distance moved by the base and rotation of the base
    # This should be updated to have a normalized range of values and to saturate at some distance. If the robot
    # if 3m away, it probably doesn't matter if it has to move 0.5m more or less.
    pr2_loc = np.array([self.origin_B_pr2[0, 3], self.origin_B_pr2[1, 3]])
    length = len(scores)
    temp_scores = np.zeros([length, 1])
    temp_locations = scores[:, 0]
    # print 'Original number of scores: ', length
    print 'Time to prepare data for processing: %fs' % (time.time()-start_time)
    start_time = time.time()
    for j in xrange(length):
        dist_score = 0
        # Accumulate translation + rotation cost over each candidate base placement in entry j.
        for i in xrange(len(scores[j, 0][0])):
            current_x = np.array([self.origin_B_pr2[0, 0], self.origin_B_pr2[1, 0], self.origin_B_pr2[2, 0]])
            des_x = np.array([m.cos(scores[j, 0][2][i]), m.sin(scores[j, 0][2][i]), 0])
            angle_change = m.acos(np.dot(current_x, des_x)/(np.linalg.norm(current_x)*np.linalg.norm(des_x)))
            dist_score += np.linalg.norm([pr2_loc[0]-scores[j, 0][0][i], pr2_loc[1]-scores[j, 0][1][i]])
            dist_score += angle_change
            # I should eventually use the following line instead, because it normalizes to 0-1 range. This way the
            # weights can be compared better.
            # dist_score += angle_change/(2*m.pi)
        # This adds to dist score a cost for moving the robot in the z axis. Commented out currently.
        # dist_score += np.max([t for t in ((np.linalg.norm(self.robot_z - scores[j, 0][3][i]))
        #                                   for i in xrange(len(scores[j, 0][0]))
        #                                   )
        #                       ])
        # Summing the scores
        thisScore = -alpha*scores[j, 1][0]+beta*scores[j, 1][1]+gamma*scores[j, 1][2]-zeta*dist_score
        # Don't want scores less than 0.
        if thisScore < 0:
            thisScore = 0
        temp_scores[j, 0] = copy.copy(thisScore)
    out_score = []
    for i in xrange(length):
        out_score.append([temp_locations[i], temp_scores[i]])
    out_score = np.array(out_score)
    print 'Final version of scores is: \n', out_score[0]
    # Sort candidate configurations best-first by the re-weighted score.
    self.score_sheet = np.array(sorted(out_score, key=lambda t:t[1], reverse=True))
    self.score_length = len(self.score_sheet)
    print 'Best score and configuration is: \n', self.score_sheet[0]
    print 'Number of scores in score sheet: ', self.score_length
    print 'I have finished preparing the data for the task!'
    print 'Time to perform optimization: %fs' % (time.time()-start_time)
    if self.score_sheet[0, 1] == 0:
        print 'There are no base locations with a score greater than 0. There are no good base locations!!'
        return None, None
    # Published the wheelchair location to create a marker in rviz for visualization to compare where the service believes the wheelchair is to
    # where the person is (seen via kinect).
    pos_goal, ori_goal = Bmat_to_pos_quat(subject_location)
    self.publish_subject_marker(pos_goal, ori_goal)
    # Visualize plot is a function to return a 2-D plot showing the best scores for each robot X-Y base location
    # after the updates to the score from above. Currently deprecated, so don't use it.
    visualize_plot = False
    if visualize_plot:
        self.plot_final_score_sheet()
    # This is the end of this function. The return calls a function that creates an output that is appropriate for
    # whatever the system calling base selection wants.
    print 'Time for service to run start to finish: %fs' % (time.time()-service_initial_time)
    return self.handle_returning_base_goals()
def reset_goals(self):
    """Populate the hard-coded goal poses for the current self.task.

    Each task branch defines a recorded reference pose (liftlink frame) and a list
    of recorded goal poses [x, y, z, qx, qy, qz, qw]; goals are re-expressed in the
    reference frame via liftlink_B_reference.I * liftlink_B_goal.

    Side effects: sets self.goals (np.array of 4x4 matrices), self.num,
    self.reference_options, self.reference.
    Returns:
        (self.goals, self.num, self.reference, self.reference_options)
    """
    self.goals = []
    if self.task == 'shaving':
        liftlink_B_reference = createBMatrix([1.249848, -0.013344, 0.1121597],
                                             [0.044735, -0.010481, 0.998626, -0.025188])
        liftlink_B_goal = [[1.107086, -0.019988, 0.014680, 0.011758, 0.014403, 0.031744, 0.999323],
                           [1.089931, -0.023529, 0.115044, 0.008146, 0.125716, 0.032856, 0.991489],
                           [1.123504, -0.124174, 0.060517, 0.065528, -0.078776, 0.322874, 0.940879],
                           [1.192543, -0.178014, 0.093005, -0.032482, 0.012642, 0.569130, 0.821509],
                           [1.194537, -0.180350, 0.024144, 0.055151, -0.113447, 0.567382, 0.813736],
                           [1.103003, 0.066879, 0.047237, 0.023224, -0.083593, -0.247144, 0.965087],
                           [1.180539, 0.155222, 0.061160, -0.048171, -0.076155, -0.513218, 0.853515],
                           [1.181696, 0.153536, 0.118200, 0.022272, 0.045203, -0.551630, 0.832565]]
        for i in xrange(len(liftlink_B_goal)):
            self.goals.append(liftlink_B_reference.I * createBMatrix(liftlink_B_goal[i][0:3],
                                                                     liftlink_B_goal[i][3:]))  # all in reference to head
        self.num = np.ones([len(self.goals), 1])
        self.reference_options = ['head']
        self.reference = np.zeros([len(self.goals), 1])
    elif self.task == 'face_wiping':
        liftlink_B_reference = createBMatrix([1.249848, -0.013344, 0.1121597],
                                             [0.044735, -0.010481, 0.998626, -0.025188])
        liftlink_B_goal = [[1.107086, -0.019988, 0.014680, 0.011758, 0.014403, 0.031744, 0.999323],
                           [1.089931, -0.023529, 0.115044, 0.008146, 0.125716, 0.032856, 0.991489],
                           [1.123504, -0.124174, 0.060517, 0.065528, -0.078776, 0.322874, 0.940879],
                           #[1.192543, -0.178014, 0.093005, -0.032482, 0.012642, 0.569130, 0.821509]]
                           #[1.194537, -0.180350, 0.024144, 0.055151, -0.113447, 0.567382, 0.813736],
                           [1.103003, 0.066879, 0.047237, 0.023224, -0.083593, -0.247144, 0.965087]]
                           #[1.180539, 0.155222, 0.061160, -0.048171, -0.076155, -0.513218, 0.853515]]
                           #[1.181696, 0.153536, 0.118200, 0.022272, 0.045203, -0.551630, 0.832565]]
        for i in xrange(len(liftlink_B_goal)):
            self.goals.append(liftlink_B_reference.I * createBMatrix(liftlink_B_goal[i][0:3],
                                                                     liftlink_B_goal[i][3:]))  # all in reference to head
        # Overwrite goal 2's position with goal 3's position mirrored across the x-z plane.
        # NOTE(review): left_side aliases self.goals[2] (same matrix object), so the
        # mutation is in place; the reassignment below is redundant but harmless.
        left_side = self.goals[2]
        right_side = self.goals[3]
        #left_side[0:3,0:3] = -1*right_side[0:3,0:3]
        left_side[0, 3] = right_side[0, 3]
        left_side[1, 3] = -right_side[1, 3]
        left_side[2, 3] = right_side[2, 3]
        self.goals[2] = left_side
        self.num = np.ones([len(self.goals), 1])
        self.reference_options = ['head']
        self.reference = np.zeros([len(self.goals), 1])
    elif self.task == 'feeding':
        liftlink_B_reference = createBMatrix([0.902000, 0.000000, 1.232000],
                                             [0.000000, 0.000000, 0.000000, 1.000000])
        liftlink_B_goal = [[1.070794, -0.000, 1.183998, -0.018872, 0.033197, 0.999248, 0.006737],
                           [1.070794, 0.02, 1.183998, -0.018872, 0.033197, 0.999248, 0.006737],
                           [1.070794, -0.02, 1.183998, -0.018872, 0.033197, 0.999248, 0.006737],
                           [1.154571, -0.000, 1.175490, -0.018872, 0.033197, 0.999248, 0.006737],
                           [1.058091, -0.00, 1.213801, -0.251047, 0.033853, 0.967382, -0.001178],
                           [1.226054, -0.00, 1.120987, 0.005207, 0.032937, 0.999380, -0.011313],
                           [1.250737, -0.00, 1.062824, -0.011666, 0.032741, 0.999325, -0.011866]]
        for i in xrange(len(liftlink_B_goal)):
            self.goals.append(liftlink_B_reference.I * createBMatrix(liftlink_B_goal[i][0:3],
                                                                     liftlink_B_goal[i][3:]))  # all in reference to head
        self.num = np.ones([len(self.goals), 1])
        self.reference_options = ['head']
        self.reference = np.zeros([len(self.goals), 1])
    elif self.task == 'feeding_quick':
        # Reduced goal set for faster feeding runs.
        liftlink_B_reference = createBMatrix([0.902000, 0.000000, 1.232000],
                                             [0.000000, 0.000000, 0.000000, 1.000000])
        liftlink_B_goal = [[1.070794, -0.000, 1.183998, -0.018872, 0.033197, 0.999248, 0.006737],
                           [1.154571, -0.000, 1.175490, -0.018872, 0.033197, 0.999248, 0.006737],
                           # [1.058091, -0.00, 1.213801, -0.251047, 0.033853, 0.967382, -0.001178],
                           [1.226054, -0.00, 1.120987, 0.005207, 0.032937, 0.999380, -0.011313]]
        for i in xrange(len(liftlink_B_goal)):
            self.goals.append(liftlink_B_reference.I * createBMatrix(liftlink_B_goal[i][0:3],
                                                                     liftlink_B_goal[i][3:]))  # all in reference to head
        self.num = np.ones([len(self.goals), 1])
        self.reference_options = ['head']
        self.reference = np.zeros([len(self.goals), 1])
    elif self.task == 'brushing':
        liftlink_B_reference = createBMatrix([0.902000, 0.000000, 1.232000],
                                             [0.000000, 0.000000, 0.000000, 1.000000])
        liftlink_B_goal = [[1.000937, 0.162396, 1.348265, -0.167223, 0.150695, -0.676151, 0.701532],
                           [0.928537, 0.128237, 1.374088, -0.021005, 0.029874, -0.692838, 0.720168],
                           [0.813246, 0.123432, 1.352483, 0.120200, -0.156845, -0.685395, 0.700847],
                           [0.771687, 0.118168, 1.272472, 0.325030, -0.375306, -0.606457, 0.621056],
                           [0.946550, 0.169899, 1.250764, -0.272857, -0.265660, -0.659053, 0.648554],
                           [0.853001, 0.171546, 1.251115, -0.272857, -0.265660, -0.659053, 0.648554],
                           [0.948000, -0.045943, 1.375369, 0.229507, 0.227993, -0.675373, 0.662735],
                           [0.861745, -0.042059, 1.372639, 0.280951, 0.174134, -0.691221, 0.642617]]
        for i in xrange(len(liftlink_B_goal)):
            self.goals.append(liftlink_B_reference.I * createBMatrix(liftlink_B_goal[i][0:3],
                                                                     liftlink_B_goal[i][3:]))  # all in reference to head
        self.num = np.ones([len(self.goals), 1])
        self.reference_options = ['head']
        self.reference = np.zeros([len(self.goals), 1])
    elif self.task == 'scratching_upper_arm_left':
        liftlink_B_reference = createBMatrix([0.521625, 0.175031, 0.596279],
                                             [0.707105, 0.006780, -0.006691, 0.707044])
        liftlink_B_goal = [[0.554345, 0.233102, 0.693507, -0.524865, 0.025934, 0.850781, -0.003895],
                           [0.661972, 0.231151, 0.699075, -0.630902, 0.025209, 0.775420, -0.007229]]
        for i in xrange(len(liftlink_B_goal)):
            self.goals.append(liftlink_B_reference.I * createBMatrix(liftlink_B_goal[i][0:3],
                                                                     liftlink_B_goal[i][3:]))  # all in reference to head
        self.num = np.ones([len(self.goals), 1])
        self.reference_options = ['upper_arm_left']
        self.reference = np.zeros([len(self.goals), 1])
    elif self.task == 'scratching_upper_arm_right':
        liftlink_B_reference = createBMatrix([0.521668, -0.174969, 0.596249],
                                             [0.706852, 0.020076, -0.019986, 0.706794])
        liftlink_B_goal = [[0.595982, -0.218764, 0.702598, -0.582908, 0.005202, 0.812505, -0.005172],
                           [0.673797, -0.218444, 0.712133, -0.665426, 0.004626, 0.746428, -0.005692]]
        for i in xrange(len(liftlink_B_goal)):
            self.goals.append(liftlink_B_reference.I * createBMatrix(liftlink_B_goal[i][0:3],
                                                                     liftlink_B_goal[i][3:]))  # all in reference to head
        self.num = np.ones([len(self.goals), 1])
        self.reference_options = ['upper_arm_right']
        self.reference = np.zeros([len(self.goals), 1])
    elif self.task == 'scratching_forearm_left':
        liftlink_B_reference = createBMatrix([0.803358, 0.225067, 0.579914],
                                             [0.707054, -0.010898, 0.010985, 0.706991])
        liftlink_B_goal = [[0.884083, 0.234311, 0.708599, -0.599114, 0.005096, 0.800630, -0.005276],
                           [1.005796, 0.234676, 0.714125, -0.599114, 0.005096, 0.800630, -0.005276]]
        for i in xrange(len(liftlink_B_goal)):
            self.goals.append(liftlink_B_reference.I * createBMatrix(liftlink_B_goal[i][0:3],
                                                                     liftlink_B_goal[i][3:]))  # all in reference to head
        self.num = np.ones([len(self.goals), 1])
        self.reference_options = ['forearm_left']
        self.reference = np.zeros([len(self.goals), 1])
    elif self.task == 'scratching_forearm_right':
        liftlink_B_reference = createBMatrix([0.803222, -0.224933, 0.580274],
                                             [0.707054, -0.010898, 0.010985, 0.706991])
        liftlink_B_goal = [[0.905275, -0.223041, 0.714310, -0.599114, 0.005096, 0.800630, -0.005276],
                           [1.000633, -0.223224, 0.686273, -0.599114, 0.005096, 0.800630, -0.005276]]
        for i in xrange(len(liftlink_B_goal)):
            self.goals.append(liftlink_B_reference.I * createBMatrix(liftlink_B_goal[i][0:3],
                                                                     liftlink_B_goal[i][3:]))  # all in reference to head
        self.num = np.ones([len(self.goals), 1])
        self.reference_options = ['forearm_right']
        self.reference = np.zeros([len(self.goals), 1])
    elif self.task == 'scratching_thigh_left':
        liftlink_B_reference = createBMatrix([0.977372, 0.086085, 0.624297],
                                             [0.702011, 0.084996, -0.084900, 0.701960])
        liftlink_B_goal = [[1.139935, 0.087965, 0.748606, -0.599114, 0.005096, 0.800630, -0.005276],
                           [1.257200, 0.088435, 0.762122, -0.599114, 0.005096, 0.800630, -0.005276]]
        for i in xrange(len(liftlink_B_goal)):
            self.goals.append(liftlink_B_reference.I * createBMatrix(liftlink_B_goal[i][0:3],
                                                                     liftlink_B_goal[i][3:]))  # all in reference to head
        self.num = np.ones([len(self.goals), 1])
        self.reference_options = ['thigh_left']
        self.reference = np.zeros([len(self.goals), 1])
    elif self.task == 'scratching_thigh_right':
        liftlink_B_reference = createBMatrix([0.977394, -0.085915, 0.624282],
                                             [0.702011, 0.084996, -0.084900, 0.701960])
        liftlink_B_goal = [[1.257598, -0.081394, 0.764582, -0.599114, 0.005096, 0.800630, -0.005276],
                           [1.097844, -0.081661, 0.772003, -0.599114, 0.005096, 0.800630, -0.005276]]
        for i in xrange(len(liftlink_B_goal)):
            self.goals.append(liftlink_B_reference.I * createBMatrix(liftlink_B_goal[i][0:3],
                                                                     liftlink_B_goal[i][3:]))  # all in reference to head
        self.num = np.ones([len(self.goals), 1])
        self.reference_options = ['thigh_right']
        self.reference = np.zeros([len(self.goals), 1])
    elif self.task == 'scratching_knee_left':
        liftlink_B_reference = createBMatrix([1.403718, 0.086138, 0.632848],
                                             [0.027523, 0.706540, 0.706598, -0.027614])
        liftlink_B_goal = [[1.452417, 0.090990, 0.702744, -0.694316, 0.002271, 0.719607, 0.009260],
                           [1.452027, 0.144753, 0.667666, -0.651384, 0.240372, 0.679096, -0.238220],
                           [1.451370, 0.056361, 0.689148, -0.675084, -0.162301, 0.696923, 0.179497]]
        for i in xrange(len(liftlink_B_goal)):
            self.goals.append(liftlink_B_reference.I * createBMatrix(liftlink_B_goal[i][0:3],
                                                                     liftlink_B_goal[i][3:]))  # all in reference to knee
        self.num = np.ones([len(self.goals), 1])
        self.reference_options = ['knee_left']
        self.reference = np.zeros([len(self.goals), 1])
    elif self.task == 'scratching_knee_right':
        liftlink_B_reference = createBMatrix([1.403740, -0.085862, 0.632833],
                                             [0.027523, 0.706540, 0.706598, -0.027614])
        liftlink_B_goal = [[1.406576, -0.041661, 0.714539, -0.686141, 0.106256, 0.712874, -0.098644],
                           [1.404770, -0.129258, 0.703603, -0.671525, -0.176449, 0.692998, 0.194099],
                           [1.463372, -0.089422, 0.700517, -0.694316, 0.002271, 0.719607, 0.009260]]
        for i in xrange(len(liftlink_B_goal)):
            self.goals.append(liftlink_B_reference.I * createBMatrix(liftlink_B_goal[i][0:3],
                                                                     liftlink_B_goal[i][3:]))  # all in reference to knee
        self.num = np.ones([len(self.goals), 1])
        self.reference_options = ['knee_right']
        self.reference = np.zeros([len(self.goals), 1])
    elif self.task == 'scratching_chest':
        liftlink_B_reference = createBMatrix([0.424870, 0.000019, 0.589686],
                                             [0.706732, -0.023949, 0.024036, 0.706667])
        liftlink_B_goal = [[0.606949, -0.012159, 0.723371, -0.599114, 0.005096, 0.800630, -0.005276],
                           [0.660066, -0.011944, 0.729623, -0.599114, 0.005096, 0.800630, -0.005276]]
        for i in xrange(len(liftlink_B_goal)):
            self.goals.append(liftlink_B_reference.I * createBMatrix(liftlink_B_goal[i][0:3],
                                                                     liftlink_B_goal[i][3:]))  # all in reference to head
        self.num = np.ones([len(self.goals), 1])
        self.reference_options = ['chest']
        self.reference = np.zeros([len(self.goals), 1])
    elif self.task == 'bathing':
        # Bathing aggregates the goal pairs of the individual scratching tasks:
        # goals 2*k and 2*k+1 use reference frame k (see self.reference below).
        liftlink_B_reference = []
        liftlink_B_reference.append(createBMatrix([0.521625, 0.175031, 0.596279],
                                                  [0.707105, 0.006780, -0.006691, 0.707044]))
        liftlink_B_reference.append(createBMatrix([0.521668, -0.174969, 0.596249],
                                                  [0.706852, 0.020076, -0.019986, 0.706794]))
        liftlink_B_reference.append(createBMatrix([0.803358, 0.225067, 0.579914],
                                                  [0.707054, -0.010898, 0.010985, 0.706991]))
        liftlink_B_reference.append(createBMatrix([0.803222, -0.224933, 0.580274],
                                                  [0.707054, -0.010898, 0.010985, 0.706991]))
        liftlink_B_reference.append(createBMatrix([0.977372, 0.086085, 0.624297],
                                                  [0.702011, 0.084996, -0.084900, 0.701960]))
        liftlink_B_reference.append(createBMatrix([0.977394, -0.085915, 0.624282],
                                                  [0.702011, 0.084996, -0.084900, 0.701960]))
        liftlink_B_reference.append(createBMatrix([0.424870, 0.000019, 0.589686],
                                                  [0.706732, -0.023949, 0.024036, 0.706667]))
        liftlink_B_goal = [[0.554345, 0.233102, 0.693507, -0.524865, 0.025934, 0.850781, -0.003895],
                           [0.661972, 0.231151, 0.699075, -0.630902, 0.025209, 0.775420, -0.007229],
                           [0.595982, -0.218764, 0.702598, -0.582908, 0.005202, 0.812505, -0.005172],
                           [0.673797, -0.218444, 0.712133, -0.665426, 0.004626, 0.746428, -0.005692],
                           [0.884083, 0.234311, 0.708599, -0.599114, 0.005096, 0.800630, -0.005276],
                           [1.005796, 0.234676, 0.714125, -0.599114, 0.005096, 0.800630, -0.005276],
                           [0.905275, -0.223041, 0.714310, -0.599114, 0.005096, 0.800630, -0.005276],
                           [1.000633, -0.223224, 0.686273, -0.599114, 0.005096, 0.800630, -0.005276],
                           [1.139935, 0.087965, 0.748606, -0.599114, 0.005096, 0.800630, -0.005276],
                           [1.257200, 0.088435, 0.762122, -0.599114, 0.005096, 0.800630, -0.005276],
                           [1.257598, -0.081394, 0.764582, -0.599114, 0.005096, 0.800630, -0.005276],
                           [1.097844, -0.081661, 0.772003, -0.599114, 0.005096, 0.800630, -0.005276],
                           [0.606949, -0.012159, 0.723371, -0.599114, 0.005096, 0.800630, -0.005276],
                           [0.660066, -0.011944, 0.729623, -0.599114, 0.005096, 0.800630, -0.005276]]
        for i in xrange(len(liftlink_B_goal)):
            # NOTE(review): i / 2 relies on Python 2 integer division to pair goals
            # with references; under Python 3 this would need i // 2.
            self.goals.append(liftlink_B_reference[i / 2].I * createBMatrix(liftlink_B_goal[i][0:3],
                                                                            liftlink_B_goal[i][3:]))  # all in reference to head
        self.num = np.ones([len(self.goals), 1])
        self.reference_options = ['upper_arm_left', 'upper_arm_right', 'forearm_left', 'forearm_right',
                                  'thigh_left', 'thigh_right', 'chest']
        self.reference = np.array([[0], [0], [1], [1], [2], [2], [3], [3], [4], [4], [5], [5], [6], [6]])
    else:
        print 'THE TASK I GOT WAS BOGUS. SOMETHING PROBABLY WRONG WITH INPUTS TO DATA READER TASK'
    self.goals = np.array(self.goals)
    #print self.goals
    return self.goals, self.num, self.reference, self.reference_options
def main(): rospack = rospkg.RosPack() pkg_path = rospack.get_path('hrl_base_selection') bag_path = '/home/ari/svn/robot1_data/2011_ijrr_phri/log_data/bag/' #bag_name = 'sub1_shaver' #bag_name = 'sub3_shaver' bag_name = 'sub6_shaver' adl2_B_toolframe = createBMatrix([0.234, 0.041, -0.015], [0, 0, 0, 1]) toolframe_B_shaver = createBMatrix([0.070, 0., 0.], [0, 0, 0, 1]) shaver_B_pr2goal = createBMatrix([-0.050, 0., 0.], [0, 0, 0, 1]) adl2_B_pr2goal = adl2_B_toolframe * toolframe_B_shaver * shaver_B_pr2goal head_B_headc = createBMatrix([0.21, 0.02, -0.087], [0, 0, 0, 1]) print 'Starting on the tool log file! \n' bag = rosbag.Bag(''.join([bag_path, bag_name, '.bag']), 'r') tool = open(''.join([pkg_path, '/data/', bag_name, '_tool.log']), 'w') for topic, tf_msg, t in bag.read_messages(): if topic == "/tf": if len(tf_msg.transforms ) > 0 and tf_msg.transforms[0].child_frame_id == "/adl2": tx = copy.copy(tf_msg.transforms[0].transform.translation.x) ty = copy.copy(tf_msg.transforms[0].transform.translation.y) tz = copy.copy(tf_msg.transforms[0].transform.translation.z) rx = copy.copy(tf_msg.transforms[0].transform.rotation.x) ry = copy.copy(tf_msg.transforms[0].transform.rotation.y) rz = copy.copy(tf_msg.transforms[0].transform.rotation.z) rw = copy.copy(tf_msg.transforms[0].transform.rotation.w) world_B_shaver = createBMatrix( [tx, ty, tz], [rx, ry, rz, rw]) * adl2_B_pr2goal eul = tft.euler_from_matrix(world_B_shaver, 'rxyz') #print world_B_shaver,'\n' #print eul,'\n' #print 'time: \n',t tool.write(''.join([ str(t), ' %f %f %f %f %f %f \n' % (world_B_shaver[0, 3], world_B_shaver[1, 3], world_B_shaver[2, 3], eul[0], eul[1], eul[2]) ])) #tool.write(''.join([world_B_shaver[0,3],' ',world_B_shaver[1,3],' ',world_B_shaver[2,3],' ',eul[0],' ',eul[1],' ',eul[2],'\n'])) bag.close() tool.close() print 'Starting on the head log file! 
\n' bag = rosbag.Bag(''.join([bag_path, bag_name, '.bag']), 'r') head = open(''.join([pkg_path, '/data/', bag_name, '_head.log']), 'w') for topic, tf_msg, t in bag.read_messages(): if topic == "/tf": if len(tf_msg.transforms ) > 0 and tf_msg.transforms[0].child_frame_id == "/head": tx = copy.copy(tf_msg.transforms[0].transform.translation.x) ty = copy.copy(tf_msg.transforms[0].transform.translation.y) tz = copy.copy(tf_msg.transforms[0].transform.translation.z) rx = copy.copy(tf_msg.transforms[0].transform.rotation.x) ry = copy.copy(tf_msg.transforms[0].transform.rotation.y) rz = copy.copy(tf_msg.transforms[0].transform.rotation.z) rw = copy.copy(tf_msg.transforms[0].transform.rotation.w) world_B_headc = createBMatrix([tx, ty, tz], [rx, ry, rz, rw]) * head_B_headc eul = copy.copy(tft.euler_from_matrix(world_B_headc, 'rxyz')) head.write(''.join([ str(t), ' %f %f %f %f %f %f \n' % (world_B_headc[0, 3], world_B_headc[1, 3], world_B_headc[2, 3], eul[0], eul[1], eul[2]) ])) #head.write(''.join([t,' ',world_B_head[0,3],' ',world_B_head[1,3],' ',world_B_head[2,3],' ',eul[0],' ',eul[1],' ',eul[2],' ',eul[3],'\n'])) bag.close() head.close() print "Saved files!"
def handle_select_base(self, req):#, task): #choose_task(req.task) print 'I have received inputs!' #print 'My given inputs were: \n' #print 'goal is: \n',req.goal #print 'head is: \n', req.head # The head location is received as a posestamped message and is converted and used as the head location. pos_temp = [req.head.pose.position.x, req.head.pose.position.y, req.head.pose.position.z] ori_temp = [req.head.pose.orientation.x, req.head.pose.orientation.y, req.head.pose.orientation.z, req.head.pose.orientation.w] head = createBMatrix(pos_temp, ori_temp) #print 'head from input: \n', head # This lets the service use TF to get the head location instead of requiring it as an input. #(trans,rot) = self.listener.lookupTransform('/base_link', '/head_frame', rospy.Time(0)) #pos_temp = trans #ori_temp = rot #head = createBMatrix(pos_temp,ori_temp) #print 'head from tf: \n',head # The goal location is received as a posestamped message and is converted and used as the goal location. pos_temp = [req.goal.pose.position.x, req.goal.pose.position.y, req.goal.pose.position.z] ori_temp = [req.goal.pose.orientation.x, req.goal.pose.orientation.y, req.goal.pose.orientation.z, req.goal.pose.orientation.w] goal = createBMatrix(pos_temp,ori_temp) #print 'goal: \n',goal print 'I will move to be able to reach the goal.' # Sets the wheelchair location based on the location of the head using a few homogeneous transforms. 
pr2_B_wc = np.matrix([[head[0,0], head[0,1], 0., head[0,3]], [head[1,0], head[1,1], 0., head[1,3]], [ 0., 0., 1., 0.], [ 0., 0., 0., 1]]) # Transform from the coordinate frame of the wc model in the back right bottom corner, to the head location corner_B_head = np.matrix([[m.cos(0.), -m.sin(0.), 0., .442603], [m.sin(0.), m.cos(0.), 0., .384275], #0.34 [ 0., 0., 1., 0.], [ 0., 0., 0., 1.]]) wheelchair_location = pr2_B_wc * corner_B_head.I self.wheelchair.SetTransform(np.array(wheelchair_location)) # Published the wheelchair location to create a marker in rviz for visualization to compare where the service believes the wheelchair is to # where the person is (seen via kinect). pos_goal = wheelchair_location[0:3,3] ori_goal = tr.matrix_to_quaternion(wheelchair_location[0:3,0:3]) self.publish_wc_marker(pos_goal, ori_goal) #Setup for inside loop angle = m.pi # Transforms from end of wrist of PR2 to the end of its gripper. Openrave has a weird coordinate system for the gripper so I use a transform to correct. goal_B_gripper = np.matrix([[ 0, 0, 1., .1], [ 0, 1, 0., 0.], [ -1., 0., 0., 0.], [ 0., 0., 0., 1.]]) #pr2_B_goal = head * head_B_goal pr2_B_goal = goal*goal_B_gripper angle_base = m.pi/2 #print 'The goal gripper pose is: \n' , np.array(pr2_B_goal) # This is to publish WC position w.r.t. PR2 after the PR2 reaches goal location. # Only necessary for testing in simulation to set the wheelchair in reach of PR2. 
#goalpr2_B_wc = wc_B_goalpr2.I #print 'pr2_B_wc is: \n',goalpr2_B_wc #pos_goal = goalpr2_B_wc[:3,3] #ori_goal = tr.matrix_to_quaternion(goalpr2_B_wc[0:3,0:3]) #psm_wc = PoseStamped() #psm_wc.header.frame_id = '/odom_combined' #psm_wc.pose.position.x=pos_goal[0] #psm_wc.pose.position.y=pos_goal[1] #psm_wc.pose.position.z=pos_goal[2] #psm_wc.pose.orientation.x=ori_goal[0] #psm_wc.pose.orientation.y=ori_goal[1] #psm_wc.pose.orientation.z=ori_goal[2] #psm_wc.pose.orientation.w=ori_goal[3] #self.wc_position.publish(psm_wc) # Find a base location by testing various base locations online for IK solution for i in [0.,.05,.1,.15,.2,.25,.3,.35,.4,.45,.5,-.05,-.1,-.15,-.2,-.25,-.3]:#[.1]:#[0.,.1,.3,.5,.8,1,-.1,-.2,-.3]: for j in [0.,.03,.05,.08,-.03,-.05,-.08, -.1,-.12,-.2,-.3]:#[.2]:#[0.,.1,.3,.5,.8,-.1,-.2,-.3]: for k in [0.,m.pi/4,m.pi/2,-m.pi/4,-m.pi/2,m.pi,3*m.pi/2]: #goal_pose = req.goal # transform from head frame in wheelchair to desired base goal wc_B_goalpr2 = np.matrix([[m.cos(angle_base+k), -m.sin(angle_base+k), 0., .4+i], [m.sin(angle_base+k), m.cos(angle_base+k), 0., -.9+j], [ 0., 0., 1, 0.], [ 0., 0., 0., 1]]) base_position = pr2_B_wc * wc_B_goalpr2 #print 'base position: \n',base_position self.robot.SetTransform(np.array(base_position)) #res = self.manipprob.MoveToHandPosition(matrices=[np.array(pr2_B_goal)],seedik=10) # call motion planner with goal joint angles #self.robot.WaitForController(0) # wait #print 'res: \n',res v = self.robot.GetActiveDOFValues() for name in self.joint_names: v[self.robot.GetJoint(name).GetDOFIndex()] = self.joint_angles[self.joint_names.index(name)] self.robot.SetActiveDOFValues(v) with self.env: #print 'checking goal base location: \n' , np.array(base_position) if not self.manip.CheckIndependentCollision(op.CollisionReport()): sol = self.manip.FindIKSolution(np.array(pr2_B_goal), op.IkFilterOptions.CheckEnvCollisions) if sol is not None: now = rospy.Time.now() + rospy.Duration(1.0) self.listener.waitForTransform('/odom_combined', 
'/base_link', now, rospy.Duration(10)) (trans,rot) = self.listener.lookupTransform('/odom_combined', '/base_link', now) odom_goal = createBMatrix(trans, rot) * base_position pos_goal = odom_goal[:3,3] ori_goal = tr.matrix_to_quaternion(odom_goal[0:3,0:3]) #print 'Got an iksolution! \n', sol psm = PoseStamped() psm.header.frame_id = '/odom_combined' psm.pose.position.x=pos_goal[0] psm.pose.position.y=pos_goal[1] psm.pose.position.z=pos_goal[2] psm.pose.orientation.x=ori_goal[0] psm.pose.orientation.y=ori_goal[1] psm.pose.orientation.z=ori_goal[2] psm.pose.orientation.w=ori_goal[3] # This is to publish WC position w.r.t. PR2 after the PR2 reaches goal location. # Only necessary for testing in simulation to set the wheelchair in reach of PR2. #goalpr2_B_wc = wc_B_goalpr2.I #print 'pr2_B_wc is: \n',goalpr2_B_wc #pos_goal = goalpr2_B_wc[:3,3] #ori_goal = tr.matrix_to_quaternion(goalpr2_B_wc[0:3,0:3]) #psm_wc = PoseStamped() #psm_wc.header.frame_id = '/odom_combined' #psm_wc.pose.position.x=pos_goal[0] #psm_wc.pose.position.y=pos_goal[1] #psm_wc.pose.position.z=pos_goal[2] #psm_wc.pose.orientation.x=ori_goal[0] #psm_wc.pose.orientation.y=ori_goal[1] #psm_wc.pose.orientation.z=ori_goal[2] #psm_wc.pose.orientation.w=ori_goal[3] #self.wc_position.publish(psm_wc) print 'I found a goal location! It is at B transform: \n',base_position print 'The quaternion to the goal location is: \n',psm return psm #self.robot.SetDOFValues(sol,self.manip.GetArmIndices()) # set the current solution #Tee = self.manip.GetEndEffectorTransform() #self.env.UpdatePublishedBodies() # allow viewer to update new robot # traj = None # try: # #res = self.manipprob.MoveToHandPosition(matrices=[np.array(pr2_B_goal)],seedik=10) # call motion planner with goal joint angles # traj = self.manipprob.MoveManipulator(goal=sol, outputtrajobj=True) # print 'Got a trajectory! 
\n'#,traj # except: # #print 'traj = \n',traj # traj = None # print 'traj failed \n' # pass # #traj =1 #This gets rid of traj # if traj is not None: else: print 'I found a bad goal location. Trying again!' #rospy.sleep(.1) print 'I found nothing! My given inputs were: \n', req.goal, req.head print 'I found a goal location! It is at B transform: \n',base_location print 'The quaternion to the goal location is: \n',psm return psm
def handle_select_base(self, req): #, task): #choose_task(req.task) print 'I have received inputs!' #print 'My given inputs were: \n' #print 'goal is: \n',req.goal #print 'head is: \n', req.head # The head location is received as a posestamped message and is converted and used as the head location. pos_temp = [ req.head.pose.position.x, req.head.pose.position.y, req.head.pose.position.z ] ori_temp = [ req.head.pose.orientation.x, req.head.pose.orientation.y, req.head.pose.orientation.z, req.head.pose.orientation.w ] head = createBMatrix(pos_temp, ori_temp) #print 'head from input: \n', head # This lets the service use TF to get the head location instead of requiring it as an input. #(trans,rot) = self.listener.lookupTransform('/base_link', '/head_frame', rospy.Time(0)) #pos_temp = trans #ori_temp = rot #head = createBMatrix(pos_temp,ori_temp) #print 'head from tf: \n',head # The goal location is received as a posestamped message and is converted and used as the goal location. pos_temp = [ req.goal.pose.position.x, req.goal.pose.position.y, req.goal.pose.position.z ] ori_temp = [ req.goal.pose.orientation.x, req.goal.pose.orientation.y, req.goal.pose.orientation.z, req.goal.pose.orientation.w ] goal = createBMatrix(pos_temp, ori_temp) #print 'goal: \n',goal print 'I will move to be able to reach the goal.' # Sets the wheelchair location based on the location of the head using a few homogeneous transforms. pr2_B_wc = np.matrix([[head[0, 0], head[0, 1], 0., head[0, 3]], [head[1, 0], head[1, 1], 0., head[1, 3]], [0., 0., 1., 0.], [0., 0., 0., 1]]) # Transform from the coordinate frame of the wc model in the back right bottom corner, to the head location corner_B_head = np.matrix([ [m.cos(0.), -m.sin(0.), 0., .442603], [m.sin(0.), m.cos(0.), 0., .384275], #0.34 [0., 0., 1., 0.], [0., 0., 0., 1.] 
]) wheelchair_location = pr2_B_wc * corner_B_head.I self.wheelchair.SetTransform(np.array(wheelchair_location)) # Published the wheelchair location to create a marker in rviz for visualization to compare where the service believes the wheelchair is to # where the person is (seen via kinect). pos_goal = wheelchair_location[0:3, 3] ori_goal = tr.matrix_to_quaternion(wheelchair_location[0:3, 0:3]) self.publish_wc_marker(pos_goal, ori_goal) #Setup for inside loop angle = m.pi # Transforms from end of wrist of PR2 to the end of its gripper. Openrave has a weird coordinate system for the gripper so I use a transform to correct. goal_B_gripper = np.matrix([[0, 0, 1., .1], [0, 1, 0., 0.], [-1., 0., 0., 0.], [0., 0., 0., 1.]]) #pr2_B_goal = head * head_B_goal pr2_B_goal = goal * goal_B_gripper angle_base = m.pi / 2 #print 'The goal gripper pose is: \n' , np.array(pr2_B_goal) # This is to publish WC position w.r.t. PR2 after the PR2 reaches goal location. # Only necessary for testing in simulation to set the wheelchair in reach of PR2. 
#goalpr2_B_wc = wc_B_goalpr2.I #print 'pr2_B_wc is: \n',goalpr2_B_wc #pos_goal = goalpr2_B_wc[:3,3] #ori_goal = tr.matrix_to_quaternion(goalpr2_B_wc[0:3,0:3]) #psm_wc = PoseStamped() #psm_wc.header.frame_id = '/odom_combined' #psm_wc.pose.position.x=pos_goal[0] #psm_wc.pose.position.y=pos_goal[1] #psm_wc.pose.position.z=pos_goal[2] #psm_wc.pose.orientation.x=ori_goal[0] #psm_wc.pose.orientation.y=ori_goal[1] #psm_wc.pose.orientation.z=ori_goal[2] #psm_wc.pose.orientation.w=ori_goal[3] #self.wc_position.publish(psm_wc) # Find a base location by testing various base locations online for IK solution for i in [ 0., .05, .1, .15, .2, .25, .3, .35, .4, .45, .5, -.05, -.1, -.15, -.2, -.25, -.3 ]: #[.1]:#[0.,.1,.3,.5,.8,1,-.1,-.2,-.3]: for j in [ 0., .03, .05, .08, -.03, -.05, -.08, -.1, -.12, -.2, -.3 ]: #[.2]:#[0.,.1,.3,.5,.8,-.1,-.2,-.3]: for k in [ 0., m.pi / 4, m.pi / 2, -m.pi / 4, -m.pi / 2, m.pi, 3 * m.pi / 2 ]: #goal_pose = req.goal # transform from head frame in wheelchair to desired base goal wc_B_goalpr2 = np.matrix([[ m.cos(angle_base + k), -m.sin(angle_base + k), 0., .4 + i ], [ m.sin(angle_base + k), m.cos(angle_base + k), 0., -.9 + j ], [0., 0., 1, 0.], [0., 0., 0., 1]]) base_position = pr2_B_wc * wc_B_goalpr2 #print 'base position: \n',base_position self.robot.SetTransform(np.array(base_position)) #res = self.manipprob.MoveToHandPosition(matrices=[np.array(pr2_B_goal)],seedik=10) # call motion planner with goal joint angles #self.robot.WaitForController(0) # wait #print 'res: \n',res v = self.robot.GetActiveDOFValues() for name in self.joint_names: v[self.robot.GetJoint(name).GetDOFIndex( )] = self.joint_angles[self.joint_names.index(name)] self.robot.SetActiveDOFValues(v) with self.env: #print 'checking goal base location: \n' , np.array(base_position) if not self.manip.CheckIndependentCollision( op.CollisionReport()): sol = self.manip.FindIKSolution( np.array(pr2_B_goal), op.IkFilterOptions.CheckEnvCollisions) if sol is not None: now = rospy.Time.now() + 
rospy.Duration(1.0) self.listener.waitForTransform( '/odom_combined', '/base_link', now, rospy.Duration(10)) (trans, rot) = self.listener.lookupTransform( '/odom_combined', '/base_link', now) odom_goal = createBMatrix(trans, rot) * base_position pos_goal = odom_goal[:3, 3] ori_goal = tr.matrix_to_quaternion( odom_goal[0:3, 0:3]) #print 'Got an iksolution! \n', sol psm = PoseStamped() psm.header.frame_id = '/odom_combined' psm.pose.position.x = pos_goal[0] psm.pose.position.y = pos_goal[1] psm.pose.position.z = pos_goal[2] psm.pose.orientation.x = ori_goal[0] psm.pose.orientation.y = ori_goal[1] psm.pose.orientation.z = ori_goal[2] psm.pose.orientation.w = ori_goal[3] # This is to publish WC position w.r.t. PR2 after the PR2 reaches goal location. # Only necessary for testing in simulation to set the wheelchair in reach of PR2. #goalpr2_B_wc = wc_B_goalpr2.I #print 'pr2_B_wc is: \n',goalpr2_B_wc #pos_goal = goalpr2_B_wc[:3,3] #ori_goal = tr.matrix_to_quaternion(goalpr2_B_wc[0:3,0:3]) #psm_wc = PoseStamped() #psm_wc.header.frame_id = '/odom_combined' #psm_wc.pose.position.x=pos_goal[0] #psm_wc.pose.position.y=pos_goal[1] #psm_wc.pose.position.z=pos_goal[2] #psm_wc.pose.orientation.x=ori_goal[0] #psm_wc.pose.orientation.y=ori_goal[1] #psm_wc.pose.orientation.z=ori_goal[2] #psm_wc.pose.orientation.w=ori_goal[3] #self.wc_position.publish(psm_wc) print 'I found a goal location! It is at B transform: \n', base_position print 'The quaternion to the goal location is: \n', psm return psm #self.robot.SetDOFValues(sol,self.manip.GetArmIndices()) # set the current solution #Tee = self.manip.GetEndEffectorTransform() #self.env.UpdatePublishedBodies() # allow viewer to update new robot # traj = None # try: # #res = self.manipprob.MoveToHandPosition(matrices=[np.array(pr2_B_goal)],seedik=10) # call motion planner with goal joint angles # traj = self.manipprob.MoveManipulator(goal=sol, outputtrajobj=True) # print 'Got a trajectory! 
\n'#,traj # except: # #print 'traj = \n',traj # traj = None # print 'traj failed \n' # pass # #traj =1 #This gets rid of traj # if traj is not None: else: print 'I found a bad goal location. Trying again!' #rospy.sleep(.1) print 'I found nothing! My given inputs were: \n', req.goal, req.head print 'I found a goal location! It is at B transform: \n', base_location print 'The quaternion to the goal location is: \n', psm return psm
def main():
    # Offline log extraction for the yogurt-feeding study: for each recorded
    # bag, write spoon (tool) and head pose logs as "<timestamp> x y z r p y"
    # lines (translation in meters, 'rxyz' Euler angles).
    rospack = rospkg.RosPack()
    pkg_path = rospack.get_path('hrl_base_selection')
    #bag_path = '/home/ari/svn/robot1_data/2011_ijrr_phri/log_data/bag/'
    bag_path = '/home/ari/git/gt-ros-pkg.hrl-assistive/hrl_base_selection/data/yogurt/bags/'
    bag_name_list = ['2014-06-17-16-13-46',
                     #'2014-06-17-16-17-20', This is a bad bag
                     '2014-06-17-16-19-02',
                     '2014-06-17-16-21-57',
                     '2014-06-17-16-24-03',
                     '2014-06-17-16-26-12',
                     '2014-06-17-16-27-46',
                     '2014-06-17-16-29-18',
                     '2014-06-17-16-31-24',
                     '2014-06-17-16-33-13',
                     '2014-06-18-12-37-23',
                     '2014-06-18-12-39-43',
                     '2014-06-18-12-41-42',
                     '2014-06-18-12-43-45',
                     '2014-06-18-12-45-26',
                     '2014-06-18-12-47-22',
                     '2014-06-18-12-49-04',
                     '2014-06-18-12-50-52',
                     '2014-06-18-12-52-39',
                     '2014-06-18-12-54-26']
    for num, bag_name in enumerate(bag_name_list):
        print 'Going to open bag: ', bag_name, '.bag'
        # First pass: spoon (tool) poses.
        print 'Starting on a tool log file! \n'
        bag = rosbag.Bag(''.join([bag_path, bag_name, '.bag']), 'r')
        tool = open(''.join([pkg_path, '/data/yogurt/logs/', bag_name, '_tool.log']), 'w')
        for topic, msg, t in bag.read_messages():
            if topic == "/spoon/pose":
                # NOTE(review): despite the topic name, the message is read as
                # a transform (msg.transform.*) -- presumably TransformStamped;
                # confirm against the publisher.
                tx = copy.copy(msg.transform.translation.x)
                ty = copy.copy(msg.transform.translation.y)
                tz = copy.copy(msg.transform.translation.z)
                rx = copy.copy(msg.transform.rotation.x)
                ry = copy.copy(msg.transform.rotation.y)
                rz = copy.copy(msg.transform.rotation.z)
                rw = copy.copy(msg.transform.rotation.w)
                world_B_spoon = createBMatrix([tx, ty, tz], [rx, ry, rz, rw])  #*adl2_B_pr2goal
                eul = tft.euler_from_matrix(world_B_spoon, 'rxyz')
                tool.write(''.join([str(t), ' %f %f %f %f %f %f \n' % (world_B_spoon[0, 3], world_B_spoon[1, 3], world_B_spoon[2, 3], eul[0], eul[1], eul[2])]))
        bag.close()
        tool.close()

        # Second pass: head poses.
        print 'Starting on a head log file! \n'
        bag = rosbag.Bag(''.join([bag_path, bag_name, '.bag']), 'r')
        head = open(''.join([pkg_path, '/data/yogurt/logs/', bag_name, '_head.log']), 'w')
        for topic, msg, t in bag.read_messages():
            if topic == "/head/pose":
                tx = copy.copy(msg.transform.translation.x)
                ty = copy.copy(msg.transform.translation.y)
                tz = copy.copy(msg.transform.translation.z)
                rx = copy.copy(msg.transform.rotation.x)
                ry = copy.copy(msg.transform.rotation.y)
                rz = copy.copy(msg.transform.rotation.z)
                rw = copy.copy(msg.transform.rotation.w)
                world_B_head = createBMatrix([tx, ty, tz], [rx, ry, rz, rw])  #*adl2_B_pr2goal
                eul = copy.copy(tft.euler_from_matrix(world_B_head, 'rxyz'))
                head.write(''.join([str(t), ' %f %f %f %f %f %f \n' % (world_B_head[0, 3], world_B_head[1, 3], world_B_head[2, 3], eul[0], eul[1], eul[2])]))
        bag.close()
        head.close()
        print "Saved file! Finished bag #", num + 1
    print 'Done with all bag files!'
def handle_select_base(self, req):
    """Service callback: prepare scored PR2 base-pose candidates for a task.

    Reads the task and environment model ('chair', 'bed', or 'autobed') from
    the request, obtains head / AR-tag transforms according to the active mode
    ('test', 'normal', 'demo', or 'sim'), re-weights the precomputed
    reachability score sheet by the distance the base would have to move, and
    delegates to handle_returning_base_goals() to produce the final goals.
    Returns None (or None, None) on failure.
    """
    model = req.model  # environment model: 'chair', 'bed', or 'autobed'
    self.model = model
    task = req.task
    self.task = task
    if model == 'autobed':
        # The autobed publishes its state (e.g. bed height) on /abdout0.
        self.autobed_sub = rospy.Subscriber('/abdout0', FloatArrayBare, self.bed_state_cb)
    print 'I have received inputs!'
    start_time = time.time()
    # The head location used to be received as a posestamped message:
    # pos_temp = [req.head.pose.position.x,
    #             req.head.pose.position.y,
    #             req.head.pose.position.z]
    # ori_temp = [req.head.pose.orientation.x,
    #             req.head.pose.orientation.y,
    #             req.head.pose.orientation.z,
    #             req.head.pose.orientation.w]
    # self.pr2_B_head = createBMatrix(pos_temp, ori_temp)

    # NOTE(review): this chain keys 'test' off the task but the other branches
    # off a pre-existing self.mode -- confirm that mixing is intended.  In
    # 'test' mode self.pr2_B_head is presumably set elsewhere; verify.
    if self.task == 'shaving_test':
        self.mode = 'test'
    elif self.mode == 'normal':
        # Live robot: get head and AR-tag poses from TF.
        try:
            now = rospy.Time.now()
            self.listener.waitForTransform('/base_link', '/head_frame', now, rospy.Duration(15))
            (trans, rot) = self.listener.lookupTransform('/base_link', '/head_frame', now)
            self.pr2_B_head = createBMatrix(trans, rot)
            if model == 'chair':
                now = rospy.Time.now()
                self.listener.waitForTransform('/base_link', '/ar_marker', now, rospy.Duration(15))
                (trans, rot) = self.listener.lookupTransform('/base_link', '/ar_marker', now)
                self.pr2_B_ar = createBMatrix(trans, rot)
            elif model == 'autobed':
                now = rospy.Time.now()
                self.listener.waitForTransform('/base_link', '/ar_marker', now, rospy.Duration(15))
                (trans, rot) = self.listener.lookupTransform('/base_link', '/ar_marker', now)
                self.pr2_B_ar = createBMatrix(trans, rot)
                # Static transform from the bed model origin to the AR tag tf.
                ar_trans_B = np.eye(4)
                # -.445 if right side of body. .445 if left side.
                # This is the translational transform from bed origin to the ar tag tf.
                # ar_trans_B[0:3,3] = np.array([0.625, -.445, .275+(self.bed_state_z-9)/100])
                ar_trans_B[0:3, 3] = np.array([-.04, 0., .74])
                ar_rotz_B = np.eye(4)
                # If left side of body should be np.array([[-1,0],[0,-1]])
                # If right side of body should be np.array([[1,0],[0,1]])
                # ar_rotz_B [0:2,0:2] = np.array([[-1, 0],[0, -1]])
                ar_rotx_B = np.eye(4)
                # ar_rotx_B[1:3,1:3] = np.array([[0,1],[-1,0]])
                self.model_B_ar = np.matrix(ar_trans_B) * np.matrix(ar_rotz_B) * np.matrix(ar_rotx_B)
            # Sanity check on the most recent TF lookup: bail out if the tag
            # (or head, for other models) is implausibly far away.
            if np.linalg.norm(trans) > 4:
                rospy.loginfo('AR tag is too far away. Use the \'Testing\' button to move PR2 to 1 meter from AR '
                              'tag. Or just move it closer via other means. Alternatively, the PR2 may have lost '
                              'sight of the AR tag or it is having silly issues recognizing it. ')
                return None
        except Exception as e:
            rospy.loginfo("TF Exception. Could not get the AR_tag location, bed location, or "
                          "head location:\r\n%s" % e)
            return None
    elif self.mode == 'demo':
        # Demo setup: same as 'normal' but the autobed pose comes from a
        # '/reference' frame that is treated as the model frame directly.
        try:
            now = rospy.Time.now()
            self.listener.waitForTransform('/base_link', '/head_frame', now, rospy.Duration(15))
            (trans, rot) = self.listener.lookupTransform('/base_link', '/head_frame', now)
            self.pr2_B_head = createBMatrix(trans, rot)
            if model == 'chair':
                now = rospy.Time.now()
                self.listener.waitForTransform('/base_link', '/ar_marker', now, rospy.Duration(15))
                (trans, rot) = self.listener.lookupTransform('/base_link', '/ar_marker', now)
                self.pr2_B_ar = createBMatrix(trans, rot)
            elif model == 'autobed':
                now = rospy.Time.now()
                self.listener.waitForTransform('/base_link', '/reference', now, rospy.Duration(15))
                (trans, rot) = self.listener.lookupTransform('/base_link', '/reference', now)
                self.pr2_B_ar = createBMatrix(trans, rot)
                ar_trans_B_model = np.eye(4)
                # -.445 if right side of body. .445 if left side.
                # This is the translational transform from reference markers to the bed origin.
                # ar_trans_B_model[0:3,3] = np.array([.06, 0., -.74])
                # self.model_B_ar = np.matrix(ar_trans_B_model).I  # *np.matrix(ar_rotz_B)*np.matrix(ar_rotx_B)
                self.model_B_ar = np.matrix(np.eye(4))
            if np.linalg.norm(trans) > 4:
                rospy.loginfo('AR tag is too far away. Use the \'Testing\' button to move PR2 to 1 meter from AR '
                              'tag. Or just move it closer via other means. Alternatively, the PR2 may have lost '
                              'sight of the AR tag or it is having silly issues recognizing it. ')
                return None
        except Exception as e:
            rospy.loginfo("TF Exception. Could not get the AR_tag location, bed location, or "
                          "head location:\r\n%s" % e)
            return None
    elif self.mode == 'sim':
        # Simulation: head and bed poses arrive on topics instead of TF.
        self.head_pose_sub = rospy.Subscriber('/haptic_mpc/head_pose', PoseStamped, self.head_pose_cb)
        if self.model == 'autobed':
            self.bed_pose_sub = rospy.Subscriber('/haptic_mpc/bed_pose', PoseStamped, self.bed_pose_cb)
        self.model_B_ar = np.eye(4)

    print 'The homogeneous transfrom from PR2 base link to head: \n', self.pr2_B_head

    # Project the head pose onto the ground plane: build an upright frame
    # whose x axis is the head's x axis flattened to z = 0.
    z_origin = np.array([0, 0, 1])
    x_head = np.array([self.pr2_B_head[0, 0], self.pr2_B_head[1, 0], self.pr2_B_head[2, 0]])
    y_head_project = np.cross(z_origin, x_head)
    y_head_project = y_head_project / np.linalg.norm(y_head_project)
    x_head_project = np.cross(y_head_project, z_origin)
    x_head_project = x_head_project / np.linalg.norm(x_head_project)
    self.pr2_B_head_project = np.eye(4)
    for i in xrange(3):
        self.pr2_B_head_project[i, 0] = x_head_project[i]
        self.pr2_B_head_project[i, 1] = y_head_project[i]
        self.pr2_B_head_project[i, 3] = self.pr2_B_head[i, 3]
    self.pr2_B_headfloor = copy.copy(np.matrix(self.pr2_B_head_project))
    self.pr2_B_headfloor[2, 3] = 0.
    print 'The homogeneous transform from PR2 base link to the head location projected onto the ground plane: \n', \
        self.pr2_B_headfloor
    print 'I will now determine a good base location.'

    headx = 0
    heady = 0
    # Sets the model (chair/bed) origin based on the location of the head
    # using a few homogeneous transforms.
    if model == 'chair':
        # Transform from the coordinate frame of the wc model in the back
        # right bottom corner, to the head location.
        originsubject_B_headfloor = np.matrix([[m.cos(0.), -m.sin(0.), 0., .442603],  #.45 #.438
                                               [m.sin(0.), m.cos(0.), 0., .384275],  #0.34 #.42
                                               [0., 0., 1., 0.],
                                               [0., 0., 0., 1.]])
        self.origin_B_pr2 = copy.copy(self.pr2_B_headfloor.I)

    # Regular bed is now deprecated. To use need to fix to be similar to chair.
    if model == 'bed':
        an = -m.pi / 2
        self.headfloor_B_head = np.matrix([[m.cos(an), 0., m.sin(an), 0.],
                                           [0., 1., 0., 0.],
                                           [-m.sin(an), 0., m.cos(an), 1.1546],
                                           [0., 0., 0., 1.]])
        an2 = 0
        originsubject_B_headfloor = np.matrix([[m.cos(an2), 0., m.sin(an2), .2954],
                                               [0., 1., 0., 0.],
                                               [-m.sin(an2), 0., m.cos(an2), 0.],
                                               [0., 0., 0., 1.]])
        self.origin_B_pr2 = self.headfloor_B_head * self.pr2_B_head.I

    if model == 'autobed':
        self.model_B_pr2 = self.model_B_ar * self.pr2_B_ar.I
        self.origin_B_pr2 = copy.copy(self.model_B_pr2)
        model_B_head = self.model_B_pr2 * self.pr2_B_headfloor
        ## This next bit selects what entry in the dictionary of scores to use based on the location of the head
        # with respect to the bed model. Currently it just selects the dictionary entry with the closest relative
        # head location. Ultimately it should use a gaussian to get scores from the dictionary based on the actual
        # head location.
        # NOTE(review): every x branch assigns 0. (and the .75 vs .075 bounds
        # look like a typo), so headx is effectively always 0 -- confirm.
        if model_B_head[0, 3] > -.025 and model_B_head[0, 3] < .025:
            headx = 0.
        elif model_B_head[0, 3] >= .025 and model_B_head[0, 3] < .75:
            headx = 0.
        elif model_B_head[0, 3] <= -.025 and model_B_head[0, 3] > -.75:
            headx = 0.
        elif model_B_head[0, 3] >= .075:
            headx = 0.
        elif model_B_head[0, 3] <= -.075:
            headx = 0.
        # Bin the lateral head offset into the available dictionary entries.
        if model_B_head[1, 3] > -.025 and model_B_head[1, 3] < .025:
            heady = 0
        elif model_B_head[1, 3] >= .025 and model_B_head[1, 3] < .075:
            heady = .05
        elif model_B_head[1, 3] > -.075 and model_B_head[1, 3] <= -.025:
            heady = -.05
        elif model_B_head[1, 3] >= .075:
            heady = .1
        elif model_B_head[1, 3] <= -.075:
            heady = -.1

    subject_location = self.origin_B_pr2.I
    #self.subject.SetTransform(np.array(subject_location))

    # Get score data and convert to goal locations
    print 'Time to receive head location and start things off: %fs' % (time.time() - start_time)
    start_time = time.time()
    if self.model == 'chair':
        all_scores = self.chair_scores
    elif self.model == 'autobed':
        all_scores = self.autobed_scores
    # all_scores = self.load_task(task, model)
    scores = all_scores[headx, heady]
    # NOTE(review): if scores can be a numpy array, '== None' is elementwise;
    # 'is None' would be the safe test -- confirm scores' type.
    if scores == None:
        print 'Failed to load precomputed reachability data. That is a problem. Abort!'
        return None, None
    start_time = time.time()

    ## Set the weights for the different scores.
    alpha = 0.0001  # Weight on base's closeness to goal
    beta = 1.  # Weight on number of reachable goals
    gamma = 1.  # Weight on manipulability of arm at each reachable goal
    zeta = .0007  # Weight on distance to move to get to that goal location
    pr2_loc = np.array([self.origin_B_pr2[0, 3], self.origin_B_pr2[1, 3]])
    length = len(scores)
    temp_scores = np.zeros([length, 1])
    temp_locations = scores[:, 0]
    print 'Original number of scores: ', length
    print 'Time to start data processing: %fs' % (time.time() - start_time)
    for j in xrange(length):
        # Accumulate, over the candidate base poses in entry j, the planar
        # translation plus heading change needed from the current base pose.
        dist_score = 0
        for i in xrange(len(scores[j, 0][0])):
            current_x = np.array([self.origin_B_pr2[0, 0], self.origin_B_pr2[1, 0], self.origin_B_pr2[2, 0]])
            des_x = np.array([m.cos(scores[j, 0][2][i]), m.sin(scores[j, 0][2][i]), 0])
            angle_change = m.acos(np.dot(current_x, des_x) / (np.linalg.norm(current_x) * np.linalg.norm(des_x)))
            dist_score += np.linalg.norm([pr2_loc[0] - scores[j, 0][0][i], pr2_loc[1] - scores[j, 0][1][i]])
            dist_score += angle_change
            # I should eventually use the following line instead, because it normalizes to 0-1 range. This way the
            # weights can be compared better.
            # dist_score += angle_change/(2*m.pi)
        # Weighted combination; negative totals are clamped to zero.
        thisScore = -alpha * scores[j, 1][0] + beta * scores[j, 1][1] + gamma * scores[j, 1][2] - zeta * dist_score
        if thisScore < 0:
            thisScore = 0
        temp_scores[j, 0] = copy.copy(thisScore)
    # Pair each candidate location with its adjusted score.
    out_score = []
    for i in xrange(length):
        out_score.append([temp_locations[i], temp_scores[i]])
    out_score = np.array(out_score)
    print 'Final version of scores is: \n', out_score[0]
    self.score_sheet = np.array(sorted(out_score, key=lambda t: t[1], reverse=True))
    self.score_length = len(self.score_sheet)
    print 'Best score and configuration is: \n', self.score_sheet[0]
    print 'Number of scores in score sheet: ', self.score_length
    print 'I have finished preparing the data for the task!'
    if self.score_sheet[0, 1] == 0:
        print 'There are no base locations with a score greater than 0. There are no good base locations!!'
        return None, None
    print 'Time to adjust base scores: %fs' % (time.time() - start_time)

    # Published the subject (wheelchair/bed) location to create a marker in
    # rviz for visualization, to compare where the service believes the model
    # is to where the person is (seen via kinect).
    pos_goal, ori_goal = Bmat_to_pos_quat(subject_location)
    self.publish_subject_marker(pos_goal, ori_goal)
    visualize_plot = False
    if visualize_plot:
        self.plot_final_score_sheet()
    # NOTE(review): failure paths above return (None, None) while this returns
    # whatever handle_returning_base_goals() yields -- confirm callers handle
    # both shapes.
    return self.handle_returning_base_goals()
def update_wc(self, msg):
    """Callback for a new wheelchair (wc) pose; plans and publishes an arm reach.

    Steps performed, in order:
      1. Syncs the OpenRAVE robot model's joints with the latest measured angles.
      2. Converts the incoming PoseStamped ``msg`` into a homogeneous transform
         (pr2_B_wc) and places the wheelchair model in the OpenRAVE environment.
      3. Publishes an rviz mesh marker showing where the wheelchair is believed to be.
      4. Builds a (currently hard-coded) goal pose, solves IK with collision
         checking, plans a joint-space path, and publishes it as a JointTrajectory.

    Parameters:
        msg (geometry_msgs/PoseStamped): wheelchair pose in the robot's frame.

    Returns:
        None. Returns early (after publishing feedback) if IK or planning fails.
    """
    # Sync the OpenRAVE model with the most recent joint state before planning.
    v = self.robot.GetActiveDOFValues()
    for name in self.joint_names:
        v[self.robot.GetJoint(name).GetDOFIndex()] = self.joint_angles[self.joint_names.index(name)]
    self.robot.SetActiveDOFValues(v)
    rospy.loginfo("I have got a wc location!")
    pos_temp = [msg.pose.position.x,
                msg.pose.position.y,
                msg.pose.position.z]
    ori_temp = [msg.pose.orientation.x,
                msg.pose.orientation.y,
                msg.pose.orientation.z,
                msg.pose.orientation.w]
    # Homogeneous transform from PR2 base to the wheelchair reference point.
    self.pr2_B_wc = createBMatrix(pos_temp, ori_temp)
    psm = PoseStamped()
    psm.header.stamp = rospy.Time.now()
    #self.goal_pose_pub.publish(psm)
    self.pub_feedback("Reaching toward goal location")
    # Wheelchair model origin = detected pose composed with the inverse of the
    # (corner-of-model -> head) calibration transform.
    wheelchair_location = self.pr2_B_wc * self.corner_B_head.I
    self.wheelchair.SetTransform(np.array(wheelchair_location))
    pos_goal = wheelchair_location[:3, 3]
    ori_goal = tr.matrix_to_quaternion(wheelchair_location[0:3, 0:3])
    # Publish an rviz marker of the wheelchair mesh for visual confirmation.
    marker = Marker()
    marker.header.frame_id = "base_link"
    marker.header.stamp = rospy.Time()
    marker.ns = "arm_reacher_wc_model"
    marker.id = 0
    marker.type = Marker.MESH_RESOURCE;
    marker.action = Marker.ADD
    marker.pose.position.x = pos_goal[0]
    marker.pose.position.y = pos_goal[1]
    marker.pose.position.z = pos_goal[2]
    marker.pose.orientation.x = ori_goal[0]
    marker.pose.orientation.y = ori_goal[1]
    marker.pose.orientation.z = ori_goal[2]
    marker.pose.orientation.w = ori_goal[3]
    # Mesh is modeled in inches; 0.0254 scales it to meters.
    marker.scale.x = 0.0254
    marker.scale.y = 0.0254
    marker.scale.z = 0.0254
    marker.color.a = 1.0
    marker.color.r = 1.0
    marker.color.g = 0.0
    marker.color.b = 0.0
    #only if using a MESH_RESOURCE marker type:
    marker.mesh_resource = "package://hrl_base_selection/models/ADA_Wheelchair.dae"
    self.vis_pub.publish( marker )
    # Re-sync joints again in case they changed while handling the marker.
    v = self.robot.GetActiveDOFValues()
    for name in self.joint_names:
        v[self.robot.GetJoint(name).GetDOFIndex()] = self.joint_angles[self.joint_names.index(name)]
    self.robot.SetActiveDOFValues(v)
    # NOTE(review): the reach goal is currently hard-coded at (.5, 0, 1.2) in the
    # base frame rather than derived from the wheelchair pose — the commented
    # lines below show the previous wheelchair-relative goal. Confirm intended.
    goal_angle = 0 #m.pi/2
    self.goal = np.matrix([[m.cos(goal_angle), -m.sin(goal_angle), 0., .5],
                           [m.sin(goal_angle),  m.cos(goal_angle), 0., 0],
                           [0.,                 0.,                1., 1.2],
                           [0.,                 0.,                0., 1.]])
    #self.goal = copy.copy(self.pr2_B_wc)
    #self.goal[0,3]=self.goal[0,3]-.2
    #self.goal[1,3]=self.goal[1,3]-.3
    #self.goal[2,3]= 1.3
    print 'goal is: \n', self.goal
    # Fixed offset from the goal frame to the gripper frame (approach along +z).
    self.goal_B_gripper = np.matrix([[0.,  0., 1., 0.1],
                                     [0.,  1., 0., 0.],
                                     [-1., 0., 0., 0.],
                                     [0.,  0., 0., 1.]])
    self.pr2_B_goal = self.goal * self.goal_B_gripper
    # Collision-aware IK for the gripper goal pose.
    self.sol = self.manip.FindIKSolution(np.array(self.pr2_B_goal), op.IkFilterOptions.CheckEnvCollisions)
    if self.sol is None:
        self.pub_feedback("Failed to find a good arm configuration for reaching.")
        return None
    rospy.loginfo("[%s] Got an IK solution: %s" % (rospy.get_name(), self.sol))
    self.pub_feedback("Found a good arm configuration for reaching.")
    self.pub_feedback("Looking for path for arm to goal.")
    traj = None
    try:
        #self.res = self.manipprob.MoveToHandPosition(matrices=[np.array(self.pr2_B_goal)],seedik=10)
        # call motion planner with goal joint angles
        self.traj = self.manipprob.MoveManipulator(goal=self.sol, outputtrajobj=True)
        self.pub_feedback("Found a path to reach to the goal.")
    except:
        self.traj = None
        self.pub_feedback("Could not find a path to reach to the goal")
        return None
    # Convert the OpenRAVE trajectory waypoints (first 7 DOF = arm joints)
    # into a ROS JointTrajectory message.
    tmp_traj = tmp_vel = tmp_acc = []  # np.zeros([self.traj.GetNumWaypoints(),7])
    trajectory = JointTrajectory()
    for i in xrange(self.traj.GetNumWaypoints()):
        point = JointTrajectoryPoint()
        temp = []
        for j in xrange(7):
            temp.append(float(self.traj.GetWaypoint(i)[j]))
        point.positions = temp
        #point.positions.append(temp)
        #point.accelerations.append([0.,0.,0.,0.,0.,0.,0.])
        #point.velocities.append([0.,0.,0.,0.,0.,0.,0.])
        trajectory.points.append(point)
        #tmp_traj.append(temp)
        #tmp_traj.append(list(self.traj.GetWaypoint(i)))
        #tmp_vel.append([0.,0.,0.,0.,0.,0.,0.])
        #tmp_acc.append([0.,0.,0.,0.,0.,0.,0.])
        #print 'tmp_traj is: \n', tmp_traj
        #for j in xrange(7):
        #tmp_traj[i,j] = float(self.traj.GetWaypoint(i)[j])
        #trajectory = JointTrajectory()
        #point = JointTrajectoryPoint()
        #point.positions.append(tmp_traj)
        #point.velocities.append(tmp_vel)
        #point.accelerations.append(tmp_acc)
        #point.velocities=[[0.,0.,0.,0.,0.,0.,0.]]
        #point.accelerations=[[0.,0.,0.,0.,0.,0.,0.]]
    trajectory.joint_names = ['l_upper_arm_roll_joint', 'l_shoulder_pan_joint', 'l_shoulder_lift_joint',
                              'l_forearm_roll_joint', 'l_elbow_flex_joint', 'l_wrist_flex_joint',
                              'l_wrist_roll_joint']
    #trajectory.points.append(point)
    #self.mpc_weights_pub.publish(self.weights)
    self.moving = True
    self.goal_traj_pub.publish(trajectory)
    self.pub_feedback("Reaching to location")
def get_raw_data(self): rospack = rospkg.RosPack() pkg_path = rospack.get_path('hrl_base_selection') #tool_init_pos = np.array([0.357766509056, 0.593838989735, 0.936517715454]) #tool_init_rot = np.array([-0.23529052448, -0.502738550591, 3.00133908425]) #head_init_pos = np.array([-0.142103180289, 0.457304298878, 1.13128328323]) #head_init_rot = np.array([0.304673484999, -0.304466132626, 0.579344748594]) #world_B_headinit = createBMatrix(head_init_pos,tft.quaternion_from_euler(head_init_rot[0],head_init_rot[1],head_init_rot[2],'rxyz')) #world_B_toolinit = createBMatrix(tool_init_pos,tft.quaternion_from_euler(tool_init_rot[0],tool_init_rot[1],tool_init_rot[2],'rxyz')) self.file_length = 0 world_B_headc_reference_raw_data = np.array([ map(float, line.strip().split()) for line in open(''.join( [pkg_path, '/data/', self.subject, '_head.log'])) ]) world_B_tool_raw_data = np.array([ map(float, line.strip().split()) for line in open(''.join( [pkg_path, '/data/', self.subject, '_tool.log'])) ]) #dist = [] #for num,line in enumerate(world_B_headc_reference_raw_data): #dist.append(np.linalg.norm(world_B_headc_reference_raw_data[num,1:4] - world_B_tool_raw_data[num,1:4])) #print 'raw distance: \n',np.min(np.array(dist)) # max_length is the length of the shorter of the two files. They sometimes differ in length by a few points. max_length = np.min([ len(world_B_headc_reference_raw_data), len(world_B_tool_raw_data) ]) # If data_finish is 'end' then it does from whatever is the start data to the end. self.length is the number of data points we are looking at. if self.data_finish == 'end': self.data_finish = max_length if self.data_finish > max_length: self.data_finish = max_length an = -m.pi / 2 tool_correction = np.matrix([ [m.cos(an), 0., m.sin(an), 0.], #.45 #.438 [0., 1., 0., 0.], #0.34 #.42 [-m.sin(an), 0., m.cos(an), 0.], [0., 0., 0., 1.] 
]) self.length = np.min( np.array([max_length, self.data_finish - self.data_start])) world_B_headc_reference_data = np.zeros([self.length, 4, 4]) world_B_tool_data = np.zeros([self.length, 4, 4]) for num, line in enumerate( world_B_headc_reference_raw_data[self.data_start:self. data_finish]): world_B_headc_reference_data[num] = np.array( createBMatrix([line[1], line[2], line[3]], tft.quaternion_from_euler( line[4], line[5], line[6], 'rxyz'))) for num, line in enumerate( world_B_tool_raw_data[self.data_start:self.data_finish]): world_B_tool_data[num] = np.array( createBMatrix([line[1], line[2], line[3]], tft.quaternion_from_euler( line[4], line[5], line[6], 'rxyz'))) # I don't think data was collected on the tool tip, so I don't know what this data is. It has way more data than the others' #raw_tool_tip_data = np.array([map(float,line.strip().split()) for line in open(''.join([pkg_path,'/data/sub1_shaver_self_1_tool_tip.log']))]) #tool_tip_data = np.zeros([len(raw_tool_tip_data),4,4]) # We set the reference options here. self.reference_options = [] self.reference_options.append('head') self.reference = [ ] # Head references are associated with a value in self.reference of 0. 
#for num,line in enumerate(raw_tool_tip_data): # tool_tip_data[num] = np.array(createBMatrix([line[2],line[3],line[4]],tft.quaternion_from_euler#(line[5],line[6],line[7],'rxyz'))) self.distance = [] self.raw_goal_data = [] #np.zeros([self.length,4,4]) self.max_start_distance = .4 #0.2 i = 0 while self.raw_goal_data == []: #temp = np.array(np.matrix(world_B_headc_reference_data[i]).I*np.matrix(world_B_tool_data[i])*tool_correction) temp = np.array( np.matrix(world_B_headc_reference_data[i]).I * np.matrix(world_B_tool_data[i])) if np.linalg.norm(temp[0:3, 3]) < self.max_start_distance: self.raw_goal_data.append(temp) self.reference.append(0) else: i += 1 print 'The first ', i, ' data points in the file was noise' for num in xrange(1, self.length): temp = np.array( np.matrix(world_B_headc_reference_data[num]).I * np.matrix(world_B_tool_data[num])) pos2, ori2 = Bmat_to_pos_quat(temp) pos1, ori1 = Bmat_to_pos_quat( self.raw_goal_data[len(self.raw_goal_data) - 1]) if np.linalg.norm(pos1 - pos2) < self.max_distance: self.raw_goal_data.append(temp) self.reference.append(0) self.file_length += 1 self.distance.append(np.linalg.norm(temp[0:3, 3])) self.raw_goal_data = np.array(self.raw_goal_data) print 'Minimum distance between center of head and goal location from raw data = ', np.min( np.array(self.distance)) return self.raw_goal_data
def reset_goals(self):
    """Populate self.goals with hard-coded, task-specific goal poses.

    Each task branch defines one (or, for 'bathing', several) reference frames
    and a list of goal poses recorded in the lift-link frame as
    [x, y, z, qx, qy, qz, qw]. Every goal is converted into the reference
    frame via liftlink_B_reference.I * liftlink_B_goal.

    Side effects: sets self.goals (np.array of 4x4 matrices),
    self.num (per-goal weight, all ones), self.reference (index into
    self.reference_options for each goal) and self.reference_options.

    Returns:
        (self.goals, self.num, self.reference, self.reference_options)
    """
    self.goals = []
    if self.task == 'shaving':
        # Reference frame: the head, measured in the lift-link frame.
        liftlink_B_reference = createBMatrix([1.249848, -0.013344, 0.1121597],
                                             [0.044735, -0.010481, 0.998626, -0.025188])
        liftlink_B_goal = [[1.107086, -0.019988, 0.014680, 0.011758, 0.014403, 0.031744, 0.999323],
                           [1.089931, -0.023529, 0.115044, 0.008146, 0.125716, 0.032856, 0.991489],
                           [1.123504, -0.124174, 0.060517, 0.065528, -0.078776, 0.322874, 0.940879],
                           [1.192543, -0.178014, 0.093005, -0.032482, 0.012642, 0.569130, 0.821509],
                           [1.194537, -0.180350, 0.024144, 0.055151, -0.113447, 0.567382, 0.813736],
                           [1.103003, 0.066879, 0.047237, 0.023224, -0.083593, -0.247144, 0.965087],
                           [1.180539, 0.155222, 0.061160, -0.048171, -0.076155, -0.513218, 0.853515],
                           [1.181696, 0.153536, 0.118200, 0.022272, 0.045203, -0.551630, 0.832565]]
        for i in xrange(len(liftlink_B_goal)):
            self.goals.append(liftlink_B_reference.I*createBMatrix(liftlink_B_goal[i][0:3], liftlink_B_goal[i][3:]))  # all in reference to head
        self.num = np.ones([len(self.goals), 1])
        self.reference_options = ['head']
        self.reference = np.zeros([len(self.goals), 1])
    elif self.task == 'face_wiping':
        # Subset of the shaving goals; goal 2 is then mirrored from goal 3 below.
        liftlink_B_reference = createBMatrix([1.249848, -0.013344, 0.1121597],
                                             [0.044735, -0.010481, 0.998626, -0.025188])
        liftlink_B_goal = [[1.107086, -0.019988, 0.014680, 0.011758, 0.014403, 0.031744, 0.999323],
                           [1.089931, -0.023529, 0.115044, 0.008146, 0.125716, 0.032856, 0.991489],
                           [1.123504, -0.124174, 0.060517, 0.065528, -0.078776, 0.322874, 0.940879],
                           #[1.192543, -0.178014, 0.093005, -0.032482, 0.012642, 0.569130, 0.821509]]
                           #[1.194537, -0.180350, 0.024144, 0.055151, -0.113447, 0.567382, 0.813736],
                           [1.103003, 0.066879, 0.047237, 0.023224, -0.083593, -0.247144, 0.965087]]
                           #[1.180539, 0.155222, 0.061160, -0.048171, -0.076155, -0.513218, 0.853515]]
                           #[1.181696, 0.153536, 0.118200, 0.022272, 0.045203, -0.551630, 0.832565]]
        for i in xrange(len(liftlink_B_goal)):
            self.goals.append(liftlink_B_reference.I*createBMatrix(liftlink_B_goal[i][0:3], liftlink_B_goal[i][3:]))  # all in reference to head
        # Mirror goal 3's position about the y axis into goal 2.
        # NOTE(review): left_side aliases self.goals[2], so these assignments
        # mutate the stored matrix in place; the final re-assignment is a no-op.
        left_side = self.goals[2]
        right_side = self.goals[3]
        #left_side[0:3,0:3] = -1*right_side[0:3,0:3]
        left_side[0,3] = right_side[0,3]
        left_side[1,3] = -right_side[1,3]
        left_side[2,3] = right_side[2,3]
        self.goals[2] = left_side
        self.num = np.ones([len(self.goals), 1])
        self.reference_options = ['head']
        self.reference = np.zeros([len(self.goals), 1])
    elif self.task == 'feeding':
        liftlink_B_reference = createBMatrix([0.902000, 0.000000, 1.232000],
                                             [0.000000, 0.000000, 0.000000, 1.000000])
        liftlink_B_goal = [[1.070794, -0.000, 1.183998, -0.018872, 0.033197, 0.999248, 0.006737],
                           [1.070794, 0.02, 1.183998, -0.018872, 0.033197, 0.999248, 0.006737],
                           [1.070794, -0.02, 1.183998, -0.018872, 0.033197, 0.999248, 0.006737],
                           [1.154571, -0.000, 1.175490, -0.018872, 0.033197, 0.999248, 0.006737],
                           [1.058091, -0.00, 1.213801, -0.251047, 0.033853, 0.967382, -0.001178],
                           [1.226054, -0.00, 1.120987, 0.005207, 0.032937, 0.999380, -0.011313],
                           [1.250737, -0.00, 1.062824, -0.011666, 0.032741, 0.999325, -0.011866]]
        for i in xrange(len(liftlink_B_goal)):
            self.goals.append(liftlink_B_reference.I*createBMatrix(liftlink_B_goal[i][0:3], liftlink_B_goal[i][3:]))  # all in reference to head
        self.num = np.ones([len(self.goals), 1])
        self.reference_options = ['head']
        self.reference = np.zeros([len(self.goals), 1])
    elif self.task == 'feeding_quick':
        # Reduced feeding goal set for faster evaluation.
        liftlink_B_reference = createBMatrix([0.902000, 0.000000, 1.232000],
                                             [0.000000, 0.000000, 0.000000, 1.000000])
        liftlink_B_goal = [[1.070794, -0.000, 1.183998, -0.018872, 0.033197, 0.999248, 0.006737],
                           [1.154571, -0.000, 1.175490, -0.018872, 0.033197, 0.999248, 0.006737],
                           # [1.058091, -0.00, 1.213801, -0.251047, 0.033853, 0.967382, -0.001178],
                           [1.226054, -0.00, 1.120987, 0.005207, 0.032937, 0.999380, -0.011313]]
        for i in xrange(len(liftlink_B_goal)):
            self.goals.append(liftlink_B_reference.I*createBMatrix(liftlink_B_goal[i][0:3], liftlink_B_goal[i][3:]))  # all in reference to head
        self.num = np.ones([len(self.goals), 1])
        self.reference_options = ['head']
        self.reference = np.zeros([len(self.goals), 1])
    elif self.task == 'brushing':
        liftlink_B_reference = createBMatrix([0.902000, 0.000000, 1.232000],
                                             [0.000000, 0.000000, 0.000000, 1.000000])
        liftlink_B_goal = [[1.000937, 0.162396, 1.348265, -0.167223, 0.150695, -0.676151, 0.701532],
                           [0.928537, 0.128237, 1.374088, -0.021005, 0.029874, -0.692838, 0.720168],
                           [0.813246, 0.123432, 1.352483, 0.120200, -0.156845, -0.685395, 0.700847],
                           [0.771687, 0.118168, 1.272472, 0.325030, -0.375306, -0.606457, 0.621056],
                           [0.946550, 0.169899, 1.250764, -0.272857, -0.265660, -0.659053, 0.648554],
                           [0.853001, 0.171546, 1.251115, -0.272857, -0.265660, -0.659053, 0.648554],
                           [0.948000, -0.045943, 1.375369, 0.229507, 0.227993, -0.675373, 0.662735],
                           [0.861745, -0.042059, 1.372639, 0.280951, 0.174134, -0.691221, 0.642617]]
        for i in xrange(len(liftlink_B_goal)):
            self.goals.append(liftlink_B_reference.I*createBMatrix(liftlink_B_goal[i][0:3], liftlink_B_goal[i][3:]))  # all in reference to head
        self.num = np.ones([len(self.goals), 1])
        self.reference_options = ['head']
        self.reference = np.zeros([len(self.goals), 1])
    elif self.task == 'scratching_upper_arm_left':
        liftlink_B_reference = createBMatrix([0.521625, 0.175031, 0.596279],
                                             [0.707105, 0.006780, -0.006691, 0.707044])
        liftlink_B_goal = [[0.554345, 0.233102, 0.693507, -0.524865, 0.025934, 0.850781, -0.003895],
                           [0.661972, 0.231151, 0.699075, -0.630902, 0.025209, 0.775420, -0.007229]]
        for i in xrange(len(liftlink_B_goal)):
            self.goals.append(liftlink_B_reference.I*createBMatrix(liftlink_B_goal[i][0:3], liftlink_B_goal[i][3:]))  # all in reference to head
        self.num = np.ones([len(self.goals), 1])
        self.reference_options = ['upper_arm_left']
        self.reference = np.zeros([len(self.goals), 1])
    elif self.task == 'scratching_upper_arm_right':
        liftlink_B_reference = createBMatrix([0.521668, -0.174969, 0.596249],
                                             [0.706852, 0.020076, -0.019986, 0.706794])
        liftlink_B_goal = [[0.595982, -0.218764, 0.702598, -0.582908, 0.005202, 0.812505, -0.005172],
                           [0.673797, -0.218444, 0.712133, -0.665426, 0.004626, 0.746428, -0.005692]]
        for i in xrange(len(liftlink_B_goal)):
            self.goals.append(liftlink_B_reference.I*createBMatrix(liftlink_B_goal[i][0:3], liftlink_B_goal[i][3:]))  # all in reference to head
        self.num = np.ones([len(self.goals), 1])
        self.reference_options = ['upper_arm_right']
        self.reference = np.zeros([len(self.goals), 1])
    elif self.task == 'scratching_forearm_left':
        liftlink_B_reference = createBMatrix([0.803358, 0.225067, 0.579914],
                                             [0.707054, -0.010898, 0.010985, 0.706991])
        liftlink_B_goal = [[0.884083, 0.234311, 0.708599, -0.599114, 0.005096, 0.800630, -0.005276],
                           [1.005796, 0.234676, 0.714125, -0.599114, 0.005096, 0.800630, -0.005276]]
        for i in xrange(len(liftlink_B_goal)):
            self.goals.append(liftlink_B_reference.I*createBMatrix(liftlink_B_goal[i][0:3], liftlink_B_goal[i][3:]))  # all in reference to head
        self.num = np.ones([len(self.goals), 1])
        self.reference_options = ['forearm_left']
        self.reference = np.zeros([len(self.goals), 1])
    elif self.task == 'scratching_forearm_right':
        liftlink_B_reference = createBMatrix([0.803222, -0.224933, 0.580274],
                                             [0.707054, -0.010898, 0.010985, 0.706991])
        liftlink_B_goal = [[0.905275, -0.223041, 0.714310, -0.599114, 0.005096, 0.800630, -0.005276],
                           [1.000633, -0.223224, 0.686273, -0.599114, 0.005096, 0.800630, -0.005276]]
        for i in xrange(len(liftlink_B_goal)):
            self.goals.append(liftlink_B_reference.I*createBMatrix(liftlink_B_goal[i][0:3], liftlink_B_goal[i][3:]))  # all in reference to head
        self.num = np.ones([len(self.goals), 1])
        self.reference_options = ['forearm_right']
        self.reference = np.zeros([len(self.goals), 1])
    elif self.task == 'scratching_thigh_left':
        liftlink_B_reference = createBMatrix([0.977372, 0.086085, 0.624297],
                                             [0.702011, 0.084996, -0.084900, 0.701960])
        liftlink_B_goal = [[1.139935, 0.087965, 0.748606, -0.599114, 0.005096, 0.800630, -0.005276],
                           [1.257200, 0.088435, 0.762122, -0.599114, 0.005096, 0.800630, -0.005276]]
        for i in xrange(len(liftlink_B_goal)):
            self.goals.append(liftlink_B_reference.I*createBMatrix(liftlink_B_goal[i][0:3], liftlink_B_goal[i][3:]))  # all in reference to head
        self.num = np.ones([len(self.goals), 1])
        self.reference_options = ['thigh_left']
        self.reference = np.zeros([len(self.goals), 1])
    elif self.task == 'scratching_thigh_right':
        liftlink_B_reference = createBMatrix([0.977394, -0.085915, 0.624282],
                                             [0.702011, 0.084996, -0.084900, 0.701960])
        liftlink_B_goal = [[1.257598, -0.081394, 0.764582, -0.599114, 0.005096, 0.800630, -0.005276],
                           [1.097844, -0.081661, 0.772003, -0.599114, 0.005096, 0.800630, -0.005276]]
        for i in xrange(len(liftlink_B_goal)):
            self.goals.append(liftlink_B_reference.I*createBMatrix(liftlink_B_goal[i][0:3], liftlink_B_goal[i][3:]))  # all in reference to head
        self.num = np.ones([len(self.goals), 1])
        self.reference_options = ['thigh_right']
        self.reference = np.zeros([len(self.goals), 1])
    elif self.task == 'scratching_knee_left':
        liftlink_B_reference = createBMatrix([1.403718, 0.086138, 0.632848],
                                             [0.027523, 0.706540, 0.706598, -0.027614])
        liftlink_B_goal = [[1.452417, 0.090990, 0.702744, -0.694316, 0.002271, 0.719607, 0.009260],
                           [1.452027, 0.144753, 0.667666, -0.651384, 0.240372, 0.679096, -0.238220],
                           [1.451370, 0.056361, 0.689148, -0.675084, -0.162301, 0.696923, 0.179497]]
        for i in xrange(len(liftlink_B_goal)):
            self.goals.append(liftlink_B_reference.I*createBMatrix(liftlink_B_goal[i][0:3], liftlink_B_goal[i][3:]))  # all in reference to knee
        self.num = np.ones([len(self.goals), 1])
        self.reference_options = ['knee_left']
        self.reference = np.zeros([len(self.goals), 1])
    elif self.task == 'scratching_knee_right':
        liftlink_B_reference = createBMatrix([1.403740, -0.085862, 0.632833],
                                             [0.027523, 0.706540, 0.706598, -0.027614])
        liftlink_B_goal = [[1.406576, -0.041661, 0.714539, -0.686141, 0.106256, 0.712874, -0.098644],
                           [1.404770, -0.129258, 0.703603, -0.671525, -0.176449, 0.692998, 0.194099],
                           [1.463372, -0.089422, 0.700517, -0.694316, 0.002271, 0.719607, 0.009260]]
        for i in xrange(len(liftlink_B_goal)):
            self.goals.append(liftlink_B_reference.I*createBMatrix(liftlink_B_goal[i][0:3], liftlink_B_goal[i][3:]))  # all in reference to knee
        self.num = np.ones([len(self.goals), 1])
        self.reference_options = ['knee_right']
        self.reference = np.zeros([len(self.goals), 1])
    elif self.task == 'scratching_chest':
        liftlink_B_reference = createBMatrix([0.424870, 0.000019, 0.589686],
                                             [0.706732, -0.023949, 0.024036, 0.706667])
        liftlink_B_goal = [[0.606949, -0.012159, 0.723371, -0.599114, 0.005096, 0.800630, -0.005276],
                           [0.660066, -0.011944, 0.729623, -0.599114, 0.005096, 0.800630, -0.005276]]
        for i in xrange(len(liftlink_B_goal)):
            self.goals.append(liftlink_B_reference.I*createBMatrix(liftlink_B_goal[i][0:3], liftlink_B_goal[i][3:]))  # all in reference to head
        self.num = np.ones([len(self.goals), 1])
        self.reference_options = ['chest']
        self.reference = np.zeros([len(self.goals), 1])
    elif self.task == 'bathing':
        # Bathing aggregates the goal pairs of the scratching tasks; each pair of
        # consecutive goals shares one reference frame (hence the i/2 indexing,
        # which relies on Python 2 integer division).
        liftlink_B_reference = []
        liftlink_B_reference.append(createBMatrix([0.521625, 0.175031, 0.596279], [0.707105, 0.006780, -0.006691, 0.707044]))
        liftlink_B_reference.append(createBMatrix([0.521668, -0.174969, 0.596249], [0.706852, 0.020076, -0.019986, 0.706794]))
        liftlink_B_reference.append(createBMatrix([0.803358, 0.225067, 0.579914], [0.707054, -0.010898, 0.010985, 0.706991]))
        liftlink_B_reference.append(createBMatrix([0.803222, -0.224933, 0.580274], [0.707054, -0.010898, 0.010985, 0.706991]))
        liftlink_B_reference.append(createBMatrix([0.977372, 0.086085, 0.624297], [0.702011, 0.084996, -0.084900, 0.701960]))
        liftlink_B_reference.append(createBMatrix([0.977394, -0.085915, 0.624282], [0.702011, 0.084996, -0.084900, 0.701960]))
        liftlink_B_reference.append(createBMatrix([0.424870, 0.000019, 0.589686], [0.706732, -0.023949, 0.024036, 0.706667]))
        liftlink_B_goal = [[0.554345, 0.233102, 0.693507, -0.524865, 0.025934, 0.850781, -0.003895],
                           [0.661972, 0.231151, 0.699075, -0.630902, 0.025209, 0.775420, -0.007229],
                           [0.595982, -0.218764, 0.702598, -0.582908, 0.005202, 0.812505, -0.005172],
                           [0.673797, -0.218444, 0.712133, -0.665426, 0.004626, 0.746428, -0.005692],
                           [0.884083, 0.234311, 0.708599, -0.599114, 0.005096, 0.800630, -0.005276],
                           [1.005796, 0.234676, 0.714125, -0.599114, 0.005096, 0.800630, -0.005276],
                           [0.905275, -0.223041, 0.714310, -0.599114, 0.005096, 0.800630, -0.005276],
                           [1.000633, -0.223224, 0.686273, -0.599114, 0.005096, 0.800630, -0.005276],
                           [1.139935, 0.087965, 0.748606, -0.599114, 0.005096, 0.800630, -0.005276],
                           [1.257200, 0.088435, 0.762122, -0.599114, 0.005096, 0.800630, -0.005276],
                           [1.257598, -0.081394, 0.764582, -0.599114, 0.005096, 0.800630, -0.005276],
                           [1.097844, -0.081661, 0.772003, -0.599114, 0.005096, 0.800630, -0.005276],
                           [0.606949, -0.012159, 0.723371, -0.599114, 0.005096, 0.800630, -0.005276],
                           [0.660066, -0.011944, 0.729623, -0.599114, 0.005096, 0.800630, -0.005276]]
        for i in xrange(len(liftlink_B_goal)):
            self.goals.append(liftlink_B_reference[i/2].I*createBMatrix(liftlink_B_goal[i][0:3], liftlink_B_goal[i][3:]))  # all in reference to head
        self.num = np.ones([len(self.goals), 1])
        self.reference_options = ['upper_arm_left', 'upper_arm_right', 'forearm_left', 'forearm_right',
                                  'thigh_left', 'thigh_right', 'chest']
        self.reference = np.array([[0], [0], [1], [1], [2], [2], [3], [3], [4], [4], [5], [5], [6], [6]])
    else:
        # Unknown task: self.goals stays empty; callers get empty arrays back.
        print 'THE TASK I GOT WAS BOGUS. SOMETHING PROBABLY WRONG WITH INPUTS TO DATA READER TASK'
    self.goals = np.array(self.goals)
    #print self.goals
    return self.goals, self.num, self.reference, self.reference_options
def handle_select_base(self, req): model = req.model self.model = model task = req.task self.task = task if model == 'autobed': self.autobed_sub = rospy.Subscriber('/abdout0', FloatArrayBare, self.bed_state_cb) print 'I have received inputs!' start_time = time.time() #print 'My given inputs were: \n' #print 'head is: \n', req.head # The head location is received as a posestamped message and is converted and used as the head location. # pos_temp = [req.head.pose.position.x, # req.head.pose.position.y, # req.head.pose.position.z] # ori_temp = [req.head.pose.orientation.x, # req.head.pose.orientation.y, # req.head.pose.orientation.z, # req.head.pose.orientation.w] # self.pr2_B_head = createBMatrix(pos_temp, ori_temp) # #print 'head from input: \n', head if self.task == 'shaving_test': self.mode = 'test' elif self.mode == 'normal': try: now = rospy.Time.now() self.listener.waitForTransform('/base_link', '/head_frame', now, rospy.Duration(15)) (trans, rot) = self.listener.lookupTransform('/base_link', '/head_frame', now) self.pr2_B_head = createBMatrix(trans, rot) if model == 'chair': now = rospy.Time.now() self.listener.waitForTransform('/base_link', '/ar_marker', now, rospy.Duration(15)) (trans, rot) = self.listener.lookupTransform('/base_link', '/ar_marker', now) self.pr2_B_ar = createBMatrix(trans, rot) elif model == 'autobed': now = rospy.Time.now() self.listener.waitForTransform('/base_link', '/ar_marker', now, rospy.Duration(15)) (trans, rot) = self.listener.lookupTransform('/base_link', '/ar_marker', now) self.pr2_B_ar = createBMatrix(trans, rot) ar_trans_B = np.eye(4) # -.445 if right side of body. .445 if left side. # This is the translational transform from bed origin to the ar tag tf. 
# ar_trans_B[0:3,3] = np.array([0.625, -.445, .275+(self.bed_state_z-9)/100]) ar_trans_B[0:3,3] = np.array([-.04, 0., .74]) ar_rotz_B = np.eye(4) # If left side of body should be np.array([[-1,0],[0,-1]]) # If right side of body should be np.array([[1,0],[0,1]]) # ar_rotz_B [0:2,0:2] = np.array([[-1, 0],[0, -1]]) # ar_rotz_B ar_rotx_B = np.eye(4) # ar_rotx_B[1:3,1:3] = np.array([[0,1],[-1,0]]) self.model_B_ar = np.matrix(ar_trans_B)*np.matrix(ar_rotz_B)*np.matrix(ar_rotx_B) # now = rospy.Time.now() # self.listener.waitForTransform('/ar_marker', '/bed_frame', now, rospy.Duration(3)) # (trans, rot) = self.listener.lookupTransform('/ar_marker', '/bed_frame', now) # self.ar_B_model = createBMatrix(trans, rot) if np.linalg.norm(trans) > 4: rospy.loginfo('AR tag is too far away. Use the \'Testing\' button to move PR2 to 1 meter from AR ' 'tag. Or just move it closer via other means. Alternatively, the PR2 may have lost ' 'sight of the AR tag or it is having silly issues recognizing it. ') return None except Exception as e: rospy.loginfo("TF Exception. Could not get the AR_tag location, bed location, or " "head location:\r\n%s" % e) return None elif self.mode == 'demo': try: now = rospy.Time.now() self.listener.waitForTransform('/base_link', '/head_frame', now, rospy.Duration(15)) (trans, rot) = self.listener.lookupTransform('/base_link', '/head_frame', now) self.pr2_B_head = createBMatrix(trans, rot) if model == 'chair': now = rospy.Time.now() self.listener.waitForTransform('/base_link', '/ar_marker', now, rospy.Duration(15)) (trans, rot) = self.listener.lookupTransform('/base_link', '/ar_marker', now) self.pr2_B_ar = createBMatrix(trans, rot) elif model == 'autobed': now = rospy.Time.now() self.listener.waitForTransform('/base_link', '/reference', now, rospy.Duration(15)) (trans, rot) = self.listener.lookupTransform('/base_link', '/reference', now) self.pr2_B_ar = createBMatrix(trans, rot) ar_trans_B_model = np.eye(4) # -.445 if right side of body. .445 if left side. 
# This is the translational transform from reference markers to the bed origin. # ar_trans_B[0:3,3] = np.array([0.625, -.445, .275+(self.bed_state_z-9)/100]) # ar_trans_B_model[0:3,3] = np.array([.06, 0., -.74]) # ar_rotz_B = np.eye(4) # If left side of body should be np.array([[-1,0],[0,-1]]) # If right side of body should be np.array([[1,0],[0,1]]) # ar_rotz_B [0:2,0:2] = np.array([[-1, 0],[0, -1]]) # ar_rotz_B # ar_rotx_B = np.eye(4) # ar_rotx_B[1:3,1:3] = np.array([[0,1],[-1,0]]) # self.model_B_ar = np.matrix(ar_trans_B_model).I # *np.matrix(ar_rotz_B)*np.matrix(ar_rotx_B) self.model_B_ar = np.matrix(np.eye(4)) # now = rospy.Time.now() # self.listener.waitForTransform('/ar_marker', '/bed_frame', now, rospy.Duration(3)) # (trans, rot) = self.listener.lookupTransform('/ar_marker', '/bed_frame', now) # self.ar_B_model = createBMatrix(trans, rot) if np.linalg.norm(trans) > 4: rospy.loginfo('AR tag is too far away. Use the \'Testing\' button to move PR2 to 1 meter from AR ' 'tag. Or just move it closer via other means. Alternatively, the PR2 may have lost ' 'sight of the AR tag or it is having silly issues recognizing it. ') return None except Exception as e: rospy.loginfo("TF Exception. 
Could not get the AR_tag location, bed location, or " "head location:\r\n%s" % e) return None elif self.mode == 'sim': self.head_pose_sub = rospy.Subscriber('/haptic_mpc/head_pose', PoseStamped, self.head_pose_cb) if self.model == 'autobed': self.bed_pose_sub = rospy.Subscriber('/haptic_mpc/bed_pose', PoseStamped, self.bed_pose_cb) self.model_B_ar = np.eye(4) print 'The homogeneous transfrom from PR2 base link to head: \n', self.pr2_B_head z_origin = np.array([0, 0, 1]) x_head = np.array([self.pr2_B_head[0, 0], self.pr2_B_head[1, 0], self.pr2_B_head[2, 0]]) y_head_project = np.cross(z_origin, x_head) y_head_project = y_head_project/np.linalg.norm(y_head_project) x_head_project = np.cross(y_head_project, z_origin) x_head_project = x_head_project/np.linalg.norm(x_head_project) self.pr2_B_head_project = np.eye(4) for i in xrange(3): self.pr2_B_head_project[i, 0] = x_head_project[i] self.pr2_B_head_project[i, 1] = y_head_project[i] self.pr2_B_head_project[i, 3] = self.pr2_B_head[i, 3] self.pr2_B_headfloor = copy.copy(np.matrix(self.pr2_B_head_project)) self.pr2_B_headfloor[2, 3] = 0. print 'The homogeneous transform from PR2 base link to the head location projected onto the ground plane: \n', \ self.pr2_B_headfloor print 'I will now determine a good base location.' headx = 0 heady = 0 # Sets the wheelchair location based on the location of the head using a few homogeneous transforms. if model == 'chair': # self.pr2_B_headfloor = copy.copy(np.matrix(self.pr2_B_head_project)) # self.pr2_B_headfloor[2, 3] = 0. 
# Transform from the coordinate frame of the wc model in the back right bottom corner, to the head location originsubject_B_headfloor = np.matrix([[m.cos(0.), -m.sin(0.), 0., .442603], #.45 #.438 [m.sin(0.), m.cos(0.), 0., .384275], #0.34 #.42 [ 0., 0., 1., 0.], [ 0., 0., 0., 1.]]) self.origin_B_pr2 = copy.copy(self.pr2_B_headfloor.I) # self.origin_B_pr2 = self.headfloor_B_head * self.pr2_B_head.I # reference_floor_B_pr2 = self.pr2_B_head * self.headfloor_B_head.I * originsubject_B_headfloor.I # Regular bed is now deprecated. To use need to fix to be similar to chair. if model =='bed': an = -m.pi/2 self.headfloor_B_head = np.matrix([[ m.cos(an), 0., m.sin(an), 0.], #.45 #.438 [ 0., 1., 0., 0.], #0.34 #.42 [ -m.sin(an), 0., m.cos(an), 1.1546], [ 0., 0., 0., 1.]]) an2 = 0 originsubject_B_headfloor = np.matrix([[ m.cos(an2), 0., m.sin(an2), .2954], #.45 #.438 [ 0., 1., 0., 0.], #0.34 #.42 [-m.sin(an2), 0., m.cos(an2), 0.], [ 0., 0., 0., 1.]]) self.origin_B_pr2 = self.headfloor_B_head * self.pr2_B_head.I # subject_location = self.pr2_B_head * self.headfloor_B_head.I * originsubject_B_headfloor.I if model == 'autobed': # an = -m.pi/2 # self.headfloor_B_head = np.matrix([[ m.cos(an), 0., m.sin(an), 0.], #.45 #.438 # [ 0., 1., 0., 0.], #0.34 #.42 # [ -m.sin(an), 0., m.cos(an), 1.1546], # [ 0., 0., 0., 1.]]) # an2 = 0 # self.origin_B_model = np.matrix([[ 1., 0., 0., 0.0], # [ 0., 1., 0., 0.0], # [ 0., 0., 1., self.bed_state_z], # [ 0., 0., 0., 1.0]]) self.model_B_pr2 = self.model_B_ar * self.pr2_B_ar.I self.origin_B_pr2 = copy.copy(self.model_B_pr2) model_B_head = self.model_B_pr2 * self.pr2_B_headfloor ## This next bit selects what entry in the dictionary of scores to use based on the location of the head # with respect to the bed model. Currently it just selects the dictionary entry with the closest relative # head location. Ultimately it should use a gaussian to get scores from the dictionary based on the actual # head location. 
if model_B_head[0, 3] > -.025 and model_B_head[0, 3] < .025: headx = 0. elif model_B_head[0, 3] >= .025 and model_B_head[0, 3] < .75: headx = 0. elif model_B_head[0, 3] <= -.025 and model_B_head[0, 3] > -.75: headx = 0. elif model_B_head[0, 3] >= .075: headx = 0. elif model_B_head[0, 3] <= -.075: headx = 0. if model_B_head[1, 3] > -.025 and model_B_head[1, 3] < .025: heady = 0 elif model_B_head[1, 3] >= .025 and model_B_head[1, 3] < .075: heady = .05 elif model_B_head[1, 3] > -.075 and model_B_head[1, 3] <= -.025: heady = -.05 elif model_B_head[1, 3] >= .075: heady = .1 elif model_B_head[1, 3] <= -.075: heady = -.1 # subject_location = self.pr2_B_head * self.headfloor_B_head.I * originsubject_B_headfloor.I # subject_location = self.pr2_B_head * self.headfloor_B_head.I * originsubject_B_headfloor.I subject_location = self.origin_B_pr2.I #self.subject.SetTransform(np.array(subject_location)) # Get score data and convert to goal locations print 'Time to receive head location and start things off: %fs' % (time.time()-start_time) start_time = time.time() if self.model == 'chair': all_scores = self.chair_scores elif self.model == 'autobed': all_scores = self.autobed_scores # all_scores = self.load_task(task, model) scores = all_scores[headx, heady] if scores == None: print 'Failed to load precomputed reachability data. That is a problem. Abort!' return None, None # for score in scores: # if score[0][4]!=0: # print score[0] # print 'Time to load pickle: %fs' % (time.time()-start_time) start_time = time.time() ## Set the weights for the different scores. alpha = 0.0001 # Weight on base's closeness to goal beta = 1. # Weight on number of reachable goals gamma = 1. 
# Weight on manipulability of arm at each reachable goal zeta = .0007 # Weight on distance to move to get to that goal location # pr2_B_headfloor = self.pr2_B_head*self.headfloor_B_head.I # headfloor_B_pr2 = pr2_B_headfloor.I pr2_loc = np.array([self.origin_B_pr2[0, 3], self.origin_B_pr2[1, 3]]) length = len(scores) temp_scores = np.zeros([length, 1]) temp_locations = scores[:, 0] print 'Original number of scores: ', length print 'Time to start data processing: %fs' % (time.time()-start_time) for j in xrange(length): #print 'score: \n',score dist_score = 0 for i in xrange(len(scores[j, 0][0])): #headfloor_B_goal = createBMatrix([scores[j,0][i],scores[j,1][i],0],tr.matrix_to_quaternion(tr.rotZ(scores[j,2][i]))) #print 'from createbmatrix: \n', headfloor_B_goal #headfloor_B_goal = np.matrix([[ m.cos(scores[j,2][i]), -m.sin(scores[j,2][i]), 0., scores[j,0][i]], # [ m.sin(scores[j,2][i]), m.cos(scores[j,2][i]), 0., scores[j,1][i]], # [ 0., 0., 1., 0.], # [ 0., 0., 0., 1.]]) #dist = np.linalg.norm(pr2_loc-scores[j,0:2][i]) #headfloor_B_goal = np.diag([1.,1.,1.,1.]) #headfloor_B_goal[0,3] = scores[j,0][i] #headfloor_B_goal[1,3] = scores[j,1][i] #th = scores[j,2][i] #ct=m.cos(th) #st = m.sin(th) #headfloor_B_goal[0,0] = ct #headfloor_B_goal[0,1] = -st #headfloor_B_goal[1,0] = st #headfloor_B_goal[1,1] = ct #headfloor_B_goal[0:2,0:2] = np.array([[m.cos(scores[j,2][i]),-m.sin(scores[j,2][i])],[m.sin(scores[j,2][i]),m.cos(scores[j,2][i])]]) #print 'from manual: \n', headfloor_B_goal #dist_score += np.linalg.norm((pr2_B_headfloor*headfloor_B_goal)[0:2,3]) # print pr2_loc # print scores[0, 0] # print 'i ', i current_x = np.array([self.origin_B_pr2[0, 0], self.origin_B_pr2[1, 0], self.origin_B_pr2[2, 0]]) des_x = np.array([m.cos(scores[j, 0][2][i]), m.sin(scores[j, 0][2][i]), 0]) angle_change = m.acos(np.dot(current_x, des_x)/(np.linalg.norm(current_x)*np.linalg.norm(des_x))) dist_score += np.linalg.norm([pr2_loc[0]-scores[j, 0][0][i], pr2_loc[1]-scores[j, 0][1][i]]) # if 
np.round(scores[j, 0][2][i],3) == 2.356: # print 'using 3/4 pi' # angle_change = 0 dist_score += angle_change # I should eventually use the following line instead, because it normalizes to 0-1 range. This way the # weights can be compared better. # dist_score += angle_change/(2*m.pi) # This adds to dist score a cost for moving the robot in the z axis. Commented out currently. # dist_score += np.max([t for t in ((np.linalg.norm(self.robot_z - scores[j, 0][3][i])) # for i in xrange(len(scores[j, 0][0])) # ) # ]) thisScore = -alpha*scores[j, 1][0]+beta*scores[j, 1][1]+gamma*scores[j, 1][2]-zeta*dist_score if thisScore < 0: thisScore = 0 #print 'This score: ',thisScore # temp_scores[j,0] = 0 temp_scores[j, 0] = copy.copy(thisScore) #if thisScore>0: # temp_scores[j] = copy.copy(thisScore) #temp_locations[num] = np.vstack([temp_locations,score[0]]) #else: # temp_scores = np.delete(temp_scores,j,0) # temp_locations = np.delete(temp_locations,j,0) #print 'Time to run through data: %fs'%(time.time()-start_time) #temp_locations = np.delete(temp_scores,0,0) # temp_scores = np.hstack([list(temp_locations), temp_scores]) out_score = [] for i in xrange(length): out_score.append([temp_locations[i], temp_scores[i]]) out_score = np.array(out_score) #reachable.append(score[1]) #manipulable.append(score[2]) print 'Final version of scores is: \n', out_score[0] self.score_sheet = np.array(sorted(out_score, key=lambda t:t[1], reverse=True)) self.score_length = len(self.score_sheet) print 'Best score and configuration is: \n', self.score_sheet[0] print 'Number of scores in score sheet: ', self.score_length print 'I have finished preparing the data for the task!' if self.score_sheet[0, 1] == 0: print 'There are no base locations with a score greater than 0. There are no good base locations!!' 
return None, None print 'Time to adjust base scores: %fs' % (time.time()-start_time) # Published the wheelchair location to create a marker in rviz for visualization to compare where the service believes the wheelchair is to # where the person is (seen via kinect). pos_goal, ori_goal = Bmat_to_pos_quat(subject_location) self.publish_subject_marker(pos_goal, ori_goal) visualize_plot = False if visualize_plot: self.plot_final_score_sheet() return self.handle_returning_base_goals()
def get_raw_data(self):
    """Load the subject's raw motion-capture logs and build goal transforms.

    Reads '<subject>_head.log' and '<subject>_tool.log' from the package's
    data directory, converts each row (timestamp, x, y, z, roll, pitch, yaw)
    into a 4x4 homogeneous transform, windows the data to
    [self.data_start, self.data_finish], and keeps the tool pose expressed in
    the head frame for samples that pass two noise filters:
      1. the first kept sample must be within self.max_start_distance of the head;
      2. each later sample must be within self.max_distance of the last kept one.

    Returns:
        np.array of 4x4 transforms (head frame -> tool), also stored in
        self.raw_goal_data.
    """
    rospack = rospkg.RosPack()
    pkg_path = rospack.get_path('hrl_base_selection')
    #tool_init_pos = np.array([0.357766509056, 0.593838989735, 0.936517715454])
    #tool_init_rot = np.array([-0.23529052448, -0.502738550591, 3.00133908425])
    #head_init_pos = np.array([-0.142103180289, 0.457304298878, 1.13128328323])
    #head_init_rot = np.array([0.304673484999, -0.304466132626, 0.579344748594])
    #world_B_headinit = createBMatrix(head_init_pos,tft.quaternion_from_euler(head_init_rot[0],head_init_rot[1],head_init_rot[2],'rxyz'))
    #world_B_toolinit = createBMatrix(tool_init_pos,tft.quaternion_from_euler(tool_init_rot[0],tool_init_rot[1],tool_init_rot[2],'rxyz'))
    self.file_length = 0
    # Each log row is whitespace-separated floats; column 0 is the timestamp,
    # columns 1-3 are position, columns 4-6 are rxyz Euler angles.
    world_B_headc_reference_raw_data = np.array([map(float,line.strip().split()) for line in open(''.join([pkg_path,'/data/',self.subject,'_head.log']))])
    world_B_tool_raw_data = np.array([map(float,line.strip().split()) for line in open(''.join([pkg_path,'/data/',self.subject,'_tool.log']))])
    #dist = []
    #for num,line in enumerate(world_B_headc_reference_raw_data):
        #dist.append(np.linalg.norm(world_B_headc_reference_raw_data[num,1:4] - world_B_tool_raw_data[num,1:4]))
    #print 'raw distance: \n',np.min(np.array(dist))
    # max_length is the length of the shorter of the two files. They sometimes differ in length by a few points.
    max_length = np.min([len(world_B_headc_reference_raw_data),len(world_B_tool_raw_data)])
    # If data_finish is 'end' then it does from whatever is the start data to the end. self.length is the number of data points we are looking at.
    if self.data_finish == 'end':
        self.data_finish = max_length
    if self.data_finish > max_length:
        self.data_finish = max_length
    # -90 degree rotation about the Y axis, available to re-orient the tool frame.
    # NOTE(review): tool_correction is only used in the commented-out line
    # further below; the active code applies no correction.
    an = -m.pi/2
    tool_correction = np.matrix([[ m.cos(an), 0., m.sin(an), 0.], #.45 #.438
                                 [ 0., 1., 0., 0.], #0.34 #.42
                                 [-m.sin(an), 0., m.cos(an), 0.],
                                 [ 0., 0., 0., 1.]])
    self.length = np.min(np.array([max_length,self.data_finish-self.data_start]))
    # Pre-allocate (length, 4, 4) stacks of homogeneous transforms.
    world_B_headc_reference_data = np.zeros([self.length,4,4])
    world_B_tool_data = np.zeros([self.length,4,4])
    for num,line in enumerate(world_B_headc_reference_raw_data[self.data_start:self.data_finish]):
        world_B_headc_reference_data[num] = np.array(createBMatrix([line[1],line[2],line[3]],tft.quaternion_from_euler(line[4],line[5],line[6],'rxyz')))
    for num,line in enumerate(world_B_tool_raw_data[self.data_start:self.data_finish]):
        world_B_tool_data[num] = np.array(createBMatrix([line[1],line[2],line[3]],tft.quaternion_from_euler(line[4],line[5],line[6],'rxyz')))
    # I don't think data was collected on the tool tip, so I don't know what this data is. It has way more data than the others'
    #raw_tool_tip_data = np.array([map(float,line.strip().split()) for line in open(''.join([pkg_path,'/data/sub1_shaver_self_1_tool_tip.log']))])
    #tool_tip_data = np.zeros([len(raw_tool_tip_data),4,4])
    # We set the reference options here.
    self.reference_options = []
    self.reference_options.append('head')
    self.reference = []
    # Head references are associated with a value in self.reference of 0.
    #for num,line in enumerate(raw_tool_tip_data):
    #    tool_tip_data[num] = np.array(createBMatrix([line[2],line[3],line[4]],tft.quaternion_from_euler#(line[5],line[6],line[7],'rxyz')))
    self.distance = []
    self.raw_goal_data = []  #np.zeros([self.length,4,4])
    self.max_start_distance = .4  #0.2
    # Advance past leading samples where the tool is implausibly far from the
    # head ("noise"); the first sample within max_start_distance seeds the
    # goal list.
    # NOTE(review): if every sample fails the distance check, i walks past the
    # end of the arrays and this raises IndexError — confirm that is intended.
    i = 0
    while self.raw_goal_data == []:
        #temp = np.array(np.matrix(world_B_headc_reference_data[i]).I*np.matrix(world_B_tool_data[i])*tool_correction)
        temp = np.array(np.matrix(world_B_headc_reference_data[i]).I*np.matrix(world_B_tool_data[i]))
        if np.linalg.norm(temp[0:3,3])<self.max_start_distance:
            self.raw_goal_data.append(temp)
            self.reference.append(0)
        else:
            i+=1
    print 'The first ', i, ' data points in the file was noise'
    # Keep each subsequent sample only if it lies close to the last kept one.
    # NOTE(review): self.max_distance is not defined in this method —
    # presumably set in __init__; verify before relying on it here.
    # NOTE(review): self.file_length counts only samples appended in this
    # loop, not the seed sample from the while-loop above.
    for num in xrange(1,self.length):
        temp = np.array(np.matrix(world_B_headc_reference_data[num]).I*np.matrix(world_B_tool_data[num]))
        pos2, ori2 =Bmat_to_pos_quat(temp)
        pos1, ori1 =Bmat_to_pos_quat(self.raw_goal_data[len(self.raw_goal_data)-1])
        if np.linalg.norm(pos1-pos2)<self.max_distance:
            self.raw_goal_data.append(temp)
            self.reference.append(0)
            self.file_length+=1
            self.distance.append(np.linalg.norm(temp[0:3,3]))
    self.raw_goal_data = np.array(self.raw_goal_data)
    print 'Minimum distance between center of head and goal location from raw data = ', np.min(np.array(self.distance))
    return self.raw_goal_data
def main(): rospack = rospkg.RosPack() pkg_path = rospack.get_path('hrl_base_selection') #bag_path = '/home/ari/svn/robot1_data/2011_ijrr_phri/log_data/bag/' bag_path = '/home/ari/git/gt-ros-pkg.hrl-assistive/hrl_base_selection/data/yogurt/bags/' bag_name_list = [ '2014-06-17-16-13-46', #'2014-06-17-16-17-20', This is a bad bag '2014-06-17-16-19-02', '2014-06-17-16-21-57', '2014-06-17-16-24-03', '2014-06-17-16-26-12', '2014-06-17-16-27-46', '2014-06-17-16-29-18', '2014-06-17-16-31-24', '2014-06-17-16-33-13', '2014-06-18-12-37-23', '2014-06-18-12-39-43', '2014-06-18-12-41-42', '2014-06-18-12-43-45', '2014-06-18-12-45-26', '2014-06-18-12-47-22', '2014-06-18-12-49-04', '2014-06-18-12-50-52', '2014-06-18-12-52-39', '2014-06-18-12-54-26' ] # bag_name = '2014-06-17-16-17-20' # bag_name = '2014-06-17-16-19-02' # bag_name = # bag_name = # bag_name = # bag_name = # bag_name = # bag_name = # bag_name = # bag_name = # bag_name = for num, bag_name in enumerate(bag_name_list): print 'Going to open bag: ', bag_name, '.bag' print 'Starting on a tool log file! 
\n' bag = rosbag.Bag(''.join([bag_path, bag_name, '.bag']), 'r') tool = open( ''.join([pkg_path, '/data/yogurt/logs/', bag_name, '_tool.log']), 'w') for topic, msg, t in bag.read_messages(): if topic == "/spoon/pose": tx = copy.copy(msg.transform.translation.x) ty = copy.copy(msg.transform.translation.y) tz = copy.copy(msg.transform.translation.z) rx = copy.copy(msg.transform.rotation.x) ry = copy.copy(msg.transform.rotation.y) rz = copy.copy(msg.transform.rotation.z) rw = copy.copy(msg.transform.rotation.w) world_B_spoon = createBMatrix( [tx, ty, tz], [rx, ry, rz, rw]) #*adl2_B_pr2goal eul = tft.euler_from_matrix(world_B_spoon, 'rxyz') #print world_B_shaver,'\n' #print eul,'\n' #print 'time: \n',t tool.write(''.join([ str(t), ' %f %f %f %f %f %f \n' % (world_B_spoon[0, 3], world_B_spoon[1, 3], world_B_spoon[2, 3], eul[0], eul[1], eul[2]) ])) #tool.write(''.join([world_B_shaver[0,3],' ',world_B_shaver[1,3],' ',world_B_shaver[2,3],' ',eul[0],' ',eul[1],' ',eul[2],'\n'])) bag.close() tool.close() print 'Starting on a head log file! 
\n' bag = rosbag.Bag(''.join([bag_path, bag_name, '.bag']), 'r') head = open( ''.join([pkg_path, '/data/yogurt/logs/', bag_name, '_head.log']), 'w') for topic, msg, t in bag.read_messages(): if topic == "/head/pose": tx = copy.copy(msg.transform.translation.x) ty = copy.copy(msg.transform.translation.y) tz = copy.copy(msg.transform.translation.z) rx = copy.copy(msg.transform.rotation.x) ry = copy.copy(msg.transform.rotation.y) rz = copy.copy(msg.transform.rotation.z) rw = copy.copy(msg.transform.rotation.w) world_B_head = createBMatrix( [tx, ty, tz], [rx, ry, rz, rw]) #*adl2_B_pr2goal eul = copy.copy(tft.euler_from_matrix(world_B_head, 'rxyz')) head.write(''.join([ str(t), ' %f %f %f %f %f %f \n' % (world_B_head[0, 3], world_B_head[1, 3], world_B_head[2, 3], eul[0], eul[1], eul[2]) ])) #head.write(''.join([t,' ',world_B_head[0,3],' ',world_B_head[1,3],' ',world_B_head[2,3],' ',eul[0],' ',eul[1],' ',eul[2],' ',eul[3],'\n'])) bag.close() head.close() print "Saved file! Finished bag #", num + 1 print 'Done with all bag files!'