Example #1
    def predictSinglePose(self, armName, curPose, prevPose, dt=1.0):
        if dt <= 0:
            print 'Error: Illegal timestamp'
            return None

        # Convert pose to numpy matrix
        curTrans = tft.translation_matrix([curPose.position.x, curPose.position.y, curPose.position.z])
        curRot = tft.quaternion_matrix([curPose.orientation.x, curPose.orientation.y ,curPose.orientation.z, curPose.orientation.w])
        curPoseMatrix = np.dot(curTrans, curRot)

        prevTrans = tft.translation_matrix([prevPose.position.x, prevPose.position.y, prevPose.position.z])
        prevRot = tft.quaternion_matrix([prevPose.orientation.x, prevPose.orientation.y ,prevPose.orientation.z, prevPose.orientation.w])
        prevPoseMatrix = np.dot(prevTrans, prevRot)
        
        deltaPoseMatrix = np.linalg.inv(prevPoseMatrix).dot(curPoseMatrix)
        deltaAngles = euler_from_matrix(deltaPoseMatrix[:3,:3])
        deltaPos = deltaPoseMatrix[:3,3]

        #deltaAngles = np.array([a / dt for a in deltaAngles])
        deltaPos = deltaPos / dt
        #deltaPoseMatrix = euler_matrix(deltaAngles[0], deltaAngles[1], deltaAngles[2])
        deltaPoseMatrix[:3,3] = deltaPos

        gpList, sysList = self.predict(armName, [curPoseMatrix], [deltaPoseMatrix])
        return tfx.pose(gpList[0], frame=curPose.frame, stamp=curPose.stamp), tfx.pose(sysList[0], frame=curPose.frame, stamp=curPose.stamp)
Example #2
    def publish_state(self, t):
        state_msg = TransformStamped()
        t_ned_imu = tft.translation_matrix(self.model.get_position())
        R_ned_imu = tft.quaternion_matrix(self.model.get_orientation())
        T_ned_imu = tft.concatenate_matrices(t_ned_imu, R_ned_imu)
        #rospy.loginfo("T_ned_imu = \n" + str(T_ned_imu))
        if self.T_imu_vicon is None:
            # grab the static transform from imu to vicon frame from param server:
            frames = rospy.get_param("frames", None)
            ypr = frames['vicon_to_imu']['rotation']
            q_vicon_imu = tft.quaternion_from_euler(*[radians(x) for x in ypr], axes='rzyx') # xyzw
            R_vicon_imu = tft.quaternion_matrix(q_vicon_imu)
            t_vicon_imu = tft.translation_matrix(frames['vicon_to_imu']['translation'])
#            rospy.loginfo(str(R_vicon_imu))
#            rospy.loginfo(str(t_vicon_imu))
            self.T_vicon_imu = tft.concatenate_matrices(t_vicon_imu, R_vicon_imu)
            self.T_imu_vicon = tft.inverse_matrix(self.T_vicon_imu)
            self.T_enu_ned = tft.euler_matrix(radians(90), 0, radians(180), 'rzyx')
            rospy.loginfo(str(self.T_enu_ned))
            rospy.loginfo(str(self.T_imu_vicon))
            #rospy.loginfo(str(T_vicon_imu))
        # we have /ned -> /imu, need to output /enu -> /vicon:
        T_enu_vicon = tft.concatenate_matrices(self.T_enu_ned, T_ned_imu, self.T_imu_vicon )
        state_msg.header.stamp  = t
        state_msg.header.frame_id = "/enu"
        state_msg.child_frame_id = "/simflyer1/flyer_vicon"
        stt = state_msg.transform.translation
        stt.x, stt.y, stt.z = T_enu_vicon[:3,3]
        stro = state_msg.transform.rotation
        stro.x, stro.y, stro.z, stro.w = tft.quaternion_from_matrix(T_enu_vicon)
        
        self.pub_state.publish(state_msg)
Example #3
	def object_pose_callback(self,msg):
		print 'object pose cb'
		(tf_trans,tf_rot) = self.pr2.tf_listener.lookupTransform(msg.header.frame_id,self.base_frame,rospy.Time(0))
		msg_tf = numpy.mat(numpy.dot(tft.translation_matrix(tf_trans),tft.quaternion_matrix(tf_rot)))
		
		q = numpy.array([msg.pose.orientation.x,msg.pose.orientation.y,msg.pose.orientation.z,msg.pose.orientation.w])
		p = numpy.array([msg.pose.position.x,msg.pose.position.y,msg.pose.position.z])
		rot = numpy.mat(tft.quaternion_matrix(q))
		trans = numpy.mat(tft.translation_matrix(p))
		self.object_pose = msg_tf * trans * rot
Example #4
    def tag_callback(self, msg_tag):
        # Listen for the transform of the tag in the world
        avg = PoseAverage.PoseAverage()
        for tag in msg_tag.detections:
            try:
                Tt_w = self.tfbuf.lookup_transform(self.world_frame, "tag_{id}".format(id=tag.id), rospy.Time(), rospy.Duration(1))
                Mtbase_w=self.transform_to_matrix(Tt_w.transform)
                Mt_tbase = tr.concatenate_matrices(tr.translation_matrix((0,0,0.17)), tr.euler_matrix(0,0,np.pi))
                Mt_w = tr.concatenate_matrices(Mtbase_w,Mt_tbase)
                Mt_r=self.pose_to_matrix(tag.pose)
                Mr_t=np.linalg.inv(Mt_r)
                Mr_w=np.dot(Mt_w,Mr_t)
                Tr_w = self.matrix_to_transform(Mr_w)
                avg.add_pose(Tr_w)
                self.publish_sign_highlight(tag.id)
            except(tf2_ros.LookupException, tf2_ros.ConnectivityException, tf2_ros.ExtrapolationException) as ex:
                rospy.logwarn("Error looking up transform for tag_%s", tag.id)
                rospy.logwarn(ex.message)

        Tr_w =  avg.get_average() # Average of the opinions

        # Broadcast the robot transform
        if Tr_w is not None:
            # Set the z translation, and x and y rotations to 0
            Tr_w.translation.z = 0
            rot = Tr_w.rotation
            rotz=tr.euler_from_quaternion((rot.x, rot.y, rot.z, rot.w))[2]
            (rot.x, rot.y, rot.z, rot.w) = tr.quaternion_from_euler(0, 0, rotz)
            T = TransformStamped()
            T.transform = Tr_w
            T.header.frame_id = self.world_frame
            T.header.stamp = rospy.Time.now()
            T.child_frame_id = self.duckiebot_frame
            self.pub_tf.publish(TFMessage([T]))
            self.lifetimer = rospy.Time.now()
Example #5
	def localCb(self, data):
		self.localPose.setPoseStamped(data)
		
		if(not (self.enuTickPose.isNone() or self.offset is None)):
			t = self.localPose.getTranslation()
			q = self.enuTickPose.getQuaternion()
			
			q = quaternion_matrix(q)
			t = translation_matrix(t)
		
			self.localEnuPose.setMatrix(numpy.dot(q,t))
			
			t = self.localEnuPose.getTranslation()
			
			t[0] -= self.offset[0]
			t[1] -= self.offset[1]
			t[2] -= self.offset[2]
			
			q = self.localEnuPose.getQuaternion()
			
			self.localEnuPose.setTranslationQuaternion(t, q)
			
			p = PointStamped()
			p.point.x = self.offset[0]
			p.point.y = self.offset[1]
			p.point.z = self.offset[2]
			
			p.header = Header(0, rospy.rostime.get_rostime(), "world")
			
			self.offsetPub.publish(p)
			
			self.localEnuPub.publish(self.localEnuPose.getPoseStamped())
Example #6
	def get_Tmatrix(self,disableinvert=False):
		Tmatrix = dot(tft.translation_matrix([self.x_offset,self.y_offset,self.z_offset]),array(tb_to_tf(self.yaw, self.pitch, self.roll, rad=True)))
		
		if not disableinvert and self.invert:
			Tmatrix = tft.inverse_matrix(Tmatrix)
		
		return Tmatrix
Example #7
 def _makePreGraspPose(self, boxMat, axis):
     if axis==0: #x axis
         alpha = 0
         gamma = 0
     else: #y axis
         alpha = pi/2
         gamma = -pi/2
     ik_request = PositionIKRequest()
     ik_request.ik_link_name = self.toolLinkName
     with self._joint_state_lock:
         ik_request.ik_seed_state.joint_state = copy.deepcopy(self._joint_states)
     ik_request.pose_stamped = PoseStamped()
     ik_request.pose_stamped.header.stamp = rospy.Time.now()
     ik_request.pose_stamped.header.frame_id = self.frameID
     beta = pi/2
     while beta < 3*pi/2:
         rotationMatrix = transformations.euler_matrix(alpha, beta, gamma, 'rzyz')
         distance = self.preGraspDistance + self.gripperFingerLength
         preGraspMat = transformations.translation_matrix([0,0,-distance])
         fullMat = transformations.concatenate_matrices(boxMat, rotationMatrix, preGraspMat)
         p = transformations.translation_from_matrix(fullMat)
         q = transformations.quaternion_from_matrix(fullMat)
         # set the IK target pose from the candidate pre-grasp matrix
         # (Point and Quaternion from geometry_msgs.msg are assumed imported)
         ik_request.pose_stamped.pose.position = Point(*p)
         ik_request.pose_stamped.pose.orientation = Quaternion(*q)
         print "trying {} radians".format(beta)
         try:
             ik_resp = self._ik_server(ik_request, Constraints(), rospy.Duration(5.0))
         except rospy.service.ServiceException:
             beta += 0.1
         else:
             if ik_resp.error_code.val > 0:
                 return beta
             else:
                 #print ik_resp.error_code.val
                 beta += 0.01
     rospy.logerr('No way to pick this up. All IK solutions failed.')
     return 7 * pi / 6
Example #8
  def __init__(self, side='r', tf_listener = None, tf_broadcaster = None, ctrl_mng=None, moveit_cmdr=None):

    if side == "right" or side == "r":
      self.arm = "right_arm"
      self.side = "r"
    elif side == 'left' or side == 'l':
      self.arm = "left_arm"
      self.side = "l"
    else:
      rospy.logerr("Side " + side + " is not supported")
      raise ValueError("Side " + side + " is not supported")

    self.tf_listener = tf.TransformListener() if not tf_listener else tf_listener
    #init a TF transform broadcaster for the object frame
    self.tf_broadcaster = tf.TransformBroadcaster() if not tf_broadcaster else tf_broadcaster
    self.ctrl_mng = ControllerManagerClient() if not ctrl_mng else ctrl_mng

    #the pretouch sensor frame
    self.pretouch_sensor_frame_id = '/r_gripper_l_finger_tip_pretouch_frame'

    #Gripper client and open the gripper
    rospy.loginfo('open the ' + side + '_gripper')
    self.gripper = Gripper(self.arm)
    self.gripper.open()    

    #controller_manager_client
    self.ctrl_mng = ControllerManagerClient() if not ctrl_mng else ctrl_mng
    self.ctrl_mng.switch_controllers([self.side+'_arm_controller'], [self.side+'_cart'])
    self.count = 0
    #PoseStamped command publisher for jt controller
    self.pose_pub = rospy.Publisher('/r_cart/command_pose', PoseStamped)
    self.eef_frame = self.side + "_wrist_roll_link"
    self.reference_frame = "base_link"

    # MoveIt! Commander
    self.moveit_cmd = MoveGroupCommander(self.arm) if not moveit_cmdr else moveit_cmdr
    self.moveit_cmd.set_pose_reference_frame(self.reference_frame)
    self.move_arm_to_side() # Move the arm to the side
    self.step_size = 0.0002
    self.move_step_mat = np.matrix(translation_matrix(np.array([self.step_size, 0.0, 0.0])))

    #pickup action server
    #self.pickup_client = actionlib.SimpleActionClient('/object_manipulator/object_manipulator_pickup', PickupAction)
    #rospy.loginfo("waiting for PickupAction Server")
    #self.pickup_client.wait_for_server()
    #rospy.loginfo("PickupAction Server Connected")

    #service client to /add_point service
    self.add_point_client = rospy.ServiceProxy('add_point', AddPoint)

    #draw_functions object for drawing stuff in rviz
    self.draw_functions = draw_functions.DrawFunctions('pretouch_executor_markers')

    #the transform from wrist_frame to the center of the fingers
    self.gripper_to_r_finger = np.eye(4)
    self.gripper_to_r_finger[0][3] = 0.1615 #x-translation from wrist_frame (0.1615)
    self.gripper_to_r_finger[1][3] = -0.0400 #y-translation from wrist_frame (0.0425)    
    self.gripper_to_l_finger = np.eye(4)
    self.gripper_to_l_finger[0][3] = 0.1615 #x-translation from wrist_frame (0.1615)
    self.gripper_to_l_finger[1][3] = 0.0400 #y-translation from wrist_frame (-0.0425)
Example #9
 def transformation_matrix(rot_angle, rot_dir, trans):
     # print "rot_angle: ", rot_angle
     # print "rot_dir: ", rot_dir
     # print "trans: ", trans
     return tfs.concatenate_matrices(
         tfs.rotation_matrix(rot_angle, rot_dir),
         tfs.translation_matrix(trans))
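A minimal usage sketch (numpy is assumed; the helper is the one above): because the rotation left-multiplies the translation here, a point is translated first and then rotated, so the child origin lands at (0, 1, 0).

    import numpy as np

    T = transformation_matrix(np.pi / 2, (0, 0, 1), (1, 0, 0))
    print(np.round(T[:3, 3], 6))  # [ 0.  1.  0.]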
Example #10
  def adjustPretouchPose(self, pose):    
    #transform the pose from fingertip_frame to X_wrist_roll_link
    r_gripper_l_fingertip_to_r_wrist_roll_link_mat = np.matrix(translation_matrix(np.array([-0.18, -0.049, 0.0])))
  
    current_pose = pose
    pretouch_mat = pose_to_mat(current_pose)
    pretouch_wrist_roll_pose = mat_to_pose(pretouch_mat * r_gripper_l_fingertip_to_r_wrist_roll_link_mat)

    #TODO Query IK solution for that pretouch pose
    # while no IK solution, rotate w.r.t Z-axis toward the robot (positive rotation)
    
    #print 'ik solution for l_probe: ', self.getConstraintAwareIK(l_probe.pretouch_pose)
    #print 'ik solution for r_probe: ', self.getConstraintAwareIK(r_probe.pretouch_pose)

    #select one here...
    #(joint_position, have_solution) = self.getIK(pretouch_wrist_roll_pose)
    (joint_position, have_solution) = self.getConstraintAwareIK(pretouch_wrist_roll_pose)
    
    '''
    if not have_solution:
      #rotate the pose toward the robot
      #rot_mat = rotation_matrix(l_rot_z, np.array([0,0,1])) #0.175 radians = 10 degree
      print 'No IK solution found'
    '''
    return (pretouch_wrist_roll_pose, joint_position, have_solution)
Example #11
def matrix_from_StampedTransform(msg):
    T = msg.transform
    trans = [T.translation.x, T.translation.y, T.translation.z]
    quat = [T.rotation.x, T.rotation.y, T.rotation.z, T.rotation.w]

    return tfs.concatenate_matrices(
        tfs.translation_matrix(trans),
        tfs.quaternion_matrix(quat))
Example #12
 def getBaseTransform(self):
   now = rospy.Time()
   self.tf_listener.waitForTransform('/base_link','/odom_combined', now,
                                     rospy.Duration(10))
   pos, quat = self.tf_listener.lookupTransform('/odom_combined','/base_link', now)
   trans = transf.translation_matrix(pos)
   rot = transf.quaternion_matrix(quat)
   return np.dot(trans, rot)
Example #13
    def predictSinglePose(self, pose, arm_side):
        # Convert pose to numpy matrix
        p = tft.translation_matrix([pose.position.x,pose.position.y,pose.position.z])
        rot = tft.quaternion_matrix([pose.orientation.x,pose.orientation.y,pose.orientation.z,pose.orientation.w])
        input_pose = np.dot(p,rot)

        gpList, sysList = self.predict([input_pose], arm_side)
        return tfx.pose(gpList[0], frame=pose.frame, stamp=pose.stamp), tfx.pose(sysList[0], frame=pose.frame, stamp=pose.stamp)
Example #14
    def _makeGraspPath(self, preGraspGoal):
        '''Given a pregrasp MoveArmGoal message, generate a MoveArmGoal message to perform the final
        approach to the object to grasp it.'''
        
        graspGoal = MoveArmGoal()
        graspGoal.planner_service_name = self.plannerServiceName
        motion_plan_request = MotionPlanRequest()
        motion_plan_request.group_name = self.armGroupName
        motion_plan_request.num_planning_attempts = 25
        motion_plan_request.planner_id = ""
        motion_plan_request.allowed_planning_time = rospy.Duration(5,0)
        
        #Orientation constraint is the same as for pregrasp
        motion_plan_request.goal_constraints.orientation_constraints = copy.deepcopy(preGraspGoal.motion_plan_request.goal_constraints.orientation_constraints)
        #motion_plan_request.goal_constraints.orientation_constraints[0].orientation = Quaternion(0.656778, 0.261999, 0.648401, -0.282093) #stow orientation for debug
        graspGoal.motion_plan_request = motion_plan_request
        
        #Translate from pregrasp position to final position in a roughly straight line
        o = motion_plan_request.goal_constraints.orientation_constraints[0].orientation
        p = preGraspGoal.motion_plan_request.goal_constraints.position_constraints[0].position
        preGraspMat = transformations.quaternion_matrix([o.x,o.y,o.z,o.w])
        preGraspMat[:3, 3] = [p.x,p.y,p.z]
        distance = self.preGraspDistance * .5 + self.gripperFingerLength * .5
        graspTransMat = transformations.translation_matrix([0,0,distance])
        graspMat = transformations.concatenate_matrices(preGraspMat, graspTransMat)
        #print preGraspMat
        #print graspTransMat
        #print graspMat
        p = transformations.translation_from_matrix(graspMat)
       
        #Publish grasp transform for visualization
        self._tf_broadcaster.sendTransform(
                (p[0],p[1],p[2]),
                (o.x, o.y, o.z, o.w),
                motion_plan_request.goal_constraints.orientation_constraints[0].header.stamp,
                "grasp",
                motion_plan_request.goal_constraints.orientation_constraints[0].header.frame_id)
 
        pos_constraint = PositionConstraint()
        pos_constraint.header = motion_plan_request.goal_constraints.orientation_constraints[0].header
        pos_constraint.link_name = self.toolLinkName
        pos_constraint.position = Point(p[0],p[1],p[2])
        #pos_constraint.position = Point(-0.0644721, 0.609922, 0) #Stow position for debug
        pos_constraint.constraint_region_shape.type = Shape.SPHERE
        pos_constraint.constraint_region_shape.dimensions = [0.01]
        pos_constraint.weight = 1
        motion_plan_request.goal_constraints.position_constraints.append(pos_constraint)
        #TODO: Add path constraint to require a (roughly) cartesian move
        
        #Turn off collision operations between the gripper and all objects
        for collisionName in self.gripperCollisionNames:
            collisionOperation = CollisionOperation(collisionName, 
                                    CollisionOperation.COLLISION_SET_ALL,
                                    0.0,
                                    CollisionOperation.DISABLE)
            graspGoal.operations.collision_operations.append(collisionOperation)
        return graspGoal
Example #15
 def approach(self, approach_dist, step_size):
   self.ctrl_mng.switch_controllers([self.side+'_cart'], [self.side+'_arm_controller'])
   current_mat = self.current_mat
   move_step_mat = np.matrix(translation_matrix(np.array([step_size, 0.0, 0.0])))
   for i in range(int(approach_dist/step_size)):
     current_mat = current_mat * move_step_mat
     self.pose_pub.publish(stamp_pose(mat_to_pose(current_mat), self.reference_frame))
     rospy.sleep(0.1)    
   self.ctrl_mng.switch_controllers([self.side+'_arm_controller'], [self.side+'_cart'])
Example #16
def pose_to_matrix(pose_msg):
    ''' Convert a ros Pose message to a 4x4 homogeneous transform matrix '''
    rot = pose_msg.orientation
    q = np.array([rot.x, rot.y, rot.z, rot.w])
    pos = pose_msg.position
    t = np.array([pos.x, pos.y, pos.z])
    H_t = transformations.translation_matrix(t)
    H_r = transformations.quaternion_matrix(q)
    return np.matrix(np.dot(H_t, H_r))
Example #17
def transform_to_matrix(transform_msg):
    ''' Convert a ros Transform message to a 4x4 homogeneous transform matrix '''
    rot = transform_msg.rotation
    q = np.array([rot.x, rot.y, rot.z, rot.w])
    trans = transform_msg.translation
    t = np.array([trans.x, trans.y, trans.z])
    H_t = transformations.translation_matrix(t)
    H_r = transformations.quaternion_matrix(q)
    return np.matrix(np.dot(H_t, H_r))
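A quick sanity check for this helper (a sketch; geometry_msgs is assumed to be available): an identity quaternion leaves only the translation in the last column.

    import numpy as np
    from geometry_msgs.msg import Transform

    msg = Transform()
    msg.translation.x, msg.translation.y, msg.translation.z = 1.0, 2.0, 3.0
    msg.rotation.w = 1.0  # identity quaternion (x = y = z = 0)

    H = transform_to_matrix(msg)
    print(np.allclose(H[:3, 3].T, [1.0, 2.0, 3.0]))  # True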
Example #18
	def pose_callback(self,msg):
		#print 'got msg'
		frame_id = msg.header
		pose = msg.pose
		p = tft.translation_matrix([pose.position.x,pose.position.y,pose.position.z])
		rot = tft.quaternion_matrix([pose.orientation.x,pose.orientation.y,pose.orientation.z,pose.orientation.w])
		
		Tcam_to_cb = dot(p,rot)
		#print 'Tcam_to_cb',Tcam_to_cb
		
		#Tworld_to_cb = dot(tft.translation_matrix([self.x_offset,self.y_offset,self.z_offset]),tft.euler_matrix(0,0,pi/2))
		Tworld_to_cb = dot(tft.translation_matrix([self.x_offset,self.y_offset,self.z_offset]),array(tb_to_mat(self.yaw, self.pitch, self.roll)))
		#print 'Tworld_to_cb',Tworld_to_cb
		
		if self.invert_tf:
			Tworld_to_cb = tft.inverse_matrix(Tworld_to_cb)
		
		Tworld_to_cam = dot(Tworld_to_cb,tft.inverse_matrix(Tcam_to_cb))
		#print 'Tworld_to_cam',Tworld_to_cam
		
		Tworld_to_cam_p = Tworld_to_cam[0:3,3]
		Tworld_to_cam_q = tft.quaternion_from_matrix(Tworld_to_cam)
		
		pub_msg = PoseStamped()
		pub_msg.header.stamp = msg.header.stamp
		pub_msg.header.frame_id = '/world'
		pub_msg.pose.position = Point(*(Tworld_to_cam_p.tolist()))
		pub_msg.pose.orientation = Quaternion(*(Tworld_to_cam_q.tolist()))
		
		#print pub_msg
		self.pub.publish(pub_msg)
		
		pub_cb_msg = PoseStamped()
		pub_cb_msg.header.stamp = msg.header.stamp
		pub_cb_msg.header.frame_id = '/world'
		pub_cb_msg.pose.position = Point(*(Tworld_to_cb[0:3,3].tolist()))
		pub_cb_msg.pose.orientation = Quaternion(*(tft.quaternion_from_matrix(Tworld_to_cb).tolist()))
		
		self.pub_cb.publish(pub_cb_msg)
		
		self.transform = Tworld_to_cam
		self.transform_frame = msg.header.frame_id
		
		self.br.sendTransform(Tworld_to_cam_p,Tworld_to_cam_q,msg.header.stamp,self.transform_frame,'/world')
Example #19
    def calc_transformation(self, name, relative_to=None):
        calc_from_joint = False
        if relative_to:
            if relative_to in self.urdf.link_map:
                parent_link_name = relative_to
            elif relative_to in self.urdf.joint_map:
                parent_link_name = self.urdf.joint_map[relative_to].parent
                calc_from_joint = True
        else:
            parent_link_name = self.urdf.get_root()

        calc_to_joint = False
        if name in self.urdf.link_map:
            child_link_name = name
        elif name in self.urdf.joint_map:
            child_link_name = self.urdf.joint_map[name].child
            calc_to_joint = True

        chains = self.urdf.get_chain(parent_link_name, child_link_name,
                                     joints=True, links=True)
        if calc_from_joint:
            chains = chains[1:]
        if calc_to_joint:
            chains = chains[:-1]

        poses = []
        for name in chains:
            if name in self.urdf.joint_map:
                joint = self.urdf.joint_map[name]
                if joint.origin is not None:
                    poses.append(joint.origin)
            elif name in self.urdf.link_map:
                link = self.urdf.link_map[name]
                if link.visual is not None and link.visual.origin is not None:
                    poses.append(link.visual.origin)
        m = np.dot(T.translation_matrix((0,0,0)),
                   T.euler_matrix(0,0,0))
        for p in poses:
            n = np.dot(T.translation_matrix(tuple(p.xyz)),
                       T.euler_matrix(*tuple(p.rpy)))
            m = np.dot(m, n)
        t = T.translation_from_matrix(m)
        q = T.quaternion_from_matrix(m)
        return tuple(t), (q[3], q[0], q[1], q[2])
Example #20
 def lift(self, grasp, lift_dist, step_size):
   self.ctrl_mng.switch_controllers([self.side+'_cart'], [self.side+'_arm_controller'])
   current_mat = pose_to_mat(grasp.grasp_pose.pose)
   move_step_mat = np.matrix(translation_matrix(np.array([0.0, 0.0, -step_size])))
   for i in range(int(lift_dist/step_size)):
     #current_mat = current_mat * move_step_mat
     current_mat[2,3] += step_size
     self.pose_pub.publish(stamp_pose(mat_to_pose(current_mat), self.reference_frame))
     rospy.sleep(0.05)    
   self.ctrl_mng.switch_controllers([self.side+'_arm_controller'], [self.side+'_cart'])
Example #21
def get_trans_to_trans(trans1, rot1, trans2, rot2):
    quat_mat1 = transformations.quaternion_matrix(rot1)
    quat_mat2 = transformations.quaternion_matrix(rot2)
    trans_mat1 = transformations.translation_matrix(trans1)
    trans_mat2 = transformations.translation_matrix(trans2)
    mat1 = transformations.concatenate_matrices(trans_mat1, quat_mat1)
    mat2 = transformations.concatenate_matrices(trans_mat2, quat_mat2)
    rel_mat = numpy.linalg.inv(mat1).dot(mat2)
    rel_trans = transformations.translation_from_matrix(rel_mat)
    rel_rot = transformations.quaternion_from_matrix(rel_mat)
    rel_rot = [rel_rot[3], rel_rot[0], rel_rot[1], rel_rot[2]]
    print "rel_pose: [%f, %f, %f, %f, %f, %f, %f]" % (
        rel_trans[0],
        rel_trans[1],
        rel_trans[2],
        rel_rot[0],
        rel_rot[1],
        rel_rot[2],
        rel_rot[3],
    )
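A worked example (assuming tf.transformations' x,y,z,w quaternion order, which the w-first reordering of the output implies): with identity rotations the relative pose is just the second translation expressed from the first.

    get_trans_to_trans([0, 0, 0], [0, 0, 0, 1], [1, 2, 3], [0, 0, 0, 1])
    # rel_pose: [1.000000, 2.000000, 3.000000, 1.000000, 0.000000, 0.000000, 0.000000]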
Example #22
File: listener.py  Project: rll/graveyard
    def fromTranslationRotation(self, translation, rotation):
        """
        :param translation: translation expressed as a tuple (x,y,z)
        :param rotation: rotation quaternion expressed as a tuple (x,y,z,w)
        :return: a :class:`numpy.matrix` 4x4 representation of the transform
        :raises: any of the exceptions that :meth:`~tf.Transformer.lookupTransform` can raise
        
        Converts a transformation from :class:`tf.Transformer` into a representation as a 4x4 matrix.
        """

        return numpy.dot(transformations.translation_matrix(translation), transformations.quaternion_matrix(rotation))
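A short usage sketch (the frame names are illustrative): the (translation, rotation) pair returned by lookupTransform plugs straight in.

    listener = tf.TransformListener()
    (trans, rot) = listener.lookupTransform('/base_link', '/odom', rospy.Time(0))
    b_T_o = listener.fromTranslationRotation(trans, rot)  # 4x4 homogeneous matrix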
Example #23
 def update_pos(self, icr_x, icr_y, icr_z):
     x = icr_x * self.pos_increment
     y = icr_y * self.pos_increment
     z = icr_z * self.pos_increment
     if not self.invert_icrement:
         self.x_offset += x
         self.y_offset += y
         self.z_offset += z
     else:
         p = tft.inverse_matrix(tft.translation_matrix([x, y, z]))
         self.set_from_matrix(dot(self.get_Tmatrix(), p))
Example #24
    def _makeMove(self):
        '''Move forward a small distance'''
        print "Making move goal"
        graspGoal = MoveArmGoal()
        graspGoal.planner_service_name = self.plannerServiceName
        motion_plan_request = MotionPlanRequest()
        motion_plan_request.group_name = self.armGroupName
        motion_plan_request.num_planning_attempts = 5
        motion_plan_request.planner_id = ""
        motion_plan_request.allowed_planning_time = rospy.Duration(5,0)
        #Orientation constraint is the same as for previous
        #Create Orientation constraint object
        o_constraint = OrientationConstraint()
        o_constraint.header.frame_id = self.frameID
        o_constraint.header.stamp = rospy.Time.now()
        o_constraint.link_name = self.toolLinkName
        o_constraint.orientation = Quaternion(0.656788, 0.261971, 0.648416, -0.282059)
        o_constraint.absolute_roll_tolerance = 0.04
        o_constraint.absolute_pitch_tolerance = 0.04
        o_constraint.absolute_yaw_tolerance = 0.04
        o_constraint.weight = 1
        motion_plan_request.goal_constraints.orientation_constraints.append(o_constraint)
        
        #Translate from pregrasp position to final position in a roughly straight line
        o = o_constraint.orientation
        p = Point(-0.064433, 0.609915, 0)
        preGraspMat = transformations.quaternion_matrix([o.x,o.y,o.z,o.w])
        preGraspMat[:3, 3] = [p.x,p.y,p.z]
        distance = .3  # self.preGraspDistance + self.gripperFingerLength/2
        graspTransMat = transformations.translation_matrix([0,0,distance])
        graspMat = transformations.concatenate_matrices(preGraspMat, graspTransMat)
        #print preGraspMat
        #print graspTransMat
        #print graspMat
        p = transformations.translation_from_matrix(graspMat)
       
        #Publish grasp transform for visualization
        self._tf_broadcaster.sendTransform(
                (p[0],p[1],p[2]),
                (o.x, o.y, o.z, o.w),
                o_constraint.header.stamp,
                "grasp",
                o_constraint.header.frame_id)
 
        pos_constraint = PositionConstraint()
        pos_constraint.header = o_constraint.header
        pos_constraint.link_name = self.toolLinkName
        pos_constraint.position = Point(p[0],p[1],p[2])
        pos_constraint.constraint_region_shape.type = Shape.SPHERE
        pos_constraint.constraint_region_shape.dimensions = [0.01]  # [0.01, 0.01, 0.01]
        pos_constraint.weight = 1
        motion_plan_request.goal_constraints.position_constraints.append(pos_constraint)
        graspGoal.motion_plan_request = motion_plan_request
        return graspGoal
Example #25
    def run(self):
        r = rospy.Rate(50)
        while not rospy.is_shutdown():
            try:
                common_link = "/base_link"
                c_T_rgrip = tfu.transform(common_link, "/r_gripper_tool_frame", self.tflistener)
                c_T_lgrip = tfu.transform(common_link, "/l_gripper_tool_frame", self.tflistener)
                gripper_right_c = np.matrix(tr.translation_from_matrix(c_T_rgrip * tr.translation_matrix([0, 0, 0.0])))
                gripper_left_c = np.matrix(tr.translation_from_matrix(c_T_lgrip * tr.translation_matrix([0, 0, 0.0])))
                look_at_point_c = ((gripper_right_c + gripper_left_c) / 2.0).A1.tolist()

                g = PointHeadGoal()
                g.target.header.frame_id = "/base_link"
                g.target.point = Point(*look_at_point_c)
                g.min_duration = rospy.Duration(1.0)
                g.max_velocity = 10.0
                self.head_client.send_goal(g)
                self.head_client.wait_for_result(rospy.Duration(1.0))
                r.sleep()
            except tf.LookupException, e:
                print e
Example #26
 def pose_relative_trans(self, pose, x=0., y=0., z=0.):
     """Return a pose translated relative to a given pose."""
     ps = deepcopy(pose)
     M_trans = tft.translation_matrix([x,y,z])
     q_ps = [ps.pose.orientation.x, ps.pose.orientation.y, ps.pose.orientation.z, ps.pose.orientation.w]
     M_rot = tft.quaternion_matrix(q_ps)
     trans = np.dot(M_rot,M_trans)
     ps.pose.position.x += trans[0][-1]
     ps.pose.position.y += trans[1][-1]
     ps.pose.position.z += trans[2][-1]
     #print ps
     return ps
Example #27
    def __init__(self):
        """ 
              ^ L2                   R2
              |  |                   |
            50mm |                   |
              |  |                   |
              - L1                   R1
              |  |                   |
            250mm|                   |
              |  | wh             wh |
              V  | eel---Robot---eel |
                   <-----183mm----->  
                 <------250mm-------->  
            x
            ^
            |
        y<--O is at center of the "Robot" above, coaxis with the wheel

        Burger dimensions taken from
        https://emanual.robotis.com/assets/images/platform/turtlebot3/hardware_setup/turtlebot3_dimension1.png
        """
        # List of sensor
        self.sensors = dict()
        self.sensor_total = 2
        self.sensors['LG'] =NavSensor(tfs.translation_matrix([0.3, 0.1, 0.0]))
        self.sensors['RG'] =NavSensor(tfs.translation_matrix([0.3, -0.1, 0.0]))
        # Gen left sensor list
        self.gen_sensor_list('L', (0,0.1), (0.4, 0.1), 5)
        self.gen_sensor_list('L', (0,0.2), (0.4, 0.2), 5)
        # Gen right sensor list
        self.gen_sensor_list('R', (0,-0.1), (0.4, -0.1), 5)
        self.gen_sensor_list('R', (0,-0.2), (0.4, -0.2), 5)
        # Threshold
        self.APPROACHABLE_THRESHOLD = 0.4
        # Goal to run to when has obstacle

        # Publisher to visualize
        self.marker_pub = rospy.Publisher('tran0966/nav_sensors_array', MarkerArray)
Example #28
 def get_world_point_value_in_gmap(self, point):
     """
         point is of type point
         return the value of the gmap cell at the specified location in the real world
     """
     if self.raw_data is not None:
         point_tf = tfs.translation_matrix([point.x, point.y, point.z])
         point_gmap_pos = tfs.translation_from_matrix(np.dot(tfs.inverse_matrix(self.tf), point_tf))
         x = point_gmap_pos[0]
         y = point_gmap_pos[1]
         info = self.raw_data.info
         cell_index = int((x//info.resolution) * info.width + (y//info.resolution))
         return self.raw_data.data[cell_index]
Example #29
File: ARTIV.py  Project: yooona1214/ARTIV
def speed_meter(point):


    # Publish a sphere using a numpy transform matrix
    T = transformations.translation_matrix((point.x,point.y,point.z+0.5))
    scale = Vector3(0.5,0.5,0.5) # diameter
    color = [1,0,0] # list of RGB values (red)
    markers.publishSphere(T, color, scale, 5.0) # pose, color, scale, lifetime

    center = Point(point.x, point.y, 0.8)
    P = Pose(center,Quaternion(0,0,0,1))
    scale = Vector3(0.15,0.15,0.2)
    markers.publishText(P, '30km/h Speed Limit!', 'red', scale, 5.0) # pose, text, color, scale, lifetime
Example #30
    def convert_pose_inverse_transform(self, pose):
        """ This is a helper method to invert a transform (this is built into
			the tf C++ classes, but omitted from Python) """
        transform = t.concatenate_matrices(
            t.translation_matrix(
                [pose.position.x, pose.position.y, pose.position.z]),
            t.quaternion_matrix([
                pose.orientation.x, pose.orientation.y, pose.orientation.z,
                pose.orientation.w
            ]))
        inverse_transform_matrix = t.inverse_matrix(transform)
        return (t.translation_from_matrix(inverse_transform_matrix),
                t.quaternion_from_matrix(inverse_transform_matrix))
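A sanity-check sketch for the helper above (the `node` instance and pose values are illustrative): composing the original transform with the returned inverse yields the identity.

    import numpy as np
    from tf import transformations as t
    from geometry_msgs.msg import Pose

    pose = Pose()
    pose.position.x = 1.0
    pose.orientation.w = 1.0  # identity rotation

    (inv_trans, inv_rot) = node.convert_pose_inverse_transform(pose)  # `node` is hypothetical
    forward = t.concatenate_matrices(
        t.translation_matrix([1.0, 0.0, 0.0]),
        t.quaternion_matrix([0.0, 0.0, 0.0, 1.0]))
    inverse = t.concatenate_matrices(
        t.translation_matrix(inv_trans),
        t.quaternion_matrix(inv_rot))
    print(np.allclose(np.dot(forward, inverse), np.eye(4)))  # True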
Example #31
    def move_pose_relative(self, dpose, velocity):
        if velocity is None:
            self.moveg.set_max_velocity_scaling_factor(self.velocity)
        else:
            self.moveg.set_max_velocity_scaling_factor(velocity)

        pose = pose_to_list(self.moveg.get_current_pose().pose)

        t_xform = np.dot(tr.translation_matrix(pose[0:3]),
                         tr.quaternion_matrix(pose[3:]))
        s_xform = np.dot(tr.translation_matrix(dpose[0:3]),
                         tr.euler_matrix(*dpose[3:]))

        xform = np.dot(t_xform, s_xform)
        pose = tr.translation_from_matrix(xform).tolist() + list(
            tr.euler_from_matrix(xform))

        wp = [list_to_pose(pose)]  # waypoints
        self.moveg.set_start_state(self.robot.get_current_state())
        (plan,
         fraction) = self.moveg.compute_cartesian_path(wp,
                                                       eef_step=0.01,
                                                       jump_threshold=0.0)
        if fraction < 1.0:
            self.last_error_msg = "No motion plan found."
            return False

        v = self.velocity if velocity is None else velocity
        plan = self.moveg.retime_trajectory(self.robot.get_current_state(),
                                            plan, v)

        try:
            self.moveg.execute(plan, wait=True)
            self.moveg.stop()
        except MoveItCommanderException as e:
            self.last_error_msg = str(e)
            return False

        return True
Example #32
    def __init__(self, **kwargs):
        super(RBOHand2Kuka, self).__init__()

        #old parameters below - must be updated to new convention!
        self['surface_grasp']['initial_goal'] = np.array([
            -0.05864322834179703, 0.4118988657714642, -0.05864200146127985,
            -1.6887810963180838, -0.11728653060066829, -0.8237944986945402, 0
        ])
        self['surface_grasp']['pregrasp_pose'] = tra.translation_matrix(
            [0, 0, -0.2])
        self['surface_grasp']['hand_pose'] = tra.concatenate_matrices(
            tra.translation_matrix([0, 0, 0]),
            tra.rotation_matrix(math.radians(0.), [0, 0, 1]))
        self['surface_grasp']['downward_force'] = 7.
        self['surface_grasp']['valve_pattern'] = (np.array([[0.,
                                                             4.1], [0., 0.1],
                                                            [0., 5.], [0., 5.],
                                                            [0., 2.],
                                                            [0., 3.5]]),
                                                  np.array([[1, 0]] * 6))

        self['wall_grasp']['pregrasp_pose'] = tra.translation_matrix(
            [0.05, 0, -0.2])
        self['wall_grasp']['table_force'] = 7.0
        self['wall_grasp']['sliding_speed'] = 0.1
        self['wall_grasp']['up_speed'] = 0.1
        self['wall_grasp']['down_speed'] = 0.1
        self['wall_grasp']['wall_force'] = 10.0
        self['wall_grasp']['angle_of_attack'] = 1.0  #radians
        self['wall_grasp']['object_lift_time'] = 4.5

        self['edge_grasp']['initial_goal'] = np.array([
            -0.05864322834179703, 0.4118988657714642, -0.05864200146127985,
            -1.6887810963180838, -0.11728653060066829, -0.8237944986945402, 0
        ])
        self['edge_grasp']['pregrasp_pose'] = tra.translation_matrix(
            [0.2, 0, 0.4])
        self['edge_grasp']['hand_object_pose'] = tra.concatenate_matrices(
            tra.translation_matrix([0, 0, 0.05]),
            tra.rotation_matrix(math.radians(10.), [1, 0, 0]),
            tra.euler_matrix(0, 0, -math.pi / 2.))
        self['edge_grasp']['grasp_pose'] = tra.concatenate_matrices(
            tra.translation_matrix([0, -0.05, 0]),
            tra.rotation_matrix(math.radians(10.), [1, 0, 0]),
            tra.euler_matrix(0, 0, -math.pi / 2.))
        self['edge_grasp']['postgrasp_pose'] = tra.translation_matrix(
            [0, 0, -0.1])
        self['edge_grasp']['downward_force'] = 4.0
        self['edge_grasp']['sliding_speed'] = 0.04
        self['edge_grasp']['valve_pattern'] = (np.array([[0, 0], [0,
                                                                  0], [1, 0],
                                                         [1, 0], [1, 0],
                                                         [1, 0]]),
                                               np.array([[0, 3.0]] * 6))
Example #33
def matrix_from_tf(transform):
    if transform._type == 'geometry_msgs/TransformStamped':
        transform = transform.transform

    trans = (transform.translation.x, transform.translation.y,
             transform.translation.z)
    quat_ = (transform.rotation.x, transform.rotation.y, transform.rotation.z,
             transform.rotation.w)

    tmat = translation_matrix(trans)
    qmat = quaternion_matrix(quat_)

    return np.dot(tmat, qmat)
Example #34
 def approach(self, approach_dist, step_size):
     self.ctrl_mng.switch_controllers([self.side + '_cart'],
                                      [self.side + '_arm_controller'])
     current_mat = self.current_mat
     move_step_mat = np.matrix(
         translation_matrix(np.array([step_size, 0.0, 0.0])))
     for i in range(int(approach_dist / step_size)):
         current_mat = current_mat * move_step_mat
         self.pose_pub.publish(
             stamp_pose(mat_to_pose(current_mat), self.reference_frame))
         rospy.sleep(0.1)
     self.ctrl_mng.switch_controllers([self.side + '_arm_controller'],
                                      [self.side + '_cart'])
Example #35
 def computeTransform(self):
     if self.pose is None:
         return
     robotXyz = numpy.array(translation_from_matrix(self.pose))[:3]
     robotQuat = numpy.array(quaternion_from_matrix(self.pose))
     (r, p, yaw) = euler_from_quaternion(robotQuat)
     robotQuat = quaternion_from_euler(0.0, 0.0, yaw) 
     m = PoseWithCovarianceStamped()
     m.header.frame_id = self.mapFrame
     m.header.stamp = self.imageTime
     m.pose.pose.orientation.x = robotQuat[0]
     m.pose.pose.orientation.y = robotQuat[1]
     m.pose.pose.orientation.z = robotQuat[2]
     m.pose.pose.orientation.w = robotQuat[3]
     m.pose.pose.position.x = robotXyz[0]
     m.pose.pose.position.y = robotXyz[1]
     m.pose.pose.position.z = robotXyz[2]
     """
     These values are designed to work with robot_localization.
     See http://wiki.ros.org/robot_localization/Tutorials/Migration%20from%20robot_pose_ekf
     """
     m.pose.covariance = [0.01,  0,     0,      0,     0,     0,
                          0,     0.01,  0,      0,     0,     0,
                          0,     0,     0.01,   0,     0,     0,
                          0,     0,     0,      0.01,  0,     0,
                          0,     0,     0,      0,     0.01,  0,
                          0,     0,     0,      0,     0,     0.01]
     self.posePub.publish(m)
     if self.odomFrame != "":
         try: 
             if self.imageTime is None:
                 rospy.logerr("imageTime is bogus!")
             odomt, odomr = self.lr.lookupTransform(self.poseFrame, self.odomFrame, self.imageTime)
             odom = numpy.dot(translation_matrix((odomt[0], odomt[1], odomt[2])),
                              quaternion_matrix((odomr[0], odomr[1], odomr[2], odomr[3])))
             pose = numpy.dot(self.pose, odom)
         except:
             traceback.print_exc()
             rospy.logerr("Unable to lookup transfrom from odom to robot (%s to %s)" % \
                          (self.poseFrame, self.odomFrame))
             return
     else:
         pose = self.pose
     self.publishLock.acquire()
     self.robotXyz = numpy.array(translation_from_matrix(pose))[:3]
     robotQuat = numpy.array(quaternion_from_matrix(pose))
     (r, p, yaw) = euler_from_quaternion(robotQuat)
     self.robotQuat = quaternion_from_euler(0.0, 0.0, yaw) 
     self.robotYaw = yaw
     self.publishLock.release()
     self.publishTransform()
Example #36
def odomVisualPublisher(v, th, time):
    global last_time, voX, voY, voT
    current_time = time

    dt = (current_time.to_sec() - last_time.to_sec())
    voX += v * math.cos(voT) * dt
    voY += v * math.sin(voT) * dt
    voT += th * dt

    msg = Odometry()
    msg.header.stamp = current_time
    msg.pose.pose.position = Point(voX, voY, 0.0)

    tx, ty, tz, tw = tf.transformations.quaternion_from_euler(0.0, 0.0, voT)
    msg.pose.pose.orientation = Quaternion(tx, ty, tz, tw)
    pub1.publish(msg)

    pos = msg.pose.pose
    inversed_transform = t.concatenate_matrices(
        t.translation_matrix((pos.position.x, pos.position.y, pos.position.z)),
        t.quaternion_matrix((pos.orientation.x, pos.orientation.y,
                             pos.orientation.z, pos.orientation.w)))
    inversed_transform = t.inverse_matrix(inversed_transform)

    (position1,
     orientation1) = tfListener.lookupTransform("/odom", "/base_footprint",
                                                rospy.Time(0))
    inversed_transform_1 = t.concatenate_matrices(
        t.translation_matrix(position1), t.quaternion_matrix(orientation1))
    inversed_transform_1 = t.inverse_matrix(inversed_transform_1)

    multiply = np.dot(inversed_transform, inversed_transform_1)
    tran = t.translation_from_matrix(multiply)
    rot = t.quaternion_from_matrix(multiply)

    antena.sendTransform(tran, rot, current_time, 'odom_visual',
                         'base_footprint')
    last_time = current_time
Example #37
 def lift(self, grasp, lift_dist, step_size):
     self.ctrl_mng.switch_controllers([self.side + '_cart'],
                                      [self.side + '_arm_controller'])
     current_mat = pose_to_mat(grasp.grasp_pose.pose)
     move_step_mat = np.matrix(
         translation_matrix(np.array([0.0, 0.0, -step_size])))
     for i in range(int(lift_dist / step_size)):
         #current_mat = current_mat * move_step_mat
         current_mat[2, 3] += step_size
         self.pose_pub.publish(
             stamp_pose(mat_to_pose(current_mat), self.reference_frame))
         rospy.sleep(0.05)
     self.ctrl_mng.switch_controllers([self.side + '_arm_controller'],
                                      [self.side + '_cart'])
Example #38
def get_object_pose_as_matrix(object_name):
    global object_poses
    obj = object_poses[object_name]
    obj_trans = (obj.pose.pose.pose.position.x, obj.pose.pose.pose.position.y, obj.pose.pose.pose.position.z)
    obj_quat = (
        obj.pose.pose.pose.orientation.x,
        obj.pose.pose.pose.orientation.y,
        obj.pose.pose.pose.orientation.z,
        obj.pose.pose.pose.orientation.w,
    )
    obj_quat_mat = transformations.quaternion_matrix(obj_quat)
    obj_trans_mat = transformations.translation_matrix(obj_trans)
    obj_mat = transformations.concatenate_matrices(obj_trans_mat, obj_quat_mat)
    return obj_mat
Example #39
    def fk(self):
        for joint in self.joints.values():
            if joint.is_revolute():
                Rj = tf.euler_matrix(joint.pos, joint.pos, joint.pos, 'sxyz')
                joint.Hj = tf.concatenate_matrices(joint.Hb, Rj)

            elif joint.is_prismatic():
                # translation_matrix takes a single (x, y, z) sequence
                Pj = tf.translation_matrix((joint.pos, joint.pos, joint.pos))
                joint.Hj = tf.concatenate_matrices(joint.Hb, Pj)

            # TODO: add FLOATING and PLANAR cases

            else:
                joint.Hj = joint.Hb
Example #40
    def get_transform_by_string(str_q, str_p):
        """ takes a quaternion (q) from frame A to frame B and a position (p) in frame A
                to calculate a transformation A => B
                same semantics as tf strings"""
        q = np.array([float(x) for x in str_q.split(" ")])
        p = np.array([float(x) for x in str_p.split(" ")])

        # calculate transformation
        # pre-multiply rotation , as P is in frame A
        transform = np.dot(
            transformations.quaternion_matrix([q[3], q[0], q[1], q[2]]).T,
            transformations.translation_matrix(-p))

        return transform
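A hedged usage sketch (assuming the w-first quaternion convention that the reindexing above implies): with an identity rotation, the A => B transform is a pure translation by -p.

    T = get_transform_by_string("0 0 0 1", "1 2 3")
    # T[:3, 3] == [-1., -2., -3.]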
Example #41
File: ARTIV.py  Project: yooona1214/ARTIV
def speed_bump_y(point):

    # Publish a rotated plane using a numpy transform matrix
    radian = 0.523599 # 30 degrees
    sin = 0.5
    cos = 0.866
    depth = 0.3
    width = 2.0

    R_x_1 = transformations.rotation_matrix(radian, (1,0,0)) # Rotate about the x-axis by +30 degrees
    T0_1 = transformations.translation_matrix((point.x, point.y-0.5*cos*depth,0.5*sin*depth))
    T_1 = transformations.concatenate_matrices(T0_1, R_x_1)
    markers.publishPlane(T_1, width, depth, 'yellow', 5.0) # pose, width, depth, color, lifetime

    R_x_2 = transformations.rotation_matrix(-1*radian, (1,0,0)) # Rotate about the x-axis by -30 degrees
    T0_2 = transformations.translation_matrix((point.x,point.y+ 0.5*cos*depth,0.5*sin*depth))
    T_2 = transformations.concatenate_matrices(T0_2, R_x_2)
    markers.publishPlane(T_2, width, depth, 'yellow', 5.0) # pose, width, depth, color, lifetime

    center = Point(point.x, point.y, sin*depth + 0.1)
    P = Pose(center,Quaternion(0,0,0,1))
    scale = Vector3(0.15,0.15,0.2)
    markers.publishText(P, 'Speed Bump', 'white', scale, 5.0) # pose, text, color, scale, lifetime
Example #42
    def _make_local_map(self, scan):
        #target frame is map_frame, source frame is laser_link
        trans = self.tf_buffer.lookup_transform(
            target_frame=self.map_frame,
            source_frame=scan.header.frame_id,
            time=scan.header.stamp,
            timeout=rospy.Duration(5)
        )

        pos = trans.transform.translation
        orient = trans.transform.rotation

        # transform from base to map
        transform = np.dot(
            transformations.translation_matrix([pos.x, pos.y, pos.z]),
            transformations.quaternion_matrix([orient.x, orient.y, orient.z, orient.w])
        )

        self.map_info.origin.position.x = pos.x - self.map_info.resolution*self.width/2.0
        self.map_info.origin.position.y = pos.y - self.map_info.resolution*self.height/2.0
        self.ros_map.info = self.map_info
        self.ros_map.data[...] = -1
        mcl_local_map = mcl.Map(self.ros_map)

        mask = (scan.ranges < scan.range_max) & (scan.ranges > scan.range_min)
        angles = np.arange(scan.angle_min,scan.angle_max,scan.angle_increment)

        x = scan.ranges[mask]*np.cos(angles[mask])
        y = scan.ranges[mask]*np.sin(angles[mask])

        # set the last component to zero to ignore translation
        ii, jj = mcl_local_map.index_at(
            np.vstack((x,y,np.zeros(np.sum(mask)),np.ones(np.sum(mask)))).T,
            world_to_map=transform
        )

        ok = (
            (jj >= 0) & (jj < self.width) &
            (ii >= 0) & (ii < self.height)
        )
        ii = ii[ok]
        jj = jj[ok]
        mcl_local_map.grid[ii,jj] = 100


        ### TODO: figure out faster serialization issue
        map_msg = msgify(numpy_msg(OccupancyGrid), mcl_local_map.grid, info=self.map_info)
        map_msg.header = scan.header
        map_msg.header.frame_id = 'map'
        return map_msg
Example #43
	def calENU(self):
		if(self.body.isNone() or self.headingAngle is None):
			return False
		else:
			br = self.body.getRotationAroundZ()
			
			q = rotation_matrix(br+self.headingAngle, (0,0,1))
			
			t = translation_from_matrix(self.body.getMatrix())
			t = translation_matrix(t)
			
			self.enu.setMatrix(t).transformByMatrix(q)
			return True
#eoc
Example #44
File: mc_planner.py  Project: ashokzg/cpb
    def calculate_bridge_and_base_pose(self, table_state, phi):
        # Calculate bridge pose from cue position and phi
        phi_radians = math.pi * phi / 180.0
        cue_x,    cue_y    = self.get_cue_ball_pos(table_state)
        bridge_x, bridge_y = self.get_bridge_pos(cue_x, cue_y, phi)
        (qx, qy, qz, qw)   = transformations.quaternion_about_axis(phi_radians, (0, 0, 1))

        bridge_pos         = transformations.translation_matrix([bridge_x, bridge_y, 0.0])
        bridge_orient      = transformations.quaternion_matrix([qx, qy, qz, qw])
        bridge_pose        = transformations.concatenate_matrices(bridge_pos, bridge_orient)
        bridge_pose_pos    = transformations.translation_from_matrix(bridge_pose)
        bridge_pose_orient = transformations.quaternion_from_matrix(bridge_pose)

        # Produce base pose via fixed transform from bridge pose
        bridge_to_base_trans = transformations.translation_matrix([Constants.BRIDGE_IN_BASE_X, Constants.BRIDGE_IN_BASE_Y, Constants.BRIDGE_IN_BASE_Z])
        bridge_to_base_rot   = transformations.quaternion_matrix([Constants.BRIDGE_IN_BASE_QX, Constants.BRIDGE_IN_BASE_QY, Constants.BRIDGE_IN_BASE_QZ, Constants.BRIDGE_IN_BASE_QW])
        bridge_to_base       = transformations.concatenate_matrices(bridge_to_base_trans, bridge_to_base_rot)
        base_to_bridge       = transformations.inverse_matrix(bridge_to_base)
        base_pose            = transformations.concatenate_matrices(bridge_pose, base_to_bridge)  # world_T_bridge . bridge_T_base = world_T_base
        base_pose_pos        = transformations.translation_from_matrix(base_pose)
        base_pose_orient     = transformations.quaternion_from_matrix(base_pose)
        print "BASE_POSE", base_pose, "BRIDGE_POSE", bridge_pose 
        return ((bridge_pose_pos, bridge_pose_orient), (base_pose_pos, base_pose_orient))
Example #45
    def get_twisted_zAxis(self, rot):
        initial_zAxis = np.array([0, 0, 1])
        if len(rot) == 3:
            rot_mat = tf.euler_matrix(rot[0], rot[1], rot[2])
        elif len(rot) == 4:
            rot_mat = tf.quaternion_matrix(rot)
        else:
            print("Given rot component's number is wrong!")
            return TypeError
        initial_tf = tf.translation_matrix(initial_zAxis)
        twisted_tf = tf.concatenate_matrices(rot_mat, initial_tf)
        twisted_tf[np.abs(twisted_tf)<1e-5] = 0

        return twisted_tf[:3, 3]
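A worked check of the Euler branch (a sketch; assumes numpy as np and an instance of the class above): rotating the unit z-axis 90 degrees about x tips it onto -y.

    zAxis = self.get_twisted_zAxis([np.pi / 2, 0.0, 0.0])
    print(zAxis)  # [ 0. -1.  0.]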
Example #46
    def predictSinglePose(self, armName, curPose, prevPose, dt=1.0):
        if dt <= 0:
            print 'Error: Illegal timestamp'
            return None

        # Convert pose to numpy matrix
        curTrans = tft.translation_matrix(
            [curPose.position.x, curPose.position.y, curPose.position.z])
        curRot = tft.quaternion_matrix([
            curPose.orientation.x, curPose.orientation.y,
            curPose.orientation.z, curPose.orientation.w
        ])
        curPoseMatrix = np.dot(curTrans, curRot)

        prevTrans = tft.translation_matrix(
            [prevPose.position.x, prevPose.position.y, prevPose.position.z])
        prevRot = tft.quaternion_matrix([
            prevPose.orientation.x, prevPose.orientation.y,
            prevPose.orientation.z, prevPose.orientation.w
        ])
        prevPoseMatrix = np.dot(prevTrans, prevRot)

        deltaPoseMatrix = np.linalg.inv(prevPoseMatrix).dot(curPoseMatrix)
        deltaAngles = euler_from_matrix(deltaPoseMatrix[:3, :3])
        deltaPos = deltaPoseMatrix[:3, 3]

        #deltaAngles = np.array([a / dt for a in deltaAngles])
        deltaPos = deltaPos / dt
        #deltaPoseMatrix = euler_matrix(deltaAngles[0], deltaAngles[1], deltaAngles[2])
        deltaPoseMatrix[:3, 3] = deltaPos

        gpList, sysList = self.predict(armName, [curPoseMatrix],
                                       [deltaPoseMatrix])
        return tfx.pose(gpList[0], frame=curPose.frame,
                        stamp=curPose.stamp), tfx.pose(sysList[0],
                                                       frame=curPose.frame,
                                                       stamp=curPose.stamp)
Example #47
    def updatePoseFromMarkers(self, ids, corners, image):

        if ids:
            xs, ys, yaws = [], [], []
            for i, idx in enumerate(ids):

                rvec, tvec = cv2.aruco.estimatePoseSingleMarkers(
                    corners[i], self.marker_dict[str(idx[0])].len,
                    self.cam_mtx, self.distort_mtx)
                rvec = rvec[0][0]
                tvec = tvec[0][0]

                tmat = translation_matrix((tvec[0], tvec[1], tvec[2]))
                qmat = np.zeros((4, 4))
                qmat[:3, :3], _ = cv2.Rodrigues(rvec)
                qmat[3, 3] = 1.

                tf_cam2mark = np.dot(tmat, qmat)
                tf_mark2cam = inverse_matrix(tf_cam2mark)

                marker = self.marker_dict[str(idx[0])]

                tf_map2cam = np.dot(marker.tf_mat, tf_cam2mark)
                # trans = translation_from_matrix(tf_map2cam)
                rot = euler_from_quaternion(quaternion_from_matrix(tf_map2cam))

                tf_map2baselink = np.dot(tf_map2cam, self.tf_cam2baselink)

                baselink_pose = Pose()
                baselink_pose.position = Vector3(
                    *translation_from_matrix(tf_map2baselink))
                baselink_pose.orientation = Quaternion(
                    *quaternion_from_matrix(tf_map2baselink))

                print("detected marker", marker.id)
                print("cam pose", baselink_pose)

                # Draw axis on marker and publish image
                image = cv2.cvtColor(image, cv2.COLOR_GRAY2BGR)
                image = cv2.aruco.drawAxis(image, self.cam_mtx,
                                           self.distort_mtx, rvec, tvec, 0.1)
            """
            NOTE
            Still need to take average of poses when multiple markers are detected
            """

            return baselink_pose, image
        else:
            return None, image
Example #48
    def newTf(self, msg):
        self.currentSeq = msg.image_seq
        self.imageTime = msg.header.stamp
        self.tfs = {}
        rospy.loginfo("got tfs from image seq %d", self.currentSeq)

        for m in msg.transforms:
            id = m.fiducial_id
            trans = m.transform.translation
            rot = m.transform.rotation
            mat = numpy.dot(translation_matrix((trans.x, trans.y, trans.z)),
                            quaternion_matrix((rot.x, rot.y, rot.z, rot.w)))
            invMat = numpy.linalg.inv(mat)
            self.tfs[id] = (mat, invMat, m.object_error, m.image_error)
            self.transFile.write("%d %d %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf\n" % \
                                 (id, self.currentSeq, trans.x, trans.y, trans.z, 
                                  rot.x, rot.y, rot.z, rot.w,
                                  m.object_error, m.image_error, m.fiducial_area))
        """
        if self.useExternalPose:
            # XXXX needs to be verified with respect to time
            fiducialKnown = False
            for f in self.tfs.keys():
            if self.fiducials.has_key(f):
                fiducialKnown = True 
                if not fiducialKnown:
                    for f in self.tfs.keys():
                        self.updateMapFromExternal(f)
        """
        self.updatePose()
        if not self.pose is None:
            robotXyz = numpy.array(translation_from_matrix(self.pose))[:3]
            robotQuat = numpy.array(quaternion_from_matrix(self.pose))
            (r, p, robotYaw) = euler_from_quaternion(robotQuat)
            # Only update the map if the robot has moved significantly, to 
            # avoid the map variances decaying from repeated observations
            if self.lastUpdateXyz is None:
                self.updateMap()
                self.lastUpdateXyz = robotXyz
                self.lastUpdateYaw = robotYaw
            else:
                dist = numpy.linalg.norm(self.lastUpdateXyz - robotXyz)
                angle = self.lastUpdateYaw - self.robotYaw
                print "Distance moved", dist, angle
                if self.mappingMode or dist > MIN_UPDATE_TRANSLATION \
                   or angle > MIN_UPDATE_ROTATION:
                    self.updateMap()
                    self.lastUpdateXyz = robotXyz
                    self.lastUpdateYaw = robotYaw
Example #49
def main():
    rospy.init_node("xens_calibration")
    rospy.Subscriber("ar_pose_marker", AlvarMarkers, cb)
    listener = tf.TransformListener()
    flag_marker = False
    broadcaster = tf.TransformBroadcaster()
    rospy.loginfo("Node started, shutdown in 3s if no tf coming ")
    rospy.loginfo("Calibrating")
    publish_rate = 50
    r = rospy.Rate(publish_rate)
    while not rospy.is_shutdown():

        look_up_t = rospy.Time(0)
        listener.waitForTransform('t8_sternum_up_xsens', 'right_hand_xsens',
                                  look_up_t, rospy.Duration(3))
        try:
            t8_to_r_hand = listener.lookupTransform('t8_sternum_up_xsens',
                                                    'right_hand_xsens',
                                                    look_up_t)
        except (tf.LookupException, tf.ConnectivityException,
                tf.ExtrapolationException):
            continue
        t8_to_r_hand_quat = t8_to_r_hand[1]
        t8_to_r_hand_mat = listener.fromTranslationRotation(*t8_to_r_hand)
        # The translation lives in the last column of the 4x4 matrix;
        # indexing [0][0:3] would read the first rotation row instead.
        trans = (t8_to_r_hand_mat[0][3], t8_to_r_hand_mat[1][3],
                 t8_to_r_hand_mat[2][3])
        quat = (0, 0, 0, 1)  # keep the translation, drop the rotation

        t8_to_r_hand_mat = numpy.dot(
            translation_matrix(trans),
            quaternion_matrix(quat))  # xsens tf matrix

        if shared_msg is not None:
            base_to_marker_mat = shared_msg
            base_to_r_hand = numpy.dot(
                t8_to_r_hand_mat,
                base_to_marker_mat)  # transform between xsens and base
            base_to_r_hand_trans = translation_from_matrix(base_to_r_hand)
            rospy.loginfo("Calibration result is %s" % base_to_r_hand_trans)

            broadcaster.sendTransform(
                base_to_r_hand_trans,
                t8_to_r_hand_quat,
                rospy.Time.now(),
                "right_hand_xsens_new",
                'base',
            )
        # Sleep every cycle, even before the first marker message
        # arrives, to avoid busy-waiting.
        r.sleep()
Example #50
    def __init__(self):
        """ 
              ^ L2                   R2
              |  |                   |
            50mm |                   |
              |  |                   |
              - L1                   R1
              |  |                   |
            250mm|                   |
              |  | wh             wh |
              V  | eel---Robot---eel |

                   <-----183mm----->  
                 <------184mm-------->  
            x
            ^
            |
        y<--O is at center of the "Robot" above, coaxis with the wheel

        Burger dimensions taken from
        https://emanual.robotis.com/assets/images/platform/turtlebot3/hardware_setup/turtlebot3_dimension1.png
        """
        # List of sensor
        self.sensors = dict()
        self.sensors['R1'] = NavSensor(
            sensor_tf=tfs.translation_matrix([0.250, -0.092, 0.0]))
        self.sensors['R2'] = NavSensor(
            sensor_tf=tfs.translation_matrix([0.300, -0.092, 0.0]))
        self.sensors['L1'] = NavSensor(
            sensor_tf=tfs.translation_matrix([0.250, 0.092, 0.0]))
        self.sensors['L2'] = NavSensor(
            sensor_tf=tfs.translation_matrix([0.300, 0.092, 0.0]))
        self.APPROACHABLE_THRESHOLD = self.sensors['R2'].tf[0, 3]
        # Publisher to visualize
        self.marker_pub = rospy.Publisher('tran0966/nav_sensors_array',
                                          MarkerArray, queue_size=10)
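
Given a robot pose as a 4x4 matrix, each sensor's world position follows by composing the robot transform with the sensor's fixed offset. A minimal sketch, assuming NavSensor.tf holds the 4x4 offset matrix built in the constructor above:

import numpy as np
from tf import transformations as tfs

def sensor_world_position(T_world_robot, sensor_tf):
    # Compose the robot pose with the fixed sensor offset, then read
    # the translation column of the resulting 4x4 matrix.
    return np.dot(T_world_robot, sensor_tf)[:3, 3]

# e.g. robot at (1, 2) facing +y (yaw of 90 degrees):
T = np.dot(tfs.translation_matrix([1.0, 2.0, 0.0]),
           tfs.euler_matrix(0.0, 0.0, np.pi / 2))
print(sensor_world_position(T, tfs.translation_matrix([0.250, -0.092, 0.0])))
# -> roughly [1.092, 2.250, 0.0]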
Example #51
 def gen_sensor_list(self, pre, p1, p2, n):
     """
     p1,p2 (x,y)
     lay n sensor from p1->p2 with sensor name prefixed as pre
     """
     p1 = np.array(p1)
     p2 = np.array(p2)
     p12 = p2 - p1
     p12l = np.linalg.norm(p12)
     u12 = tfs.unit_vector(p12)
     sen_dist = p12l / (n - 1)  # assumes n >= 2: sensors at both endpoints
     for i in range(n):
         sensor_pos = p1 + i * sen_dist * u12 
         self.sensors[f'{pre}{self.sensor_total}'] = NavSensor(sensor_tf = tfs.translation_matrix([sensor_pos[0], sensor_pos[1], 0.0]))
         self.sensor_total += 1
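
A hypothetical call, laying four evenly spaced sensors along the robot's right side between the R1 and R2 positions used earlier (assumes self.sensor_total starts at 0):

self.gen_sensor_list('R', (0.250, -0.092), (0.300, -0.092), 4)
# -> keys 'R0'..'R3' at x = 0.250, 0.2667, 0.2833, 0.300 (y = -0.092)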
Example #52
    def predictSinglePose(self, pose, arm_side):
        # Convert pose to numpy matrix
        p = tft.translation_matrix(
            [pose.position.x, pose.position.y, pose.position.z])
        rot = tft.quaternion_matrix([
            pose.orientation.x, pose.orientation.y, pose.orientation.z,
            pose.orientation.w
        ])
        input_pose = np.dot(p, rot)

        gpList, sysList = self.predict([input_pose], arm_side)
        gp_pose = tfx.pose(gpList[0], frame=pose.frame, stamp=pose.stamp)
        sys_pose = tfx.pose(sysList[0], frame=pose.frame, stamp=pose.stamp)
        return gp_pose, sys_pose
Example #53
    def printTransform(self):
        while not rospy.is_shutdown():
            try:
                trans = self.tfBuffer.lookup_transform(self.depthCameraTopic, worldFrame, rospy.Time())
            except (tf2_ros.LookupException , tf2_ros.ConnectivityException, tf2_ros.ExtrapolationException):
                self.rate.sleep()
                continue
            
            print(trans.transform.translation, trans.transform.rotation)

            # Build the full 4x4 transform (currently unused below;
            # tfBuffer.transform performs the frame conversion instead)
            mat44 = translation_matrix(
                translation2list(trans.transform.translation)).dot(
                    quaternion_matrix(qaternion2list(trans.transform.rotation)))

            pt = PointStamped()
            pt.header.stamp = trans.header.stamp
            print(pt.header.stamp)
            pt.header.frame_id = self.depthCameraTopic
            pt.point.x = 0.0
            pt.point.y = 1.0
            pt.point.z = 0.0
            self.targetPnt = self.tfBuffer.transform(pt, "world")
            print(self.targetPnt)

            self.rate.sleep()
Example #54
def tran_mat44(trans):
    """将ros格式中的Transform数据转为矩阵(4X4)

    Args:
        trans: ros格式Transform数据

    """

    trans_xyz = [
        trans.transform.translation.x, trans.transform.translation.y, 0
    ]
    trans_quat = [0, 0, trans.transform.rotation.z, trans.transform.rotation.w]
    mat44 = np.dot(tft.translation_matrix(trans_xyz),
                   tft.quaternion_matrix(trans_quat))
    return mat44
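
A quick check of the projection, with an illustrative TransformStamped built by hand (field values are arbitrary):

from geometry_msgs.msg import TransformStamped

ts = TransformStamped()
ts.transform.translation.x = 1.0
ts.transform.translation.y = 2.0
ts.transform.translation.z = 0.5   # dropped by tran_mat44
ts.transform.rotation.z = 0.7071
ts.transform.rotation.w = 0.7071   # roughly a 90-degree yaw
print(tran_mat44(ts))              # translation column reads (1, 2, 0)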
Example #55
def main():
    print(qx)
    quaternion = [qx, qy, qz, qw]
    transform = t.concatenate_matrices(t.translation_matrix([-x, -y, -z]),
                                       t.quaternion_matrix(quaternion))
    inversed_transform = t.inverse_matrix(transform)
    print("Inversed tf: ")
    print(inversed_transform)
    # The naive inverse (negated translation plus inverted quaternion) is
    # wrong in general: the true inverse translation is -R^T * t, not -t.
    # Extract both parts from the inverted matrix instead.
    print("Trans (m): ")
    print(t.translation_from_matrix(inversed_transform))
    print("Rot (rad): ")
    print(t.euler_from_matrix(inversed_transform))
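
A quick numeric illustration of why the naive inverse fails (pure illustration, independent of the variables above):

import numpy as np
from tf import transformations as t

T = t.concatenate_matrices(t.translation_matrix([1.0, 0.0, 0.0]),
                           t.euler_matrix(0.0, 0.0, np.pi / 2))
T_inv = t.inverse_matrix(T)
print(t.translation_from_matrix(T_inv))  # [0, 1, 0], not [-1, 0, 0]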
Example #56
 def pose_relative_trans(self, pose, x=0., y=0., z=0.):
     """Return a pose translated relative to a given pose."""
     ps = deepcopy(pose)
     M_trans = tft.translation_matrix([x, y, z])
     q_ps = [
         ps.pose.orientation.x, ps.pose.orientation.y,
         ps.pose.orientation.z, ps.pose.orientation.w
     ]
     M_rot = tft.quaternion_matrix(q_ps)
     trans = np.dot(M_rot, M_trans)
     ps.pose.position.x += trans[0, 3]
     ps.pose.position.y += trans[1, 3]
     ps.pose.position.z += trans[2, 3]
     return ps
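
For instance, a pre-grasp pose 10 cm behind a grasp pose along the grasp's own x axis could be derived as (grasp_pose is an assumed PoseStamped):

pre_grasp = self.pose_relative_trans(grasp_pose, x=-0.10)

Because the rotation is applied before the translation column is read off, the offset follows the pose's local axes rather than the world frame.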
Example #57
    def integrate_spin(self, event):
        dx = self.twist.linear.x * self.dt
        dy = self.twist.linear.y * self.dt
        dz = self.twist.linear.z * self.dt
        trans_mat = t.translation_matrix([dx, dy, dz])

        droll = self.twist.angular.x * self.dt
        dpitch = self.twist.angular.y * self.dt
        dyaw = self.twist.angular.z * self.dt
        rot_mat = t.euler_matrix(droll, dpitch, dyaw)

        homogeneous_mat = np.dot(trans_mat, rot_mat)

        # update transformation matrix (world->base_link)
        self.transformation_mat = np.dot(self.transformation_mat, homogeneous_mat)
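
A standalone sketch of the same first-order integration, driving a pose with a constant twist (0.2 m/s forward, 0.5 rad/s yaw) for one second; only the transformations module is assumed:

import numpy as np
from tf import transformations as t

dt = 0.1
T = np.identity(4)                    # world -> base_link
for _ in range(10):                   # one second of motion
    step = np.dot(t.translation_matrix([0.2 * dt, 0.0, 0.0]),
                  t.euler_matrix(0.0, 0.0, 0.5 * dt))
    T = np.dot(T, step)               # compose in the body frame
print(t.translation_from_matrix(T))   # traces an arc, not a line
print(t.euler_from_matrix(T))         # yaw ends near 0.5 rad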
Example #58
def pose_to_mat44(pose):
    """
    Convert pose msg to 4*4 transformation matrix
    :param pose:
    :return:
    """
    [x, y, z] = [pose.position.x, pose.position.y, pose.position.z]
    trans_mat = transformations.translation_matrix([x, y, z])

    [x, y, z, w] = [
        pose.orientation.x, pose.orientation.y, pose.orientation.z,
        pose.orientation.w
    ]
    rot_mat = transformations.quaternion_matrix([x, y, z, w])

    return np.dot(trans_mat, rot_mat)
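
The reverse direction is often needed as well; a sketch of a hypothetical mat44_to_pose using the same transformations module:

from geometry_msgs.msg import Pose

def mat44_to_pose(mat44):
    """Convert a 4x4 transformation matrix back into a Pose msg."""
    pose = Pose()
    (pose.position.x, pose.position.y,
     pose.position.z) = transformations.translation_from_matrix(mat44)
    (pose.orientation.x, pose.orientation.y, pose.orientation.z,
     pose.orientation.w) = transformations.quaternion_from_matrix(mat44)
    return pose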
Example #59
    def _make_local_map(self, scan):
        #target frame is map_frame, source frame is laser_link
        trans = self.tf_buffer.lookup_transform(
            target_frame=self.map_frame,
            source_frame=scan.header.frame_id,
            time=scan.header.stamp,
            timeout=rospy.Duration(5))

        pos = trans.transform.translation
        orient = trans.transform.rotation

        # transform from base to map
        transform = np.dot(
            transformations.translation_matrix([pos.x, pos.y, pos.z]),
            transformations.quaternion_matrix(
                [orient.x, orient.y, orient.z, orient.w]))

        self.map_info.origin.position.x = pos.x - self.map_info.resolution * self.width / 2.0
        self.map_info.origin.position.y = pos.y - self.map_info.resolution * self.height / 2.0
        self.ros_map.info = self.map_info
        self.ros_map.data[...] = -1
        mcl_local_map = mcl.Map(self.ros_map)

        mask = (scan.ranges < scan.range_max) & (scan.ranges > scan.range_min)
        angles = np.arange(scan.angle_min, scan.angle_max,
                           scan.angle_increment)

        x = scan.ranges[mask] * np.cos(angles[mask])
        y = scan.ranges[mask] * np.sin(angles[mask])

        # homogeneous points in the laser frame: z = 0, w = 1
        ii, jj = mcl_local_map.index_at(np.vstack(
            (x, y, np.zeros(np.sum(mask)), np.ones(np.sum(mask)))).T,
                                        world_to_map=transform)

        ok = ((jj >= 0) & (jj < self.width) & (ii >= 0) & (ii < self.height))
        ii = ii[ok]
        jj = jj[ok]
        mcl_local_map.grid[ii, jj] = 100

        ### TODO: figure out faster serialization issue
        map_msg = msgify(numpy_msg(OccupancyGrid),
                         mcl_local_map.grid,
                         info=self.map_info)
        map_msg.header = scan.header
        map_msg.header.frame_id = 'map'
        return map_msg
Example #60
    def get_Tpose(self):
        if self.options.listen:
            if self.input_pose is None:
                return None, None, None, None
            stamp = self.input_pose.header.stamp
            if not self.options.frame:
                frame_id = self.input_pose.header.frame_id
            else:
                frame_id = self.options.frame
            pose = self.input_pose.pose
            p = tft.translation_matrix(
                [pose.position.x, pose.position.y, pose.position.z])
            rot = tft.quaternion_matrix([
                pose.orientation.x, pose.orientation.y, pose.orientation.z,
                pose.orientation.w
            ])
            Tinput_pose = dot(p, rot)
            if self.options.invert_input_pose:
                Tinput_pose = tft.inverse_matrix(Tinput_pose)
                frame_id = self.options.frame
            Tpose = dot(Tinput_pose, self.get_Tmatrix())
        else:
            frame_id = self.options.frame
            stamp = rospy.Time.now()
            Tpose = self.get_Tmatrix()

        if self.options.invert_output:
            Tpose = tft.inverse_matrix(Tpose)

        if self.options.tf:
            if self.options.invert_tf:
                from_frame = self.options.tf
                to_frame = frame_id
            else:
                from_frame = frame_id
                to_frame = self.options.tf
        else:
            from_frame = None
            to_frame = None

        frames = (frame_id, from_frame, to_frame)

        if self.options.invert_tf:
            Ttf = tft.inverse_matrix(Tpose)
        else:
            Ttf = Tpose

        return Tpose, Ttf, frames, stamp