Example #1
    def predictSinglePose(self, armName, curPose, prevPose, dt=1.0):
        if dt <= 0:
            print 'Error: Illegal timestamp'
            return None

        # Convert pose to numpy matrix
        curTrans = tft.translation_matrix([curPose.position.x, curPose.position.y, curPose.position.z])
        curRot = tft.quaternion_matrix([curPose.orientation.x, curPose.orientation.y, curPose.orientation.z, curPose.orientation.w])
        curPoseMatrix = np.dot(curTrans, curRot)

        prevTrans = tft.translation_matrix([prevPose.position.x, prevPose.position.y, prevPose.position.z])
        prevRot = tft.quaternion_matrix([prevPose.orientation.x, prevPose.orientation.y, prevPose.orientation.z, prevPose.orientation.w])
        prevPoseMatrix = np.dot(prevTrans, prevRot)
        
        deltaPoseMatrix = np.linalg.inv(prevPoseMatrix).dot(curPoseMatrix)
        deltaAngles = euler_from_matrix(deltaPoseMatrix[:3,:3])
        deltaPos = deltaPoseMatrix[:3,3]

        #deltaAngles = np.array([a / dt for a in deltaAngles])
        deltaPos = deltaPos / dt
        #deltaPoseMatrix = euler_matrix(deltaAngles[0], deltaAngles[1], deltaAngles[2])
        deltaPoseMatrix[:3,3] = deltaPos

        gpList, sysList = self.predict(armName, [curPoseMatrix], [deltaPoseMatrix])
        return tfx.pose(gpList[0], frame=curPose.frame, stamp=curPose.stamp), tfx.pose(sysList[0], frame=curPose.frame, stamp=curPose.stamp)
    def publish_state(self, t):
        state_msg = TransformStamped()
        t_ned_imu = tft.translation_matrix(self.model.get_position())
        R_ned_imu = tft.quaternion_matrix(self.model.get_orientation())
        T_ned_imu = tft.concatenate_matrices(t_ned_imu, R_ned_imu)
        #rospy.loginfo("T_ned_imu = \n" + str(T_ned_imu))
        if self.T_imu_vicon is None:
            # grab the static transform from imu to vicon frame from param server:
            frames = rospy.get_param("frames", None)
            ypr = frames['vicon_to_imu']['rotation']
            q_vicon_imu = tft.quaternion_from_euler(*[radians(x) for x in ypr], axes='rzyx') # xyzw
            R_vicon_imu = tft.quaternion_matrix(q_vicon_imu)
            t_vicon_imu = tft.translation_matrix(frames['vicon_to_imu']['translation'])
#            rospy.loginfo(str(R_vicon_imu))
#            rospy.loginfo(str(t_vicon_imu))
            self.T_vicon_imu = tft.concatenate_matrices(t_vicon_imu, R_vicon_imu)
            self.T_imu_vicon = tft.inverse_matrix(self.T_vicon_imu)
            self.T_enu_ned = tft.euler_matrix(radians(90), 0, radians(180), 'rzyx')
            rospy.loginfo(str(self.T_enu_ned))
            rospy.loginfo(str(self.T_imu_vicon))
            #rospy.loginfo(str(T_vicon_imu))
        # we have /ned -> /imu, need to output /enu -> /vicon:
        T_enu_vicon = tft.concatenate_matrices(self.T_enu_ned, T_ned_imu, self.T_imu_vicon )
        state_msg.header.stamp  = t
        state_msg.header.frame_id = "/enu"
        state_msg.child_frame_id = "/simflyer1/flyer_vicon"
        stt = state_msg.transform.translation
        stt.x, stt.y, stt.z = T_enu_vicon[:3,3]
        stro = state_msg.transform.rotation
        stro.x, stro.y, stro.z, stro.w = tft.quaternion_from_matrix(T_enu_vicon)
        
        self.pub_state.publish(state_msg)
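Both functions above follow the pattern used throughout these examples: build a 4x4 homogeneous matrix from a translation and a quaternion, compose matrices, then convert back. A minimal, self-contained sketch of that round trip (illustrative names only, assuming the standard ROS tf.transformations API):

import numpy as np
import tf.transformations as tft

def pose_to_matrix(translation, quaternion):
    # 4x4 homogeneous transform: translation composed with rotation
    return np.dot(tft.translation_matrix(translation),
                  tft.quaternion_matrix(quaternion))

def matrix_to_pose(T):
    # back to (translation, quaternion)
    return tft.translation_from_matrix(T), tft.quaternion_from_matrix(T)

T_a_b = pose_to_matrix([1.0, 0.0, 0.5], tft.quaternion_from_euler(0.0, 0.0, 1.57))
T_b_a = tft.inverse_matrix(T_a_b)
trans, quat = matrix_to_pose(T_a_b)   # recover the original translation/quaternion
# composing a transform with its inverse gives (numerically) the identity
assert np.allclose(np.dot(T_a_b, T_b_a), np.eye(4))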
Example #3
    def Update(self, timeout=10):
        marker_message = rospy.wait_for_message(self.marker_topic,
                                                MarkerArray,
                                                timeout=timeout)
        
        added_kinbodies = []
        updated_kinbodies = []
        
        for marker in marker_message.markers:
            if marker.ns in self.marker_data:
                kinbody_file, kinbody_offset = self.marker_data[marker.ns]
                kinbody_offset = numpy.array(kinbody_offset)
                marker_pose = numpy.array(quaternion_matrix([
                        marker.pose.orientation.x,
                        marker.pose.orientation.y,
                        marker.pose.orientation.z,
                        marker.pose.orientation.w]))
                marker_pose[0,3] = marker.pose.position.x
                marker_pose[1,3] = marker.pose.position.y
                marker_pose[2,3] = marker.pose.position.z
                
                self.listener.waitForTransform(
                        self.detection_frame,
                        self.destination_frame,
                        rospy.Time(),
                        rospy.Duration(timeout))
                frame_trans, frame_rot = self.listener.lookupTransform(
#                        self.detection_frame,
                        self.destination_frame,
                        self.detection_frame,
                        rospy.Time(0))
                frame_offset = numpy.matrix(quaternion_matrix(frame_rot))
                frame_offset[0,3] = frame_trans[0]
                frame_offset[1,3] = frame_trans[1]
                frame_offset[2,3] = frame_trans[2]
                
                kinbody_pose = numpy.array(numpy.dot(numpy.dot(frame_offset,marker_pose),
                                                     kinbody_offset))
                
                kinbody_name = kinbody_file.replace('.kinbody.xml', '')
                kinbody_name = kinbody_name + str(marker.id)
                
                # load the object if it does not exist
                if self.env.GetKinBody(kinbody_name) is None:
                    new_body = prpy.rave.add_object(
                            self.env,
                            kinbody_name,
                            os.path.join(self.kinbody_directory, kinbody_file))
                    added_kinbodies.append(new_body)
                    self.generated_bodies.append(new_body)
                
                body = self.env.GetKinBody(kinbody_name)
                body.SetTransform(kinbody_pose)
                updated_kinbodies.append(body)
        
        return added_kinbodies, updated_kinbodies
	def object_pose_callback(self,msg):
		print 'object pose cb'
		(tf_trans,tf_rot) = self.pr2.tf_listener.lookupTransform(msg.header.frame_id,self.base_frame,rospy.Time(0))
		msg_tf = numpy.mat(numpy.dot(tft.translation_matrix(tf_trans),tft.quaternion_matrix(tf_rot)))
		
		q = numpy.array([msg.pose.orientation.x,msg.pose.orientation.y,msg.pose.orientation.z,msg.pose.orientation.w])
		p = numpy.array([msg.pose.position.x,msg.pose.position.y,msg.pose.position.z])
		rot = numpy.mat(tft.quaternion_matrix(q))
		trans = numpy.mat(tft.translation_matrix(p))
		self.object_pose = msg_tf * trans * rot
Example #5
 def update(self, dt, desired, current):
     ((desired_p, desired_o), (desired_p_dot, desired_o_dot), (desired_p_dotdot, desired_o_dotdot)), ((p, o), (p_dot, o_dot)) = desired, current
     
     world_from_body = transformations.quaternion_matrix(o)[:3, :3]
     x_dot = numpy.concatenate([
         world_from_body.dot(p_dot),
         world_from_body.dot(o_dot),
     ])
     
     world_from_desiredbody = transformations.quaternion_matrix(desired_o)[:3, :3]
     desired_x_dot = numpy.concatenate([
         world_from_body.dot(desired_p_dot),
         world_from_body.dot(desired_o_dot),
     ])
     desired_x_dotdot = numpy.concatenate([
         world_from_body.dot(desired_p_dotdot),
         world_from_body.dot(desired_o_dotdot),
     ])
     
     error_position_world = numpy.concatenate([
         desired_p - p,
         quat_to_rotvec(transformations.quaternion_multiply(
             desired_o,
             transformations.quaternion_inverse(o),
         )),
     ])
     
     world_from_body2 = numpy.zeros((6, 6))
     world_from_body2[:3, :3] = world_from_body2[3:, 3:] = world_from_body
     body_gain = lambda x: world_from_body2.dot(x).dot(world_from_body2.T)
     
     
     error_velocity_world = (desired_x_dot + body_gain(numpy.diag(self.config['k'])).dot(error_position_world)) - x_dot
     
     pd_output = body_gain(numpy.diag(self.config['ks'])).dot(error_velocity_world)
     
     output = pd_output
     if self.config['use_rise']:
         rise_term_int = body_gain(numpy.diag(self.config['ks'] * self.config['alpha'])).dot(error_velocity_world) + \
                         body_gain(numpy.diag(self.config['beta'])).dot(numpy.sign(error_velocity_world))
         
         self._rise_term = self._rise_term + dt/2*(rise_term_int + self._rise_term_int_prev)
         self._rise_term_int_prev = rise_term_int
         
         output = output + self._rise_term
     else:
         # zero rise term so it doesn't wind up over time
         self._rise_term = numpy.zeros(6)
         self._rise_term_int_prev = numpy.zeros(6)
     output = output + body_gain(numpy.diag(self.config['accel_feedforward'])).dot(desired_x_dotdot)
     output = output + body_gain(numpy.diag(self.config['vel_feedforward'])).dot(desired_x_dot)
     
     wrench_from_vec = lambda output: (world_from_body.T.dot(output[0:3]), world_from_body.T.dot(output[3:6]))
     return wrench_from_vec(pd_output), wrench_from_vec(output)
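The controller above imports quat_to_rotvec from elsewhere in its package; the snippet does not show it. A plausible stand-in (an assumption, not the package's actual code) maps a unit quaternion in [x, y, z, w] order to its rotation vector (axis times angle):

import numpy as np
from tf import transformations

def quat_to_rotvec(q):
    # rotation vector = axis * angle for a unit quaternion [x, y, z, w]
    q = np.array(q, dtype=float)
    if q[3] < 0:
        q = -q                      # keep the angle in [0, pi]
    norm = np.linalg.norm(q[:3])
    if norm < 1e-12:
        return np.zeros(3)
    angle = 2.0 * np.arctan2(norm, q[3])
    return q[:3] / norm * angle

print(quat_to_rotvec(transformations.quaternion_from_euler(0, 0, np.pi / 2)))  # ~[0, 0, 1.5708]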
Example #6
    def recalculate_transform(self, currentTime):
        """
        Creates updated transform from /odom to /map given recent odometry and
        laser data.
        
        :Args:
            | currentTime (rospy.Time()): Time stamp for this update
         """
        
        transform = Transform()

        T_est = transformations.quaternion_matrix([self.estimatedpose.pose.pose.orientation.x,
                                                   self.estimatedpose.pose.pose.orientation.y,
                                                   self.estimatedpose.pose.pose.orientation.z,
                                                   self.estimatedpose.pose.pose.orientation.w])
        T_est[0, 3] = self.estimatedpose.pose.pose.position.x
        T_est[1, 3] = self.estimatedpose.pose.pose.position.y
        T_est[2, 3] = self.estimatedpose.pose.pose.position.z
        
        T_odom = transformations.quaternion_matrix([self.last_odom_pose.pose.pose.orientation.x,
                                                   self.last_odom_pose.pose.pose.orientation.y,
                                                   self.last_odom_pose.pose.pose.orientation.z,
                                                   self.last_odom_pose.pose.pose.orientation.w])
        T_odom[0, 3] = self.last_odom_pose.pose.pose.position.x
        T_odom[1, 3] = self.last_odom_pose.pose.pose.position.y
        T_odom[2, 3] = self.last_odom_pose.pose.pose.position.z
        T = np.dot(T_est, np.linalg.inv(T_odom))
        q = transformations.quaternion_from_matrix(T) #[:3, :3])

        transform.translation.x = T[0, 3] 
        transform.translation.y = T[1, 3] 
        transform.translation.z = T[2, 3] 
        transform.rotation.x = q[0]
        transform.rotation.y = q[1]
        transform.rotation.z = q[2]
        transform.rotation.w = q[3]
        

        # Insert new Transform into a TransformStamped object and add to the
        # tf tree
        new_tfstamped = TransformStamped()
        new_tfstamped.child_frame_id = "/odom"
        new_tfstamped.header.frame_id = "/map"
        new_tfstamped.header.stamp = currentTime
        new_tfstamped.transform = transform

        # Add the transform to the list of all transforms
        self.tf_message = tfMessage(transforms=[new_tfstamped])
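The transform published above is T = np.dot(T_est, np.linalg.inv(T_odom)), i.e. the map-to-odom correction, so chaining it with the odometry transform must reproduce the estimated pose. A small consistency check with made-up poses:

import numpy as np
from tf import transformations

T_map_base = transformations.euler_matrix(0, 0, 0.3)
T_map_base[:3, 3] = [2.0, 1.0, 0.0]
T_odom_base = transformations.euler_matrix(0, 0, 0.1)
T_odom_base[:3, 3] = [0.5, 0.2, 0.0]

T_map_odom = np.dot(T_map_base, np.linalg.inv(T_odom_base))
# applying the correction to the odometry pose recovers the estimate
assert np.allclose(np.dot(T_map_odom, T_odom_base), T_map_base)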
Example #7
    def process_wrench(self, msg):
        cur_wrench = np.mat([msg.wrench.force.x, 
                             msg.wrench.force.y, 
                             msg.wrench.force.z, 
                             msg.wrench.torque.x, 
                             msg.wrench.torque.y, 
                             msg.wrench.torque.z]).T
        try:
            (ft_pos, ft_quat) = self.tf_list.lookupTransform(self.gravity_frame, 
                                                             self.netft_frame, rospy.Time(0))
        except:
            return
        rot_mat = np.mat(tf_trans.quaternion_matrix(ft_quat))[:3,:3]
        z_grav = self.react_mult * rot_mat.T * np.mat([0, 0, -1.]).T
        force_grav = np.mat(np.zeros((6, 1)))
        force_grav[:3, 0] = self.mass * g * z_grav
        torque_grav = np.mat(np.zeros((6, 1)))
        torque_grav[3:, 0] = np.mat(np.cross(self.com_pos.T.A[0], force_grav[:3, 0].T.A[0])).T
        zeroing_wrench = force_grav + torque_grav + self.wrench_zero
        zeroed_wrench = self.react_mult * (cur_wrench - zeroing_wrench)
        
        if not self.got_zero:
            self.wrench_zero = (cur_wrench - (force_grav + torque_grav))
            self.got_zero = True

        tf_zeroed_wrench = self.transform_wrench(zeroed_wrench)
        if tf_zeroed_wrench is None:
            return
        zero_msg = WrenchStamped(msg.header, 
                                 Wrench(Vector3(*tf_zeroed_wrench[:3,0]), Vector3(*tf_zeroed_wrench[3:,0])))
        self.zero_pub.publish(zero_msg)
        self.visualize_wrench(tf_zeroed_wrench)
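The zeroing above subtracts the expected gravity wrench at the sensor: gravity is rotated into the sensor frame, scaled by the payload mass, and the induced torque is the centre-of-mass offset crossed with that force. The same computation in a compact, standalone form (mass, COM and orientation values are made up):

import numpy as np
import tf.transformations as tft

mass, g = 1.2, 9.81                          # payload mass [kg], gravity [m/s^2]
com = np.array([0.0, 0.0, 0.05])             # centre of mass in the sensor frame [m]
R_world_sensor = tft.quaternion_matrix(
    tft.quaternion_from_euler(0.2, 0.1, 0.0))[:3, :3]

f_grav = mass * g * R_world_sensor.T.dot(np.array([0.0, 0.0, -1.0]))  # gravity in the sensor frame
tau_grav = np.cross(com, f_grav)             # torque it produces about the sensor origin
gravity_wrench = np.hstack((f_grav, tau_grav))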
Example #8
	def localCb(self, data):
		self.localPose.setPoseStamped(data)
		
		if(not (self.enuTickPose.isNone() or self.offset is None)):
			t = self.localPose.getTranslation()
			q = self.enuTickPose.getQuaternion()
			
			q = quaternion_matrix(q)
			t = translation_matrix(t)
		
			self.localEnuPose.setMatrix(numpy.dot(q,t))
			
			t = self.localEnuPose.getTranslation()
			
			t[0] -= self.offset[0]
			t[1] -= self.offset[1]
			t[2] -= self.offset[2]
			
			q = self.localEnuPose.getQuaternion()
			
			self.localEnuPose.setTranslationQuaternion(t, q)
			
			p = PointStamped()
			p.point.x = self.offset[0]
			p.point.y = self.offset[1]
			p.point.z = self.offset[2]
			
			p.header = Header(0, rospy.rostime.get_rostime(), "world")
			
			self.offsetPub.publish(p)
			
			self.localEnuPub.publish(self.localEnuPose.getPoseStamped())
Example #9
def getSkeletonTransformation(user_id, tf, tf_name, world_name, last_updated):
    """
    Return the rototranslation matrix between tf_name and world_name 
    and the time stamp
    
    @param user_id id of the human
    @param tf 
    @param tf_name name of the element in the tf tree
    @param world_name name of the world in the tf tree
    @param last_updated time stamp of the last time the tf tree was updated
    """
    
    tf_name_full = getFullTfName(user_id, tf_name)
    if tf.frameExists(tf_name_full) and tf.frameExists(world_name):   
        try:                
            time = tf.getLatestCommonTime(tf_name_full, world_name)
            pos, quat = tf.lookupTransform(world_name, tf_name_full, time)
        except:
            return last_updated, None
        last_updated = time.secs if last_updated <= time.secs else last_updated
        r = transformations.quaternion_matrix(quat)
        skel_trans = transformations.identity_matrix()
        skel_trans[0:4,0:4] = r
        skel_trans[0:3, 3] = pos    
        return last_updated, skel_trans
    else:
        return last_updated, None
Example #10
def filterPointCloud(bin_num, pts, source_pc2_kinect_header, height=None, width=None, retPointCloud=True):
    # input height/width if you need filtered uv
    with Timer('b1'):
        global tfListener
        source_pc_kinect = PointCloud()
        source_pc_kinect.header = source_pc2_kinect_header
    with Timer('b2'):
        source_pc_kinect.points = pts
    
    # transform point cloud from kinect frame to shelf frame
    with Timer('b3'):
        import tf.transformations as tfm
        import numpy as np
        (pos, ori) = tfListener.lookupTransform('/shelf', source_pc_kinect.header.frame_id, source_pc_kinect.header.stamp)
        
        points = []
        T = np.dot(tfm.compose_matrix(translate=pos) , tfm.quaternion_matrix(ori) )

        for pt in pts:
            #tmp = np.dot(T, np.array(pt+[1])).tolist()
            points.append( np.dot(T, np.array(pt+[1])).tolist() )
            
    pts = []
    cnt = 0
    
    with Timer('b4'):
        if height and width:
            filtered_uvs = []
            filtered_uvs_mask = [False for i in xrange(height*width)]
            for point in points:  # change source to source_pc_kinect
                if not math.isnan(point[0]) and inside_bin(point, bin_num):
                    pts.append(point)   # can be removed because we only need mask
                    filtered_uvs_mask[cnt] = True
                    filtered_uvs.append([cnt//width, cnt%width])
                cnt += 1
        else:
            for point in points:  # change source to source_pc_kinect
                if inside_bin(point, bin_num):
                    pts.append(point)
    
    if retPointCloud:
        with Timer('b5'):
            filtered_pc_shelf = PointCloud()
            filtered_pc_shelf.header = source_pc2_kinect_header
            filtered_pc_shelf.header.frame_id = '/shelf'
            filtered_pc_shelf.points = [Point32(pt[0], pt[1], pt[2]) for pt in pts]
            
            # render filtered point cloud
            filtered_pc_map = tfListener.transformPointCloud('/map', filtered_pc_shelf)
            createPointsMarker(filtered_pc_map.points, marker_id=2, frame_id='/map', rgba=(0,1,0,1)) #points are in Point32
        
    
    with Timer('b6'):
        if height and width:
            if retPointCloud:
                return (filtered_pc_map, filtered_uvs, filtered_uvs_mask)
            else:
                return filtered_uvs_mask
        else:
            return filtered_pc_map
Example #11
    def on_image(self, img):
        if self.info is None:
            return

        self.camera_model.fromCameraInfo(self.info)
        # self.camera_model.rectifyImage(img, img)

        self.tf.waitForTransform('ardrone/odom',
                                 'ardrone/ardrone_base_frontcam',
                                 rospy.Time(0),
                                 rospy.Duration(3))

        trans, rot = self.tf.lookupTransform('ardrone/odom',
                                             'ardrone/ardrone_base_frontcam',
                                             rospy.Time(0))

        rot_matrix = np.array(quaternion_matrix(rot))

        for a in range(0, 360, 30):
            vector = np.array(np.array([0.1 * math.cos(a * math.pi / 180), 0.1 * math.sin(a * math.pi / 180), 0, 0]))
            point = vector.dot(rot_matrix)
            x, y = self.camera_model.project3dToPixel(point)
            cv2.circle(img, (int(x), int(y)), 5, (0, 0, 255), -1)

        return img
Example #12
    def orientation_error(self, q_a, q_b):
        '''Error from q_a to q_b
        '''
        M_a = transformations.quaternion_matrix(q_a)[:3, :3]
        M_b = transformations.quaternion_matrix(q_b)[:3, :3]
        error_matrix = M_b.dot(np.transpose(M_a))
        try:
            lie_alg_error = np.real(linalg.logm(error_matrix))
        except Exception:
            # We get an Exception("Internal Inconsistency") when the error is zero
            # No error!
            return np.array([0.0, 0.0, 0.0])

        angle_axis = sub8_utils.deskew(lie_alg_error)
        assert np.linalg.norm(angle_axis) < (2 * np.pi) + 0.01, "uh-oh, unnormalized {}".format(angle_axis)
        return angle_axis
    def cb(data):
        # rospy.loginfo("{0}".format(data))
        stamp = data.header.stamp
        tf_listener.waitForTransform(world_frame, link_frame, stamp, rospy.Duration(4.0))

        source_frame = world_frame
        target_frame = link_frame
        tf_link = tf_listener.lookupTransform(source_frame, target_frame, stamp)
        T_link = transformations.quaternion_matrix(tf_link[1])
        T_link[:3, 3] = tf_link[0]

        pose_chessboard = data.pose
        T_chessboard = pose_matrix(pose_chessboard)
        # T_chessboard =  numpy.linalg.inv(T_chessboard)

        yaml_data.append({"T_chessboard": sum(T_chessboard.tolist(), []), "T_link": sum(T_link.tolist(), [])})

        yf = open(out_fn, "w")
        yaml.dump(yaml_data, yf)

        print len(yaml_data)
        print stamp
        print T_chessboard
        print T_link
        print "---"
        yf.close()
Example #14
    def detect(self, timeout=10):
        marker_message = rospy.wait_for_message(self.marker_topic, MarkerArray, timeout=timeout)
        camera_message = rospy.wait_for_message(self.camera_topic, CameraInfo, timeout=timeout)
        camera_matrix = np.array([0])
        for marker in marker_message.markers:
            if marker.id == self.marker_number:
                marker_pose = np.array(
                    quaternion_matrix(
                        [
                            marker.pose.orientation.x,
                            marker.pose.orientation.y,
                            marker.pose.orientation.z,
                            marker.pose.orientation.w,
                        ]
                    )
                )
                marker_pose[0, 3] = marker.pose.position.x
                marker_pose[1, 3] = marker.pose.position.y
                marker_pose[2, 3] = marker.pose.position.z
                marker_pose = np.delete(marker_pose, 3, 0)

                camera_intrinsics = np.array(camera_message.K).reshape(3, 3)

                print camera_intrinsics
                print marker_pose
                camera_matrix = np.dot(camera_intrinsics, marker_pose)
                print camera_matrix
        return camera_matrix
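detect() returns K·[R|t], a 3x4 projection matrix that maps homogeneous marker-frame points to pixel coordinates. A short usage sketch with placeholder values (not taken from the example):

import numpy as np

P = np.array([[600.0, 0.0, 320.0, 0.0],
              [0.0, 600.0, 240.0, 0.0],
              [0.0, 0.0, 1.0, 0.5]])        # placeholder K.[R|t]
X = np.array([0.1, -0.05, 1.0, 1.0])        # marker-frame point, homogeneous
u, v, w = P.dot(X)
pixel = (u / w, v / w)                      # perspective divide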
    def odometry_callback(self, msg):
        """Handle updated measured velocity callback."""
        if not bool(self.config):
            return

        p = msg.pose.pose.position
        q = msg.pose.pose.orientation
        p = numpy.array([p.x, p.y, p.z])
        q = numpy.array([q.x, q.y, q.z, q.w])

        if not self.initialized:
            # If this is the first callback: Store and hold latest pose.
            self.pos_des  = p
            self.quat_des = q
            self.initialized = True

        # Compute control output:
        t = msg.header.stamp.to_sec()

        # Position error wrt. body frame
        e_pos = trans.quaternion_matrix(q).transpose()[0:3,0:3].dot(self.pos_des - p)

        vz = self.pid_depth.regulate(e_pos[2], t)
        vx = self.pid_forward.regulate(numpy.linalg.norm(e_pos[0:2]), t)
        wz = self.pid_heading.regulate(numpy.arctan2(e_pos[1], e_pos[0]), t)  # heading error from the body-frame position error

        v_linear = numpy.array([vx, 0, vz])
        v_angular = numpy.array([0, 0, wz])

        # Convert and publish vel. command:
        cmd_vel = geometry_msgs.Twist()
        cmd_vel.linear = geometry_msgs.Vector3(*v_linear)
        cmd_vel.angular = geometry_msgs.Vector3(*v_angular)

        self.pub_cmd_vel.publish(cmd_vel)
Example #16
def box_array_cb(box_array):
    polygon_array = PolygonArray()
    model_coefficients_array = ModelCoefficientsArray()
    for box in box_array.boxes:
        polygon_stamped = PolygonStamped()
        coefficients = ModelCoefficients()
        quaternion = np.array([box.pose.orientation.x, box.pose.orientation.y, box.pose.orientation.z, box.pose.orientation.w])
        rotation_matrix = transformations.quaternion_matrix(quaternion)
        ux = numpy.array([rotation_matrix[0][0], rotation_matrix[1][0], rotation_matrix[2][0]])
        uy = numpy.array([rotation_matrix[0][1], rotation_matrix[1][1], rotation_matrix[2][1]])
        uz = numpy.array([rotation_matrix[0][2], rotation_matrix[1][2], rotation_matrix[2][2]])
        dim_x = box.dimensions.x/2
        dim_y = box.dimensions.y/2
        dim_z = box.dimensions.z/2
        point = box.pose.position
        for x, y in [[-dim_x, -dim_y], [dim_x, -dim_y], [dim_x, dim_y], [-dim_x, dim_y]]:
            polygon_stamped.polygon.points.append(Point32(x*ux[0]+y*uy[0]-dim_z*uz[0]+point.x,x*ux[1]+y*uy[1]-dim_z*uz[1]+point.y,x*ux[2]+y*uy[2]-dim_z*uz[2]+point.z))
        polygon_stamped.header = box.header
        polygon_array.polygons.append(polygon_stamped)
        coefficients.values = [-uz[0], -uz[1], -uz[2], ((point.x-dim_z*uz[0])*uz[0]+(point.y-dim_z*uz[1])*uz[1]+(point.z-dim_z*uz[2])*uz[2])]
        coefficients.header = box.header
        model_coefficients_array.coefficients.append(coefficients)
    polygon_array.header = box_array.header
    PArrayPub.publish(polygon_array)
    model_coefficients_array.header = box_array.header
    MArrayPub.publish(model_coefficients_array)
Example #17
def transformFt2Global(ftlist):
    global listener
    # transform ft data to global frame
    (pos_transform, ori_transform) = lookupTransform('base_link', 'link_ft', listener)
    rotmat = tfm.quaternion_matrix(ori_transform)
    ft_global = np.dot(rotmat, ftlist + [1.0])
    return ft_global[0:3].tolist()
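Appending a homogeneous 1.0 works above only because quaternion_matrix has a zero translation column; for a force, which is a free vector, it is clearer to rotate with the 3x3 block directly. An equivalent sketch:

import numpy as np
import tf.transformations as tfm

def rotate_force(ft_xyz, quat):
    # a pure rotation; no translation term applies to a free vector
    R = tfm.quaternion_matrix(quat)[0:3, 0:3]
    return R.dot(np.asarray(ft_xyz, dtype=float))

print(rotate_force([1.0, 0.0, 0.0], tfm.quaternion_from_euler(0, 0, np.pi / 2)))  # ~[0, 1, 0]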
Example #18
def getpose():
    pub_joint_cmd=rospy.Publisher('/robot/limb/right/joint_command',JointCommand)
    command_msg=JointCommand()
    command_msg.names=['right_s0', 'right_s1', 'right_e0', 'right_e1',  'right_w0', 'right_w1', 'right_w2']
    command_msg.mode=JointCommand.POSITION_MODE
    control_rate = rospy.Rate(100)
    start = rospy.get_time()

    joint_positions=rospy.wait_for_message("/robot/joint_states",JointState)
    qc = (joint_positions.position[9:16])
    angles = [qc[2],qc[3],qc[0],qc[1],qc[4],qc[5],qc[6]]
    pub_joint_cmd=rospy.Publisher('/robot/limb/right/joint_command',JointCommand)
    command_msg=JointCommand()
    command_msg.names=['right_e1']
    command_msg.command=[0]
    command_msg.mode=JointCommand.POSITION_MODE
    control_rate = rospy.Rate(100)
    start = rospy.get_time()
    exit = 0
    listener2=tf.TransformListener()

    #the transformations
    now=rospy.Time()
    listener2.waitForTransform("/torso","/right_hand",now,rospy.Duration(1.0))
    (trans08,rot08)=listener2.lookupTransform("/torso","/right_hand",now)

    # Get 4*4 rotational matrix from quaternion ( 4th colume is [0][0][0][1])
    R08 = transformations.quaternion_matrix(rot08)
    T08 = numpy.vstack((numpy.column_stack((R08[0:3,0:3], numpy.transpose(numpy.array(trans08)))),[0,0,0,1]))
    return (angles, T08)
def get_tag_obj_transforms(input_markers):

    output_markers = MarkerArray()
    tag_detections = {}

    for marker in input_markers.markers:
        tag_id = marker.id
        position_data = marker.pose.position
        orientation_data = marker.pose.orientation

        tag_pose = numpy.matrix(quaternion_matrix([orientation_data.x,
                                                   orientation_data.y,
                                                   orientation_data.z,
                                                   orientation_data.w]))
        tag_pose[0,3] = position_data.x
        tag_pose[1,3] = position_data.y
        tag_pose[2,3] = position_data.z

        tag_detections[tag_id] = tag_pose
    

    #Setup the data structure to dump into config file
    # config_file_data = {}
    # config_file_data['objects'] = {}
    # config_file_data['objects'][OBJECT_TO_ENTER] = {}
    # config_file_data['objects'][OBJECT_TO_ENTER]['tags'] = {}
    if TAG_AT_CENTRE in tag_detections:
        transform = tag_detections[TAG_AT_CENTRE]
        tag_transforms = {}
        for tag in tag_detections:
            tag_transforms[tag] = numpy.linalg.inv(tag_detections[tag])*transform

        print (tag_transforms)
 def computePlaceToBaseMatrix(self, place):
     place_quaternion = place[3:]
     rotation_matrix = quaternion_matrix(place_quaternion)
     translation = place[0:3]
     rotation_matrix[[0, 1, 2], 3] = translation
     place_to_base_matrix = rotation_matrix
     return place_to_base_matrix
Example #21
    def planCallback(self, msg):
        # get the goal
        pose_stamped = msg.poses[-1]
        pose = pose_stamped.pose

        # look ahead one meter
        R = quaternion_matrix([pose.orientation.x, pose.orientation.y, pose.orientation.z, pose.orientation.w])
        point = [1, 0, 0, 1]
        M = R.dot(point)  # rotate the 1 m look-ahead offset into the goal orientation

        p = PointStamped()
        p.header.frame_id = pose_stamped.header.frame_id
        p.header.stamp = rospy.Time(0)
        p.point.x += pose_stamped.pose.position.x + M[0]
        p.point.y += pose_stamped.pose.position.y + M[1]
        p.point.z += pose_stamped.pose.position.z + M[2]

        # transform to base_link
        p = self.listener.transformPoint("base_link", p)

        # update
        with self.mutex:
            if p.point.x < 0.65:
                self.x = 0.65
            else:
                self.x = p.point.x
            if p.point.y > 0.5:
                self.y = 0.5
            elif p.point.y < -0.5:
                self.y = -0.5
            else:
                self.y = p.point.y
    def execute(self, userdata):


        userdata.marker_pose.header.stamp = rospy.Time.now()
        pose = self.tf.transformPose('/base_link', userdata.marker_pose)
        p = [pose.pose.position.x, pose.pose.position.y, pose.pose.position.z]
        q = [pose.pose.orientation.x, pose.pose.orientation.y, pose.pose.orientation.z, pose.pose.orientation.w]
        rm = tfs.quaternion_matrix(q)
        # assemble a new coordinate frame that has z-axis aligned with
        # the vertical direction and x-axis facing the z-axis of the
        # original frame
        z = rm[:3, 2]
        z[2] = 0
        axis_x = tfs.unit_vector(z)
        axis_z = tfs.unit_vector([0, 0, 1])
        axis_y = np.cross(axis_x, axis_z)
        rm = np.vstack((axis_x, axis_y, axis_z)).transpose()
        # shift the pose along the x-axis
        p += np.dot(rm, [self.DISTANCE, 0, 0])[:3]
        userdata.goal_pose = pose
        userdata.goal_pose.pose.position.x = p[0]
        userdata.goal_pose.pose.position.y = p[1]
        userdata.goal_pose.pose.position.z = p[2]
        yaw = tfs.euler_from_matrix(rm)[2]
        q = tfs.quaternion_from_euler(0, 0, yaw - math.pi)
        userdata.goal_pose.pose.orientation.x = q[0]
        userdata.goal_pose.pose.orientation.y = q[1]
        userdata.goal_pose.pose.orientation.z = q[2]
        userdata.goal_pose.pose.orientation.w = q[3]
        return 'succeeded'
Example #23
def aim_pose_to(ps, pts, point_dir=(1,0,0)):
    if not (ps.header.frame_id.lstrip('/') == pts.header.frame_id.lstrip('/')):
        rospy.logerr("[Pose_Utils.aim_pose_to]:"+
                     "Pose and point must be in same frame: %s, %s"
                    %(ps.header.frame_id, pts.header.frame_id))
    target_pt = np.array((pts.point.x, pts.point.y, pts.point.z))
    base_pt = np.array((ps.pose.position.x,
                        ps.pose.position.y,
                        ps.pose.position.z)) 
    base_quat = np.array((ps.pose.orientation.x, ps.pose.orientation.y,
                          ps.pose.orientation.z, ps.pose.orientation.w))

    b_to_t_vec = np.array((target_pt[0]-base_pt[0],
                           target_pt[1]-base_pt[1],
                           target_pt[2]-base_pt[2]))
    b_to_t_norm = np.divide(b_to_t_vec, np.linalg.norm(b_to_t_vec))

    point_dir_hom = (point_dir[0], point_dir[1], point_dir[2], 1)
    base_rot_mat = tft.quaternion_matrix(base_quat)
    point_dir_hom = np.dot(point_dir_hom, base_rot_mat.T)
    point_dir = np.array((point_dir_hom[0]/point_dir_hom[3],
                         point_dir_hom[1]/point_dir_hom[3],
                         point_dir_hom[2]/point_dir_hom[3]))
    point_dir_norm = np.divide(point_dir, np.linalg.norm(point_dir))
    axis = np.cross(point_dir_norm, b_to_t_norm)
    angle = np.arccos(np.vdot(point_dir_norm, b_to_t_norm))
    quat = tft.quaternion_about_axis(angle, axis)
    new_quat = tft.quaternion_multiply(quat, base_quat)
    ps.pose.orientation = Quaternion(*new_quat)
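The core of aim_pose_to is the classic "rotation taking one unit vector onto another": axis from the cross product, angle from the dot product, then quaternion_about_axis. Isolated as a sketch (anti-parallel vectors would need special handling, which is omitted here as in the original):

import numpy as np
import tf.transformations as tft

def quat_between(a, b):
    # unit quaternion rotating unit vector a onto unit vector b
    a = np.asarray(a, dtype=float) / np.linalg.norm(a)
    b = np.asarray(b, dtype=float) / np.linalg.norm(b)
    axis = np.cross(a, b)
    angle = np.arccos(np.clip(np.dot(a, b), -1.0, 1.0))
    return tft.quaternion_about_axis(angle, axis)

print(quat_between([1.0, 0.0, 0.0], [0.0, 1.0, 0.0]))  # ~90 degrees about +z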
Example #24
    def __init__(self, index, topic, pos, orientation, axis=DEFAULT_AXIS):
        """Thruster class constructor.

        Parameters
        ----------
        index: int
            Thruster's index.
        topic: str
            Name of the thruster's command topic.
        pos: numpy.array
            3D position of the thruster with relation to the reference frame.
        orientation: numpy.array
            Quaternion with the orientation of the thruster with relation to
            the reference frame.
        """
        self._index = index
        self._topic = topic
        self._pos = None
        self._orientation = None
        self._force_dist = None
        if pos is not None and orientation is not None:
            self._pos = pos
            self._orientation = orientation
            # compute contribution to configuration matrix of this thruster
            thrust_body = trans.quaternion_matrix(orientation).dot(
                axis.transpose())[0:3]
            torque_body = numpy.cross(pos, thrust_body)
            self._force_dist = numpy.hstack((
                thrust_body, torque_body)).transpose()
        self._command = 0
        self._thrust = 0
        self._command_pub = rospy.Publisher(self._topic, FloatStamped,
                                            queue_size=10)

        print 'Thruster #%d - %s - %s' % (self._index, self.LABEL, self._topic)
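Each Thruster stores, in _force_dist, its column of the thruster allocation matrix: the thrust direction in the body frame stacked on top of pos x thrust. Stacking those columns gives the matrix that maps per-thruster forces to a body wrench; a small sketch under that assumption (positions and orientations are made up):

import numpy as np
from tf import transformations as trans

def thruster_column(pos, quat, axis=np.array([1.0, 0.0, 0.0, 0.0])):
    # thrust direction in the body frame plus the torque it produces about the origin
    thrust_body = trans.quaternion_matrix(quat).dot(axis)[0:3]
    torque_body = np.cross(pos, thrust_body)
    return np.hstack((thrust_body, torque_body))

cols = [thruster_column(np.array([1.0,  0.3, 0.0]), trans.quaternion_from_euler(0, 0, 0)),
        thruster_column(np.array([1.0, -0.3, 0.0]), trans.quaternion_from_euler(0, 0, np.pi))]
TAM = np.column_stack(cols)                 # 6 x n_thrusters
wrench = TAM.dot(np.array([5.0, 5.0]))      # body wrench from per-thruster forces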
Example #25
    def test_LArmIK(self):
        #   /WAIST /LARM_JOINT5_Link
        # - Translation: [0.325, 0.182, 0.074]
        # - Rotation: in Quaternion [-0.000, -0.707, 0.000, 0.707]
        #             in RPY [-1.694, -1.571, 1.693]
        for torso_angle in ([0, -10, 10]):
            torso_goal = self.goal_Torso()
            torso_goal = self.setup_Positions(torso_goal, [[torso_angle]])
            self.torso.send_goal_and_wait(torso_goal)
            for p in [[ 0.325, 0.182, 0.074], # initial
                      [ 0.3, -0.2, 0.074], [ 0.3, -0.1, 0.074], [ 0.3,  0.0, 0.074],
                      [ 0.3,  0.1, 0.074], [ 0.3,  0.2, 0.074], [ 0.3,  0.3, 0.074],
                      [ 0.4, -0.1, 0.074], [ 0.4, -0.0, 0.074], [ 0.4,  0.1, 0.074],
                      [ 0.4,  0.2, 0.074], [ 0.4,  0.3, 0.074], [ 0.4,  0.4, 0.074],
                      ] :

                print "solve ik at p = ", p
                ret = self.set_target_pose('LARM', p, [-1.694,-1.571, 1.693], 1.0)
                self.assertTrue(ret.operation_return, "ik failed")
                ret = self.wait_interpolation_of_group('LARM')
                self.assertTrue(ret.operation_return, "wait interpolation failed")

                rospy.sleep(1)
                now = rospy.Time.now()
                self.listener.waitForTransform("WAIST", "LARM_JOINT5_Link", now, rospy.Duration(1.0))
                (trans, rot) = self.listener.lookupTransform("WAIST", "LARM_JOINT5_Link", now)
                numpy.testing.assert_array_almost_equal(trans, p, decimal=1)
                print "current position   p = ", trans
                print "                 rot = ", rot
                print "                     = ", quaternion_matrix(rot)[0:3,0:3]
Example #26
    def odometry_callback(self, msg):
        """Handle updated measured velocity callback."""
        if not bool(self.config):
            return

        linear = msg.twist.twist.linear
        angular = msg.twist.twist.angular
        v_linear = numpy.array([linear.x, linear.y, linear.z])
        v_angular = numpy.array([angular.x, angular.y, angular.z])

        if self.config['odom_vel_in_world']:
            # This is a temp. workaround for gazebo's pos3d plugin not behaving properly:
            # Twist should be provided wrt child_frame, gazebo provides it wrt world frame
            # see http://docs.ros.org/api/nav_msgs/html/msg/Odometry.html
            xyzw_array = lambda o: numpy.array([o.x, o.y, o.z, o.w])
            q_wb = xyzw_array(msg.pose.pose.orientation)
            R_bw = trans.quaternion_matrix(q_wb)[0:3, 0:3].transpose()

            v_linear = R_bw.dot(v_linear)
            v_angular = R_bw.dot(v_angular)

        # Compute control output:
        t = msg.header.stamp.to_sec()
        e_v_linear = (self.v_linear_des - v_linear)
        e_v_angular = (self.v_angular_des - v_angular)

        a_linear = self.pid_linear.regulate(e_v_linear, t)
        a_angular = self.pid_angular.regulate(e_v_angular, t)

        # Convert and publish accel. command:
        cmd_accel = geometry_msgs.Accel()
        cmd_accel.linear = geometry_msgs.Vector3(*a_linear)
        cmd_accel.angular = geometry_msgs.Vector3(*a_angular)
        self.pub_cmd_accel.publish(cmd_accel)
Example #27
    def predictSinglePose(self, pose, arm_side):
        # Convert pose to numpy matrix
        p = tft.translation_matrix([pose.position.x,pose.position.y,pose.position.z])
        rot = tft.quaternion_matrix([pose.orientation.x,pose.orientation.y,pose.orientation.z,pose.orientation.w])
        input_pose = np.dot(p,rot)

        gpList, sysList = self.predict([input_pose], arm_side)
        return tfx.pose(gpList[0], frame=pose.frame, stamp=pose.stamp), tfx.pose(sysList[0], frame=pose.frame, stamp=pose.stamp)
Example #28
def matrix_from_StampedTransform(msg):
    T = msg.transform
    trans = [T.translation.x, T.translation.y, T.translation.z]
    quat = [T.rotation.x, T.rotation.y, T.rotation.z, T.rotation.w]

    return tfs.concatenate_matrices(
        tfs.translation_matrix(trans),
        tfs.quaternion_matrix(quat))
Example #29
 def transform_in_frame(self, pos, quat, off_point):
     pos = make_column(pos)    
     quat = make_list(quat)
     invquatmat = np.mat(tftrans.quaternion_matrix(quat))
     invquatmat[0:3,3] = pos 
     trans = np.matrix([off_point[0],off_point[1],off_point[2],1.]).T
     transpos = invquatmat * trans
     return np.resize(transpos, (3, 1))
Example #30
def flipOrbToNDT (orbPose):
    qOrb = [orbPose.qx, orbPose.qy, orbPose.qz, orbPose.qw]
    orbFlip = trafo.concatenate_matrices(
        trafo.quaternion_matrix(qOrb),
        trafo.rotation_matrix(np.pi/2, (1,0,0)),
        trafo.rotation_matrix(np.pi/2, (0,0,1))
    )
    return trafo.quaternion_from_matrix(orbFlip)
Example #31
    queue_size=10)

group = moveit_commander.MoveGroupCommander("left_arm")
group.set_planning_time(20)
group.allow_replanning(True)

tf_listener = tf.TransformListener(rospy.Duration(1))

id_num = raw_input("Input Grasp ID: ")

tf_listener.waitForTransform('base', "sample_grasp_" + str(id_num),
                             rospy.Time(0), rospy.Duration(2.0))
t, q = tf_listener.lookupTransform("base", "sample_grasp_" + str(id_num),
                                   rospy.Time())

matrix = quaternion_matrix(q)
new_matrix = matrix

offset = -0.3
t_pre = t + new_matrix[0:3, 2] * offset

q = quaternion_from_matrix(new_matrix)

tf_listener.waitForTransform('base', "sample_grasp_" + str(id_num),
                             rospy.Time(0), rospy.Duration(2.0))
t, q = tf_listener.lookupTransform("base", "sample_grasp_" + str(id_num),
                                   rospy.Time())
group.clear_pose_targets()
matrix = quaternion_matrix(q)
new_matrix = np.eye(4)
new_matrix[:, 0] = matrix[:, 2]
Example #32
def fromTransformToMatrix(transform):
    matrix = quaternion_matrix(transform[1])
    matrix[0][3] = transform[0][0]
    matrix[1][3] = transform[0][1]
    matrix[2][3] = transform[0][2]
    return matrix
Example #33
 def test_trace(self, q):
     m = quaternion_matrix(q)
     np.testing.assert_array_almost_equal(
         w.compile_and_execute(w.trace, [m]), np.trace(m))
Example #34
        #print cam_rotation_matrix()
        DCM = T*cam_rotation_matrix()
        print "Transform %s to %s (with rotated coordinate frame):"%(space_to_str(space),chainName)
        print np_mat_to_str(DCM)

        stamp = rospy.Time()
        frame1 = 'Torso_link'
        frame2 = 'CameraTop_frame'
        try:
            listener.waitForTransform(frame1,frame2,stamp,rospy.Duration(1.0))
            (trans,rot) = listener.lookupTransform(frame1,frame2,stamp)
        except tf.Exception as e:
            print "ERROR using TF"
            print "%s"%(e)
            sys.exit(-1)

        m = transformations.quaternion_matrix(rot)
        for i in range(0,3):
            m[i,3] = trans[i]
        print "[tf] Transform %s to %s:"%(frame1,frame2)
        print np_mat_to_str(m)

        e = np.linalg.norm(DCM - m)
        print "Error is: ",e
        if e > 1e-5:
            print "ERROR: Something is wrong with your TF transformations.  Transforms do not match!"
        else:
            print "Test ok. Done"


Example #35
    def _depth_img_callback(self, msg):
        # Doing a rospy.wait_for_message is super slow, compared to just subscribing and keeping the newest one.
        self.curr_img_time = time.time()
        self.last_image_pose = tfh.current_robot_pose(self.base_frame, self.camera_frame)
        if not self.last_image_pose:
            return
        self.curr_depth_img = bridge.imgmsg_to_cv2(msg)

        depth = self.curr_depth_img.copy()
        camera_pose = self.last_image_pose
        cam_p = camera_pose.position

        camera_rot = tft.quaternion_matrix(tfh.quaternion_to_list(camera_pose.orientation))[0:3, 0:3]

        # Do grasp prediction
        depth_crop, depth_nan_mask = process_depth_image(depth, self.img_crop_size, 300, return_mask=True, crop_y_offset=self.img_crop_y_offset)
        points, angle, width_img, _ = predict(depth_crop, self.model, process_depth=False, depth_nan_mask=depth_nan_mask, filters=(2.0, 2.0, 2.0))

        invalid_mask = np.zeros((300, 300)).astype("bool")
        if self.gripper == "robotiq":
            invalid_mask[:100, :] = True

        points[invalid_mask] = 0

        # Mask Points Here
        angle -= np.arcsin(camera_rot[0, 1])  # Correct for the rotation of the camera
        angle = (angle + np.pi/2) % np.pi - np.pi/2  # Wrap [-np.pi/2, np.pi/2]

        # Convert to 3D positions.
        imh, imw = depth.shape
        x = ((np.vstack((np.linspace((imw - self.img_crop_size) // 2, (imw - self.img_crop_size) // 2 + self.img_crop_size, depth_crop.shape[1], np.float), )*depth_crop.shape[0]) - self.cam_K[0, 2])/self.cam_K[0, 0] * depth_crop).flatten()
        y = ((np.vstack((np.linspace((imh - self.img_crop_size) // 2 - self.img_crop_y_offset, (imh - self.img_crop_size) // 2 + self.img_crop_size - self.img_crop_y_offset, depth_crop.shape[0], np.float), )*depth_crop.shape[1]).T - self.cam_K[1,2])/self.cam_K[1, 1] * depth_crop).flatten()
        pos = np.dot(camera_rot, np.stack((x, y, depth_crop.flatten()))).T + np.array([[cam_p.x, cam_p.y, cam_p.z]])

        width_m = width_img / 300.0 * 2.0 * depth_crop * np.tan(self.cam_fov * self.img_crop_size/depth.shape[0] / 2.0 / 180.0 * np.pi)

        maxes = peak_local_max(points, min_distance=10, threshold_abs=0.1, num_peaks=3)
        if maxes.shape[0] == 0:
            rospy.logerr("No Local Maxes")
            return
        
        if self.prev_mp is None:
            best_g_unr = maxes[np.argmin(np.linalg.norm(maxes, axis=1))]
            self.prev_mp = best_g_unr
        else:
            best_g_unr = maxes[np.argmin(np.linalg.norm(maxes - self.prev_mp, axis=1))]
            self.prev_mp = (best_g_unr * 0.25 + self.prev_mp * 0.75).astype(np.int)
        best_g = np.ravel_multi_index(((best_g_unr[0]), (best_g_unr[1])), points.shape)
        best_g_unr = np.unravel_index(best_g, points.shape)

        g = Grasp()
        g.pose.position.x = pos[best_g, 0]
        g.pose.position.y = pos[best_g, 1]
        g.pose.position.z = pos[best_g, 2]
        g.pose.orientation = tfh.list_to_quaternion(tft.quaternion_from_euler(np.pi, 0, ((angle[best_g_unr]%np.pi) - np.pi/2)))
        g.width = width_m[best_g_unr]
        g.quality = points[best_g_unr]

        grasp_img = cv2.applyColorMap((points * 255).astype(np.uint8), cv2.COLORMAP_JET)
        rr, cc = circle(self.prev_mp[0], self.prev_mp[1], 5)
        grasp_img[rr, cc, 0] = 0
        grasp_img[rr, cc, 1] = 255
        grasp_img[rr, cc, 2] = 0

        show = gridshow('Display',
                 [depth_crop, grasp_img],
                 [(0.30, 0.55), None, (-np.pi/2, np.pi/2)],
                 [cv2.COLORMAP_BONE, cv2.COLORMAP_JET, cv2.COLORMAP_BONE],
                 3,
                 False)

        self.img_pub.publish(bridge.cv2_to_imgmsg(show))
        self.cmd_pub.publish(g)
Example #36
	def setRwb(self, _Rwb=PoseArray()):
		# _Rwb.poses is a list of Pose messages; build one rotation matrix per pose
		for i, pose in enumerate(_Rwb.poses):
			self.Rwb[i] = tf.quaternion_matrix([pose.orientation.x, pose.orientation.y, pose.orientation.z, pose.orientation.w])
Example #37
 def _x_axis_from_quaternion(q):
     m = tft.quaternion_matrix([q.x, q.y, q.z, q.w])
     return m[:3, 0]  # First column
Example #38
def quat_to_rod(q):
    # q = [qx, qy, qz, qw]
    rot = tfm.quaternion_matrix(q)[0:3][:, 0:3]
    dst, jacobian = cv2.Rodrigues(rot)
    return dst.T.tolist()[0]
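cv2.Rodrigues also accepts a rotation vector and returns the corresponding 3x3 matrix, so the conversion above can be verified with a round trip (a sketch, assuming OpenCV is installed):

import numpy as np
import cv2
import tf.transformations as tfm

q = tfm.quaternion_from_euler(0.1, -0.2, 0.3)
rvec = np.array(quat_to_rod(q))
R_back, _ = cv2.Rodrigues(rvec)
assert np.allclose(R_back, tfm.quaternion_matrix(q)[0:3, 0:3], atol=1e-6)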
Example #39
        print 'P: ', [self.x0_int[0], 0.0, self.x0_int[2], 0.0, 0.0, self.x0_int[1], self.x0_int[3], 0.0, 0.0, 0.0, 1.0, 0.0]
        #print res_1
        return res_1.x
        

if __name__ == '__main__':
    color1 = (0,255,255)
    color2 = (0,0,255)
    color3 = (255,0,255)
    
    # x0_ext_old = [1.08592196e-01, -5.96469288e-01, 6.09164354e-01] + 
        # [9.05378371e-01, 3.06042346e-02, -5.82887034e-02, -4.19470873e-01]) # viewer
    x0_ext_old = [7.16834068e-01, -7.68849430e-02, 3.78838750e-01] + [6.89344221e-01, 6.44350485e-01, -2.20748124e-01, -2.46753445e-01]  # observer
    
    
    transform = tfm.concatenate_matrices(tfm.translation_matrix(x0_ext_old[0:3]), tfm.quaternion_matrix(x0_ext_old[3:7]))
    inversed_transform = tfm.inverse_matrix(transform)
    translation = tfm.translation_from_matrix(inversed_transform)
    quaternion = tfm.quaternion_from_matrix(inversed_transform)

    x0_ext =  translation.tolist() + quat_to_rod(quaternion.tolist())

    if sys.argv[1] == 'observer':
        if sys.argv[3] == '640':
            x0_int = [617.605, 617.605, 305.776, 238.914, 0.0,0.0,0.0,0.0,0.0]  # observer
        elif sys.argv[3] == '1080':
            x0_int = [1389.61, 1389.61, 927.997, 537.558, 0.0,0.0,0.0,0.0,0.0]  # observer
    else:
        if sys.argv[3] == '640':
            x0_int = [618.337, 618.337, 310.987, 236.15, 0.0,0.0,0.0,0.0,0.0] # viewer 
        elif sys.argv[3] == '1080':
Example #40
 def _yaw_from_quaternion(q):
     m = tft.quaternion_matrix([q.x, q.y, q.z, q.w])
     return math.atan2(m[1, 0], m[0, 0])
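atan2(m[1, 0], m[0, 0]) reads the yaw (z) angle out of the rotation matrix and matches the third angle returned by the default-axes Euler conversion, as long as the pitch stays away from +/-90 degrees. A tiny check with an array-style quaternion (the helper above expects a geometry_msgs quaternion instead):

import math
import tf.transformations as tft

q = tft.quaternion_from_euler(0.2, -0.1, 1.0)      # roll, pitch, yaw
m = tft.quaternion_matrix(q)
yaw = math.atan2(m[1, 0], m[0, 0])
assert abs(yaw - 1.0) < 1e-6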
Example #41
def xyzw_to_mat44(ori):
    return transformations.quaternion_matrix((ori.x, ori.y, ori.z, ori.w))
            print("EXCEPTION:", e)
            #if something goes wrong with this just go to bed for a second or so and wake up hopefully refreshed.
            delay.sleep()
            continue

        #***************************************
        #***          Print current robot pose
        #***************************************

        #Print out the x,y coordinates of the transform
        print("Robot is believed to be at (x,y): (", translation[0], ",",
              translation[1], ")")

        #Convert the quaternion-based orientation of the latest message to Euler representation in order to get z axis rotation
        r_xorient, r_yorient, r_zorient = transformations.euler_from_matrix(
            transformations.quaternion_matrix(orientation))
        robot_theta = r_zorient  # only need the z axis
        print("Robot is believed to have orientation (theta): (", robot_theta,
              ")\n")

        #***************************************
        #***          Print current destination
        #***************************************

        # the waypoint variable is filled in in the waypoint_callback function above, which comes from incoming messages
        # subscribed to in the .Subscriber call above.

        #Print out the x,y coordinates of the latest message
        print("Current waypoint (x,y): (", waypoint.translation.x, ",",
              waypoint.translation.y, ")")
    def perspectiveTransformation(self, orig_point=Point()):

        if self._ros_active and self._using_ros:

            try:
                # Create TF message for point
                point_robot = PointStamped()
                point_robot.header.stamp = rospy.Time(0)
                point_robot.header.frame_id = '/base_link'
                point_robot.point.x = orig_point.x
                point_robot.point.y = orig_point.y
                point_robot.point.z = orig_point.z

                # Transform point from robot space to user space using TFs
                point_world = self._tf_listener.transformPoint(
                    '/map', point_robot)
                point_user_tf = self._tf_listener.transformPoint(
                    '/' + str(self._user_id), point_world)

                # Apply 2D perspective transformation
                point_user = np.array([
                    point_user_tf.point.x, point_user_tf.point.y,
                    point_user_tf.point.z
                ])
                user_point_perspective = self._perspective_matrix.dot(
                    point_user)
                return user_point_perspective

            except (ValueError, rospy.ROSSerializationException,
                    tf.LookupException, tf.ConnectivityException,
                    tf.ExtrapolationException) as e:
                rospy.logerr('[PERSPECTIVE TRANSFORMATION]: Caught Error: %s' %
                             e)
                return None

        else:

            # Get robot and user orientation transformations under Euler Angles
            if self._rotation_type.find('euler') != -1:

                user_euler = T.euler_from_quaternion(
                    (self._user_pose.orientation.x,
                     self._user_pose.orientation.y,
                     self._user_pose.orientation.z,
                     self._user_pose.orientation.w))
                robot_euler = T.euler_from_quaternion(
                    (self._robot_pose.orientation.x,
                     self._robot_pose.orientation.y,
                     self._robot_pose.orientation.z,
                     self._robot_pose.orientation.w))

                user_transformation = T.euler_matrix(user_euler[0],
                                                     user_euler[1],
                                                     user_euler[2])

                robot_transformation = T.euler_matrix(robot_euler[0],
                                                      robot_euler[1],
                                                      robot_euler[2])

            # Get robot and user orientation transformations under Quaternions
            elif self._rotation_type.find('quaternion') != -1:

                user_transformation = T.quaternion_matrix(
                    (self._user_pose.orientation.x,
                     self._user_pose.orientation.y,
                     self._user_pose.orientation.z,
                     self._user_pose.orientation.w))
                robot_transformation = T.quaternion_matrix(
                    (self._robot_pose.orientation.x,
                     self._robot_pose.orientation.y,
                     self._robot_pose.orientation.z,
                     self._robot_pose.orientation.w))

            else:
                print('Invalid rotation type, impossible to transform points')
                return None

            # Add translation of user and robot to transformation matrix
            robot_transformation[0, 3] = self._robot_pose.position.x
            robot_transformation[1, 3] = self._robot_pose.position.y
            robot_transformation[2, 3] = self._robot_pose.position.z

            user_transformation[0, 3] = self._user_pose.position.x
            user_transformation[1, 3] = self._user_pose.position.y
            user_transformation[2, 3] = self._user_pose.position.z

            # Transform point from robot space to user space
            robot_transformation = np.linalg.inv(robot_transformation)
            point_robot = np.array([[orig_point.x], [orig_point.y],
                                    [orig_point.z], [1]])
            point_world = robot_transformation.dot(point_robot)

            # Apply 2D perspective transformation
            point_user = user_transformation.dot(point_world)
            user_point_perspective = self._perspective_matrix.dot(point_user)
            return user_point_perspective
Example #44
 def rot_matrix(self):
     return quaternion_matrix(self._rot)[0:3, 0:3]
Example #45
def rosRGBDCallBack(rgb_data, depth_data):
    draw_contours = True
    detect_shape = False
    calculate_size = False

    try:
        cv_image = cv_bridge.imgmsg_to_cv2(rgb_data, "bgr8")
        cv_depthimage = cv_bridge.imgmsg_to_cv2(depth_data, "32FC1")
        cv_depthimage2 = np.array(cv_depthimage, dtype=np.float32)
    except CvBridgeError as e:
        print(e)

    contours_blue, mask_image_blue = HSVObjectDetection(cv_image,
                                                        toPrint=False)

    for cnt in contours_blue:
        if not draw_contours:
            xp, yp, w, h = cv2.boundingRect(cnt)

            cv2.rectangle(cv_image, (xp, yp), (xp + w, yp + h), [0, 255, 255],
                          2)
            cv2.circle(cv_image, (int(xp + w / 2), int(yp + h / 2)), 5,
                       (55, 255, 155), 4)
            if not math.isnan(
                    cv_depthimage2[int(yp + h / 2)][int(xp + w / 2)]):
                zc = cv_depthimage2[int(yp + h / 2)][int(xp + w / 2)]
                X1 = getXYZ(xp + w / 2, yp + h / 2, zc, fx, fy, cx, cy)
                print "x:", X1[0], "y:", X1[1], "z:", X1[2]
            else:
                continue
        else:
            #################Draw contours#####################################
            # In task1, you need to call the function "cv2.drawContours" to show
            # object contour in RVIZ
            #
            #
            cv2.drawContours(cv_image, contours_blue, -1, (130, 25, 60), -1)
            #x_str_blue, y_str_blue = cnt[0][0][:]
            #font_blue = cv2.FONT_HERSHEY_SIMPLEX
            #cv2.putText(cv_image, "Blue", (x_str_blue, y_str_blue), font_blue, 1, (0, 255, 255), 2, cv2.LINE_AA)
            M_blue = cv2.moments(cnt)
            cX_blue = int(M_blue["m10"] / M_blue["m00"])
            cY_blue = int(M_blue["m01"] / M_blue["m00"])
            #cv2.circle(cv_image, (cX_blue, cY_blue), 10, (1, 227, 254), -1)

            c = max(contours_blue, key=cv2.contourArea)
            extRight = tuple(c[c[:, :, 0].argmax()][0])
            #cv2.circle(cv_image, extRight, 5, (0, 255, 0), -1)

            if (len(cnt) >= 5):
                ellipse = cv2.fitEllipse(cnt)
                cv2.ellipse(cv_image, ellipse, (0, 255, 0), 2)

                (x, y), (MA, ma), angle = cv2.fitEllipse(cnt)
                angle_pre = angle
                if (angle >= 90) and (angle <= 180):
                    angle = angle - 90
                    ell_x = int(x + 100 * math.cos(angle * 0.0174532925))
                    ell_y = int(y + 100 * math.sin(angle * 0.0174532925))
                    ell_x_short = int(x + 100 * math.sin(angle * 0.0174532925))
                    ell_y_short = int(y - 100 * math.cos(angle * 0.0174532925))
                else:
                    ell_x = int(x + 100 * math.sin(angle * 0.0174532925))
                    ell_y = int(y - 100 * math.cos(angle * 0.0174532925))
                    ell_x_short = int(x - 100 * math.cos(angle * 0.0174532925))
                    ell_y_short = int(y - 100 * math.sin(angle * 0.0174532925))

                cv2.line(cv_image, (cX_blue, cY_blue), (ell_x, ell_y),
                         (0, 255, 0), 3)  # x-axis
                cv2.line(cv_image, (cX_blue, cY_blue),
                         (cX_blue, cY_blue - 100), (255, 0, 0), 3)  # z-axis
                cv2.line(cv_image, (cX_blue, cY_blue),
                         (ell_x_short, ell_y_short), (0, 0, 255), 3)  # y-axis

                if (cX_blue < len(depth) and cY_blue < len(depth[0])):
                    cZ_blue = depth[cX_blue][cY_blue]
                    xyz_blue = getXYZ(cX_blue, cY_blue, cZ_blue / 1000, fx, fy,
                                      cx, cy)

                    # rosrun tf tf_echo /arm_base_link /head_tilt_link
                    matrix = quaternion_matrix([0.937, 0.001, 0.349, -0.004])
                    matrix[0][3] = -0.117
                    matrix[1][3] = 0.000
                    matrix[2][3] = 0.488

                    # rosrun tf tf_echo /base_link /head_tilt_link
                    #matrix = quaternion_matrix([0.937, 0.001, 0.349, -0.004])
                    #matrix[0][3] = -0.02
                    #matrix[1][3] = 0.000
                    #matrix[2][3] = 0.585
                    xyz = np.array(
                        [xyz_blue[2], -xyz_blue[0], -xyz_blue[1], 1])
                    final_xyz = matrix.dot(xyz)

                    #matrix_rot_x = np.array([[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]])
                    #final_xyz_rot_x = matrix_rot_x.dot(final_xyz)
                    global haveGrasp
                    global counter
                    leftOrRight = False
                    if (final_xyz[0] <= 0.45):
                        final_xyz_re = np.array(
                            [final_xyz[0] + 0.1, -final_xyz[1] + 0.02, 0.05])
                        if (final_xyz_re[1] >= 0.03):
                            final_xyz_re2 = np.array(
                                [final_xyz_re[0] - 0.1, final_xyz_re[1], 0.05])
                            leftOrRight = True
                            print(final_xyz_re2)
                        else:
                            leftOrRight = False
                            print(final_xyz_re)
                        if (angle_pre <= 90):
                            orien = np.array(
                                [0.2705981, 0.6532815, -0.2705981, 0.6532815])
                        else:
                            orien = np.array(
                                [-0.2705981, 0.6532815, 0.2705981, 0.6532815])

                        counter = counter + 1
                        if (counter >= 100 and haveGrasp == False):
                            haveGrasp = True
                            if (leftOrRight == False):
                                grasping(final_xyz_re[0], final_xyz_re[1],
                                         final_xyz_re[2], orien[0], orien[1],
                                         orien[2], orien[3])
                            else:
                                grasping(final_xyz_re2[0], final_xyz_re2[1],
                                         final_xyz_re2[2], orien[0], orien[1],
                                         orien[2], orien[3])
                    if (counter >= 1000):
                        counter = 0

                    command = Pose()
                    command.position.x = xyz_blue[0]
                    command.position.y = xyz_blue[1]
                    command.position.z = xyz_blue[2]
                    command.orientation.x = 0
                    command.orientation.y = 0.707
                    command.orientation.z = 0
                    command.orientation.w = 0.707
                    #print(command)
                    robot_command_pub.publish(command)
            ###################################################################

            if detect_shape:

                peri = cv2.arcLength(cnt, True)
                obj_num = cv2.approxPolyDP(cnt, 0.04 * peri, True)

                if len(obj_num) == 3:
                    obj_shape = "triangle"
                elif len(obj_num) == 4:
                    obj_shape = "square"
                else:
                    obj_shape = "circle"

                if len(cnt) < 1:
                    continue
                x_str, y_str = cnt[0][0][:]
                font = cv2.FONT_HERSHEY_SIMPLEX
                if obj_shape == "triangle":
                    cv2.putText(cv_image, "Triangle", (x_str, y_str), font, 1,
                                (0, 255, 255), 2, cv2.LINE_AA)
                elif obj_shape == "square":
                    cv2.putText(cv_image, "Square", (x_str, y_str), font, 1,
                                (0, 255, 255), 2, cv2.LINE_AA)
                elif obj_shape == "circle":
                    cv2.putText(cv_image, "Circle", (x_str, y_str), font, 1,
                                (0, 255, 255), 2, cv2.LINE_AA)

            if calculate_size:
                ##################################################
                # In task 3, we provide a method to calculate area.
                # If you want to use the method the TA offers, assign the
                # vertex list of a single object to "vtx_list" and finish the
                # functions "get_side_length" and "get_radius" (see the sketch
                # after this snippet).
                # You can also write your own area calculation; if so, ignore
                # or comment out lines 193 - 218 and put your code there.
                #
                # vtx_list = ?? #hint: vtx_list is similar to cnt
                ##################################################
                vtx_list_tri = cv2.approxPolyDP(cnt, 0.2 * peri, True)
                vtx_list_squ = cv2.approxPolyDP(cnt, 0.1 * peri, True)
                vtx_list_cir = cv2.approxPolyDP(cnt, 0.2 * peri, True)
                #print(vtx_list)
                if obj_shape == "triangle":
                    tri_side_len = get_side_length(vtx_list_tri,
                                                   cv_depthimage2)
                    if tri_side_len is None:
                        continue
                    tri_area = tri_side_len**2 * math.sqrt(3) / 4
                    string = "%.3e" % tri_area
                    cv2.putText(cv_image, string, (x_str, y_str + 30), font, 1,
                                (0, 255, 255), 2, cv2.LINE_AA)

                elif obj_shape == "square":
                    squ_side_len = get_side_length(vtx_list_squ,
                                                   cv_depthimage2)
                    if squ_side_len is None:
                        continue
                    squ_area = squ_side_len**2
                    string = "%.3e" % squ_area
                    cv2.putText(cv_image, string, (x_str, y_str + 30), font, 1,
                                (0, 255, 255), 2, cv2.LINE_AA)

                elif obj_shape == "circle":
                    circle_radius = get_radius(vtx_list_cir, cv_depthimage2)
                    if circle_radius is None:
                        continue
                    circle_area = circle_radius**2 * math.pi
                    string = "%.3e" % circle_area
                    cv2.putText(cv_image, string, (x_str, y_str + 30), font, 1,
                                (0, 255, 255), 2, cv2.LINE_AA)

    img_result_pub.publish(
        cv_bridge.cv2_to_imgmsg(cv_image, encoding="passthrough"))
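The task comment above leaves "get_side_length" and "get_radius" to the reader; they are not shown in this snippet. A minimal sketch, assuming the getXYZ(px, py, z_m, fx, fy, cx, cy) back-projection and the intrinsics fx, fy, cx, cy used earlier, and assuming the depth image is indexed [row][col] in millimetres (swap the indices if yours is transposed):

import numpy as np

def get_side_length(vtx_list, depth_img):
    # Back-project the first two approximated vertices and return the 3D
    # distance between them (metres) as the side length.
    pts = []
    for vtx in vtx_list[:2]:
        px, py = vtx[0]
        z = depth_img[int(py)][int(px)]
        if z == 0:
            return None  # no depth reading at this pixel
        pts.append(np.array(getXYZ(px, py, z / 1000.0, fx, fy, cx, cy)))
    if len(pts) < 2:
        return None
    return np.linalg.norm(pts[0] - pts[1])

def get_radius(vtx_list, depth_img):
    # Estimate the diameter as the largest pairwise 3D distance between the
    # approximated vertices, then halve it.
    pts = []
    for vtx in vtx_list:
        px, py = vtx[0]
        z = depth_img[int(py)][int(px)]
        if z == 0:
            continue
        pts.append(np.array(getXYZ(px, py, z / 1000.0, fx, fy, cx, cy)))
    if len(pts) < 2:
        return None
    return max(np.linalg.norm(p - q) for p in pts for q in pts) / 2.0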
Exemplo n.º 46
0
 def init_pose(self):
     T = np.array([[self.origin.position.x], [self.origin.position.y]])
     R = quaternion_matrix((self.origin.orientation.x, self.origin.orientation.y, self.origin.orientation.z,
                           self.origin.orientation.w))[0:2, 0:2]
     self.data = np.dot(R, self.data) + np.dot(T, np.ones((1, self.data.shape[1])))
Exemplo n.º 47
0
    msg = rospy.wait_for_message("baxter_available_picking_pose",
                                 AlvarMarkers)  # listen once
    robot = moveit_commander.RobotCommander()
    group = moveit_commander.MoveGroupCommander("right_arm")
    group.set_max_velocity_scaling_factor(0.3)
    group.set_max_acceleration_scaling_factor(0.3)

    group.set_goal_position_tolerance(0.01)
    group.set_goal_orientation_tolerance(0.01)

    for marker in msg.markers:
        pos = marker.pose.pose.position
        ori = marker.pose.pose.orientation
        base_to_marker_mat = numpy.dot(
            translation_matrix((pos.x, pos.y, pos.z)),
            quaternion_matrix((ori.x, ori.y, ori.z, ori.w)))
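        # Note: the columns of a homogeneous transform are the child frame's
        # axes expressed in the parent frame, so base_to_marker_mat[:3, 2] is
        # the marker z-axis in base coordinates; the goal below is offset by
        # hover_height metres along that axis to obtain a pre-pick pose.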

        rospy.loginfo('Press Enter to move to pre-pick pose of marker %s' %
                      marker.id)
        raw_input()
        if rospy.is_shutdown():
            break
        goal = copy.deepcopy(marker.pose.pose)
        goal.position.x -= hover_height * base_to_marker_mat[0, 2]
        goal.position.y -= hover_height * base_to_marker_mat[1, 2]
        goal.position.z -= hover_height * base_to_marker_mat[2, 2]
        group.set_start_state_to_current_state()
        group.set_pose_target(goal)
        plan = group.plan()
        group.execute(plan, wait=True)
Exemplo n.º 48
0
 try:
     transed_pose = listener.transformPose("BODY", pose_msg)
 except (tf.LookupException, tf.ConnectivityException,
         tf.ExtrapolationException), e:
     print "tf error: %s" % e
     return
 trans.header = transed_pose.header
 trans.child_frame_id = "handle"
 trans.transform.translation.x = transed_pose.pose.position.x
 trans.transform.translation.y = transed_pose.pose.position.y
 trans.transform.translation.z = transed_pose.pose.position.z
 trans_qua = numpy.array([
     transed_pose.pose.orientation.x, transed_pose.pose.orientation.y,
     transed_pose.pose.orientation.z, transed_pose.pose.orientation.w
 ])
 trans_matrix = transformations.quaternion_matrix(trans_qua)
 uz = numpy.array(
     [trans_matrix[0][2], trans_matrix[1][2], trans_matrix[2][2]])
 ux = numpy.array([1.0, 0.0, 0.0])
 uy = numpy.cross(uz, ux) / numpy.linalg.norm(numpy.cross(uz, ux))
 ux = numpy.cross(uy, uz)
 trans_qua = transformations.quaternion_from_matrix(
     get_4x4_mat(numpy.array([ux, uy, uz]).T))
 trans.transform.rotation.x = trans_qua[0]
 trans.transform.rotation.y = trans_qua[1]
 trans.transform.rotation.z = trans_qua[2]
 trans.transform.rotation.w = trans_qua[3]
 set_tf(10, trans)  # handle
 trans_handle_mat = get_4x4_mat(numpy.array([ux, uy, uz]).T)
 trans_handle_mat[0][3] = transed_pose.pose.position.x
 trans_handle_mat[1][3] = transed_pose.pose.position.y
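The helper get_4x4_mat is used above but not shown in this snippet. A minimal sketch consistent with that usage (embed a 3x3 rotation into a homogeneous 4x4 matrix, with the translation left at zero and filled in afterwards):

import numpy

def get_4x4_mat(rot_3x3):
    # Build a 4x4 homogeneous transform from a 3x3 rotation matrix.
    mat = numpy.identity(4)
    mat[0:3, 0:3] = rot_3x3
    return mat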
Exemplo n.º 49
0
def take_sample():
    rospy.init_node('Take_sample', anonymous=True)
    # rospy.Subscriber('/tf', TFMessage, PoseCallback)
    # rospy.Subscriber('/camera/depth/image_rect_raw', Image, ImgCallback)
    right_arm = Limb('right')
    left_arm = Limb('left')

    cnt = 0
    flag_cap = 0

    print('start')
    while not rospy.is_shutdown():
        # # show video stream
        # Wait for a coherent pair of frames: color
        frames = pipeline.wait_for_frames()
        color_frame = frames.get_color_frame()
        if not color_frame:
            continue

        # Convert images to numpy arrays
        color_image = np.asanyarray(color_frame.get_data())

        # # Stack both images horizontally
        # images = np.hstack(color_image)

        # Show images
        cv2.namedWindow('RealSense', cv2.WINDOW_AUTOSIZE)
        cv2.imshow('RealSense', color_image)

        if flag_cap:
            # # Take sample
            # 1. Image
            cnt = cnt + 1
            cv2.imwrite('Img_marker' + str(cnt) + '.jpg', color_image)

            # 2. End effector Pose
            ee_pose = left_arm.endpoint_pose()
            ee_position = ee_pose['position']
            ee_orientation = ee_pose['orientation']

            quat = [
                ee_orientation.x, ee_orientation.y, ee_orientation.z,
                ee_orientation.w
            ]
            T_matrix = quaternion_matrix(
                quat)  # quaternion order needs to be checked; here it is xyzw
            transl = np.array([ee_position.x, ee_position.y, ee_position.z])
            T_matrix[0:3, 3] = transl.T
            print('id', cnt, 'EE_pose', T_matrix)

            T_matrix = np.array(T_matrix)
            f_name = str('EE_pose' + str(cnt) + '.txt')
            np.savetxt(f_name, T_matrix, fmt='%.6f', delimiter=',')
            # fo = open('EE_pose' + str(cnt) + '.txt', 'w')
            # fo.close
            flag_cap = 0  # reset flag

        key = cv2.waitKey(50)
        if key & 0xFF == ord('t'):
            flag_cap = 1  # set flag
        elif key & 0xFF == ord('q'):
            break
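Each pass through the capture branch above writes a matching pair, Img_markerN.jpg and EE_poseN.txt (a comma-delimited 4x4 end-effector pose), presumably for a later hand-eye calibration step. A minimal loader sketch, assuming that naming convention and n collected samples:

import numpy as np

def load_samples(n):
    # Rebuild the (image path, 4x4 pose matrix) pairs written by take_sample().
    images = ['Img_marker%d.jpg' % i for i in range(1, n + 1)]
    poses = [np.loadtxt('EE_pose%d.txt' % i, delimiter=',') for i in range(1, n + 1)]
    return images, poses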
Exemplo n.º 50
0
class Vehicle(object):

    """Vehicle interface to be used by model-based controllers. It receives the
    parameters necessary to compute the vehicle's motion according to Fossen's equations of motion.
    """

    INSTANCE = None

    def __init__(self, inertial_frame_id='world'):
        """Class constructor."""
        assert inertial_frame_id in ['world', 'world_ned']
        # Reading current namespace
        self._namespace = rospy.get_namespace()

        self._inertial_frame_id = inertial_frame_id
        self._body_frame_id = None

        if self._inertial_frame_id == 'world':
            self._body_frame_id = 'base_link'
        else:
            self._body_frame_id = 'base_link_ned'

        tf_buffer = tf2_ros.Buffer()
        listener = tf2_ros.TransformListener(tf_buffer)
        tf_trans_ned_to_enu = None
        try:
            tf_trans_ned_to_enu = tf_buffer.lookup_transform(
                'world', 'world_ned', rospy.Time(),
                rospy.Duration(1))
        except Exception, e:
            self._logger.error('No transform found between world and '
                               'world_ned for ' + self.namespace)
            self._logger.error(str(e))
            self.transform_ned_to_enu = None

        if tf_trans_ned_to_enu is not None:
            self.transform_ned_to_enu = quaternion_matrix(
                (tf_trans_ned_to_enu.transform.rotation.x,
                 tf_trans_ned_to_enu.transform.rotation.y,
                 tf_trans_ned_to_enu.transform.rotation.z,
                 tf_trans_ned_to_enu.transform.rotation.w))[0:3, 0:3]

        print('Transform world_ned (NED) to world (ENU)=\n' +
              str(self.transform_ned_to_enu))

        self._mass = 0
        if rospy.has_param('~mass'):
            self._mass = rospy.get_param('~mass')
            if self._mass <= 0:
                raise rospy.ROSException('Mass has to be positive')

        self._inertial = dict(ixx=0, iyy=0, izz=0, ixy=0, ixz=0, iyz=0)
        if rospy.has_param('~inertial'):
            print 'has inertial'
            inertial = rospy.get_param('~inertial')
            for key in self._inertial:
                if key not in inertial:
                    raise rospy.ROSException('Invalid moments of inertia')
            self._inertial = inertial

        self._cog = [0, 0, 0]
        if rospy.has_param('~cog'):
            self._cog = rospy.get_param('~cog')
            if len(self._cog) != 3:
                raise rospy.ROSException('Invalid center of gravity vector')

        self._cob = [0, 0, 0]
        if rospy.has_param('~cob'):
            self._cob = rospy.get_param('~cob')
            if len(self._cob) != 3:
                raise rospy.ROSException('Invalid center of buoyancy vector')

        self._body_frame = 'base_link'
        if rospy.has_param('~base_link'):
            self._body_frame = rospy.get_param('~base_link')

        self._volume = 0.0
        if rospy.has_param('~volume'):
            self._volume = rospy.get_param('~volume')
            if self._volume <= 0:
                raise rospy.ROSException('Invalid volume')

        # Fluid density
        self._density = 1028.0
        if rospy.has_param('~density'):
            self._density = rospy.get_param('~density')
            if self._density <= 0:
                raise rospy.ROSException('Invalid fluid density')

        # Bounding box
        self._height = 0.0
        self._length = 0.0
        self._width = 0.0
        if rospy.has_param('~height'):
            self._height = rospy.get_param('~height')
            if self._height <= 0:
                raise rospy.ROSException('Invalid height')

        if rospy.has_param('~length'):
            self._length = rospy.get_param('~length')
            if self._length <= 0:
                raise rospy.ROSException('Invalid length')

        if rospy.has_param('~width'):
            self._width = rospy.get_param('~width')
            if self._width <= 0:
                raise rospy.ROSException('Invalid width')


        # Calculating the rigid-body mass matrix
        self._M = np.zeros(shape=(6, 6), dtype=float)
        self._M[0:3, 0:3] = self._mass * np.eye(3)
        self._M[0:3, 3:6] = - self._mass * \
            cross_product_operator(self._cog)
        self._M[3:6, 0:3] = self._mass * \
            cross_product_operator(self._cog)
        self._M[3:6, 3:6] = self._calc_inertial_tensor()

        # Loading the added-mass matrix
        self._Ma = np.zeros((6, 6))
        if rospy.has_param('~Ma'):
            self._Ma = np.array(rospy.get_param('~Ma'))
            if self._Ma.shape != (6, 6):
                raise rospy.ROSException('Invalid added mass matrix')

        # Sum rigid-body and added-mass matrices
        self._Mtotal = np.zeros(shape=(6, 6))
        self._calc_mass_matrix()

        # Acceleration of gravity
        self._gravity = 9.81

        # Initialize the Coriolis and centripetal matrix
        self._C = np.zeros((6, 6))

        # Vector of restoring forces
        self._g = np.zeros(6)

        # Loading the linear damping coefficients
        self._linear_damping = np.zeros(shape=(6, 6))
        if rospy.has_param('~linear_damping'):
            self._linear_damping = np.array(rospy.get_param('~linear_damping'))
            if self._linear_damping.shape == (6,):
                self._linear_damping = np.diag(self._linear_damping)
            if self._linear_damping.shape != (6, 6):
                raise rospy.ROSException('Linear damping must be given as a 6x6 matrix or the diagonal coefficients')

        # Loading the nonlinear damping coefficients
        self._quad_damping = np.zeros(shape=(6,))
        if rospy.has_param('~quad_damping'):
            self._quad_damping = np.array(rospy.get_param('~quad_damping'))
            if self._quad_damping.shape != (6,):
                raise rospy.ROSException('Quadratic damping must be defined with 6 coefficients')

        # Loading the linear damping coefficients proportional to the forward speed
        self._linear_damping_forward_speed = np.zeros(shape=(6, 6))
        if rospy.has_param('~linear_damping_forward_speed'):
            self._linear_damping_forward_speed = np.array(rospy.get_param('~linear_damping_forward_speed'))
            if self._linear_damping_forward_speed.shape == (6,):
                self._linear_damping_forward_speed = np.diag(self._linear_damping_forward_speed)
            if self._linear_damping_forward_speed.shape != (6, 6):
                raise rospy.ROSException('Linear damping proportional to the forward speed must be given as a 6x6 '
                                         'matrix or the diagonal coefficients')

        # Initialize damping matrix
        self._D = np.zeros((6, 6))

        # Vehicle states
        self._pose = dict(pos=np.zeros(3),
                          rot=quaternion_from_euler(0, 0, 0))
        # Velocity in the body frame
        self._vel = np.zeros(6)
        # Acceleration in the body frame
        self._acc = np.zeros(6)
        # Generalized forces
        self._gen_forces = np.zeros(6)        
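The constructor above relies on cross_product_operator, self._calc_inertial_tensor and self._calc_mass_matrix, which are not shown in this snippet. Minimal sketches consistent with the Fossen-style rigid-body mass matrix being built (a skew-symmetric operator, the inertia tensor assembled from the ~inertial parameters, and the rigid-body plus added-mass sum):

import numpy as np

def cross_product_operator(v):
    # Skew-symmetric matrix S(v) such that S(v).dot(u) == np.cross(v, u).
    return np.array([[0, -v[2], v[1]],
                     [v[2], 0, -v[0]],
                     [-v[1], v[0], 0]])

# Sketches of the private helpers referenced in the constructor:
def _calc_inertial_tensor(self):
    # Assemble the 3x3 inertia tensor from the ~inertial parameters read above.
    return np.array(
        [[self._inertial['ixx'], self._inertial['ixy'], self._inertial['ixz']],
         [self._inertial['ixy'], self._inertial['iyy'], self._inertial['iyz']],
         [self._inertial['ixz'], self._inertial['iyz'], self._inertial['izz']]])

def _calc_mass_matrix(self):
    # Total system inertia: rigid-body mass matrix plus added-mass matrix.
    self._Mtotal = self._M + self._Ma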
Exemplo n.º 51
0
    def estimate(self,
                 t,
                 q,
                 camera_info,
                 target_position=None,
                 occlusion_treshold=0.01,
                 output_viz=True):
        perspective_timer = cv2.getTickCount()
        visible_detections = []
        rot = quaternion_matrix(q)
        trans = translation_matrix(t)
        if target_position is None:
            target = translation_matrix([0.0, 0.0, 1000.0])
            target_position = translation_from_matrix(
                np.dot(np.dot(trans, rot), target))
        view_matrix = p.computeViewMatrix(t, target_position, [0, 0, 1])

        width = HumanVisualModel.WIDTH
        height = HumanVisualModel.HEIGHT

        projection_matrix = p.computeProjectionMatrixFOV(
            HumanVisualModel.FOV, HumanVisualModel.ASPECT,
            HumanVisualModel.CLIPNEAR, HumanVisualModel.CLIPFAR)

        rendered_width = int(width / 3.0)
        rendered_height = int(height / 3.0)

        width_ratio = width / rendered_width
        height_ratio = height / rendered_height

        camera_image = p.getCameraImage(rendered_width,
                                        rendered_height,
                                        viewMatrix=view_matrix,
                                        renderer=p.ER_BULLET_HARDWARE_OPENGL,
                                        projectionMatrix=projection_matrix)

        rgb_image = cv2.resize(np.array(camera_image[2]), (width, height))
        depth_image = np.array(camera_image[3], np.float32).reshape(
            (rendered_height, rendered_width))

        far = HumanVisualModel.CLIPFAR
        near = HumanVisualModel.CLIPNEAR
        real_depth_image = far * near / (far - (far - near) * depth_image)

        if self.use_saliency is not False:
            saliency_map, saliency_heatmap = self.saliency_estimator.estimate(
                camera_image[2], real_depth_image)
            saliency_heatmap_resized = cv2.resize(saliency_heatmap,
                                                  (width, height))

        mask_image = camera_image[4]
        unique, counts = np.unique(np.array(mask_image).flatten(),
                                   return_counts=True)

        for sim_id, count in zip(unique, counts):
            if sim_id > 0:
                cv_mask = np.array(mask_image.copy())
                cv_mask[cv_mask != sim_id] = 0
                cv_mask[cv_mask == sim_id] = 255
                xmin, ymin, w, h = cv2.boundingRect(cv_mask.astype(np.uint8))

                visible_area = w * h + 1
                screen_area = rendered_width * rendered_height + 1
                if screen_area - visible_area == 0:
                    confidence = 1.0
                else:
                    confidence = visible_area / float(screen_area -
                                                      visible_area)
                #TODO compute occlusion score as a ratio between visible 2d bbox and projected 2d bbox areas
                depth = real_depth_image[int(ymin + h / 2.0)][int(xmin +
                                                                  w / 2.0)]

                xmin = int(xmin * width_ratio)
                ymin = int(ymin * height_ratio)
                w = int(w * width_ratio)
                h = int(h * height_ratio)

                id = self.simulator.reverse_entity_id_map[sim_id]
                if confidence > occlusion_treshold:
                    det = Detection(int(xmin),
                                    int(ymin),
                                    int(xmin + w),
                                    int(ymin + h),
                                    id,
                                    confidence,
                                    depth=depth)
                    visible_detections.append(det)

        # for subject_detection in visible_detections:
        #     for object_detection in visible_detections:
        #         if subject_detection != object_detection:
        #             pass #TODO create inference batch for egocentric relation detection

        perspective_fps = cv2.getTickFrequency() / (cv2.getTickCount() -
                                                    perspective_timer)

        if output_viz is True:
            viz_frame = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2BGR)
            if self.use_saliency is not False:
                viz_frame = cv2.addWeighted(saliency_heatmap_resized, 0.4,
                                            viz_frame, 0.7, 0)
            cv2.rectangle(viz_frame, (0, 0), (250, 40), (200, 200, 200), -1)
            perspective_fps_str = "Perspective taking fps: {:0.1f}hz".format(
                perspective_fps)
            cv2.putText(viz_frame,
                        "Nb detections: {}".format(len(visible_detections)),
                        (5, 15), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 1)
            cv2.putText(viz_frame, perspective_fps_str, (5, 30),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 1)
            for detection in visible_detections:
                detection.draw(viz_frame, (0, 200, 0))

            return rgb_image, real_depth_image, visible_detections, viz_frame
        else:
            return rgb_image, real_depth_image, visible_detections
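The depth buffer returned by p.getCameraImage is non-linear in [0, 1]; the expression far * near / (far - (far - near) * depth_image) used above is the usual conversion to metric depth for a perspective projection with those clip planes. As a small standalone helper (the clip-plane defaults here are placeholders, not the values defined in HumanVisualModel):

def linearize_depth(depth_buffer, near=0.1, far=100.0):
    # Convert an OpenGL-style depth buffer value in [0, 1] to metric depth.
    return far * near / (far - (far - near) * depth_buffer)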
Exemplo n.º 52
0
def main():

    alpha0, alpha1, alpha2, alpha3, alpha4, alpha5, alpha6 = symbols(
        'alpha0:7')  # twist angles about x_{i-1}
    a0, a1, a2, a3, a4, a5, a6 = symbols('a0:7')  # link lengths along x_{i-1}
    d1, d2, d3, d4, d5, d6, d7 = symbols(
        'd1:8')  # link offsets along z_i
    q1, q2, q3, q4, q5, q6, q7 = symbols('q1:8')  # joint angles about z_i

    #
    #
    # Create Modified DH parameters
    #
    s = {
        alpha0: 0,
        a0: 0,
        d1: 0.75,
        alpha1: -pi / 2,
        a1: 0.35,
        d2: 0,
        q2: q2 - pi / 2,
        alpha2: 0,
        a2: 1.25,
        d3: 0,
        alpha3: -pi / 2,
        a3: -0.054,
        d4: 1.50,
        alpha4: pi / 2,
        a4: 0,
        d5: 0,
        alpha5: -pi / 2,
        a5: 0,
        d6: 0,
        alpha6: 0,
        a6: 0,
        d7: .303,
        q7: 0,
    }

    #
    # Define Modified DH Transformation matrix
    #
    #

    if 0:
        T0_1 = Matrix([[cos(q1), -sin(q1), 0, a0],
                       [
                           sin(q1) * cos(alpha0),
                           cos(q1) * cos(alpha0), -sin(alpha0),
                           -sin(alpha0) * d1
                       ],
                       [
                           sin(q1) * sin(alpha0),
                           cos(q1) * sin(alpha0),
                           cos(alpha0),
                           cos(alpha0) * d1
                       ], [0, 0, 0, 1]])

    T0_1 = get_mat(alpha0, a0, d1, q1)
    T0_1 = T0_1.subs(s)

    T1_2 = get_mat(alpha1, a1, d2, q2)
    T1_2 = T1_2.subs(s)

    T2_3 = get_mat(alpha2, a2, d3, q3)
    T2_3 = T2_3.subs(s)

    T3_4 = get_mat(alpha3, a3, d4, q4)
    T3_4 = T3_4.subs(s)

    T4_5 = get_mat(alpha4, a4, d5, q5)
    T4_5 = T4_5.subs(s)

    T5_6 = get_mat(alpha5, a5, d6, q6)
    T5_6 = T5_6.subs(s)

    T6_G = get_mat(alpha6, a6, d7, q7)
    T6_G = T6_G.subs(s)

    # composition of homogeneous transforms
    do_simple = False
    fn_apply = simplify if do_simple else (lambda x: x)

    T0_2 = fn_apply(T0_1 * T1_2)
    T0_3 = fn_apply(T0_2 * T2_3)
    T0_4 = fn_apply(T0_3 * T3_4)
    T0_5 = fn_apply(T0_4 * T4_5)
    T0_6 = fn_apply(T0_5 * T5_6)
    T0_G = fn_apply(T0_6 * T6_G)

    ang1 = pi
    ang2 = -pi / 2
    R_G_corrected_3x3 = simplify(rot_z(ang1) * rot_y(ang2))
    R_G_corrected = R_G_corrected_3x3.row_join(Matrix([[0], [0], [0]]))
    R_G_corrected = R_G_corrected.col_join(Matrix([[0, 0, 0, 1]]))

    # Numerically evaluate transforms
    print("T0_1 = ", T0_1.evalf(subs={
        q1: 0,
        q2: 0,
        q3: 0,
        q4: 0,
        q5: 0,
        q6: 0
    }))
    print("T0_2 = ", T0_2.evalf(subs={
        q1: 0,
        q2: 0,
        q3: 0,
        q4: 0,
        q5: 0,
        q6: 0
    }))
    print("T0_3 = ", T0_3.evalf(subs={
        q1: 0,
        q2: 0,
        q3: 0,
        q4: 0,
        q5: 0,
        q6: 0
    }))
    print("T0_4 = ", T0_4.evalf(subs={
        q1: 0,
        q2: 0,
        q3: 0,
        q4: 0,
        q5: 0,
        q6: 0
    }))
    print("T0_5 = ", T0_5.evalf(subs={
        q1: 0,
        q2: 0,
        q3: 0,
        q4: 0,
        q5: 0,
        q6: 0
    }))
    print("T0_6 = ", T0_6.evalf(subs={
        q1: 0,
        q2: 0,
        q3: 0,
        q4: 0,
        q5: 0,
        q6: 0
    }))
    print("T0_G = ", T0_G.evalf(subs={
        q1: 0,
        q2: 0,
        q3: 0,
        q4: 0,
        q5: 0,
        q6: 0
    }))

    T_total = T0_G * R_G_corrected
    T_total_val = T_total.evalf(subs={
        q1: 0,
        q2: 0,
        q3: 0,
        q4: 0,
        q5: 0,
        q6: 0
    })
    print("T0_total = ", T_total_val)

    gt_gripper_position = [2.51286, 0, 1.94653]
    gt_gripper_orientation_quaternion = [0, -0.00014835, 0, 1]
    forward_transforms = {
        'T0_1': T0_1,
        'T1_2': T1_2,
        'T2_3': T2_3,
        'q_symbols': (q1, q2, q3)
    }

    # ground truth position for the gripper using rviz
    gt_rot_mat = transformations.quaternion_matrix(
        gt_gripper_orientation_quaternion)
    print("ground truth poisiton: {} rot_mat:{} = ".format(
        gt_gripper_position, gt_rot_mat))

    thetas = inverse_kinematics(gt_gripper_position,
                                gt_gripper_orientation_quaternion,
                                forward_transforms)

    #print ("wrist centers: wx:{} wy:{} wz:{}".format(wx,wy,wz))268

    return T_total, T0_G
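The helpers get_mat, rot_z and rot_y are used above but not shown in this snippet. A sketch of get_mat that reproduces the modified-DH transform written out explicitly in the disabled 'if 0:' block, plus elementary rotation matrices for the gripper-frame correction:

from sympy import Matrix, cos, sin

def get_mat(alpha, a, d, q):
    # Modified DH transform between consecutive link frames.
    return Matrix([
        [cos(q), -sin(q), 0, a],
        [sin(q) * cos(alpha), cos(q) * cos(alpha), -sin(alpha), -sin(alpha) * d],
        [sin(q) * sin(alpha), cos(q) * sin(alpha), cos(alpha), cos(alpha) * d],
        [0, 0, 0, 1]])

def rot_z(t):
    return Matrix([[cos(t), -sin(t), 0], [sin(t), cos(t), 0], [0, 0, 1]])

def rot_y(t):
    return Matrix([[cos(t), 0, sin(t)], [0, 1, 0], [-sin(t), 0, cos(t)]])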
Exemplo n.º 53
0
 def test_rotation3_quaternion(self, q):
     np.testing.assert_array_almost_equal(
         w.compile_and_execute(w.rotation_matrix_from_quaternion, q),
         quaternion_matrix(q))
Exemplo n.º 54
0
if __name__ == '__main__':
    camera = KinovaCamera()


    hard_code = [0.51597311, 0.53908436, 0.45265119, 0.48812571]
    random_qs = [0.594, 0.219, 0.347, 0.692]
    qs2 = [-0.228, 0.605, -0.679, 0.348]
    no_transfrom = [-0.64293832 , 0.30220321, -0.34661193, 0.61250609]
    y_trans = [-0.32846894,  0.6292586,   0.64589147, -0.28100886]
    temp = [-0.34455515,  0.61269349,  0.6253454,  -0.33886807]
    # subScribe()
    rospy.init_node('kinova_cameras', anonymous = True)
    kinova = Kinova()

    rotm = np.dot(quaternion_matrix(qs2), inverse_matrix(quaternion_matrix(random_qs)))
    print rotm
    new_qs = np.dot(rotm, quaternion_matrix(kinova.getOri()))
    # kinova.cartesian_pose_client(kinova.getPos(), camera.getOri())

    print quaternion_matrix(hard_code)
    while 1:
        depth = camera.getDepthImg()
        img = camera.getImg()
        cv2.imshow('Body Recognition', depth[164:300, 323:443])
        cv2.imshow('Body Recognition2', img[323:443, 164:300, :])
        print depth[164:330, 323:443]
        cv2.waitKey(0)
        print 
        break
    sys.exit(1)
Exemplo n.º 55
0
def tf_from_odom(odom):
    P = odom.pose.position
    O = odom.pose.orientation
    H = quaternion_matrix((O.x, O.y, O.z, O.w))
    H[:3, 3] = P.x, P.y, P.z
    return H
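Going the other way, from a 4x4 matrix back to a translation and quaternion, uses the companion tf.transformations helpers seen elsewhere in these examples; a minimal sketch (the function name is illustrative):

from tf.transformations import quaternion_from_matrix, translation_from_matrix

def pose_from_tf(H):
    # Split a homogeneous transform into (x, y, z) and (qx, qy, qz, qw).
    return translation_from_matrix(H), quaternion_from_matrix(H)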
Exemplo n.º 56
0
	def __init__(self,*args,**kwargs):
		"""Construct tb_angles from angles or from another rotation format.
		
		The class can be created with angles (in degrees by default) or with a
		quaternion, matrix, or ROS message.
		
		Examples of creating with angles:
		tb_angles(90,45,-90)
		tb_angles(pi/2,pi/4,-pi/2,rad=True)
		tb_angles({'y': pi/2, 'p': pi/4},rad=True)
		tb_angles(yaw=90,pitch=45)                   #Note roll will be 0
		tb_angles(y=90,p=45)                         #Note roll will be 0
		
		tb_angles can be created with the following rotation formats:
		Quaternion as numpy array, list or tuple.
		Rotation matrix as 3x3 or 4x4 (translation ignored) numpy array.
		Axis-angle, with axis as numpy array, list or tuple, and angle in radians.
		    The order does not matter.
		geometry_msgs/Quaternion or any geometry_msgs message with a 
		    Quaternion field.
		"""
		self._matrix = None
		self._quaternion = None
		
		
		deg = _get_deg(kwargs)
		if deg:
			conv = 1.
		else:
			conv = 180. / pi
		
		def set_direct(yaw,pitch,roll):
			self._yaw = yaw * conv
			self._pitch = pitch * conv
			self._roll = roll * conv
		
		if not args:
			_copy_ypr(kwargs)
			set_direct(kwargs.get('yaw',0.),kwargs.get('pitch',0.),kwargs.get('roll',0.))
			return
		elif len(args) == 3:
			set_direct(args[0],args[1],args[2])
			return
		elif len(args) == 1 and isinstance(args[0],tb_angles):
			self._yaw = args[0]._yaw
			self._pitch = args[0]._pitch
			self._roll = args[0]._roll
			return
		
		R = None
		if len(args) == 1 and isinstance(args[0],tb_angles):
			R = args[0].matrix
		if len(args) == 2 or (len(args) == 1 and isinstance(args[0],collections.Sequence) and len(args[0]) == 2):
			if len(args) == 2:
				val1 = args[0]
				val2 = args[1]
			else:
				val1 = args[0][0]
				val2 = args[0][1]
			if isinstance(val1,numbers.Number) and isinstance(val2,collections.Sequence) and len(val2) == 3:
				axis,angle = (np.array(val2),val1)
			elif isinstance(val2,numbers.Number) and isinstance(val1,collections.Sequence) and len(val1) == 3:
				axis,angle = (np.array(val1),val2)
			elif isinstance(val1,numbers.Number) and isinstance(val2,np.ndarray) and val2.size == 3:
				axis,angle = (val2.reshape((3,)),val1)
			elif isinstance(val2,numbers.Number) and isinstance(val1,np.ndarray) and val1.size == 3:
				axis,angle = (val1.reshape((3,)),val2)
			else:
				raise ValueError("Unknown data for tb_angles: (%s, %s)" % args)
			R = tft.quaternion_matrix(tft.quaternion_about_axis(angle, axis))[0:3,0:3]
		elif len(args) > 1:
			raise ValueError("Unknown data for tb_angles: %s" % str(args))
		else:
			data = args[0]
			
			if isinstance(data,dict):
				_copy_ypr(data)
				set_direct(data.get('yaw',0.),data.get('pitch',0.),data.get('roll',0.))
				return
			
			if _ROS and _ismsginstance(data,geometry_msgs.msg.QuaternionStamped):
				data = data.quaternion
			elif _ROS and _ismsginstance(data,geometry_msgs.msg.Pose):
				data = data.orientation
			elif _ROS and _ismsginstance(data,geometry_msgs.msg.PoseStamped):
				data = data.pose.orientation
			elif _ROS and _ismsginstance(data,geometry_msgs.msg.Transform):
				data = data.rotation
			elif _ROS and _ismsginstance(data,geometry_msgs.msg.TransformStamped):
				data = data.transform.rotation
			
			if _ROS and _ismsginstance(data,geometry_msgs.msg.Quaternion):
				R = tft.quaternion_matrix([data.x,data.y,data.z,data.w])[0:3,0:3]
			elif isinstance(data,np.ndarray) and data.size == 4:
				R = tft.quaternion_matrix(data)[0:3,0:3]
			elif isinstance(data,np.ndarray) and (data.shape == (3,3) or data.shape == (4,4)):
				R = np.mat(np.empty((3,3)))
				R[:,:] = data[0:3,0:3]
			elif isinstance(data,collections.Sequence):
				if (len(data) == 3 and all([isinstance(d, collections.Sequence) and len(d) == 3 for d in data])) \
						or (len(data) == 4 and all([isinstance(d, collections.Sequence) and len(d) == 4 for d in data])):
					R = np.matrix(data)[0:3,0:3]
				elif len(data) == 4 and all([isinstance(d, numbers.Number) for d in data]):
					R = tft.quaternion_matrix(data)[0:3,0:3]
				
			if R is None:
				raise ValueError("Unknown data for tb_angles: %s" % data)
			
		yaw = 0;
		pitch = 0;
		roll = 0;
		
		skip = False
		if fabs(R[0,1]-R[1,0]) < _FLOAT_CLOSE_ENOUGH and fabs(R[0,2]-R[2,0]) < _FLOAT_CLOSE_ENOUGH and fabs(R[1,2]-R[2,1]) < _FLOAT_CLOSE_ENOUGH:
			#matrix is symmetric
			if fabs(R[0,1]+R[1,0]) < _FLOAT_CLOSE_ENOUGH and fabs(R[0,2]+R[2,0]) < _FLOAT_CLOSE_ENOUGH and fabs(R[1,2]+R[2,1]) < _FLOAT_CLOSE_ENOUGH:
				#diagonal
				if R[0,0] > -_FLOAT_CLOSE_ENOUGH:
					if R[1,1] > -_FLOAT_CLOSE_ENOUGH:
						skip = True
					else:
						roll = pi;
				elif R[1,1] > -_FLOAT_CLOSE_ENOUGH:
					pitch = pi;
				else:
					yaw = pi;
				skip=True
		
		if not skip:
			vx = R[0:3,0:3] * np.matrix([1,0,0]).transpose();
			vy = R[0:3,0:3] * np.matrix([0,1,0]).transpose();
	
			yaw = atan2(vx[1,0],vx[0,0]);
			pitch = atan2(-vx[2,0], sqrt(vx[0,0]*vx[0,0] + vx[1,0]*vx[1,0]));
	
			Ryaw = np.matrix(
						 [[cos(yaw), -sin(yaw), 0],
						 [sin(yaw),  cos(yaw), 0],
						 [0,		 0,		1]]);
			Rpitch = np.matrix(
					 [[cos(pitch), 0, sin(pitch)],
					 [0,		  1, 0],
					[-sin(pitch), 0, cos(pitch)]]);
			vyp = Ryaw * Rpitch * np.matrix([0,1,0]).transpose();
			vzp = Ryaw * Rpitch * np.matrix([0,0,1]).transpose();
	
			if vzp.transpose() * vy >= 0:
				coeff = 1
			else:
				coeff = -1
	
			val = vyp.transpose() * vy
			if val > 1:
				val = 1
			elif val < -1:
				val = -1
			roll = coeff * acos(val);
		
		self._yaw = yaw * 180. / pi;
		self._pitch = pitch * 180. / pi;
		self._roll = roll * 180. / pi;
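A quick usage sketch for the constructor above (the plain-quaternion path), reading back the degree-valued fields it sets; the expected values are approximate:

q_yaw90 = [0.0, 0.0, 0.7071068, 0.7071068]  # 90 degree rotation about z, xyzw order
tb = tb_angles(q_yaw90)
print tb._yaw, tb._pitch, tb._roll  # expect roughly 90.0, 0.0, 0.0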
Exemplo n.º 57
0
def quaternion2matrix(q):
    R = quaternion_matrix(q)
    return R[:3, :3]
Exemplo n.º 58
0
    def callback(self, target_oe, chaser_oe):
        # calculate baseline
        osc_O_t = rospace_lib.KepOrbElem()
        osc_O_c = rospace_lib.KepOrbElem()

        osc_O_t.from_message(target_oe.position)
        osc_O_c.from_message(chaser_oe.position)

        # convert to TEME
        S_t = rospace_lib.CartesianTEME()
        S_c = rospace_lib.CartesianTEME()

        S_t.from_keporb(osc_O_t)
        S_c.from_keporb(osc_O_c)
        # vector from chaser to target in chaser body frame in [m]

        # get current rotation of body
        R_J2K_CB = transformations.quaternion_matrix([
            chaser_oe.orientation.x, chaser_oe.orientation.y,
            chaser_oe.orientation.z, chaser_oe.orientation.w
        ])

        r_J2K = (S_t.R - S_c.R) * 1000
        r_CB = np.dot(R_J2K_CB[0:3, 0:3].T, r_J2K)

        # publish observation as marker for visualization
        msg = Marker()
        msg.header.frame_id = "chaser_1"
        msg.type = Marker.ARROW
        msg.action = Marker.ADD
        msg.points.append(Point(0, 0, 0))
        msg.points.append(Point(r_CB[0], r_CB[1], r_CB[2]))
        msg.scale.x = 100
        msg.scale.y = 200
        msg.color.a = 1.0
        msg.color.r = 0.9
        msg.color.g = 0.5
        msg.color.b = 0.1
        self.pub_m.publish(msg)

        # check if visible and augment sensor value
        visible, value = sensor_obj.get_measurement(r_CB)
        msg = AzimutElevationRangeStamped()
        msg.header.stamp = target_oe.header.stamp

        # throttle publishing to maximum rate
        if not (target_oe.header.stamp -
                self.last_publish_time).to_sec() > 1.0 / self.rate:
            return

        # if target is visible, publish message
        if visible:

            # these measurements already have noise added
            msg.value.azimut = value[0]
            msg.value.elevation = value[1]
            msg.valid = True

            # if range measurement is activated
            if len(sensor_obj.mu) == 3:
                msg.value.range = np.linalg.norm(r_CB) + np.asscalar(
                    np.random.normal(sensor_obj.mu[2], sensor_obj.sigma[2], 1))

        else:
            msg.valid = False

        self.pub.publish(msg)
        self.last_publish_time = target_oe.header.stamp
Exemplo n.º 59
0
                odom_tf = tf_buffer.lookup_transform("cf1/odom", "map",
                                                     rospy.Time.now(),
                                                     rospy.Duration(0.5))
            except:
                pass  # Keep old transform
            odom_trans = (odom_tf.transform.translation.x,
                          odom_tf.transform.translation.y,
                          odom_tf.transform.translation.z)

            odom_quat = (odom_tf.transform.rotation.x,
                         odom_tf.transform.rotation.y,
                         odom_tf.transform.rotation.z,
                         odom_tf.transform.rotation.w)

            tmat_odom = translation_matrix(odom_trans)
            qmat_odom = quaternion_matrix(odom_quat)
            odom_matrix = np.dot(tmat_odom, qmat_odom)

            height = goal.z  # Store/maintain given height
            # Goal in map coords
            tmat_goal = translation_matrix((goal.x, goal.y, goal.z))
            qmat_goal = quaternion_matrix(
                quaternion_from_euler(0, 0, math.radians(goal.yaw)))
            goal_matrix = np.dot(tmat_goal, qmat_goal)

            # Goal in odom coords
            final_mat = np.dot(odom_matrix, goal_matrix)
            trans = translation_from_matrix(final_mat)
            goal.x = trans[0]
            goal.y = trans[1]
            goal.z = height
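            # Note: lookup_transform("cf1/odom", "map", ...) re-expresses
            # map-frame data in the odom frame, so odom_matrix.dot(goal_matrix)
            # above gives the goal pose in "cf1/odom" coordinates, while the
            # commanded height is kept separately.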
Exemplo n.º 60
0
    def lecture_des_normales(self, _bag_mires):
        self.rotation_mires = []  # Observed rotation between the horizontal and vertical calibration targets
        self.translation_mires = []  # Observed translation between the horizontal and vertical calibration targets
        self.mires = []  # Observed transformation between the horizontal and vertical calibration targets

        # Read the normal from the text file
        #		with open("/home/denis/catkin_ws/src/autocalibration/src/data.txt", "r") as f:
        #    			for line in f.readlines():
        #        			if 'Nw' in line:
        #					line= line.strip('\n')
        #					y,z = line.split(':')
        #					z = z.split(',')
        #    			z1 = [float(i) for i in z]
        #			Nw_lect = np.array(z1)

        # Store the detected target-to-camera transformations
        for (topic, msg,
             t) in _bag_mires.read_messages(topics='tag_detections'):
            self.mires.append([
                msg.detections[0].pose.pose.pose,
                msg.detections[1].pose.pose.pose
            ])

        a = 0
        # Convert the quaternion rotations to matrices and compute the floor-to-sensor and sensor-to-wall rotation matrices
        R_17_c = np.array(
            tf.quaternion_matrix([
                self.mires[a][0].orientation.x, self.mires[a][0].orientation.y,
                self.mires[a][0].orientation.z, self.mires[a][0].orientation.w
            ]))
        R_c_21 = np.transpose(
            tf.quaternion_matrix([
                self.mires[a][1].orientation.x, self.mires[a][1].orientation.y,
                self.mires[a][1].orientation.z, self.mires[a][1].orientation.w
            ]))

        # Reduce the rotation matrices to 3x3
        R_17_c = R_17_c[:3, :3]
        R_c_21 = R_c_21[:3, :3]

        # Compute the floor-to-sensor and sensor-to-wall translations
        t_17_c = np.array([
            self.mires[0][0].position.x, self.mires[0][0].position.y,
            self.mires[0][0].position.z
        ])
        t_21_c = np.array([
            self.mires[0][1].position.x, self.mires[0][1].position.y,
            self.mires[0][1].position.z
        ])
        t_c_21 = -np.dot(R_c_21, t_21_c)

        # Compute the total rotation between the floor and the wall
        self.rotation_mires = np.dot(R_c_21, R_17_c)

        # Add the two translations
        self.translation_mires = np.add(t_17_c, -t_21_c)
        #		print t_17_c
        #		print t_c_21
        print self.translation_mires

        return self.translation_mires, self.rotation_mires