def Rot_k_phi(self, k_vec, phi): #Eigen::Vector3d k_vec,float phi
    """Rodrigues' formula: rotation about the unit axis k_vec by angle phi,
    R = I + sin(phi)*K + (1 - cos(phi))*K^2, with K the skew matrix of k_vec.
    """
    kx = k_vec[0]
    ky = k_vec[1]
    kz = k_vec[2]

    N = 3
    # Skew-symmetric cross-product matrix K of k_vec (row-major 9-float form).
    K = Rotation(0.0, -kz, ky,
                 kz, 0.0, -kx,
                 -ky, kx, 0.0)
    K_Frame = Frame(K)
    K_numpy = posemath.toMatrix(K_Frame)[0:3, 0:3]
    I_numpy = np.identity(N)

    # K_numpy.dot(K_numpy) is the matrix product; for ndarrays the plain
    # K_numpy*K_numpy would be an element-wise product and is wrong here.
    R_k_phi_numpy = I_numpy + math.sin(phi)*K_numpy \
        + (1 - math.cos(phi))*K_numpy.dot(K_numpy)

    # Embed the 3x3 rotation in a homogeneous 4x4 so posemath can parse it.
    R_k_phi_numpy_Frame = np.eye(4)
    R_k_phi_numpy_Frame[0:3, 0:3] = R_k_phi_numpy
    R_k_phi_Frame = posemath.fromMatrix(R_k_phi_numpy_Frame)
    R_k_phi = R_k_phi_Frame.M
    return R_k_phi
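Since the function above reimplements Rodrigues' formula, a quick sanity check is to compare it against PyKDL's built-in axis-angle rotation. A minimal sketch, assuming PyKDL is importable and `solver` is a hypothetical instance of the class above (the axis must be unit length):

import math
import PyKDL

k = PyKDL.Vector(0.0, 0.0, 1.0)       # unit rotation axis (z)
phi = math.pi / 4.0
R_ref = PyKDL.Rotation.Rot(k, phi)    # PyKDL's own axis-angle rotation
R_test = solver.Rot_k_phi(k, phi)     # 'solver' is a hypothetical instance
for i in range(3):
    for j in range(3):
        assert abs(R_ref[i, j] - R_test[i, j]) < 1e-9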
 def get_des_twist(self, dT):
     """ Get desired twist @ time t, by computing differential of traj_frame @ t=T-1 and t=T
     """
     if self.cur_wp_idx > 1: 
         return PyKDL.diff(pm.fromMatrix(self.traj_list[self.cur_wp_idx - 1]), pm.fromMatrix(self.traj_list[self.cur_wp_idx]), dT)
     else: # cur_wp_idx <= 1: still at the start of the trajectory, so look one waypoint ahead
         return PyKDL.diff(pm.fromMatrix(self.traj_list[self.cur_wp_idx]), pm.fromMatrix(self.traj_list[self.cur_wp_idx + 1]), dT)
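PyKDL.diff(F0, F1, dT) returns the displacement from F0 to F1 scaled by 1/dT, i.e. a velocity twist. A minimal sketch of that behavior:

import PyKDL

F0 = PyKDL.Frame(PyKDL.Vector(0.0, 0.0, 0.0))
F1 = PyKDL.Frame(PyKDL.Vector(0.1, 0.0, 0.0))
tw = PyKDL.diff(F0, F1, 0.1)   # 10 cm of travel spread over 0.1 s
print tw.vel                   # -> [1, 0, 0] m/s, no angular component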
Example #3
def publish_grasp_tran(tran):
    """@brief - Add a grasp relative to the world origin to the scene graph
    @param tran - the grasp transform.
    """
    bc = tf.TransformBroadcaster()
    ttf = pm.toTf(pm.fromMatrix(tran))
    bc.sendTransform(ttf[0], ttf[1], rospy.Time.now(),  "/hand_goal_pose", "/world")  
Example #4
    def update_position(self):

        if self.q0 is None or self.old_q0 is None:
            return

        self.ee_pose = pm.fromMatrix(self.kdl_kin.forward(self.q0))

        if self.goal is not None:

            #goal_diff = np.abs(self.goal - self.q0).sum() / self.q0.shape[0]
            # The cartesian/rotation error terms are disabled here, so
            # goal_diff is always 0 and at_goal/near_goal always latch True.
            cart_diff = 0  #(self.ee_pose.p - self.goal.p).Norm()
            rot_diff = 0  #self.goal_rotation_weight * (pm.Vector(*self.ee_pose.M.GetRPY()) - pm.Vector(*self.goal.M.GetRPY())).Norm()
            goal_diff = cart_diff + rot_diff

            if goal_diff < self.max_goal_diff:
                self.at_goal = True

            if goal_diff < 10 * self.max_goal_diff:
                self.near_goal = True

        q_diff = np.abs(self.old_q0 - self.q0).sum()

        if q_diff < self.max_q_diff:
            self.moving = False
        else:
            self.moving = True
Example #5
def generateCameraPose(vector3_origin,
                       vector3_point,
                       camera_view_axis,
                       camera_z_axis=np.array([0, 0, 1]),
                       set_constraint_z=True):
    camera_view_vector = vector3_origin - vector3_point
    camera_view_vector /= np.linalg.norm(camera_view_vector)
    v = np.cross(camera_view_axis, camera_view_vector)

    v_x = np.array([[0, -v[2], v[1]], [v[2], 0, -v[0]], [-v[1], v[0], 0]])
    cosine = camera_view_axis.dot(camera_view_vector)
    # Guard the antiparallel case: 1 / (1 + cosine) blows up at cosine == -1.
    if (cosine == -1): cosine = -0.99999
    rot_matrix = np.identity(3) + v_x + v_x.dot(v_x) * (1 / (1 + cosine))

    if set_constraint_z:
        new_x_vector = rot_matrix[:3, 0]
        new_y_vector = np.cross(camera_z_axis, new_x_vector)
        new_z_vector = np.cross(new_x_vector, new_y_vector)
        rot_matrix[:3, 1] = new_y_vector / np.linalg.norm(new_y_vector)
        rot_matrix[:3, 2] = new_z_vector / np.linalg.norm(new_z_vector)

    np_frame = np.eye(4)
    np_frame[:3, :3] = rot_matrix
    np_frame[:3, 3] = vector3_point
    kdl_frame = pm.fromMatrix(np_frame)

    return kdl_frame
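A minimal usage sketch for the helper above, assuming the camera's optical axis in its own frame is +x:

import numpy as np

look_at = np.array([0.0, 0.0, 0.0])      # point the camera should face
cam_pos = np.array([1.0, 1.0, 1.0])      # where the camera sits
view_axis = np.array([1.0, 0.0, 0.0])    # optical axis in the camera frame
frame = generateCameraPose(look_at, cam_pos, view_axis)
print frame.p     # camera position as a PyKDL Vector
print frame.M     # orientation looking from cam_pos toward look_at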
Example #6
    def evaluate(self, world):
        '''
        Reward grows as the end effector approaches the goal object
        (reward = 1 / distance).
        '''
        robot_actor = world.actors[0]
        T_ee = pm.fromMatrix(robot_actor.robot.fwd(robot_actor.state.arm))
        T_obj = world.getObject(self.goal).state.T
        dist = (T_obj.p - T_ee.p).Norm()

        #print "prev distance " + str(self.prev_distance) + " new distance " + str(dist)

        #print dist
        reward = 1 / dist
        #print reward

        #if (dist < self.prev_distance):
        #    reward = +1
        #else:
        #    reward = -1

        #if dist < .05:
        #    print "reached target"
        #    reward = 100

        #print "reward " + str(reward)

        #raw_input("Press Enter to continue...")

        self.prev_distance = dist

        return reward, 0
Example #7
    def step(self):
        if self.T_ is None:
            self.init_tf()
            return

        if self.recv_:
            self.recv_ = False
            pose = self.odom_.pose.pose
            T0 = pm.toMatrix(pm.fromMsg(pose))  # android_odom --> android

            # send odom -> base_link transform
            T = tf.transformations.concatenate_matrices(self.T_, T0, self.Ti_)
            frame = pm.fromMatrix(T)
            if self.pub_tf_:
                txn, qxn = pm.toTf(frame)
                self.tfb_.sendTransform(txn, qxn, self.odom_.header.stamp,
                                        'odom', 'base_link')

            # send as msg
            # TODO : does not deal with twist/covariance information
            msg = pm.toMsg(frame)
            self.cvt_msg_.pose.pose = pm.toMsg(frame)
            self.cvt_msg_.header.frame_id = 'map'  # experimental
            self.cvt_msg_.header.stamp = self.odom_.header.stamp
            self.pub_.publish(self.cvt_msg_)
Example #8
    def process_grasp_msg(self, grasp_msg):
        """@brief - Attempt to make the adjustment specified by grasp_msg
        1. plan a path to a place 15 cm back from the new pose
        """
        print 'regular move:'
        print grasp_msg
        #release the object
        tp.open_barrett()

        #get the robot's current location and calculate the absolute tran after the adjustment
        #update the robot status
        tp.update_robot(self.global_data.or_env.GetRobots()[0])
        handGoalInBase = pm.toMatrix(pm.fromMsg(grasp_msg.final_grasp_pose))

        try:
            hand_tf = pm.toTf(pm.fromMatrix(handGoalInBase))
            bc = tf.TransformBroadcaster()
            now = rospy.Time.now()
            bc.sendTransform(hand_tf[0], hand_tf[1], now, "hand_goal",
                             "armbase")
            self.global_data.listener.waitForTransform("armbase",
                                                       "arm_goal", now,
                                                       rospy.Duration(1.0))
            armtip_robot = self.global_data.listener.lookupTransform(
                'armbase', 'arm_goal', now)
            armGoalInBase = pm.toMatrix(pm.fromTf(armtip_robot))
        except Exception, e:
            handle_fatal('convert_cartesian_world_goal failed: error %s.' % e)
Example #9
def processMarkerFeedback(feedback):
    global marker_tf, marker_name
    marker_name = feedback.marker_name
    marker_offset_tf = ((0, 0, marker_offset), tf.transformations.quaternion_from_euler(0, 0, 0))
    marker_tf = pm.toTf(pm.fromMatrix(numpy.dot(pm.toMatrix(pm.fromMsg(feedback.pose)), pm.toMatrix(pm.fromTf(marker_offset_tf)))))
    if feedback.menu_entry_id:
        marker_tf = zero_tf
Example #10
    def _makePose(self, pose_msg):
        '''
        Read in a pose message and convert it to a standard data format. We
        use KDL Frames for fast operations down the line.

        If we previously set the `from_unity` flag to true, then we need to
        adjust our frames so that they make sense as well.

        Parameters:
        -----------
        pose_msg: a ROS pose message

        Returns:
        --------
        a KDL pose in right hand, z-up notation
        '''
        pose = pm.fromMsg(pose_msg)
        if self.from_unity:
            H = np.array([[0, 0, 1, 0], [-1, 0, 0, 0], [0, 1, 0, 0],
                          [0, 0, 0, 1]])
            x = pm.toMatrix(pose)
            x = H.dot(x)
            pose = pm.fromMatrix(x)

        return pose
Example #11
    def update_position(self):

        if self.q0 is None or self.old_q0 is None:
            return

        self.ee_pose = pm.fromMatrix(self.kdl_kin.forward(self.q0))

        if self.goal is not None:

            cart_diff = (self.ee_pose.p - self.goal.p).Norm()
            rot_diff = self.goal_rotation_weight * \
              (pm.Vector(*self.ee_pose.M.GetRPY()) - pm.Vector(*self.goal.M.GetRPY())).Norm()
            goal_diff = cart_diff + rot_diff

            if goal_diff < self.max_goal_diff:
                self.at_goal = True
            else:
                self.at_goal = False

            if goal_diff < 10*self.max_goal_diff:
                self.near_goal = True

        q_diff = np.abs(self.old_q0 - self.q0).sum()

        if q_diff < self.max_q_diff:
            self.moving = False
        else:
            self.moving = True
Example #12
 def fromCameraParams(self, cv, rvec, tvec):
     # Build a homogeneous matrix: translation from tvec, rotation filled
     # in-place from the Rodrigues vector rvec via OpenCV.
     m = numpy.array([[0, 0, 0, tvec[0, 0]],
                      [0, 0, 0, tvec[1, 0]],
                      [0, 0, 0, tvec[2, 0]],
                      [0, 0, 0, 1.0]], dtype=numpy.float32)
     cv.Rodrigues2(rvec, cv.fromarray(m[:3, :3]))
     return pm.fromMatrix(m)
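`cv.Rodrigues2` is the old OpenCV 1.x API. A sketch of the same conversion with the cv2 API (assumptions: OpenCV >= 2 installed alongside numpy; `from_camera_params_cv2` is a hypothetical name):

import cv2
import numpy

def from_camera_params_cv2(rvec, tvec):
    # Rotation block from the Rodrigues vector, translation from tvec.
    m = numpy.eye(4, dtype=numpy.float64)
    m[:3, :3], _ = cv2.Rodrigues(rvec)    # returns (3x3 matrix, jacobian)
    m[:3, 3] = numpy.asarray(tvec).ravel()[:3]
    return pm.fromMatrix(m)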
Example #13
    def getCartesianMove(self,
                         frame,
                         q0,
                         base_steps=1000,
                         steps_per_meter=1000,
                         time_multiplier=10):

        # interpolate between start and goal
        pose = pm.fromMatrix(self.kdl_kin.forward(q0))

        cur_rpy = np.array(pose.M.GetRPY())
        cur_xyz = np.array(pose.p)

        goal_rpy = np.array(frame.M.GetRPY())
        goal_xyz = np.array(frame.p)

        steps = base_steps + int((pose.p - frame.p).Norm() * steps_per_meter)
        print " -- Computing %f steps" % steps

        ts = (pose.p - frame.p).Norm() / steps * time_multiplier
        traj = JointTrajectory()
        traj.points.append(
            JointTrajectoryPoint(positions=q0,
                                 velocities=[0] * len(q0),
                                 accelerations=[0] * len(q0)))

        # compute IK
        for i in range(1, steps + 1):
            xyz = cur_xyz + ((float(i) / steps) * (goal_xyz - cur_xyz))
            rpy = cur_rpy + ((float(i) / steps) * (goal_rpy - cur_rpy))

            frame = pm.toMatrix(
                kdl.Frame(kdl.Rotation.RPY(rpy[0], rpy[1], rpy[2]),
                          kdl.Vector(xyz[0], xyz[1], xyz[2])))
            #q = self.kdl_kin.inverse(frame,q0)
            q = self.ik(frame, q0)

            if self.verbose:
                print "%d -- %s %s = %s" % (i, str(xyz), str(rpy), str(q))

            if q is not None:
                pt = JointTrajectoryPoint(positions=q,
                                          velocities=[0] * len(q),
                                          accelerations=[0] * len(q))
                pt.time_from_start = rospy.Duration(i * ts)
                traj.points.append(pt)
                q0 = q
            elif i == steps:
                return JointTrajectory()

        if len(traj.points) < base_steps:
            rospy.logerr("Planning failure with " \
                    + str(len(traj.points)) \
                    + " / " + str(base_steps) \
                    + " points.")
            return JointTrajectory()

        traj.joint_names = self.joint_names
        return traj
Example #14
 def forward_kinematics_cb(self, req):
     ''' Run forward kinematics on a joint space pose and return the result.
     '''
     q = req.joint_state.position
     kdl_pose = pm.fromMatrix(self.kdl_kin.forward(q))
     pose_msg = pm.toMsg(kdl_pose)
     response = ForwardKinematicsResponse(pose=pose_msg, ack="SUCCESS")
     return response
Example #15
def np2Transform(nparray):
    npmat = nparray.reshape((3, 4))
    R = npmat[0:3, 0:3]
    assert (isrot(R, 10))
    # T = npmat[0:3,3]
    # posemath.fromMatrix only reads the top 3x4 block of its argument, so
    # the missing [0, 0, 0, 1] row is not a problem here.
    posetmp = posemath.fromMatrix(npmat)
    transformtmp = posemath.toMsg(posetmp)
    return transformtmp
Example #16
def publish_grasp_tran(tran):
    """@brief - Add a grasp relative to the world origin to the scene graph
    @param tran - the grasp transform.
    """
    bc = tf.TransformBroadcaster()
    ttf = pm.toTf(pm.fromMatrix(tran))
    bc.sendTransform(ttf[0], ttf[1], rospy.Time.now(), "/hand_goal_pose",
                     "/world")
Example #17
def gstamPose2Transform(gstamPose):
    npmat = np.zeros((3, 4))
    npmat[0:3, 0:3] = gstamPose.rotation().matrix()
    npmat[0, 3] = gstamPose.translation().x()
    npmat[1, 3] = gstamPose.translation().y()
    npmat[2, 3] = gstamPose.translation().z()
    posetmp = posemath.fromMatrix(npmat)
    transformtmp = posemath.toMsg(posetmp)
    return transformtmp
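A usage sketch, assuming a gtsam build whose Pose3 translation still exposes the x()/y()/z() accessors the helper above expects:

import gtsam

pose3 = gtsam.Pose3(gtsam.Rot3(), gtsam.Point3(1.0, 2.0, 3.0))
msg = gstamPose2Transform(pose3)   # geometry_msgs/Pose
print msg.position                 # -> x: 1.0, y: 2.0, z: 3.0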
Example #18
def compute_twist(T0, T1):
    """
    Compute the twist between two homogeneous transformations: `twist = T1 - T0`

    Parameters
    ----------
    T0: array_like
        Initial homogeneous transformation
    T1: array_like
        Final homogeneous transformation
    """
    F0 = posemath.fromMatrix(T0)
    F1 = posemath.fromMatrix(T1)
    kdl_twist = PyKDL.diff(F0, F1)
    twist = np.zeros(6)
    twist[:3] = [kdl_twist.vel.x(), kdl_twist.vel.y(), kdl_twist.vel.z()]
    twist[3:] = [kdl_twist.rot.x(), kdl_twist.rot.y(), kdl_twist.rot.z()]
    return twist
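A worked example: for a pure 10 cm translation along x, the twist has only a linear component (a sketch):

import numpy as np

T0 = np.eye(4)
T1 = np.eye(4)
T1[:3, 3] = [0.1, 0.0, 0.0]       # 10 cm along x, no rotation
print compute_twist(T0, T1)       # -> [0.1  0.   0.   0.   0.   0. ]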
Example #19
    def GetForward(self,q):

        #q = [0,0,0,0,0,0,0]
        mat = self.kinematics.forward(q)
        f = pm.fromMatrix(mat)

        if not self.manip_frame is None:
            f = f * self.manip_frame #* self.manip_frame.Inverse()

        return f
Example #20
 def get_frame(self, matrix):
     """ Convert a generic homogeneous transformation matrix to the
         geometrically equivalent PyKDL frame.
     """
     # _r, _t = r.TransToRp(matrix)
     # _rot = PyKDL.Rotation(_r[0][0], _r[0][1], _r[0][2],
     #                            _r[1][0], _r[1][1], _r[1][2],
     #                            _r[2][0], _r[2][1], _r[2][2])
     # _tran = PyKDL.Vector(_t[0], _t[1], _t[2])
     return pm.fromMatrix(np.array(matrix))
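A usage sketch for the conversion above, with `obj` as a hypothetical instance of the class (a pure 90-degree yaw plus a small offset):

import numpy as np

T = [[0.0, -1.0, 0.0, 0.5],
     [1.0,  0.0, 0.0, 0.0],
     [0.0,  0.0, 1.0, 0.2],
     [0.0,  0.0, 0.0, 1.0]]
frame = obj.get_frame(T)      # 'obj' is a hypothetical instance
print frame.M.GetRPY()        # -> (0.0, 0.0, 1.5707963...)
print frame.p                 # -> [0.5, 0, 0.2]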
Example #21
    def call_planner(self, stage):
        self.status.planning_status = stage
        print(stage)

        if stage == ObjectStatus.PREGRASP:

            target_pose = self.status.pose_stamped.pose

            pregrasp_distance = .1

            #The marker pose is defined in the torso coordinate system
            target_tran = pm.toMatrix(pm.fromMsg(target_pose))
            hand_tran = target_tran.copy()

            #Align hand perpendicular to cylinder in its canonical pose
            hand_tran[:3, 1] = target_tran[:3, 2]
            hand_tran[:3, 2] = target_tran[:3, 0]
            hand_tran[:3, 0] = target_tran[:3, 1]
            pregrasp_tran = hand_tran.copy()

            #The approach direction of this gripper is -y for some reason
            pregrasp_tran[:3, 3] = pregrasp_tran[:3, 3] + pregrasp_tran[:3, 1] * pregrasp_distance
            hand_tran_pose = pm.toMsg(pm.fromMatrix(hand_tran))
            pregrasp_pose = pm.toMsg(pm.fromMatrix(pregrasp_tran))

            req = PosePlanningSrv._request_class()
            req.TargetPoses = [pregrasp_pose, hand_tran_pose]
            req.ManipulatorID = int(self.status.hands == self.status.RIGHT)
            res = None
            try:
                print(req)
                res = self.pregrasp_planner_client.call(req)
                self.status.operating_status = self.status.PLANNING
                #print res
                self.last_plan = res.PlannedTrajectory
            except:
                res = None
                rospy.loginfo("Planner failed to pregrasp")

                self.status.operating_status = self.status.ERROR
Example #22
def draw_raw_map():
    global package_path
    global arucos_dict
    global BASE_ARUCO_ID
    global rviz_marker_pub
    global aruco_map

    while(rviz_marker_pub.get_num_connections() < 1):
        if rospy.is_shutdown():
            rospy.signal_shutdown('The ROS master is not running')

        # load_aruco_map()
        rospy.logwarn("Please create an RViz subscriber first")
        rospy.sleep(5)
    
    aruco_map[BASE_ARUCO_ID] = get_base_tag_pose()
    # Draw the reference (base) marker
    draw_single_marker(BASE_ARUCO_ID, get_base_tag_pose(), alpha=0.8, height=0.05)

    # Build a graph over the markers
    graph = aruco_udg(max_dis=rospy.get_param('max_distance'))
    order = graph.BFS()

    for aruco_id in order[1:]:
        # Skip the first entry: it is the base aruco, whose pose is already fixed.
        # Visit the rest in topological (BFS) order.
        parent_id = graph.nodes_dict[aruco_id].parent_id
        # All recorded transforms from the parent marker to the current marker
        pose_list = arucos_dict[parent_id][aruco_id]
        # Weighted mean filter to get a single pose estimate
        pose = weighted_mean_filter(pose_list)
        # Pose of the parent marker in the world frame
        parent_pose = aruco_map[parent_id]

        world2parent_mat = pm.toMatrix(pm.fromMsg(parent_pose))
        parent2child_mat = pm.toMatrix(pm.fromMsg(pose))
        world2child_mat = world2parent_mat.dot(parent2child_mat)

        aruco_pose = pm.toMsg(pm.fromMatrix(world2child_mat))

        # Append this aruco marker to the map
        aruco_map[aruco_id] = aruco_pose
        # Draw the current aruco marker
        draw_single_marker(aruco_id, aruco_pose, alpha=0.8, height=0.05)
        

    # Serialize and save the map
    with open(package_path + '/data/aruco_map.bin', 'wb') as f:
        pickle.dump(aruco_map, f)
    # Publish static TF transforms for every marker
    for aruco_id, pose in aruco_map.items():
        send_aruco_static_transform(aruco_id, pose)

    # Shut down this node
    rospy.signal_shutdown('Drew all arucos and published their TF transforms')
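The map-building loop above chains transforms by plain composition. Expressed with PyKDL frames instead of numpy matrices, the same step is (a sketch, with F_wp and F_pc as the world->parent and parent->child frames):

F_wc = F_wp * F_pc            # world->child = (world->parent) * (parent->child)
aruco_pose = pm.toMsg(F_wc)   # same result as the matrix dot product above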
Example #23
  def getposecb(self, a, b):
    print "Setting Pose based on world model"

    if((rospy.Time.now().to_sec() - b.pose.header.stamp.to_sec()) > 4.0):
      print "time now = ", rospy.Time.now().to_sec(), "time from pose = ", b.pose.header.stamp.to_sec()
      print "Time stamp of detection to old: diff=", (rospy.Time.now().to_sec() - b.pose.header.stamp.to_sec())
      if(self.detection_counter <= 4):
        self.detection_counter += 1
        return "preempted"
      else:
        self.detection_counter = 0
        return "aborted"

    self.config_over_object.header.stamp = rospy.Time.now()
    self.config_over_object.pose.position.x = b.pose.pose.position.x
    self.config_over_object.pose.position.y = b.pose.pose.position.y
    #ao = PyKDL.Rotation.Quaternion(self.config_over_object.pose.orientation.x, self.config_over_object.pose.orientation.y, self.config_over_object.pose.orientation.z, self.config_over_object.pose.orientation.w)
    #ao = PyKDL.Rotation.Quaternion(self.config_over_object.pose.orientation.x, self.config_over_object.pose.orientation.y, self.config_over_object.pose.orientation.z, self.config_over_object.pose.orientation.w)
    #bo = PyKDL.Rotation.Quaternion(b.pose.pose.orientation.x, b.pose.pose.orientation.y, b.pose.pose.orientation.z, b.pose.pose.orientation.w)
    #c = ao #* bo#.Inverse()
    m_manip = pm.toMatrix( pm.fromMsg(self.config_over_object.pose) )
    m_obj = pm.toMatrix( pm.fromMsg(b.pose.pose) )
    import numpy
    from tf.transformations import *
    m_ori = pm.toMatrix( pm.fromTf( ((0,0,0), quaternion_from_euler(0, 0, math.radians(-20.0))) ))
    box_pose = pm.toMsg( pm.fromMatrix(numpy.dot(m_manip,m_obj)) )
    m_test = numpy.dot(m_manip,m_obj)
    newrot = pm.toMsg( pm.fromMatrix(m_test))
    print newrot
    print self.config_over_object.pose
    #print c.GetQuaternion()
    self.config_over_object.pose.orientation = box_pose.orientation
    #self.config_over_object.pose.orientation.y = c.GetQuaternion()[1]
    #self.config_over_object.pose.orientation.z = c.GetQuaternion()[2]
    #self.config_over_object.pose.orientation.w = c.GetQuaternion()[3]
    #self.config_over_object.pose.position.z = 0.30#0.430

    self.config_object.header.stamp = rospy.Time.now()
    self.config_object.pose.position.x = b.pose.pose.position.x
    self.config_object.pose.position.y = b.pose.pose.position.y
    #self.config_object.pose.position.z = 0.2 #0.095
    self.config_object.pose.orientation = box_pose.orientation
Example #24
    def call_planner(self, stage):
        self.status.planning_status = stage
        print(stage)

        if stage == ObjectStatus.PREGRASP:
            
            target_pose = self.status.pose_stamped.pose
            
            pregrasp_distance = .1

            #The marker pose is defined in the torso coordinate system
            target_tran = pm.toMatrix(pm.fromMsg(target_pose))
            hand_tran = target_tran.copy()


            #Align hand perpendicular to cylinder in its canonical pose
            hand_tran[:3,1] = target_tran[:3,2]
            hand_tran[:3,2] = target_tran[:3,0]
            hand_tran[:3,0] = target_tran[:3,1]
            pregrasp_tran = hand_tran.copy()

            #The approach direction of this gripper is -y for some reason
            pregrasp_tran[:3,3] = pregrasp_tran[:3,3] + pregrasp_tran[:3,1]*pregrasp_distance
            hand_tran_pose = pm.toMsg(pm.fromMatrix(hand_tran))
            pregrasp_pose = pm.toMsg(pm.fromMatrix(pregrasp_tran))


            req = PosePlanningSrv._request_class()
            req.TargetPoses = [pregrasp_pose, hand_tran_pose]
            req.ManipulatorID = int(self.status.hands == self.status.RIGHT)
            res = None
            try:
                print(req)
                res = self.pregrasp_planner_client.call(req)
                self.status.operating_status = self.status.PLANNING
                #print res
                self.last_plan = res.PlannedTrajectory
            except:
                res = None
                rospy.loginfo("Planner failed to pregrasp")
                         
                self.status.operating_status = self.status.ERROR
Example #25
 def test_compute_twist(self):
     for _ in range(100):
         T0 = br.transform.random()
         vel = np.random.rand(3)
         rot = np.random.rand(3)
         kdl_twist = PyKDL.Twist(PyKDL.Vector(*vel), PyKDL.Vector(*rot))
         F0 = posemath.fromMatrix(T0)
         F1 = PyKDL.addDelta(F0, kdl_twist)
         T1 = posemath.toMatrix(F1)
         twist = ru.transforms.compute_twist(F0, F1)
         np.testing.assert_allclose(twist, np.hstack((vel, rot)))
Example #26
    def update(self, dt=0.0):

        # initial pose
        T0 = pm.toMatrix(pm.fromMsg(self._odom.pose.pose))
        dx = self._cmd_vel.linear
        dq = self._cmd_vel.angular

        dT = tx.compose_matrix(angles=(dq.x * dt, dq.y * dt, dq.z * dt),
                               translate=(dx.x * dt, dx.y * dt, dx.z * dt))
        T1 = tx.concatenate_matrices(T0, dT)
        self._odom.pose.pose = pm.toMsg(pm.fromMatrix(T1))
Example #27
    def test_roundtrip(self):
        c = Frame()

        d = pm.fromMsg(pm.toMsg(c))
        self.assertEqual(repr(c), repr(d))

        d = pm.fromMatrix(pm.toMatrix(c))
        self.assertEqual(repr(c), repr(d))

        d = pm.fromTf(pm.toTf(c))
        self.assertEqual(repr(c), repr(d))
Example #28
    def test_roundtrip(self):
        c = Frame()

        d = pm.fromMsg(pm.toMsg(c))
        self.assertEqual(repr(c), repr(d))

        d = pm.fromMatrix(pm.toMatrix(c))
        self.assertEqual(repr(c), repr(d))

        d = pm.fromTf(pm.toTf(c))
        self.assertEqual(repr(c), repr(d))
Example #29
    def __setstate__(self, state):
        self.model_name = state['model_name']
        self.object_name = state['object_name']

        buff = StringIO.StringIO()
        buff.writelines(state['point_cloud_data'])
        buff.seek(0)
        self.point_cloud_data = sensor_msgs.msg.PointCloud2()
        self.point_cloud_data.deserialize(buff.buf)
        self.pose = pm.toMsg(pm.fromMatrix(state['pose']))
        self.bc = ModelRecManager.tf_broadcaster
        self.listener = ModelRecManager.tf_listener
Example #30
    def __setstate__(self, state):
        self.model_name = state['model_name']
        self.object_name = state['object_name']

        buff = StringIO.StringIO()
        buff.writelines(state['point_cloud_data'])
        buff.seek(0)
        self.point_cloud_data = sensor_msgs.msg.PointCloud2()
        self.point_cloud_data.deserialize(buff.buf)
        self.pose = pm.toMsg(pm.fromMatrix(state['pose']))
        self.bc = ModelRecManager.tf_broadcaster
        self.listener = ModelRecManager.tf_listener
Example #31
    def GetFwdPoseMsg(self):

        msg = PoseArray()
        msg.header.frame_id = self.obj_frame

        for i in range(len(self.world_states)): 
            mat = self.kinematics.forward(self.joint_states[i].position[:self.dof])
            ee_frame = self.base_tform * pm.fromMatrix(mat)
            pmsg = pm.toMsg(ee_frame * PyKDL.Frame(PyKDL.Rotation.RotY(-1*np.pi/2)))
            msg.poses.append(pmsg)
        return msg
Example #32
 def align_pose(self, pose):
     objectInCamera = pm.toMatrix(pm.fromMsg(pose))
     ModelRecManager.tf_listener.waitForTransform("/world", "/camera_rgb_optical_frame", rospy.Time(0), rospy.Duration(5))
     cameraInWorld = pm.toMatrix(pm.fromTf(ModelRecManager.tf_listener.lookupTransform("/world", "/camera_rgb_optical_frame",rospy.Time(0))))
     objectInWorld = dot(cameraInWorld, objectInCamera)
     """45 degrees rotated around z axis"""
     objectInWorld[0:3,0:3] = mat([[0.7071,-0.7071,0],
                                   [0.7071,0.7071,0],
                                   [0,0,1]]);
     worldInCamera = linalg.inv(cameraInWorld)
     objectInCameraModified = dot(worldInCamera, objectInWorld)
     return pm.toMsg(pm.fromMatrix(objectInCameraModified))
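The hard-coded 0.7071 block above is just a 45-degree rotation about z; building it from an Euler angle avoids the rounding (a sketch using tf.transformations):

import math
from tf import transformations

Rz45 = transformations.euler_matrix(0, 0, math.radians(45.0))[:3, :3]
# objectInWorld[0:3, 0:3] = Rz45 gives the same matrix without the
# truncated 0.7071 constants.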
Example #33
    def get_des_twist(self, dT=0.01):
        """ Get desired twist @ time t, by computing differential of traj_frame @ t=T-1 and t=T
        """
        #_cur_frame = self.get_current_frame()
        #return PyKDL.diff(self.prev_frame, _cur_frame, dT)
        #
        #
        if self.btn_state_2 == 1:
            b_T_saw0 = pm.toMatrix(self.cur_frame)
            b_T_vr0 = pm.toMatrix(self._frame2)
            self.vr0_T_saw0 = np.matmul(inv(b_T_vr0),
                                        b_T_saw0)  # numpy 4x4 array
        # else:
        #     self.vr0_T_saw0 = np.identity(4)

        self._frame2 = pm.fromMatrix(
            np.matmul(self.vr0_T_saw0, pm.toMatrix(self._frame2)))
        self._frame1 = pm.fromMatrix(
            np.matmul(self.vr0_T_saw0, pm.toMatrix(self._frame1)))

        return PyKDL.diff(self._frame1, self._frame2, dT)
Example #34
def omni_callback(joint_state):
    global update_pub, last_button_state
    sendTf(marker_tf, '/marker', fixed_frame)
    sendTf(zero_tf, '/base', fixed_frame)
    sendTf(marker_ref, '/marker_ref', fixed_frame)
    sendTf(stylus_ref, '/stylus_ref', fixed_frame)
        
    try:
        update = InteractionCursorUpdate()
        update.pose.header = std_msgs.msg.Header()
        update.pose.header.stamp = rospy.Time.now()
        update.pose.header.frame_id = 'marker_ref'
        if button_clicked and last_button_state == update.GRAB:
            update.button_state = update.KEEP_ALIVE
        elif button_clicked and last_button_state == update.KEEP_ALIVE:
            update.button_state = update.KEEP_ALIVE
        elif button_clicked:
            update.button_state = update.GRAB
        elif last_button_state == update.KEEP_ALIVE:
            update.button_state = update.RELEASE
        else:
            update.button_state = update.NONE
            updateRefs()
        update.key_event = 0

        control_tf = ((0, 0, 0), tf.transformations.quaternion_from_euler(0, 0, control_offset))
        if button_clicked:
            # Get pose corresponding to transform between stylus reference and current position.
            stylus_tf = listener.lookupTransform('/stylus_ref', '/stylus', rospy.Time(0))
            stylus_matrix = pm.toMatrix(pm.fromTf(stylus_tf))
            control_matrix = pm.toMatrix(pm.fromTf(control_tf))
            p = pm.toMsg(pm.fromMatrix(numpy.dot(control_matrix, stylus_matrix)))
        else:
            p = pm.toMsg(pm.fromTf(control_tf))
        
        # Simply scale this up a bit to increase the workspace.
        workspace = 4
        p.position.x = p.position.x * workspace
        p.position.y = p.position.y * workspace
        p.position.z = p.position.z * workspace

        update.pose.pose = p

        last_button_state = update.button_state

        # Publish feedback.
        update_pub.publish(update)
    except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
        rospy.logerr("Couldn't look up transform. These things happen...")
Example #35
def publish_true_object_tran(height):
    """@brief - A debugging function to visualize the origin of the objects
    in from openrave in the scene graph. 

    @param height - the offset from the table to the origin of the object.
    """
    try:
        object_height_offset = eye(4)
        object_height_offset[2,3] = height
        object_height_tf = pm.toTf(pm.fromMatrix(object_height_offset))
        bc = tf.TransformBroadcaster()
        bc.sendTransform(object_height_tf[0], object_height_tf[1], rospy.Time.now(), "/true_obj_pose", "/object")
    except Exception,e:
        rospy.logwarn("failed to publish true object height: %s"%e)
        return []
Example #36
def transfrom_between_arucos(camera2ai, camera2aj):
    '''
    Compute the transform between two arucos:
    the pose of aruco j expressed in aruco i's reference frame.
    '''
    # Pose of marker j with the camera as the world frame
    camera2aj_mat = pm.toMatrix(camera2aj)
    # Pose of the camera with marker i as the world frame
    ai2camera_mat = pm.toMatrix(camera2ai.Inverse())
    # Chain the poses: aj in ai's frame
    ai2aj_mat = ai2camera_mat.dot(camera2aj_mat)
    # KDL Frame for the pose of marker j in marker i's frame
    ai2aj_frame = pm.fromMatrix(ai2aj_mat)
    # Return as a Pose message
    return pm.toMsg(ai2aj_frame)
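A usage sketch: with both markers detected in one camera image, the helper returns the pose of marker j in marker i's frame:

import PyKDL

camera2ai = PyKDL.Frame(PyKDL.Rotation.Identity(), PyKDL.Vector(0.0, 0.2, 1.0))
camera2aj = PyKDL.Frame(PyKDL.Rotation.Identity(), PyKDL.Vector(0.1, 0.0, 1.5))
pose_j_in_i = transfrom_between_arucos(camera2ai, camera2aj)
print pose_j_in_i.position    # -> x: 0.1, y: -0.2, z: 0.5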
Example #37
    def model_state_publisher(self):
        #############
        targetFrame = PyKDL.Frame(PyKDL.Rotation.Quaternion(self.target.pose.orientation.x,self.target.pose.orientation.y,self.target.pose.orientation.z,self.target.pose.orientation.w), PyKDL.Vector(self.target.pose.position.x,self.target.pose.position.y,self.target.pose.position.z))

        targetFrameMatrix = pm.toMatrix(targetFrame)

        sourceFrame = PyKDL.Frame(PyKDL.Rotation.Quaternion(self.source.pose.orientation.x,self.source.pose.orientation.y,self.source.pose.orientation.z,self.source.pose.orientation.w),PyKDL.Vector(self.source.pose.position.x,self.source.pose.position.y,self.source.pose.position.z))
        
        pose = pm.toMsg(pm.fromMatrix(linalg.inv(targetFrameMatrix))*sourceFrame)
        
        poseStamped = PoseStamped()
        poseStamped.pose = pose
        poseStamped.header.stamp = rospy.get_rostime()
        poseStamped.header.frame_id = '/'+self.targetLinkName[5:len(self.targetLinkName)] # This strips 'pr2::' from the frame_id string
        self.wPub.publish(poseStamped)
Example #38
 def align_pose(self, pose):
     objectInCamera = pm.toMatrix(pm.fromMsg(pose))
     ModelRecManager.tf_listener.waitForTransform(
         "/world", "/camera_rgb_optical_frame", rospy.Time(0),
         rospy.Duration(5))
     cameraInWorld = pm.toMatrix(
         pm.fromTf(
             ModelRecManager.tf_listener.lookupTransform(
                 "/world", "/camera_rgb_optical_frame", rospy.Time(0))))
     objectInWorld = dot(cameraInWorld, objectInCamera)
     """45 degrees rotated around z axis"""
     objectInWorld[0:3, 0:3] = mat([[0.7071, -0.7071, 0],
                                    [0.7071, 0.7071, 0], [0, 0, 1]])
     worldInCamera = linalg.inv(cameraInWorld)
     objectInCameraModified = dot(worldInCamera, objectInWorld)
     return pm.toMsg(pm.fromMatrix(objectInCameraModified))
Example #39
def publish_true_object_tran(height):
    """@brief - A debugging function to visualize the origin of the objects
    from OpenRAVE in the scene graph.

    @param height - the offset from the table to the origin of the object.
    """
    try:
        object_height_offset = eye(4)
        object_height_offset[2, 3] = height
        object_height_tf = pm.toTf(pm.fromMatrix(object_height_offset))
        bc = tf.TransformBroadcaster()
        bc.sendTransform(object_height_tf[0], object_height_tf[1],
                         rospy.Time.now(), "/true_obj_pose", "/object")
    except Exception, e:
        rospy.logwarn("failed to publish true object height: %s" % e)
        return []
Example #40
    def detach(self, object_name="", actuate = True, add_back_to_planning_scene = True):
        if actuate:
            # detach the collision object to the gripper
            self.gripper_open.call()
        # self.planning_group.detachObject(object_name, self.end_link)
        self.planning_scene_publisher = rospy.Publisher('planning_scene', PlanningScene, queue_size=100)
        planning_scene_diff = PlanningScene(is_diff=True)
        T_fwd = pm.fromMatrix(self.kdl_kin.forward(self.q0))

        # get the actual collision obj from planning scene
        res = self.get_planning_scene(components=
            PlanningSceneComponents(components=PlanningSceneComponents.ROBOT_STATE_ATTACHED_OBJECTS))
        all_objects = res.scene.robot_state.attached_collision_objects
        for col_obj in all_objects:
            rospy.logwarn(str(object_name)+", "+str(col_obj.link_name)+", "+str(col_obj.object.id))

            # remove from attached objects
            diff_obj = AttachedCollisionObject(
                link_name=self.end_link,
                object=CollisionObject(id=col_obj.object.id))
            diff_obj.object.operation = CollisionObject.REMOVE
            planning_scene_diff.robot_state.attached_collision_objects.append(diff_obj)
            
            if add_back_to_planning_scene:
                # add into planning scene
                add_object = col_obj.object
                add_object.header.frame_id = self.base_link
                add_object.operation = CollisionObject.ADD
                for index, mesh_pose in enumerate(col_obj.object.mesh_poses):
                    add_object.mesh_poses[index] = pm.toMsg(T_fwd * pm.fromMsg(mesh_pose))
                planning_scene_diff.world.collision_objects.append(add_object)

            # if col_obj.id == object_name:
            #     detach_object = col_obj
            #     break

        # remove object from the gripper, then add the original collision object
        # detach_object = AttachedCollisionObject()
        # detach_object.object.id = object_name
        # detach_object.link_name = self.end_link
        # detach_object.object.operation = detach_object.object.REMOVE
        # planning_scene_diff.robot_state.attached_collision_objects.append(copy.deepcopy(detach_object));

        # add_object = CollisionObject()

        self.planning_scene_publisher.publish(planning_scene_diff)
Example #41
    def get_err_twist(self, goal_frame=None, dT=0.01):
        """ Get the desirable twist for given error.
        """
        #_cur_frame = self.get_current_frame()

        if self.btn_state_2 == 1:
            b_T_saw0 = pm.toMatrix(self.cur_frame)
            b_T_vr0 = pm.toMatrix(self._frame2)
            self.vr0_T_saw0 = np.matmul(inv(b_T_vr0),
                                        b_T_saw0)  # numpy 4x4 array
        # else:
        #      self.vr0_T_saw0 = np.identity(4)

        self._frame2 = pm.fromMatrix(
            np.matmul(self.vr0_T_saw0, pm.toMatrix(self._frame2)))

        return PyKDL.diff(self.cur_frame, self._frame2, dT)
Example #42
    def detach(self, object_name="", actuate = True, add_back_to_planning_scene = True):
        if actuate:
            # detach the collision object to the gripper
            self.gripper_open.call()
        # self.planning_group.detachObject(object_name, self.end_link)
        self.planning_scene_publisher = rospy.Publisher('planning_scene', PlanningScene, queue_size=100)
        planning_scene_diff = PlanningScene(is_diff=True)
        T_fwd = pm.fromMatrix(self.kdl_kin.forward(self.q0))

        # get the actual collision obj from planning scene
        res = self.get_planning_scene(components=
            PlanningSceneComponents(components=PlanningSceneComponents.ROBOT_STATE_ATTACHED_OBJECTS))
        all_objects = res.scene.robot_state.attached_collision_objects
        for col_obj in all_objects:
            rospy.logwarn(str(object_name)+", "+str(col_obj.link_name)+", "+str(col_obj.object.id))

            # remove from attached objects
            diff_obj = AttachedCollisionObject(
                link_name=self.end_link,
                object=CollisionObject(id=col_obj.object.id))
            diff_obj.object.operation = CollisionObject.REMOVE
            planning_scene_diff.robot_state.attached_collision_objects.append(diff_obj)

            if add_back_to_planning_scene:
                # add into planning scene
                add_object = col_obj.object
                add_object.header.frame_id = self.base_link
                add_object.operation = CollisionObject.ADD
                for index, mesh_pose in enumerate(col_obj.object.mesh_poses):
                    add_object.mesh_poses[index] = pm.toMsg(T_fwd * pm.fromMsg(mesh_pose))
                planning_scene_diff.world.collision_objects.append(add_object)

            # if col_obj.id == object_name:
            #     detach_object = col_obj
            #     break

        # remove object from the gripper, then add the original collision object
        # detach_object = AttachedCollisionObject()
        # detach_object.object.id = object_name
        # detach_object.link_name = self.end_link
        # detach_object.object.operation = detach_object.object.REMOVE
        # planning_scene_diff.robot_state.attached_collision_objects.append(copy.deepcopy(detach_object));

        # add_object = CollisionObject()

        self.planning_scene_publisher.publish(planning_scene_diff)
Example #43
def convert_cartesian_relative_goal(global_data, move_tran):
    """@brief - Convert a position relative to the hand's current coordinate system to the robot's base coordinate system.
    @param move_tran - the relative position in the hand's coordinate system. 

    """
    try:
        move_tf = pm.toTf(pm.fromMatrix(move_tran))
        print move_tf
        bc = tf.TransformBroadcaster()
        now = rospy.Time.now()
        bc.sendTransform(move_tf[0], move_tf[1], now, "hand_goal", "hand")
        #listener = tf.TransformListener()
        global_data.listener.waitForTransform("armbase", "arm_goal", now, rospy.Duration(1.0))
        armtip_robot = global_data.listener.lookupTransform('armbase','arm_goal', now)

        #arm_robot = global_data.listener.lookupTransform('armbase', 'arm', now)
        return pm.toMatrix(pm.fromTf(armtip_robot))
    except Exception, e:
        handle_fatal('convert_cartesian_relative_goal failed: error %s.'%e)
Example #44
 def compute(self, world, state):
     '''
     Compute LfD features
     '''
     if state.reference is not None:
         ee = pm.fromMatrix(self.kdl_kin.forward(state.q))
         if state.gripper_closed:
             gripper = 1.
         else:
             gripper = 0.
         f = np.array(
             self.features.GetFeatures(ee,
                                       state.seq / len(state.traj.points),
                                       world.observation,
                                       ['time', state.reference.goal],
                                       gripper))
         return f
     else:
         return None
Example #45
def convert_cartesian_world_goal(global_data, world_tran):
    """@brief - Get armtip goal pose in the arms coordinate system from hand world goal pose.
    This is useful to convert between a hand goal in the world coordinate system to a cartesian goal that
    can be sent directly to the staubli controller. 
    
    @param world_tf - tf of hand goal pose in world coordinates
    """
    try:    
        world_tf = pm.toTf(pm.fromMatrix(world_tran))
        bc = tf.TransformBroadcaster()
        now = rospy.Time.now()
        bc.sendTransform(world_tf[0], world_tf[1], now, "hand_goal", "world")
        #listener = tf.TransformListener()
        global_data.listener.waitForTransform("armbase", "arm_goal", now, rospy.Duration(1.0))
        armtip_robot = global_data.listener.lookupTransform('armbase', 'arm_goal', now)
        
        return pm.toMatrix(pm.fromTf(armtip_robot))
    except Exception, e:
        handle_fatal('convert_cartesian_world_goal failed: error %s.'%e) 
Example #46
    def visualizePlan(self, plan):
        '''
        Debug tool: print poses associated with a set of plans to a message and
        publish this message.
        '''
        actor = self.actors[plan.actor_id]
        msg = PoseArray()
        msg.header.frame_id = actor.base_link

        if plan.actor_id != 0:
            raise NotImplementedError(
                'kdl kinematics only created for actor 0 right now')

        # Show a trajectory of task plans.
        for node in plan.nodes:
            for state, action in node.traj:
                T = self.lfd.kdl_kin.forward(state.q)
                msg.poses.append(pm.toMsg(pm.fromMatrix(T)))
        self.plan_pub.publish(msg)
Example #47
def convert_cartesian_relative_goal(global_data, move_tran):
    """@brief - Convert a position relative to the hand's current coordinate system to the robot's base coordinate system.
    @param move_tran - the relative position in the hand's coordinate system. 

    """
    try:
        move_tf = pm.toTf(pm.fromMatrix(move_tran))
        print move_tf
        bc = tf.TransformBroadcaster()
        now = rospy.Time.now()
        bc.sendTransform(move_tf[0], move_tf[1], now, "hand_goal", "hand")
        #listener = tf.TransformListener()
        global_data.listener.waitForTransform("armbase", "arm_goal", now,
                                              rospy.Duration(1.0))
        armtip_robot = global_data.listener.lookupTransform(
            'armbase', 'arm_goal', now)

        #arm_robot = global_data.listener.lookupTransform('armbase', 'arm', now)
        return pm.toMatrix(pm.fromTf(armtip_robot))
    except Exception, e:
        handle_fatal('convert_cartesian_relative_goal failed: error %s.' % e)
Example #48
	def Rot_k_phi(self, k_vec, phi): #Eigen::Vector3d k_vec,float phi
	    #Eigen::Matrix3d R_k_phi;
	    kx = k_vec[0]
	    ky = k_vec[1]
	    kz = k_vec[2]
	    N = 3
	    # Build the skew-symmetric cross-product matrix K row by row with the
	    # 9-float constructor. (Passing three row Vectors to Rotation() would
	    # treat them as columns, transposing K and negating the rotation angle.)
	    K = Rotation(0.0,-kz,ky, kz,0.0,-kx, -ky,kx,0.0)
	    I = Rotation()
	    K_Frame = Frame(K)
	    K_Frame_numpy = posemath.toMatrix(K_Frame)
	    K_numpy = K_Frame_numpy[0:3,0:3]
	    I_numpy = np.identity(N)
	    # Rodrigues' formula: use the matrix product K_numpy.dot(K_numpy);
	    # the element-wise K_numpy*K_numpy would be wrong for ndarrays.
	    R_k_phi_numpy = I_numpy + math.sin(phi)*K_numpy + (1-math.cos(phi))*K_numpy.dot(K_numpy)
	    # Embed the 3x3 rotation in a homogeneous 4x4 for posemath.fromMatrix.
	    R_k_phi_numpy_Frame = np.eye(4)
	    R_k_phi_numpy_Frame[0:3,0:3] = R_k_phi_numpy
	    R_k_phi_Frame = posemath.fromMatrix(R_k_phi_numpy_Frame)
	    R_k_phi = R_k_phi_Frame.M
	    return R_k_phi
Example #49
    def process_grasp_msg(self, grasp_msg):
        """@brief - Attempt to make the adjustment specified by grasp_msg
        1. plan a path to a place 15 cm back from the new pose
        """
        print 'regular move:'
        print grasp_msg
        #release the object
        tp.open_barrett()

        #get the robot's current location and calculate the absolute tran after the adjustment
        #update the robot status
        tp.update_robot(self.global_data.or_env.GetRobots()[0])
        handGoalInBase = pm.toMatrix(pm.fromMsg(grasp_msg.final_grasp_pose))

        try:
            hand_tf = pm.toTf(pm.fromMatrix(handGoalInBase))
            bc = tf.TransformBroadcaster()
            now = rospy.Time.now()
            bc.sendTransform(hand_tf[0], hand_tf[1], now, "hand_goal", "armbase")
            self.global_data.listener.waitForTransform("armbase", "arm_goal", now, rospy.Duration(1.0))
            armtip_robot = self.global_data.listener.lookupTransform('armbase', 'arm_goal', now)
            armGoalInBase = pm.toMatrix(pm.fromTf(armtip_robot))
        except Exception, e:
            handle_fatal('convert_cartesian_world_goal failed: error %s.'%e) 
Example #50
 def set_goal(self,q):
     self.at_goal = False
     self.near_goal = False
     self.goal = pm.fromMatrix(self.kdl_kin.forward(q))
Example #51
def get_staubli_cartesian_as_pose_msg():
    rospy.wait_for_service('getCartesian')
    try:
        get_cartesian = rospy.ServiceProxy( 'getCartesian', staubli_tx60.srv.GetCartesian )
        resp1 = get_cartesian()
        # make srv x y z  rx ry rz euler angle representation into pose message
        pos = geometry_msgs.msg.Point( x = resp1.x, y = resp1.y, z = resp1.z )
        q = tf.transformations.quaternion_from_euler( resp1.rx , resp1.ry, resp1.rz ,'rxyz' )
        quat =  geometry_msgs.msg.Quaternion( x = q[0], y = q[1], z = q[2], w = q[3] )
        return geometry_msgs.msg.Pose( position = pos, orientation = quat )
    except rospy.ServiceException, e:
        print "Service call failed: %s"%e
        return []

def get_staubli_cartesian_as_tran():
    current_pose = get_staubli_cartesian_as_pose_msg()
    return pm.toMatrix(pm.fromMsg(current_pose))


rospy.init_node('arm_position_publisher')
tf_publisher = tf.TransformBroadcaster()
loop = rospy.Rate(3.0)
while not rospy.is_shutdown():
    t = get_staubli_cartesian_as_tran()
    t_tf = pm.toTf(pm.fromMatrix(t))
    tf_publisher.sendTransform(t_tf[0], t_tf[1], rospy.Time.now(),"/arm","/armbase")
    loop.sleep()


 
  
Example #52
	def write_needle_drive_affines_to_file(self, gripper_affines_wrt_camera):


		nposes = len(gripper_affines_wrt_camera)
		if (nposes<1):
			rospy.logwarn("NO POSES TO SAVE")
			return

		str1 = "saving computer %d gripper poses w/rt camera"%nposes
		rospy.loginfo(str1)



		affine_gripper1_frame_wrt_camera_last = gripper_affines_wrt_camera[nposes-1]
		affine_needle_frame_wrt_camera_last = affine_gripper1_frame_wrt_camera_last*self.affine_needle_frame_wrt_gripper_frame_

		affine_needle_frame_wrt_camera_last_numpy = posemath.toMatrix(affine_needle_frame_wrt_camera_last)
		affine_needle_frame_wrt_camera_last_numpy_R = affine_needle_frame_wrt_camera_last_numpy[0:3,0:3] 
		
		'''BELOW do rest of the calculations with numpy array to prevent data type mismatch
		and convert to PyKDL at the end'''

		nvec_needle = affine_needle_frame_wrt_camera_last_numpy_R[0:,0]
		bvec_needle = affine_needle_frame_wrt_camera_last_numpy_R[0:,2]

		origin_needle = affine_needle_frame_wrt_camera_last.p
		origin_needle_numpy = np.array([origin_needle.x(),origin_needle.y(),origin_needle.z()])
		tip_of_needle_numpy = origin_needle_numpy + self.needle_radius_*nvec_needle
		
		gripper2_grasp_origin_numpy = tip_of_needle_numpy + self.grasp_depth_*bvec_needle
		gripper2_pre_grasp_origin_numpy = gripper2_grasp_origin_numpy - 0.03*bvec_needle
		gripper2_out_of_way_numpy = np.array([0.0, 0.0, -0.02])

		N = 3
		R2_diag_numpy = np.identity(N)
		R2_diag_numpy[2,2] = -1
		R2_diag_numpy[1,1] = -1

		print "R2_diag:"
		print R2_diag_numpy

		# Embed R2_diag in a homogeneous 4x4 so posemath.fromMatrix can parse it.
		R2_diag_numpy_Frame = np.eye(4)
		R2_diag_numpy_Frame[0:3,0:3] = R2_diag_numpy
		R2_diag_Frame = posemath.fromMatrix(R2_diag_numpy_Frame)
		
		'''BELOW numpy ends here - switch back to PyKDL BELOW'''	

		R2_diag = R2_diag_Frame.M
		gripper2_out_of_way = Vector(gripper2_out_of_way_numpy[0],gripper2_out_of_way_numpy[1],gripper2_out_of_way_numpy[2])
		psm2_nom_wrt_base2 = Frame(R2_diag,gripper2_out_of_way)


		print "affine_lcamera_to_psm_two_: "
		print self.default_affine_lcamera_to_psm_two_.M
		print "origin: "
		print self.default_affine_lcamera_to_psm_one_.p

		psm2_nom_wrt_camera = self.default_affine_lcamera_to_psm_two_*psm2_nom_wrt_base2

		print "psm2_nom_wrt_camera: "
		print psm2_nom_wrt_camera.M
		print "origin: "
		print psm2_nom_wrt_camera.p

		'''BELOW switch back to numpy BELOW '''
		
		psm2_nom_wrt_camera_numpy = posemath.toMatrix(psm2_nom_wrt_camera)
		psm2_nom_wrt_camera_numpy_R = psm2_nom_wrt_camera_numpy[0:3,0:3] 

		nvec_gripper2 = psm2_nom_wrt_camera_numpy_R[0:,0]
		tvec_gripper2 = psm2_nom_wrt_camera_numpy_R[0:,1]
		bvec_gripper2 = psm2_nom_wrt_camera_numpy_R[0:,2]

		gripper2_out_of_way_wrt_camera = psm2_nom_wrt_camera.p


		t = 4
		dt = 1

		outfile = open('gripper_poses_in_camera_coords.csp','w')

		for i in range(nposes):
			gripper_affines_wrt_camera_this = gripper_affines_wrt_camera[i]
			#Origin = gripper_affines_wrt_camera_this.p
			#R = gripper_affines_wrt_camera_this.M

			gripper_numpy = posemath.toMatrix(gripper_affines_wrt_camera_this)
			R = gripper_numpy[0:3,0:3] 
			Origin = gripper_numpy[0:3,3]

			outfile.write(str("%0.3f"%Origin[0]) + ', ' + str("%0.3f"%Origin[1]) + ', ' + str("%0.3f"%Origin[2]) + ',     ')

			nvec = R[0:,0]
			tvec = R[0:,1]
			bvec = R[0:,2]

			outfile.write(str("%0.3f"%nvec[0]) + ', ' + str("%0.3f"%nvec[1]) + ', ' + str("%0.3f"%nvec[2]) + ',     ')
			outfile.write(str("%0.3f"%bvec[0]) + ', ' + str("%0.3f"%bvec[1]) + ', ' + str("%0.3f"%bvec[2]) + ',  0,   ')

			outfile.write(str("%0.3f"%gripper2_out_of_way_wrt_camera.x()) + ', ' + str("%0.3f"%gripper2_out_of_way_wrt_camera.y()) + ', ' + str("%0.3f"%gripper2_out_of_way_wrt_camera.z()) + '     ')
			outfile.write(str("%0.3f"%nvec_gripper2[0]) + ', ' + str("%0.3f"%nvec_gripper2[1]) + ', ' + str("%0.3f"%nvec_gripper2[2]) + ',     ')
			outfile.write(str("%0.3f"%bvec_gripper2[0]) + ', ' + str("%0.3f"%bvec_gripper2[1]) + ', ' + str("%0.3f"%bvec_gripper2[2]) + ',  0.0   ' + str("%0.2f"%t) + '\n')


			t+=dt

		outfile.close()
		rospy.loginfo("wrote gripper motion plan to file gripper_poses_in_camera_coords,csp")
Example #53
	def Rot_k_phi(self, k_vec, phi): #Eigen::Vector3d k_vec,float phi
	    #Eigen::Matrix3d R_k_phi;
	    kx = k_vec[0]
	    ky = k_vec[1]
	    kz = k_vec[2]
	    
	    #K_nrow = Vector(0.0, -kz, ky)
	    #K_trow = Vector(kz, 0.0, -kx)
	    #K_brow = Vector(-ky, kx, 0.0)

	    N = 3
	    K = Rotation(0.0,-kz,ky, kz,0.0,-kx, -ky,kx,0.0)
	    
	    Ksquare = K*K

	    I = Rotation()
	    
	    K_Frame = Frame(K)
	    
	    K_Frame_numpy = posemath.toMatrix(K_Frame)
	    
	    K_numpy = K_Frame_numpy[0:3,0:3]
	    
	    I_numpy = np.identity(N)
	    K_square_Frame = Frame(Ksquare)
	   

	    K_square_Frame_numpy = posemath.toMatrix(K_square_Frame)
	    
	    K_square_numpy = K_square_Frame_numpy[0:3,0:3]
	    

	    #R_k_phi_numpy = I_numpy + math.sin(phi)*K_numpy + (1-math.cos(phi))*K_numpy*K_numpy
	    #R_k_phi_numpy_FrameTemp = np.c_[R_k_phi_numpy, np.ones(N)]
	    #R_k_phi_numpy_Frame = np.r_[R_k_phi_numpy_FrameTemp,[R_k_phi_numpy_FrameTemp[1]]]
	    #R_k_phi_Frame = posemath.fromMatrix(R_k_phi_numpy_Frame)
	    #R_k_phi = R_k_phi_Frame.M


	    R_k_phi_numpy_square = I_numpy + math.sin(phi)*K_numpy + (1-math.cos(phi))*K_square_numpy
	    # Embed the 3x3 rotation in a homogeneous 4x4 for posemath.fromMatrix.
	    R_k_phi_numpy_Frame_square = np.eye(4)
	    R_k_phi_numpy_Frame_square[0:3,0:3] = R_k_phi_numpy_square
	    R_k_phi_Frame_square = posemath.fromMatrix(R_k_phi_numpy_Frame_square)
	    R_k_phi_square = R_k_phi_Frame_square.M
	    

	    if(debug_needle_print):
	    	print "K",K
	    	print "Ksquare",Ksquare
	    	print "K_Frame",K_Frame
	    	print "K_Frame_numpy",K_Frame_numpy
	    	print "K_numpy",K_numpy
	    	print "K_square_Frame",K_square_Frame
	    	print "K_square_Frame_numpy",K_square_Frame_numpy
	    	print "math.sin(phi)",(math.sin(phi))
	    	print "1-math.cos(phi)",(1-math.cos(phi))
	    	print "R_k_phi_numpy_square",R_k_phi_numpy_square
	    	print "R_k_phi_numpy_Frame_square",R_k_phi_numpy_Frame_square
	    	print "R_k_phi_Frame_square",R_k_phi_Frame_square
	    	print "R_k_phi_square",R_k_phi_square

	    return R_k_phi_square
Example #54
    def getCartesianMove(self, frame, q0,
      base_steps=1000,
      steps_per_meter=1000,
      steps_per_radians = 4,
      time_multiplier=1,
      percent_acc=1,
      use_joint_move = False,
      table_frame = None):

      if table_frame is not None:
        if frame.p[2] < table_frame[0][2]:
          rospy.logerr("Ignoring move to waypoint due to relative z: %f < %f"%(frame.p[2],table_frame[0][2]))
          return JointTrajectory()

      if q0 is None:
        rospy.logerr("Invalid initial joint position in getCartesianMove")
        return JointTrajectory()

      # interpolate between start and goal
      pose = pm.fromMatrix(self.kdl_kin.forward(q0))

      cur_rpy = np.array(pose.M.GetRPY())
      cur_xyz = np.array(pose.p)
      
      goal_rpy = np.array(frame.M.GetRPY())
      goal_xyz = np.array(frame.p)
      delta_rpy = np.linalg.norm(goal_rpy - cur_rpy)
      delta_translation = (pose.p - frame.p).Norm()
      if delta_rpy < 0.001 and delta_translation < 0.001:
        rospy.logwarn("Robot is already in the goal position.")
        return JointTrajectory(points=[JointTrajectoryPoint(positions=q0,
          velocities=[0]*len(q0),
          accelerations=[0]*len(q0),
          time_from_start=rospy.Duration(0.0))], 
          joint_names = self.joint_names)

      q_target = self.ik(pm.toMatrix(frame),q0)
      if q_target is None:
        rospy.logerr("No IK solution on cartesian move target")
        return JointTrajectory()
      else:
        if np.any(
          np.greater(
            np.absolute(q_target[:2] - np.array(q0[:2])), np.pi/2)) \
          or np.absolute(q_target[3] - q0[3]) > np.pi:

          rospy.logerr("Dangerous IK solution, abort getCartesianMove")
          return JointTrajectory()
      
      dq_target = q_target - np.array(q0)
      if np.sum(np.absolute(dq_target)) < 0.0001:
        rospy.logwarn("Robot is already in the goal position.")
        return JointTrajectory(points=[JointTrajectoryPoint(positions=q0,
          velocities=[0]*len(q0),
          accelerations=[0]*len(q0),
          time_from_start=rospy.Duration(0.0))], 
          joint_names = self.joint_names)
      
      steps, t_v_const_step, t_v_setting_max, steps_to_max_speed, const_velocity_max_step = self.calculateAccelerationProfileParameters(dq_target,
        base_steps,
        steps_per_meter,
        steps_per_radians,
        delta_translation,
        time_multiplier,
        self.acceleration_magnification * percent_acc)

      traj = JointTrajectory()
      traj.points.append(JointTrajectoryPoint(positions=q0,
          	velocities=[0]*len(q0),
          	accelerations=[0]*len(q0)))
      # Compute a smooth trajectory.
      for i in range(1,steps + 1):
        xyz = None
        rpy = None
        q = None

        if not use_joint_move:
          xyz = cur_xyz + ((float(i)/steps) * (goal_xyz - cur_xyz))
          rpy = cur_rpy + ((float(i)/steps) * (goal_rpy - cur_rpy))

          # Create transform for goal frame
          frame = pm.toMatrix(
            kdl.Frame(
              kdl.Rotation.RPY(
                rpy[0],
                rpy[1],
                rpy[2]),
              kdl.Vector(
                xyz[0],
                xyz[1],
                xyz[2])))

          # Use current inverse kinematics solver with current position
          q = self.ik(frame, q0)
        else:
          q = np.array(q0) + (float(i)/steps) * dq_target
          q = q.tolist()
          
          #q = self.kdl_kin.inverse(frame,q0)
        if self.verbose:
          print "%d -- %s %s = %s"%(i,str(xyz),str(rpy),str(q))

        if q is not None:
          total_time = self.calculateTimeOfTrajectoryStep(i, 
            steps_to_max_speed,
            const_velocity_max_step,
            t_v_const_step,
            t_v_setting_max)
          # Compute the distance to the last point for each joint. We use this to compute our joint velocities.
          dq_i = np.array(q) - np.array(traj.points[-1].positions)
          if np.sum(np.abs(dq_i)) < self.skip_tol:
            rospy.logwarn("Joint trajectory point %d is repeating previous trajectory point. "%i)
            continue

          traj.points[i-1].velocities = (dq_i)/(total_time - traj.points[i-1].time_from_start.to_sec())
          pt = JointTrajectoryPoint(positions=q,
                                    velocities=[0]*len(q),
                                    accelerations=[0]*len(q))

          pt.time_from_start = rospy.Duration(total_time)
          # pt.time_from_start = rospy.Duration(i * ts)
          traj.points.append(pt)
        else:
          rospy.logwarn("No IK solution on one of the trajectory point to cartesian move target")

      if len(traj.points) < base_steps:
          print rospy.logerr("Planning failure with " \
                  + str(len(traj.points)) \
                  + " / " + str(base_steps) \
                  + " points.")
          return JointTrajectory()

      traj.joint_names = self.joint_names
      return traj
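A hypothetical call site for the method above (the `planner` object, goal frame, and seed joints are placeholders; only the keyword arguments come from the signature):

import rospy
import PyKDL as kdl

goal = kdl.Frame(kdl.Rotation.RPY(0.0, 0.0, 1.57), kdl.Vector(0.4, 0.0, 0.3))
q_seed = [0.0] * 7                       # hypothetical current joint state
traj = planner.getCartesianMove(goal, q_seed,
                                base_steps=100,
                                steps_per_meter=500,
                                time_multiplier=1)
if len(traj.points) == 0:                # an empty trajectory signals failure
    rospy.logwarn("Cartesian move could not be planned.")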
Example #55
def integrate_pose(pseudo_frame_as_pose, target_pose):
    # Compose two poses by multiplying their 4x4 homogeneous matrices.
    m1, m2 = pm.toMatrix(pm.fromMsg(pseudo_frame_as_pose)), pm.toMatrix(pm.fromMsg(target_pose))
    return pm.toMsg(pm.fromMatrix(numpy.dot(m1, m2)))
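A small usage sketch (the poses are hypothetical): composing a base pose with an offset expressed in the base pose's own frame.

from geometry_msgs.msg import Pose, Point, Quaternion

base = Pose(position=Point(1.0, 0.0, 0.0), orientation=Quaternion(0.0, 0.0, 0.0, 1.0))
step = Pose(position=Point(0.1, 0.0, 0.0), orientation=Quaternion(0.0, 0.0, 0.0, 1.0))
# `step` is applied in the frame of `base`, so the result sits at x = 1.1.
moved = integrate_pose(base, step)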
Example #56
def interPoseDistance(p1, p2):
    # p1, p2: 4x4 numpy matrices (homogeneous transforms).
    r = p1.I * p2  # relative transform from p1 to p2
    rMsg = pm.toMsg(pm.fromMatrix(r))
    # acos(w) is half the relative rotation angle, since w = cos(theta/2).
    angleDistance = abs(math.acos(rMsg.orientation.w))
    positionDistance = linalg.norm(p1[0:3, 3] - p2[0:3, 3])
    # Unitless blend of radians and meters -- a heuristic score, not a metric.
    return angleDistance + positionDistance
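For example (p1 and p2 built as the 4x4 numpy matrices that the `.I` and slicing above require):

import numpy

p1 = numpy.matrix(numpy.identity(4))
p2 = numpy.matrix(numpy.identity(4))
p2[0, 3] = 0.3                   # translate 0.3 m along x, no rotation
print interPoseDistance(p1, p2)  # -> 0.3 (the angle term is zero)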
Example #57
    def execute(self, userdata):
        sss.set_light("blue")

        wi = WorldInterface()
        wi.reset_attached_objects()
        graspdata = dict()
        print userdata.object

        # add wall
        wall_extent = [3.0, 0.1, 2.5]
        wall_pose = conversions.create_pose_stamped(
            [0, -0.99 - wall_extent[1], wall_extent[2] / 2.0, 0, 0, 0, 1], "base_link"
        )  # magic numbers are cool
        wi.add_collision_box(wall_pose, wall_extent, "wall")

        # add floor
        floor_extent = [3.0, 3.0, 0.1]
        floor_pose = conversions.create_pose_stamped(
            [0, 0, floor_extent[2] / 2.0, 0, 0, 0, 1], "/base_link"
        )  # magic numbers are cool
        wi.add_collision_box(floor_pose, floor_extent, "floor")

        # transform into base_link
        grasp_pose = transform_listener.transform_pose_stamped("base_link", userdata.object.pose, use_most_recent=False)

        # add object bounding box
        obj_pose = deepcopy(grasp_pose)
        lwh = userdata.object.bounding_box_lwh
        m1 = pm.toMatrix(pm.fromMsg(obj_pose.pose))
        m2 = pm.toMatrix(pm.fromTf(((0, 0, lwh.z / 2.0), (0, 0, 0, 1))))
        obj_pose.pose = pm.toMsg(pm.fromMatrix(numpy.dot(m1, m2)))
        wi.add_collision_box(obj_pose, (lwh.x * 2.0, lwh.y * 2.0, lwh.z), "grasp_object")

        print grasp_pose
        # add table
        table_extent = (2.0, 2.0, grasp_pose.pose.position.z)
        table_pose = conversions.create_pose_stamped(
            [-0.5 - table_extent[0] / 2.0, 0, table_extent[2] / 2.0, 0, 0, 0, 1], "base_link"
        )
        wi.add_collision_box(table_pose, table_extent, "table")

        # calculate grasp and lift pose
        grasp_pose.pose.position.x += 0.02
        grasp_pose.pose.position.y += 0.02
        grasp_pose.pose.position.z += 0.1  # 0.03 + 0.05
        grasp_pose.pose.orientation = Quaternion(
            *quaternion_from_euler(-1.706, 0.113, 2.278)
        )  # orientation of sdh_grasp_link in base_link for 'grasp' joint goal

        graspdata["height"] = grasp_pose.pose.position.z - table_extent[2]

        pregrasp_pose = deepcopy(grasp_pose)
        pregrasp_pose.pose.position.x += 0.20
        pregrasp_pose.pose.position.y += 0.11
        pregrasp_pose.pose.position.z += 0.1

        lift_pose = deepcopy(grasp_pose)
        lift_pose.pose.position.z += 0.03

        mp = MotionPlan()
        # open hand
        mp += CallFunction(sss.move, "sdh", "cylopen", False)
        mp += MoveArm("arm", [pregrasp_pose, ["sdh_grasp_link"]], seed="pregrasp")
        mp += MoveComponent("sdh", "cylopen", True)

        # allow collison hand/object
        # for l in hand_description.HandDescription('arm').touch_links:
        #    mp += EnableCollision("grasp_object", l)
        #
        # disable collison
        # mp += ResetCollisions()

        # goto grasp
        mp += MoveArmUnplanned("arm", [grasp_pose, ["sdh_grasp_link"]])

        # close hand
        mp += MoveComponent("sdh", "cylclosed")

        # check grasp
        # mp += CheckService('/sdh_controller/is_cylindric_grasped', Trigger, lambda res: res.success.data)

        # attach object
        mp += AttachObject("arm", "grasp_object")

        # enable collision
        mp += EnableCollision("grasp_object", "table")

        # lift motion
        mp += MoveArmUnplanned("arm", [lift_pose, ["sdh_grasp_link"]])

        # disable collison
        mp += ResetCollisions()

        # move away
        mp += MoveArm("arm", [pregrasp_pose, ["sdh_grasp_link"]])

        # goto hold
        mp += MoveArm("arm", "hold")

        userdata.graspdata = graspdata

        if not mp.plan(5).success:
            sss.set_light("green")
            return "not_grasped"

        sss.set_light("yellow")
        sss.say(["I am grasping " + userdata.object.label + " now."], False)
        # run, handle errors
        i = 0
        for ex in mp.execute():
            if not ex.wait(80.0).success:
                sss.set_light("red")
                return "failed"
            i += 1
        sss.set_light("green")
        return "grasped"
Example #58
    def query(self, req, disable_target_object_collision = False):
        # Get the best object to manipulate, just like smart move, but without the actual movement
        # This will check robot collision and reachability on all possible object grasp position based on its symmetry.
        # Then, it will returns one of the best symmetry to work with for grasp and release.
        # it will be put on parameter server

        # Find possible poses
        res = self.get_waypoints_srv.get_waypoints(
                req.obj_class, # object class to move to
                req.predicates, # predicates to match
                [req.pose], # offset/transform from each member of the class
                [req.name], # placeholder name
                req.constraints,
                )

        if res is None:
            # no poses found that meet the query!
            rospy.logerr("No waypoints found for query")
            return []

        (poses,names,objects) = res
        if poses is None:
            rospy.logerr("No poses found for query")
            return []

        selected_objs, selected_names = [], []
        dists = []
        Ts = []
        if self.q0 is None:
            rospy.logerr("Robot state has not yet been received!")
            return "FAILURE -- robot state not yet received!"
        T_fwd = pm.fromMatrix(self.kdl_kin.forward(self.q0))

        number_of_valid_query_poses, number_of_invalid_query_poses = 0, 0
        for (pose,name,obj) in zip(poses,names,objects):

            # figure out which tf frame we care about
            tf_path = name.split('/')
            tf_frame = ''
            for part in tf_path:
                if len(part) > 0:
                    tf_frame = part
                    break

            if len(tf_frame) == 0:
                continue

            # try to move to the pose until one succeeds
            T_base_world = pm.fromTf(self.listener.lookupTransform(self.world,self.base_link,rospy.Time(0)))
            T = T_base_world.Inverse()*pm.fromMsg(pose)

            # Ignore anything below the table: we don't need to even bother
            # checking that position.
            if self.table_pose is not None:
                if T.p[2] < self.table_pose[0][2]:
                    rospy.logwarn("[QUERY] Ignoring due to relative z: %f < %f x=%f y=%f %s"%(T.p[2],self.table_pose[0][2],T.p[0],T.p[1], obj))
                    continue
                dist_from_table = (T.p - pm.Vector(*self.table_pose[0])).Norm()
                if dist_from_table > self.max_dist_from_table:
                    rospy.logwarn("[QUERY] Ignoring due to table distance: %f > %f"%(
                        dist_from_table,
                        self.max_dist_from_table))
                    continue

            # Get metrics for the best distance to a matching object
            valid_pose, best_dist, best_invalid, message_print, message_print_invalid, best_q = self.get_best_distance(T,T_fwd,self.q0, check_closest_only = False, obj_name = obj)

            if best_q is None or len(best_q) == 0:
                rospy.logwarn("[QUERY] DID NOT ADD:"+message_print)
                continue
            elif valid_pose:
                rospy.loginfo('[QUERY] %s'%message_print)
                Ts.append(T)
                selected_objs.append(obj)
                selected_names.append(name)
                dists.append(best_dist)
                number_of_valid_query_poses += 1
            else:
                rospy.loginfo('[QUERY] %s'%message_print_invalid)
                Ts.append(T)
                selected_objs.append(obj)
                selected_names.append(name)
                dists.append(best_invalid + self.state_validity_penalty)
                number_of_invalid_query_poses += 1


        if len(Ts) != len(dists):
            raise RuntimeError('You miscounted the number of transforms somehow.')
        if len(Ts) != len(selected_objs):
            raise RuntimeError('You miscounted the number of transforms vs. objects somehow.')
        if len(Ts) != len(selected_names):
            raise RuntimeError('You miscounted the number of transforms vs. names somehow.')

        if len(Ts) == 0:
            possible_goals = []
        else:
            possible_goals = [(d,T,o,n) for (d,T,o,n) in zip(dists,Ts,selected_objs,selected_names) if T is not None]
            possible_goals.sort()


        joint = JointState()
        # joint.position = self.ik(T,self.q0)
        rospy.loginfo("[QUERY] There are %i valid poses and %i invalid poses" %
                (number_of_valid_query_poses, number_of_invalid_query_poses))

        return possible_goals
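Because `possible_goals` is sorted by distance, with invalid poses pushed back by `state_validity_penalty`, a caller can simply take the head of the list. A hypothetical consumer (`planner` and `req` are placeholders):

goals = planner.query(req)
if isinstance(goals, str) or len(goals) == 0:
    rospy.logerr("Query failed: %s" % str(goals))   # a string means early failure
else:
    dist, T, obj, name = goals[0]
    rospy.loginfo("Best goal %s on %s at distance %.3f" % (name, obj, dist))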
Example #59
    def smartmove_multipurpose_gripper(self,
            stamp,
            possible_goals,
            distance,
            gripper_function,
            velocity,
            acceleration,
            backup_in_gripper_frame):
        '''
        Basic function that handles collecting and aggregating smartmoves
        '''

        list_of_valid_sequence = list()
        list_of_invalid_sequence = list()
        self.backoff_waypoints = []

        if self.q0 is None:
            return "FAILURE -- Initial joint position is None"

        T_fwd = pm.fromMatrix(self.kdl_kin.forward(self.q0))
        for (dist,T,obj,name) in possible_goals:
            if not self.valid_verify(stamp):
                rospy.logwarn('Stopping action because robot has been preempted by another process.')
                return "FAILURE -- Robot has been preempted by another process"

            rospy.loginfo("check: " + str(dist) + " " + str(name))

            if backup_in_gripper_frame:
                backup_waypoint = kdl.Frame(kdl.Vector(-distance,0.,0.))
                backup_waypoint = T * backup_waypoint
            else:
                backup_waypoint = kdl.Frame(kdl.Vector(0.,0.,distance))
                backup_waypoint = backup_waypoint * T

            self.backoff_waypoints.append(("%s/%s_backoff/%f"%(obj,name,dist),backup_waypoint))
            self.backoff_waypoints.append(("%s/%s_grasp/%f"%(obj,name,dist),T))

            query_backup_message = ''
            if dist < self.state_validity_penalty:
                query_backup_message = "backup pose for valid query (dist = %.3f)"%dist
            else:
                query_backup_message = "backup pose for invalid query (dist = %.3f)"%(dist - self.state_validity_penalty)

            valid_pose, best_backup_dist, best_invalid, message_print, message_print_invalid,best_q = self.get_best_distance(backup_waypoint,T_fwd,self.q0, check_closest_only = False,  obj_name = obj)
            if best_q is None or len(best_q) == 0:
                rospy.loginfo('Skipping %s: %s'%(query_backup_message,message_print_invalid))
                continue

            if valid_pose and dist < self.state_validity_penalty:
                list_of_valid_sequence.append((backup_waypoint,T,obj,best_backup_dist,dist,name))
                rospy.loginfo('Valid sequence: %s: %s'%(query_backup_message, message_print))
            else:
                if valid_pose:
                    rospy.loginfo('Invalid sequence: %s: %s'%(query_backup_message, message_print))
                    list_of_invalid_sequence.append((backup_waypoint,T,obj,best_backup_dist,dist,name))
                else:
                    rospy.loginfo('Invalid sequence: %s: %s'%(query_backup_message, message_print_invalid))
                    list_of_invalid_sequence.append((backup_waypoint,T,obj,best_invalid,dist,name))

        sequence_to_execute = list()
        if len(list_of_valid_sequence) > 0:
            sequence_to_execute = list_of_valid_sequence + list_of_invalid_sequence[:5]
        else:
            rospy.logwarn("WARNING -- no sequential valid grasp action found")
            sequence_to_execute = list_of_invalid_sequence[:5]

        rospy.loginfo('There are %i valid sequences and %i invalid sequences to try'%(len(list_of_valid_sequence),len(list_of_invalid_sequence)))
        # print 'Number of sequence to execute:', len(sequence_to_execute)
        msg = None
        for sequence_number, (backup_waypoint,T,obj,backup_dist,query_dist,name) in enumerate(sequence_to_execute,1):
            rospy.loginfo(str(sequence_number) + " moving to " + str(name))
            if not self.valid_verify(stamp):
                rospy.logwarn('Stopping action because robot has been preempted by another process.')
                return "FAILURE -- Robot has been preempted by another process"

            rospy.loginfo("Trying sequence number %i: backup_dist: %.3f query_dist: %.3f"%(sequence_number,backup_dist,query_dist))
            # plan to T
            rospy.sleep(0.01)
            (code,res) = self.planner.getPlan(backup_waypoint,self.q0,obj=None)
            if res is not None and len(res.planned_trajectory.joint_trajectory.points) > 0:
                q_2 = res.planned_trajectory.joint_trajectory.points[-1].positions
                (code2,res2) = self.planner.getPlan(T,q_2,obj=obj)

                if res2 is not None and len(res2.planned_trajectory.joint_trajectory.points) > 0:
                    # Compare the grasp-plan end point with the backoff-plan end
                    # point (res2, not res -- otherwise dist is always zero).
                    q_3 = res2.planned_trajectory.joint_trajectory.points[-1].positions
                    print(q_2,q_3)
                    dist = np.linalg.norm(np.array(q_2)-np.array(q_3))
                    if dist > 2 * backup_dist:
                        rospy.logwarn("Backoff failed for pose %i: distance was %f vs %f"%(sequence_number,dist,2*backup_dist))
                        continue
                    self.info("appoach_%s"%obj, obj)
                    msg = self.send_and_publish_planning_result(res,stamp,acceleration,velocity)
                    rospy.sleep(0.1)

                    if msg[0:7] == 'SUCCESS':
                        self.info("move_to_grasp_%s"%obj, obj)
                        msg = self.send_and_publish_planning_result(res2,stamp,acceleration,velocity)
                        rospy.sleep(0.1)

                        if msg[0:7] == 'SUCCESS':
                            self.info("take_%s"%obj, obj)
                            gripper_function(obj)

                            traj = res2.planned_trajectory.joint_trajectory
                            traj.points.reverse()
                            end_t = traj.points[0].time_from_start
                            for i, pt in enumerate(traj.points):
                                pt.velocities = [-v for v in pt.velocities]
                                pt.accelerations = []
                                pt.effort = []
                                pt.time_from_start = end_t - pt.time_from_start
                            traj.points[0].positions = self.q0
                            traj.points[-1].velocities = [0.]*len(self.q0)

                            self.info("backoff_from_%s"%obj, obj)
                            msg = self.send_and_publish_planning_result(res2,stamp,acceleration,velocity)
                            return msg
                        else:
                            rospy.logwarn("Fail to move to grasp pose in sequence %i" % sequence_number)
                    else:
                        rospy.logwarn("Fail to move to backup pose in sequence %i" % sequence_number)
                else:
                    rospy.logwarn("Plan to pose in sequence %i does not work" % sequence_number)
            else:
                rospy.logwarn("Plan Backoff pose in sequence %i does not work" % sequence_number)
        return "FAILURE -- No sequential motions work."
Example #60
	def execute(self, userdata, overwrite_presets = dict()):
		presets = dict(self.default_presets)  # copy so the shared defaults are not mutated
		presets.update(overwrite_presets)
		sss.set_light('blue')

		wi = WorldInterface()
		wi.reset_attached_objects()
		graspdata = dict()
		print userdata.object
		print "time difference = ", (rospy.Time.now() - userdata.object.pose.header.stamp).to_sec()


		# add wall
		wall_extent = [3.0,0.1,2.5]
		wall_pose = conversions.create_pose_stamped([  0, -0.99 - wall_extent[1], wall_extent[2]/2.0 ,0,0,0,1], '/base_link') # magic numbers are cool
		wi.add_collision_box(wall_pose, wall_extent, "wall")

		# add floor
		floor_extent = [3.0,3.0,0.1]
		floor_pose = conversions.create_pose_stamped([  0, 0, floor_extent[2]/2.0 ,0,0,0,1], '/base_link') # magic numbers are cool
		wi.add_collision_box(floor_pose, floor_extent, "floor")
		
		#transform into base_link
		grasp_pose = transform_listener.transform_pose_stamped('/base_link', userdata.object.pose, use_most_recent=False)
		print grasp_pose

		# add object bounding box
		obj_pose = deepcopy(userdata.object.pose)
		lwh = userdata.object.bounding_box_lwh
		m1 = pm.toMatrix( pm.fromMsg(obj_pose.pose) )
		m2 = pm.toMatrix( pm.fromTf( ((0,0, lwh.z/2.0),(0,0,0,1)) ) )
		obj_pose.pose = pm.toMsg( pm.fromMatrix(numpy.dot(m1,m2)) )
		wi.add_collision_box(obj_pose,(lwh.x*2.0,lwh.y*2.0,lwh.z) , "grasp_object")

		# compute minimal height of object bounding box edges in base_link
		min_z = grasp_pose.pose.position.z
		for k in [(0.5,0.5,1.0),(0.5,0.5,0.0),(0.5,-0.5,1.0),(0.5,-0.5,0.0),(-0.5,0.5,1.0),(-0.5,0.5,0.0),(-0.5,-0.5,1.0),(-0.5,-0.5,0.0)]:		
			m1 = pm.toMatrix( pm.fromMsg(grasp_pose.pose) ) # BFromO
			m2 = pm.toMatrix( pm.fromTf( ((k[0]*lwh.x,k[1]*lwh.y, k[2]*lwh.z),(0,0,0,1)) ) ) # inO
			min_z = min(min_z,pm.toMsg( pm.fromMatrix(numpy.dot(m1,m2))).position.z) #min_z inB

		# add table
		table_extent = (2.0, 2.0, min_z)
		# base_link
		# x - points towards front of robot
		# y - follow right hand rule
		# z - points upwards
		table_pose = conversions.create_pose_stamped([ -0.5 - table_extent[0]/2.0, 0 ,table_extent[2]/2.0 ,0,0,0,1], '/base_link') # center of table, 0.5 meter behind the robot
		wi.add_collision_box(table_pose, table_extent, "table")

		# calculate grasp and lift pose
		grasp_pose.pose.position.x += presets['grasp_offset'][0]
		grasp_pose.pose.position.y += presets['grasp_offset'][1]
		grasp_pose.pose.position.z += presets['grasp_offset'][2]
		grasp_pose.pose.orientation = Quaternion(*quaternion_from_euler(*presets['fixed_orientation_euler'])) # orientation of sdh_grasp_link in base_link for 'grasp' joint goal
		
		graspdata['height'] =  grasp_pose.pose.position.z - table_extent[2]
		
		pregrasp_pose = deepcopy(grasp_pose)
		pregrasp_pose.pose.position.x += presets['pregrasp_offset'][0]
		pregrasp_pose.pose.position.y += presets['pregrasp_offset'][1]
		pregrasp_pose.pose.position.z += presets['pregrasp_offset'][2]

		lift_pose = deepcopy(grasp_pose)
		lift_pose.pose.position.x += presets['lift_offset'][0]
		lift_pose.pose.position.y += presets['lift_offset'][1]
		lift_pose.pose.position.z += presets['lift_offset'][2]
		
		mp = MotionPlan()
		# open hand
		mp += CallFunction(sss.move, 'sdh','cylopen', False)
		mp += MoveArm('arm',[pregrasp_pose,[presets['tcp_link']]], seed = presets['pregrasp_seed'])
		mp += MoveComponent('sdh','cylopen', True)

		# allow collison hand/object
		for l in hand_description.HandDescription('arm').touch_links:
		    mp += EnableCollision("grasp_object", l)
		
		# goto grasp
		mp += MoveArm('arm', [grasp_pose,[presets['tcp_link']]]) # Move sdh_grasp_link to grasp_pose
		
		# close hand
		mp += MoveComponent('sdh','cylclosed')
		
		# check grasp
		#mp += CheckService('/sdh_controller/is_cylindric_grasped', Trigger, lambda res: res.success.data)
		
		# disable collison
		mp += ResetCollisions()

		# attach object
		mp += AttachObject('arm', "grasp_object")
		
		# enable collision
		mp += EnableCollision("grasp_object", "table")
		
		# lift motion
		mp += MoveArm('arm', [lift_pose,[presets['tcp_link']]]) # Move sdh_grasp_link to lift_pose

		# disable collison
		mp += ResetCollisions()

		# move away
		#mp += MoveArm('arm', [pregrasp_pose,[presets['tcp_link']]])
		
		# goto hold
		mp += MoveArm('arm', 'hold')
		
		userdata.graspdata = graspdata

		if not mp.plan(5).success:
			sss.set_light('green')
			return "not_grasped"
		
		sss.set_light('yellow')
		sss.say(["I am grasping " + userdata.object.label + " now."],False)
		# run, handle errors
		i = 0
		for ex in mp.execute():
			if not ex.wait(80.0).success:
				sss.set_light('red')
				return 'failed'
			i+=1
		sss.set_light('green')
		return 'grasped'