Code Example #1
def get_target_3d(d):
    gp = geometry_msgs.msg.Pose()
    gp.position.x = -d[0]
    gp.position.y = -d[1]
    gp.position.z = d[2]
    gp.orientation.w = 1
    q = tft.quaternion_from_euler(d[3], d[4], d[5])
    gp.orientation.x = q[0]
    gp.orientation.y = q[1]
    gp.orientation.z = q[2]
    gp.orientation.w = q[3]
    # Convert to base frame, add the angle in (ensures planar grasp, camera isn't guaranteed to be perpendicular).
    gp_base = convert_pose(gp, 'camera_link', 'j2n6s300_link_base')
    if gp_base is None:
        av = np.array([0, 0, 0, 0, 0, 0, 0])
        return av
    gpbo = gp_base.orientation
    # e = tft.euler_from_quaternion([gpbo.x, gpbo.y, gpbo.z, gpbo.w])
    # Only really care about rotation about x (e[0]). update is mean function
    # av = pose_averager.update(np.array([gp_base.position.x, gp_base.position.y, gp_base.position.z, e[0],e[1],e[2]]))
    av = np.array([
        gp_base.position.x, gp_base.position.y, gp_base.position.z, gpbo.x,
        gpbo.y, gpbo.z, gpbo.w
    ])
    return av
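Most of these examples call a convert_pose(pose, from_frame, to_frame) helper (sometimes as t.convert_pose) that is not included in any of the listings. The sketch below is a minimal guess at what it might look like on top of tf2_ros; the module-level buffer/listener and the exception handling are assumptions, not the original implementation, and it presumes a ROS node has already been initialised. It returns None on failure, which is what Code Example #1 checks for.

import rospy
import tf2_ros
import tf2_geometry_msgs
import geometry_msgs.msg

_tf_buffer = tf2_ros.Buffer()
_tf_listener = tf2_ros.TransformListener(_tf_buffer)


def convert_pose(pose, from_frame, to_frame):
    # Transform a geometry_msgs/Pose expressed in from_frame into to_frame.
    try:
        transform = _tf_buffer.lookup_transform(to_frame, from_frame,
                                                rospy.Time(0),
                                                rospy.Duration(1.0))
    except (tf2_ros.LookupException, tf2_ros.ConnectivityException,
            tf2_ros.ExtrapolationException):
        return None
    stamped = geometry_msgs.msg.PoseStamped()
    stamped.header.frame_id = from_frame
    stamped.pose = pose
    return tf2_geometry_msgs.do_transform_pose(stamped, transform).pose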
Code Example #2
    def execute(self):
        # Get the bottle pose from the camera.
        rospy.loginfo('Moving to view')
        m.move_to_named_pose('whole_arm', 'view_checkerboard')

        rospy.sleep(4)

        poses = []
        qt = tf.transformations.quaternion_from_euler(0.0, 0.0,
                                                      -3.141592653 / 2)

        for i in range(16):
            rospy.loginfo('GET OBJECT POSE')
            b_req = cartesian_calibration.srv.GetACoorRequest()
            b_req.data_set = "measured"
            b_req.object_idx = i
            b_res = self.pose_service.call(b_req)
            p = b_res.object_pose.pose
            # p.orientation.x = qt[0]
            # p.orientation.y = qt[1]
            # p.orientation.z = qt[2]
            # p.orientation.w = qt[3]

            object_global = t.convert_pose(p, self.camera_reference_frame,
                                           'global_xyz_link')
            # object_global.position.z = min(0.80,p.position.z)
            poses.append(object_global)

        m.move_to_named_pose('realsense', 'tool_change_position')
        m.move_to_named_pose('wrist_only', 'sucker')

        for i, p in enumerate(poses):

            if p.position.x == 0 and p.position.y == 0 and p.position.z == 0:
                rospy.logerr('Failed to find an object. %s' % i)
                # return 'failed'
                continue

            print("MOVE SUCKER TO ABOVE OBJECT")
            p = t.align_pose_orientation_to_frame(p, 'global_xyz_link',
                                                  'sucker_endpoint')

            t.publish_pose_as_transform(p, 'global_xyz_link', 'MOVE HERE', 0.5)
            res = m.move_to_global(p.position.x - 0.02,
                                   p.position.y + 0.01,
                                   p.position.z - 0.02,
                                   'sucker',
                                   orientation=p.orientation)

            rospy.sleep(4)
Code Example #3
def move_pos():
    g_pose1 = geometry_msgs.msg.Pose()
    g_pose1.orientation.w = 1
    end_effector = convert_pose(g_pose1, 'j2n6s300_end_effector',
                                'j2n6s300_link_base')
    end_effector_list = [
        end_effector.orientation.x, end_effector.orientation.y,
        end_effector.orientation.z, end_effector.orientation.w
    ]

    q = tft.quaternion_from_euler(
        np.pi, np.pi / 2, np.pi / 2
    )  # Rotate about j2n6s300_base_link's fixed x/y/z axes to the upright pose: np.pi, 0, np.pi/2 points left; np.pi/2, 0, np.pi/2 points forward; np.pi, np.pi/2, np.pi/2 points down.
    q1 = tft.quaternion_from_euler(np.pi / 2, 0, 0)  # Stand the fingers upright.
    q = tft.quaternion_multiply(q1, q)
    end_effector_inverse = tft.quaternion_inverse(end_effector_list)
    pgo = tft.quaternion_multiply(q, end_effector_inverse)

    q1 = [pgo[0], pgo[1], pgo[2], pgo[3]]
    # q1 = [pgo.x, pgo.y, pgo.z, pgo.w]
    e = tft.euler_from_quaternion(q1)
    dr = 1 * e[0]
    dp = 1 * e[1]
    dyaw = 1 * e[2]

    vx = max(min(0 * 2.5, MAX_VELO_X), -1.0 * MAX_VELO_X)
    vy = max(min(0 * 2.5, MAX_VELO_Y), -1.0 * MAX_VELO_Y)
    vz = max(min(0, MAX_VELO_Z), -1.0 * MAX_VELO_Z)

    # Apply a nonlinearity to the velocity
    v = np.array([vx, vy, vz])
    vc = np.dot(v, VELO_COV)

    CURRENT_VELOCITY[0] = vc[0]
    CURRENT_VELOCITY[1] = vc[1]
    CURRENT_VELOCITY[2] = vc[2]

    CURRENT_VELOCITY[3] = 1 * dr  # x: end-effector self-rotation
    CURRENT_VELOCITY[4] = 1 * dp  # y: up/down rotation
    CURRENT_VELOCITY[5] = 1 * dyaw  # max(min(1 * dyaw, MAX_ROTATION), -1 * MAX_ROTATION)  # z: left/right rotation
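The orientation servoing in move_pos (and in command_callback below) boils down to one identity: the rotation that carries the current end-effector orientation onto the target is q_err = q_target * inverse(q_current), and its roll/pitch/yaw are used directly as angular-velocity commands. A standalone sketch of just that step, with made-up quaternion values purely for illustration:

import numpy as np
import tf.transformations as tft

# Hypothetical current and desired orientations (x, y, z, w order).
q_current = tft.quaternion_from_euler(0.0, 0.1, 0.2)
q_target = tft.quaternion_multiply(
    tft.quaternion_from_euler(np.pi / 2, 0, 0),
    tft.quaternion_from_euler(np.pi, np.pi / 2, np.pi / 2))

# Rotation offset that would drive the end effector onto the target.
q_err = tft.quaternion_multiply(q_target, tft.quaternion_inverse(q_current))
droll, dpitch, dyaw = tft.euler_from_quaternion(q_err)

# In move_pos these three values are written into CURRENT_VELOCITY[3:6].
print(droll, dpitch, dyaw)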
Code Example #4
def command_callback(target_pose, target_quaternion):
    global SERVO
    global CURR_Z, MIN_Z
    global CURR_DEPTH
    global pose_averager
    global GOAL_Z
    global GRIP_WIDTH_MM
    global VELO_COV

    #CURR_DEPTH = msg.data[5]

    if SERVO:

        # d = list(msg.data)

        # PBVS Method.

        #if d[2] > 0.150:  # Min effective range of the realsense.

        # Convert width in pixels to mm.
        # 0.07 is distance from end effector (CURR_Z) to camera.
        # 0.1 is approx degrees per pixel for the realsense.
        # if d[2] > 0.25:
        #     GRIP_WIDTH_PX = msg.data[4]
        #     GRIP_WIDTH_MM = 2 * ((CURR_Z + 0.07)) * np.tan(0.1 * GRIP_WIDTH_PX / 2.0 / 180.0 * np.pi) * 1000

        # Construct the Pose in the frame of the camera.
        # gp = geometry_msgs.msg.Pose()
        # gp.position.x = d[0]
        # gp.position.y = d[1]
        # gp.position.z = d[2]
        # q = tft.quaternion_from_euler(0, 0, -1 * d[3])
        # gp.orientation.x = q[0]
        # gp.orientation.y = q[1]
        # gp.orientation.z = q[2]
        # gp.orientation.w = q[3]

        # Calculate Pose of Grasp in Robot Base Link Frame
        # Average over a few predicted poses to help combat noise.
        # x: 0.645162225459
        # y: -0.318927784776
        # z: 0.694286052858
        # w: -0.00420092196819

        # x: 0.81874143089
        # y: -0.506091943919
        # z: 0.374129838746

        # gp_base = convert_pose(gp, 'camera_link', 'j2n6s300_link_base')
        # print(gp_base.orientation)
        # print(gp_base.position)
        #gpbo = gp_base.orientation
        e = tft.euler_from_quaternion(target_quaternion)
        # Only really care about rotation about x (e[0]). update is mean function
        av = pose_averager.update(np.array(target_pose + [e[0], e[1], e[2]]))

        # else:
        #     gp_base = geometry_msgs.msg.Pose()
        #     av = pose_averager.evaluate()

        # Compute the end-effector orientation relative to j2n6s300_link_base (quaternion and Euler angles).
        g_pose1 = geometry_msgs.msg.Pose()
        g_pose1.orientation.w = 1
        try:
            end_effector = convert_pose(g_pose1, 'j2n6s300_end_effector',
                                        'j2n6s300_link_base')
            end_effector_list = [
                end_effector.orientation.x, end_effector.orientation.y,
                end_effector.orientation.z, end_effector.orientation.w
            ]
        except Exception as ex:
            template = "An exception of type {0} occurred. Arguments:{1!r}"
            message = template.format(type(ex).__name__, ex.args)
            print(message)
            return
        e1 = tft.euler_from_quaternion(end_effector_list)

        # Average pick pose in base frame.
        gp_base = geometry_msgs.msg.Pose()
        gp_base.position.x = av[0]
        gp_base.position.y = av[1]
        gp_base.position.z = av[2]
        GOAL_Z = av[2]
        ang = av[3] - np.pi / 2  # We don't want to align, we want to grip.
        q = tft.quaternion_from_euler(
            np.pi, 0, np.pi / 2
        )  # Rotate about j2n6s300_base_link's fixed x/y/z axes to the upright pose: np.pi, 0, np.pi/2 points left; np.pi/2, 0, np.pi/2 points forward; np.pi, np.pi/2, np.pi/2 points down.
        q1 = tft.quaternion_from_euler(np.pi / 2, 0, 0)  # Stand the fingers upright.
        q = tft.quaternion_multiply(q1, q)
        gp_base.orientation.x = q[0]
        gp_base.orientation.y = q[1]
        gp_base.orientation.z = q[2]
        gp_base.orientation.w = q[3]

        # Get the Position of the End Effector in the frame of the Robot base Link
        g_pose = geometry_msgs.msg.Pose()
        g_pose.position.z = 0.03  # Offset from the end_effector frame to the actual position of the fingers.
        g_pose.orientation.w = 1
        p_gripper = geometry_msgs.msg.Pose()
        try:
            p_gripper = convert_pose(g_pose, 'j2n6s300_end_effector',
                                     'j2n6s300_link_base')
        except Exception as ex:
            template = "An exception of type {0} occurred. Arguments:{1!r}"
            message = template.format(type(ex).__name__, ex.args)
            print(message)
            return
        publish_pose_as_transform(gp_base, 'j2n6s300_link_base', 'G', 0.0)

        # Calculate Position Error. pick pose - finger pose in base_frame
        dx = (gp_base.position.x - p_gripper.position.x)
        dy = (gp_base.position.y - p_gripper.position.y)
        dz = (gp_base.position.z - p_gripper.position.z)

        # Orientation velocity control is done in the frame of the gripper,
        #  so figure out the rotation offset in the end effector frame.
        end_effector_inverse = tft.quaternion_inverse(end_effector_list)
        pgo = tft.quaternion_multiply(q, end_effector_inverse)

        q1 = [pgo[0], pgo[1], pgo[2], pgo[3]]
        # q1 = [pgo.x, pgo.y, pgo.z, pgo.w]
        e = tft.euler_from_quaternion(q1)
        dr = 1 * e[0]
        dp = 1 * e[1]
        dyaw = 1 * e[2]

        vx = max(min(dx * 2.5, MAX_VELO_X), -1.0 * MAX_VELO_X)
        vy = max(min(dy * 2.5, MAX_VELO_Y), -1.0 * MAX_VELO_Y)
        vz = max(min(dz - 0.04, MAX_VELO_Z), -1.0 * MAX_VELO_Z)

        # Apply a nonlinearity to the velocity
        v = np.array([vx, vy, vz])
        vc = np.dot(v, VELO_COV)

        CURRENT_VELOCITY[0] = vc[0]
        CURRENT_VELOCITY[1] = vc[1]
        CURRENT_VELOCITY[2] = vc[2]

        CURRENT_VELOCITY[3] = 1 * dr  # x: end-effector self-rotation
        CURRENT_VELOCITY[4] = 1 * dp  # y: up/down rotation
        CURRENT_VELOCITY[5] = 1 * dyaw  # max(min(1 * dyaw, MAX_ROTATION), -1 * MAX_ROTATION)  # z: left/right rotation
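pose_averager is never defined in these snippets; from the way update() and evaluate() are used it behaves like a running mean over the last few predicted grasp poses. Below is a minimal sliding-window averager consistent with that usage; the class name, constructor and window size are assumptions.

import numpy as np


class Averager:
    # Running mean over the last `window` samples of a fixed-length vector.
    def __init__(self, length, window):
        self.buffer = np.zeros((window, length))
        self.count = 0

    def update(self, sample):
        # Drop the oldest sample, append the newest, return the current mean.
        self.buffer = np.roll(self.buffer, -1, axis=0)
        self.buffer[-1] = sample
        self.count = min(self.count + 1, self.buffer.shape[0])
        return self.evaluate()

    def evaluate(self):
        if self.count == 0:
            return np.zeros(self.buffer.shape[1])
        return self.buffer[-self.count:].mean(axis=0)


# e.g. pose_averager = Averager(6, 3) for a [x, y, z, roll, pitch, yaw] vector.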
Code Example #5
def execute_grasp(cont, N, p):

    width = p[6*N+4]
    pred=[[0.52,-0.346, 0.6, 0, 0, -math.pi/4],
    [0.52, -0.346, 0.37, 0, 0, -math.pi/4],
    [0.52,-0.346, 0.14,0,0,-math.pi/4],
    [0.565,-0.31, 0.6, 0,0,-math.pi/4],
    [0.565,-0.31, 0.37,0,0, -math.pi/4],
    [0.565,-0.31, 0.14,0,0,-math.pi/4],
    [0.52, -0.346, 0.39, 0, 0, -math.pi/4],
    [0.52, -0.346, 0.39, 0, 0, -math.pi/4],
    [0.52, -0.346, 0.39, 0, 0, -math.pi/4],
    [0.565,-0.31, 0.62, 0,0,-math.pi/4],
    [0.565,-0.31, 0.39,0,0, -math.pi/4],
    [0.565,-0.31, 0.16, 0, 0, -math.pi/4]]
    pred2= [[0.69,-0.516, 0.6,0,0,-math.pi/4],
    [0.69,-0.516, 0.37,0,0,-math.pi/4],
    [0.69,-0.516, 0.14,0,0,-math.pi/4],
    [0.735,-0.48, 0.60,0,0,-math.pi/4],
    [0.735,-0.48, 0.39,0,0,-math.pi/4],
    [0.735,-0.48, 0.16,0,0,-math.pi/4],
    [0.69,-0.516, 0.39,0,0,-math.pi/4],
    [0.69,-0.516, 0.39,0,0,-math.pi/4],
    [0.69,-0.516, 0.39,0,0,-math.pi/4],
    [0.735,-0.48, 0.62,0,0,-math.pi/4],
    [0.735,-0.48, 0.39,0,0,-math.pi/4],
    [0.735,-0.48, 0.16,0,0,-math.pi/4]]

    mov = p[6*N+5]


    punto =to_pose(p[6*N+0],p[6*N+1],p[6*N+2],0,0,0,1)
    punto1 = geometry_msgs.msg.Pose()
    punto2 = geometry_msgs.msg.Pose()
    punto1 = convert_pose(punto,"cam","world")
    eu =  [0, 1.5, p[6*N+3]]
    q = tft.quaternion_from_euler(0, 1.5, p[6*N+3]*math.pi/180)
    lista=PoseRPYarray()
    puntorpy=PoseRPY()
    puntorpy.position.x= punto1.position.x
    puntorpy.position.y=punto1.position.y
    puntorpy.position.z = Z_OFF
    puntorpy.rpy.roll = 0
    puntorpy.rpy.pitch = 1.57
    puntorpy.rpy.yaw = p[6*N+3]
    print(puntorpy)
    lista.poses.append(copy.deepcopy(puntorpy))
    # Send the arm above the object.

    puntorpy.position.z= 0.02
    #puntorpy.position.z= 0.26
    lista.poses.append(copy.deepcopy(puntorpy))

    pub = rospy.Publisher('robot_commander/cmd_path', PoseRPYarray, queue_size=10)
    rate = rospy.Rate(1)  # 1 Hz
    rate.sleep()
    pub.publish(lista)

    msg=rospy.wait_for_message('/robot_commander/path_done',std_msgs.msg.String)
    print('closing gripper')
    set_gripper_position(width)
    rospy.sleep(0.2)
    set_gripper_position(width)
    print('gripper closed')
    rospy.sleep(1)


    lista=[]
    lista=PoseRPYarray()
    print(lista)

    # Send back to the predefined position above the object.
    puntorpy.position.z= 0.35
    lista.poses.append(copy.deepcopy(puntorpy))

    # puntorpy.rpy.pitch=0
    # lista.poses.append(copy.deepcopy(puntorpy))

    puntorpy.position.x= pred[cont][0]
    puntorpy.position.y= pred[cont][1]
    puntorpy.position.z= pred[cont][2]
    puntorpy.rpy.roll = pred[cont][3]
    puntorpy.rpy.pitch = pred[cont][4]
    puntorpy.rpy.yaw = pred[cont][5]
    lista.poses.append(copy.deepcopy(puntorpy))



    puntorpy.position.x= pred2[cont][0]
    puntorpy.position.y= pred2[cont][1]
    puntorpy.position.z= pred2[cont][2]
    puntorpy.rpy.roll = pred2[cont][3]
    puntorpy.rpy.pitch = pred2[cont][4]
    puntorpy.rpy.yaw = pred2[cont][5]
    lista.poses.append(copy.deepcopy(puntorpy))

    pub = rospy.Publisher('robot_commander/cmd_path', PoseRPYarray, queue_size=10)
    rate = rospy.Rate(1)  # 1 Hz
    rate.sleep()
    pub.publish(lista)

    msg=rospy.wait_for_message('/robot_commander/path_done',std_msgs.msg.String)
    print('opening gripper')
    set_gripper_position(width+45)
    print('gripper opened')
    rospy.sleep(1)

    lista=[]
    lista=PoseRPYarray()
    print(lista)



    puntorpy.position.x= pred[cont][0]
    puntorpy.position.y= pred[cont][1]
    puntorpy.position.z= pred[cont][2]
    puntorpy.rpy.roll = pred[cont][3]
    puntorpy.rpy.pitch = pred[cont][4]
    puntorpy.rpy.yaw = pred[cont][5]
    lista.poses.append(copy.deepcopy(puntorpy))

    # Send to the predefined position in front of the shelf.
    print(lista)
    pub = rospy.Publisher('robot_commander/cmd_path', PoseRPYarray, queue_size=10)
    rate = rospy.Rate(1)  # 1 Hz
    rate.sleep()
    pub.publish(lista)

    msg=rospy.wait_for_message('/robot_commander/path_done',std_msgs.msg.String)

    pubJo.publish(home)

    msg=rospy.wait_for_message('/robot_commander/joint_done',std_msgs.msg.String)

    print('finished motion')
    #joint_commander.move_to_jointspose(home)

    flag=True

    return flag
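Code Examples #5 to #7 also depend on a to_pose(x, y, z, qx, qy, qz, qw) helper and on PoseRPY/PoseRPYarray messages that are not shown. The message definitions cannot be reconstructed from here, but to_pose presumably just packs its arguments into a geometry_msgs Pose; a sketch under that assumption:

import geometry_msgs.msg


def to_pose(x, y, z, qx, qy, qz, qw):
    # Pack position and quaternion components into a geometry_msgs/Pose.
    pose = geometry_msgs.msg.Pose()
    pose.position.x = x
    pose.position.y = y
    pose.position.z = z
    pose.orientation.x = qx
    pose.orientation.y = qy
    pose.orientation.z = qz
    pose.orientation.w = qw
    return pose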
Code Example #6
def push_object(cont, N, p):

    width = p[6*N+4]

    mov = p[6*N+5]


    punto =to_pose(p[6*N+0],p[6*N+1],p[6*N+2],0,0,0,1)
    punto1 = geometry_msgs.msg.Pose()
    punto2 = geometry_msgs.msg.Pose()
    punto1 = convert_pose(punto,"cam","world")
    eu =  [0, 1.5, p[6*N+3]]
    q = tft.quaternion_from_euler(0, 1.5, p[6*N+3]*math.pi/180)
    lista=PoseRPYarray()
    puntorpy=PoseRPY()
    puntorpy.position.x= punto1.position.x
    puntorpy.position.y=punto1.position.y
    puntorpy.position.z = Z_OFF
    puntorpy.rpy.roll = 0
    puntorpy.rpy.pitch = 1.57
    puntorpy.rpy.yaw = p[6*N+3]

    lista.poses.append(copy.deepcopy(puntorpy))
    # Send the arm above the object.
    set_gripper_position(0)
    puntorpy.position.z= punto1.position.z
    lista.poses.append(copy.deepcopy(puntorpy))

    # Push the object along the direction given by mov.
    if mov ==  3:
        puntorpy.position.x= puntorpy.position.x-0.1
    elif mov == 4:
        puntorpy.position.x= puntorpy.position.x+0.1
    elif mov == 1:
        puntorpy.position.y= puntorpy.position.y-0.1
    elif mov == 2:
        puntorpy.position.y= puntorpy.position.y+0.1


    lista.poses.append(copy.deepcopy(puntorpy))
    puntorpy.position.z= 0.35

    # puntorpy.rpy.pitch=0
    lista.poses.append(copy.deepcopy(puntorpy))

    # Send to the predefined position in front of the shelf.
    print(lista)
    pub = rospy.Publisher('robot_commander/cmd_path', PoseRPYarray, queue_size=10)
    rate = rospy.Rate(1)  # 1 Hz
    rate.sleep()
    pub.publish(lista)

    #msg=rospy.wait_for_message('/robot_commander/path_done',std_msgs.msg.String)

    pubJo.publish(home)

    msg=rospy.wait_for_message('/robot_commander/joint_done',std_msgs.msg.String)

    print('finished motion')
    #joint_commander.move_to_jointspose(home)

    flag=True

    return flag
Code Example #7
               rospy.sleep(1)
               p = list(msg.data)
               punto =to_pose(p[6*N+0],p[6*N+1],p[6*N+2],0,0,0,1)
               # swapped on purpose
               #punto.position.x=y
               #punto.position.y=x
               #punto.position.z=-z
               #print punto
               #print punto
               #x =0.5
               #y=-0.1
               #z=0.6
               w = 1
               punto1 = geometry_msgs.msg.Pose()
               punto2 = geometry_msgs.msg.Pose()
               punto1 = convert_pose(punto,"cam","world")
               print('punto1', punto1)

               print "este es el punto"
               print(p)
               if p[5]!=6:
                   if p[6*N+5]==0.0:
                       flag = execute_grasp(cont, N, p)
                       N=N+1
                       pubJo.publish(home)
                   elif p[6*N+5] != 0.0:
                       push_object(cont, N, p)
               elif p[5]==6:
                    print('No objects or grasping hypotheses found')

           #joint_commander.move_to_jointspose(home)
Code Example #8
def command_callback(msg):
    global SERVO
    global CURR_Z, MIN_Z
    global CURR_DEPTH
    global pose_averager
    global GOAL_Z
    global GRIP_WIDTH_MM
    global VELO_COV

    CURR_DEPTH = msg.data[5]

    if SERVO:

        d = list(msg.data)

        # PBVS Method.

        if d[2] > 0.150:  # Min effective range of the realsense.

            # Convert width in pixels to mm.
            # 0.07 is distance from end effector (CURR_Z) to camera.
            # 0.1 is approx degrees per pixel for the realsense.
            if d[2] > 0.25:
                GRIP_WIDTH_PX = msg.data[4]
                GRIP_WIDTH_MM = 2 * ((CURR_Z + 0.07)) * np.tan(
                    0.1 * GRIP_WIDTH_PX / 2.0 / 180.0 * np.pi) * 1000

            # Construct the Pose in the frame of the camera.
            gp = geometry_msgs.msg.Pose()
            gp.position.x = d[0]
            gp.position.y = d[1]
            gp.position.z = d[2]
            q = tft.quaternion_from_euler(0, 0, -1 * d[3])
            gp.orientation.x = q[0]
            gp.orientation.y = q[1]
            gp.orientation.z = q[2]
            gp.orientation.w = q[3]

            # Calculate Pose of Grasp in Robot Base Link Frame
            # Average over a few predicted poses to help combat noise.
            gp_base = convert_pose(gp, 'camera_depth_frame',
                                   'j2n6s300_link_base')
            gpbo = gp_base.orientation
            e = tft.euler_from_quaternion([gpbo.x, gpbo.y, gpbo.z, gpbo.w])
            # Only really care about rotation about z (e[2]).
            av = pose_averager.update(
                np.array([
                    gp_base.position.x, gp_base.position.y, gp_base.position.z,
                    e[2]
                ]))

        else:
            gp_base = geometry_msgs.msg.Pose()
            av = pose_averager.evaluate()

        # Average pose in base frame.
        gp_base.position.x = av[0]
        gp_base.position.y = av[1]
        gp_base.position.z = av[2]
        GOAL_Z = av[2]
        ang = av[3] - np.pi / 2  # We don't want to align, we want to grip.
        q = tft.quaternion_from_euler(np.pi, 0, ang)
        gp_base.orientation.x = q[0]
        gp_base.orientation.y = q[1]
        gp_base.orientation.z = q[2]
        gp_base.orientation.w = q[3]

        # Get the Position of the End Effector in the frame of the Robot base Link
        g_pose = geometry_msgs.msg.Pose()
        g_pose.position.z = 0.03  # Offset from the end_effector frame to the actual position of the fingers.
        g_pose.orientation.w = 1
        p_gripper = convert_pose(g_pose, 'j2n6s300_end_effector',
                                 'j2n6s300_link_base')

        publish_pose_as_transform(gp_base, 'j2n6s300_link_base', 'G', 0.0)

        # Calculate Position Error.
        dx = (gp_base.position.x - p_gripper.position.x)
        dy = (gp_base.position.y - p_gripper.position.y)
        dz = (gp_base.position.z - p_gripper.position.z)

        # Orientation velocity control is done in the frame of the gripper,
        #  so figure out the rotation offset in the end effector frame.
        gp_gripper = convert_pose(gp_base, 'j2n6s300_link_base',
                                  'j2n6s300_end_effector')
        pgo = gp_gripper.orientation
        q1 = [pgo.x, pgo.y, pgo.z, pgo.w]
        e = tft.euler_from_quaternion(q1)
        dr = 1 * e[0]
        dp = 1 * e[1]
        dyaw = 1 * e[2]

        vx = max(min(dx * 2.5, MAX_VELO_X), -1.0 * MAX_VELO_X)
        vy = max(min(dy * 2.5, MAX_VELO_Y), -1.0 * MAX_VELO_Y)
        vz = max(min(dz - 0.04, MAX_VELO_Z), -1.0 * MAX_VELO_Z)

        # Apply a nonlinearity to the velocity
        v = np.array([vx, vy, vz])
        vc = np.dot(v, VELO_COV)

        CURRENT_VELOCITY[0] = vc[0]
        CURRENT_VELOCITY[1] = vc[1]
        CURRENT_VELOCITY[2] = vc[2]

        CURRENT_VELOCITY[3] = -1 * dp
        CURRENT_VELOCITY[4] = 1 * dr
        CURRENT_VELOCITY[5] = max(min(1 * dyaw, MAX_ROTATION),
                                  -1 * MAX_ROTATION)
        print(CURRENT_VELOCITY)
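publish_pose_as_transform(pose, frame, name, lifetime) is used throughout for visualisation but never listed. A plausible reading is that it broadcasts the pose as a TF child frame for roughly the requested number of seconds; the sketch below assumes exactly that, so the broadcaster setup and timing are guesses rather than the original code.

import rospy
import tf2_ros
import geometry_msgs.msg

_broadcaster = tf2_ros.TransformBroadcaster()


def publish_pose_as_transform(pose, reference_frame, name, seconds=1.0):
    # Broadcast `pose` (expressed in reference_frame) as a child frame called `name`.
    tfs = geometry_msgs.msg.TransformStamped()
    tfs.header.frame_id = reference_frame
    tfs.child_frame_id = name
    tfs.transform.translation.x = pose.position.x
    tfs.transform.translation.y = pose.position.y
    tfs.transform.translation.z = pose.position.z
    tfs.transform.rotation = pose.orientation
    end = rospy.Time.now() + rospy.Duration(seconds)
    while True:
        tfs.header.stamp = rospy.Time.now()
        _broadcaster.sendTransform(tfs)
        if rospy.Time.now() >= end:
            break
        rospy.sleep(0.05)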
Code Example #9
def execute_grasp():
    # Execute a grasp.
    global MOVING
    global CURR_Z
    global start_force_srv
    global stop_force_srv

    # Get the positions.
    msg = rospy.wait_for_message('/ggcnn/out/command',
                                 std_msgs.msg.Float32MultiArray)
    d = list(msg.data)

    # Calculate the gripper width.
    grip_width = d[4]
    # Convert width in pixels to mm.
    # 0.07 is distance from end effector (CURR_Z) to camera.
    # 0.1 is approx degrees per pixel for the realsense.
    #g_width = 2 * ((CURR_Z + 0.07)) * np.tan(0.1 * grip_width / 2.0 / 180.0 * np.pi) * 1000
    # Convert into motor positions.
    #g = min((1 - (min(g_width, 70)/70)) * (6800-4000) + 4000, 5500)

    #set_finger_positions([g, g])

    rospy.sleep(0.5)

    # Pose of the grasp (position only) in the camera frame.
    gp = geometry_msgs.msg.Pose()
    gp.position.x = d[0]
    gp.position.y = d[1]
    gp.position.z = d[2]
    gp.orientation.w = 1

    # Convert to base frame, add the angle in (ensures planar grasp, camera isn't guaranteed to be perpendicular).
    gp_base = convert_pose(gp, 'camera_depth_optical_frame',
                           'm1n6s200_link_base')

    q = tft.quaternion_from_euler(np.pi, 0, d[3])
    gp_base.orientation.x = q[0]
    gp_base.orientation.y = q[1]
    gp_base.orientation.z = q[2]
    gp_base.orientation.w = q[3]

    publish_pose_as_transform(gp_base, 'm1n6s200_link_base', 'G', 0.5)

    # Offset for initial pose.
    initial_offset = 0.20
    gp_base.position.z += initial_offset

    # Disable force control, makes the robot more accurate.
    stop_force_srv.call(kinova_msgs.srv.StopRequest())

    move_to_pose(gp_base)
    rospy.sleep(0.1)

    # Start force control, helps prevent bad collisions.
    start_force_srv.call(kinova_msgs.srv.StartRequest())

    rospy.sleep(0.25)

    # Reset the position
    gp_base.position.z -= initial_offset

    # Flag to check for collisions.
    MOVING = True

    # Generate a nonlinearity for the controller.
    cart_cov = generate_cartesian_covariance(0)

    # Move straight down under velocity control.
    velo_pub = rospy.Publisher('/m1n6s200_driver/in/cartesian_velocity',
                               kinova_msgs.msg.PoseVelocity,
                               queue_size=1)
    while MOVING and CURR_Z - 0.02 > gp_base.position.z:
        dz = gp_base.position.z - CURR_Z - 0.03  # Offset by a few cm for the fingertips.
        MAX_VELO_Z = 0.08
        dz = max(min(dz, MAX_VELO_Z), -1.0 * MAX_VELO_Z)

        v = np.array([0, 0, dz])
        vc = list(np.dot(v, cart_cov)) + [0, 0, 0]
        velo_pub.publish(kinova_msgs.msg.PoseVelocity(*vc))
        rospy.sleep(1 / 100.0)

    MOVING = False

    # close the fingers.
    rospy.sleep(0.1)
    set_finger_positions([8000, 8000])
    rospy.sleep(0.5)

    # Move back up to initial position.
    gp_base.position.z += initial_offset
    gp_base.orientation.x = 1
    gp_base.orientation.y = 0
    gp_base.orientation.z = 0
    gp_base.orientation.w = 0
    move_to_pose(gp_base)

    stop_force_srv.call(kinova_msgs.srv.StopRequest())

    return
Code Example #10
    def execute(self, userdata):

        rospy.loginfo('Finding best segment.')
        segments = []
        ignored_segments = []
        move_mode_seen_wanted_items = []
        for i in range(len(userdata.data['identified_objects']['labels'])):

            l = userdata.data['identified_objects']['labels'][i]
            if l == 'tote':
                continue

            if userdata.data['task'] == 'stow':
                # If we're nearing the end of the stow task, we might be checking
                #  for reclassified items, so ignore them until they're bumped out of the list.
                reclass_item = False
                for _, to_item in userdata.data['weight_reclassifications']:
                    if l == to_item:
                        reclass_item = True
                        break
                if reclass_item:
                    rospy.logerr("Ignoring %s as reclassified item" % l)
                    continue

            if userdata.data.get('move_objects_mode', False):
                # Don't filter wanted items.
                if l in userdata.data['moved_objects']:
                    continue
            elif userdata.data[
                    'wanted_items'] is not None and l not in userdata.data[
                        'wanted_items']:
                continue

            s = userdata.data['identified_objects']['point_clouds'][i]
            ad = userdata.data['identified_objects']['aligned_dimensions'][i]
            if (ad.width.data < 0.01 and
                    ad.height.data < 0.01) and self.failed_last_time == False:
                rospy.logwarn(
                    'Ignoring %s because the point cloud is too small' % l)
                continue

            if userdata.data['identified_objects']['segment_certainties'][
                    i] < 6 and self.failed_last_time == False:
                rospy.logwarn(
                    'Ignoring %s because the certainty isn\'t high enough' % l)
                continue

            # Round to the nearest xx cm
            round_cm = 3.0
            height_sort = round(ad.average_z.data *
                                (100.0 / round_cm)) / (100.0 / round_cm)

            if userdata.data.get('move_objects_mode', False):
                # Sort by size (negated so the largest item sorts first).
                item_weight = ad.width.data * ad.height.data

                # Weight it higher if it intersects an object that we wanted in older views.
                found_intersection = False
                for view in userdata.data['previous_views']:
                    if l in view['labels']:
                        rospy.logerr('%s is in previous view' % l)
                        # Figure out if the view contained any objects that we're trying to find.
                        seen_wanted_items = [
                            label for label in view['labels']
                            if label in userdata.data['wanted_items']
                        ]
                        if not seen_wanted_items:
                            rospy.logerr("No wanted items in that view though")
                            continue
                        rospy.logerr(
                            "There were also wanted items in that view.")
                        item_mask = (view['masked_classifications'] == (
                            view['labels'].index(l) + 1)).astype(np.uint8)
                        for wanted_item_label in seen_wanted_items:
                            wanted_item_id = view['labels'].index(
                                wanted_item_label) + 1
                            wanted_item_mask = (view['masked_classifications']
                                                == wanted_item_id).astype(
                                                    np.uint8)
                            wanted_item_mask = ndimage.morphology.binary_dilation(
                                wanted_item_mask)
                            intersection = item_mask * wanted_item_mask
                            if intersection.max() > 0:
                                rospy.logerr("Found intersection with %s" %
                                             wanted_item_label)
                                found_intersection = True
                                item_weight += 10
                                break

                        if found_intersection:
                            break

                segments.append((-1 * item_weight, s, l, ad))
            else:
                # Sort by height in the tote and certainty of the classification.
                segments.append(
                    ((height_sort, 255 - userdata.data['identified_objects']
                      ['segment_certainties'][i]), s, l, ad))

        if len(segments) == 0:
            rospy.loginfo("No Segments")
            if userdata.data['move_objects_mode'] == True:
                userdata.data['moved_objects'] = []
            if userdata.data['task'] == 'pick':
                # We use multiple viewpoints for the pick task.
                userdata.data['failed_views'][
                    userdata.data['chosen_bin']].append(
                        userdata.data['camera_location'])
            else:
                self.failed_last_time = True
            return 'failed'

        # Sort by size, biggest first.
        segments.sort()

        grasp_poses = []
        grasp_point_type = ''

        last_failed = userdata.data['last_failed']
        ignored_segments = 1
        pick_task_break = False
        # If we don't have grasp poses, but have ignored segments,
        # Then repeat the process, because eventually one of the ignored ones will become active again.

        IGNORED_ITEMS_RESET = 3
        if userdata.data['task'] == 'stow':
            IGNORED_ITEMS_RESET = 4

        while (not grasp_poses
               and ignored_segments > 0) and not pick_task_break:

            if userdata.data['task'] == 'pick':
                # Stop looping over the same object, ignoring it and not moving.
                pick_task_break = True

            rospy.logerr(last_failed)
            ignored_segments = 0
            to_remove = []
            for l in last_failed:
                last_failed[l] += 1
                if last_failed[l] > IGNORED_ITEMS_RESET:
                    to_remove.append(l)
            for l in to_remove:
                del last_failed[l]

            for s in segments:
                grasp_poses = []
                item_label = s[2]
                grasp_point_type = item_meta[item_label]['grasp_point_type']

                if item_label == 'mesh_cup' and len(segments) > 1:
                    continue

                if item_label in userdata.data['last_failed']:
                    # Ignore it for now.
                    ignored_segments += 1
                    continue

                rospy.loginfo(
                    "\033[92mI WANT TO PICK ITEM: %s, GRASP POINT TYPE: %s\033[0m"
                    % (item_label, grasp_point_type))

                normal_succeeded = False
                centroid_succeeded = False
                rgb_succeeded = False

                # Try and get fancy grasp poses.
                if grasp_point_type == 'normal':
                    try:
                        gc_res = self.grasp_candidate_service.call(
                            s[1], 0.01, False)  # False = Don't use centroid.
                        normal_poses = gc_res.grasp_candidates.grasp_poses.poses
                        self.pc_pub.publish(s[1])
                        if len(normal_poses) > 0:
                            normal_poses = rank_grasp_poses(
                                normal_poses,
                                gc_res.grasp_candidates.grasp_utilities)
                            grasp_poses.extend(normal_poses[:2])
                            normal_succeeded = True
                        else:
                            rospy.logerr(
                                'Failed to find any normal points for object.')
                    except rospy.ServiceException:
                        rospy.logerr(
                            'Failed to find any normal grasp points for object.'
                        )

                # Get the centroid grasp pose.
                if grasp_point_type in ['normal', 'centroid']:
                    try:
                        gc_res = self.grasp_candidate_service.call(
                            s[1], 0.01, True)  # True = Use Centroid.
                        centroid_poses = gc_res.grasp_candidates.grasp_poses.poses
                        if len(centroid_poses) > 0:
                            cp = centroid_poses[0]
                            # Make it straight up and down.
                            cp.orientation.x = 0
                            cp.orientation.y = 0
                            cp.orientation.z = 0
                            cp.orientation.w = 1
                            grasp_poses.append(cp)
                            centroid_succeeded = True
                        else:
                            rospy.logerr(
                                'Failed to find any centroid points for object.'
                            )
                    except IndexError:
                        rospy.logerr(
                            'Failed to find any centroid points for object.')

                if grasp_point_type == 'rgb_centroid' or \
                   (grasp_point_type in ['normal', 'centroid'] and centroid_succeeded == False):
                    try:
                        # Calculate the real world location of the RGB centroid.
                        index = userdata.data['identified_objects'][
                            'labels'].index(item_label)
                        class_mask = np.equal(
                            index + 1,
                            self.cb.imgmsg_to_cv2(
                                userdata.data['identified_objects']
                                ['masked_classifications'],
                                'mono8')).astype(np.uint8)
                        com = ndimage.measurements.center_of_mass(class_mask)
                        img_y = Int32(com[0])
                        img_x = Int32(com[1])
                        res = self.image_to_world_service.call(
                            userdata.data['point_cloud'], img_x, img_y)
                        p = Pose()
                        p.position.x = res.world_x.data
                        p.position.y = res.world_y.data
                        p.position.z = 1.2  # big number, will get overridden to the storage bottom.
                        p.orientation.w = 1
                        grasp_poses.append(p)
                        rgb_succeeded = True

                    except IndexError:
                        rospy.logerr(
                            'Failed to calculate RGB Centroid grasp point for object'
                        )

                if normal_succeeded == False and centroid_succeeded == False and rgb_succeeded == False:
                    continue

                break

        # Make sure we actually got something.
        if not grasp_poses:
            rospy.logerr('Didn\'t find grasp points for any objects')
            if userdata.data['move_objects_mode'] == True:
                userdata.data['moved_objects'] = []
            if userdata.data['task'] == 'pick':
                userdata.data['failed_views'][
                    userdata.data['chosen_bin']].append(
                        userdata.data['camera_location'])
            else:
                self.failed_last_time = True
            return 'failed'

        self.failed_last_time = False

        # Convert the grasp poses to the global reference frame
        converted_poses = []
        for p in grasp_poses:
            converted_poses.append(
                t.convert_pose(p, self.camera_reference_frame,
                               'global_xyz_link'))

        # Get the PCA Rotation of the RGB segment because its more reliable than the point cloud.
        index = userdata.data['identified_objects']['labels'].index(item_label)
        class_mask = np.equal(
            index + 1,
            self.cb.imgmsg_to_cv2(
                userdata.data['identified_objects']['masked_classifications'],
                'mono8')).astype(np.uint8)
        y, x = np.nonzero(class_mask.squeeze())
        x = x - np.mean(x)
        y = y - np.mean(y)
        coords = np.vstack([x, y])
        cov = np.cov(coords)
        evals, evecs = np.linalg.eig(cov)
        sort_indices = np.argsort(evals)[::-1]
        evec1, evec2 = evecs[:, sort_indices]
        eig_x, eig_y = evec2
        angle = np.arctan2(eig_y, eig_x)
        if abs(eig_y) > abs(eig_x):
            angle *= -1.0
        if angle > 1.57:
            angle -= 3.14
        if angle < -1.57:
            angle += 3.14
        q = tft.quaternion_from_euler(0, 0, angle)
        pca_rotation = Quaternion()
        pca_rotation.x = q[0]
        pca_rotation.y = q[1]
        pca_rotation.z = q[2]
        pca_rotation.w = q[3]

        userdata.data['item_to_pick'] = {
            'label': item_label,
            'grasp_poses': converted_poses,
            'pointcloud': s[1],
            'grasp_point_type': grasp_point_type,
            'pca_yaw': pca_rotation,
            'aligned_dimensions': s[3]
        }

        if (userdata.data['move_objects_mode']
                or userdata.data['move_objects_between_bins_mode'] is not None
            ) and userdata.data[
                'wanted_items'] is not None and item_label in userdata.data[
                    'wanted_items']:
            # Just in case we somehow now pick something we want.
            userdata.data['move_objects_mode'] = False
            userdata.data['move_objects_between_bins_mode'] = None
        else:
            userdata.data['moved_objects'].append(item_label)

        rospy.loginfo('Generated %s grasp poses for object.' %
                      len(grasp_poses))

        # Publish the point cloud segment for debugging purposes.
        self.pc_pub.publish(s[1])

        # Select sucking or grasping.
        grasp_type = item_meta[item_label]['grasp_type']
        if grasp_type.startswith('grip'):
            return 'succeeded_gripper'
        elif grasp_type.startswith('suck'):
            return 'succeeded_sucker'
        else:
            rospy.logerr("UNKNOWN GRASP TYPE %s for %s" %
                         (grasp_type, item_label))
            return 'succeeded_sucker'

        return 'failed'
Code Example #11
def execute_grasp():
    # Execute a grasp.
    global MOVING
    global CURR_Z
    global start_force_srv
    global stop_force_srv

    # Get the positions.
    msg = rospy.wait_for_message('/ggcnn/out/command',
                                 std_msgs.msg.Float32MultiArray)
    d = list(msg.data)

    # Calculate the gripper width.
    grip_width = d[4]
    # Convert width in pixels to mm.
    # 0.07 is the distance from the end effector (CURR_Z) to the camera.
    # Approx. 0.1 degrees per pixel for the RealSense.
    g_width = 2 * ((CURR_Z + 0.07)) * np.tan(
        0.1 * grip_width / 2.0 / 180.0 * np.pi) * 1000
    # Convert into motor positions.
    g = min((1 - (min(g_width, 70) / 70)) * (6800 - 4000) + 4000, 5500)
    set_finger_positions([g, g])

    rospy.sleep(0.5)

    # Pose of the grasp (position only) in the camera frame.
    gp = geometry_msgs.msg.Pose()
    gp.position.x = d[0]
    gp.position.y = d[1]
    gp.position.z = d[2]
    gp.orientation.w = 1

    # Convert to the base frame and add the angle in (ensures a planar grasp; the camera isn't guaranteed to be perpendicular).
    gp_base = convert_pose(gp, 'camera_depth_optical_frame',
                           'm1n6s200_link_base')

    q = tft.quaternion_from_euler(np.pi, 0, d[3])
    gp_base.orientation.x = q[0]
    gp_base.orientation.y = q[1]
    gp_base.orientation.z = q[2]
    gp_base.orientation.w = q[3]

    publish_pose_as_transform(gp_base, 'm1n6s200_link_base', 'G', 0.5)

    # Offset for the initial pose.
    initial_offset = 0.20
    gp_base.position.z += initial_offset

    # Disable force control; makes the robot more accurate.
    stop_force_srv.call(kinova_msgs.srv.StopRequest())

    move_to_pose(gp_base)
    rospy.sleep(0.1)

    # Start force control; helps prevent bad collisions.
    start_force_srv.call(kinova_msgs.srv.StartRequest())

    rospy.sleep(0.25)

    # Reset the position.
    gp_base.position.z -= initial_offset

    # Flag to check for collisions.
    MOVING = True

    # Generate a nonlinearity for the controller.
    cart_cov = generate_cartesian_covariance(0)

    # Move straight down under velocity control.
    velo_pub = rospy.Publisher('/m1n6s200_driver/in/cartesian_velocity',
                               kinova_msgs.msg.PoseVelocity,
                               queue_size=1)
    while MOVING and CURR_Z - 0.02 > gp_base.position.z:
        dz = gp_base.position.z - CURR_Z - 0.03  # Offset by a few cm for the fingertips.
        MAX_VELO_Z = 0.08
        dz = max(min(dz, MAX_VELO_Z), -1.0 * MAX_VELO_Z)

        v = np.array([0, 0, dz])
        vc = list(np.dot(v, cart_cov)) + [0, 0, 0]
        velo_pub.publish(kinova_msgs.msg.PoseVelocity(*vc))
        rospy.sleep(1 / 100.0)

    MOVING = False

    # Close the fingers.
    rospy.sleep(0.1)
    set_finger_positions([8000, 8000])
    rospy.sleep(0.5)

    # Move back up to the initial position.
    gp_base.position.z += initial_offset
    gp_base.orientation.x = 1
    gp_base.orientation.y = 0
    gp_base.orientation.z = 0
    gp_base.orientation.w = 0
    move_to_pose(gp_base)

    stop_force_srv.call(kinova_msgs.srv.StopRequest())

    return
Code Example #12
def open_servo(av):
    global SERVO
    global CURR_Z, MIN_Z
    global CURR_DEPTH
    global pose_averager
    global GOAL_Z
    global GRIP_WIDTH_MM
    global VELO_COV
    #CURR_DEPTH = msg.data[5]

    if SERVO:
        # Average pick pose in base frame.
        gp_base = geometry_msgs.msg.Pose()
        gp_base.position.x = av[0]
        gp_base.position.y = av[1]
        gp_base.position.z = av[2]
        gp_base.orientation.x = av[3]
        gp_base.orientation.y = av[4]
        gp_base.orientation.z = av[5]
        gp_base.orientation.w = av[6]
        # publish_pose_as_transform(gp_base, 'j2n6s300_link_base', 'G4', 1.0)
        # Get the Position of the End Effector in the frame of the Robot base Link
        g_pose = geometry_msgs.msg.Pose()
        g_pose.position.z = 0.05  # Offset from the end_effector frame to the actual position of the fingers.
        g_pose.orientation.w = 1
        p_gripper = geometry_msgs.msg.Pose()
        try:
            p_gripper = convert_pose(g_pose, 'j2n6s300_end_effector',
                                     'j2n6s300_link_base')
            end_effector_list = [
                p_gripper.orientation.x, p_gripper.orientation.y,
                p_gripper.orientation.z, p_gripper.orientation.w
            ]
            # publish_pose_as_transform(p_gripper, 'j2n6s300_link_base', 'G', 1)
        except Exception as ex:
            template = "An exception of type {0} occurred. Arguments:{1!r}"
            message = template.format(type(ex).__name__, ex.args)
            print(message)
            return

        # Calculate Position Error. pick pose - finger pose in base_frame
        dx = (gp_base.position.x - p_gripper.position.x)  #forward
        dy = (gp_base.position.y - p_gripper.position.y)  #left
        dz = (gp_base.position.z - p_gripper.position.z)  #up

        # Orientation velocity control is done in the frame of the gripper,
        #  so figure out the rotation offset in the end effector frame.
        end_effector_inverse = tft.quaternion_inverse(end_effector_list)
        pgo = tft.quaternion_multiply(av[3:], end_effector_inverse)

        q1 = [pgo[0], pgo[1], pgo[2], pgo[3]]
        e = tft.euler_from_quaternion(q1)

        if (np.sqrt(dx * dx + dy * dy + dz * dz) < 0.001):
            vx = 0
            vy = 0
            vz = 0
            dm = max(max(abs(e[0]), abs(e[1])), abs(e[2]))
            if dm > 0.001:
                dr = e[0] / dm * MAX_VELO
                dp = e[1] / dm * MAX_VELO
                dyaw = e[2] / dm * MAX_VELO
            else:
                return
        else:
            dm = max(max(abs(dx), abs(dy)), abs(dz))
            vx = dx / dm * MAX_VELO
            vy = dy / dm * MAX_VELO
            vz = dz / dm * MAX_VELO
            t = dm / MAX_VELO

            dr = 1 * e[0] / t
            dp = 1 * e[1] / t
            dyaw = 1 * e[2] / t
        # Apply a nonlinearity to the velocity
        v = np.array([vx, vy, vz])
        vc = np.dot(v, VELO_COV)

        CURRENT_VELOCITY[0] = vc[0]  # forward
        CURRENT_VELOCITY[1] = vc[1]  # left
        CURRENT_VELOCITY[2] = vc[2]  # up

        CURRENT_VELOCITY[3] = 1 * dr  # x: end-effector self-rotation (positive is clockwise)
        CURRENT_VELOCITY[4] = 1 * dp  # y: up/down rotation (positive is down)
        CURRENT_VELOCITY[5] = 1 * dyaw  # z: left/right rotation (positive is left)
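None of the servoing callbacks above actually send CURRENT_VELOCITY to the arm; they only fill the list. In the Kinova velocity-control pattern these examples follow, a separate loop publishes it at a fixed rate. A sketch of such a loop is below; the node name, topic and 100 Hz rate are assumptions based on the driver topic used in Code Example #9, and SERVO / CURRENT_VELOCITY are the module-level globals from the examples above.

import rospy
import kinova_msgs.msg

if __name__ == '__main__':
    rospy.init_node('velocity_commander')
    velo_pub = rospy.Publisher('/j2n6s300_driver/in/cartesian_velocity',
                               kinova_msgs.msg.PoseVelocity,
                               queue_size=1)
    r = rospy.Rate(100)  # Kinova arms expect cartesian velocity commands at ~100 Hz.
    while not rospy.is_shutdown():
        if SERVO:
            # Unpack [vx, vy, vz, wx, wy, wz] into the PoseVelocity message fields.
            velo_pub.publish(kinova_msgs.msg.PoseVelocity(*CURRENT_VELOCITY))
        r.sleep()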