Example #1
    def execute(self):
        # Get the bottle pose from the camera.
        rospy.loginfo('Moving to view')
        m.move_to_named_pose('whole_arm', 'view_checkerboard')

        rospy.sleep(4)

        poses = []
        qt = tf.transformations.quaternion_from_euler(0.0, 0.0,
                                                      -3.141592653 / 2)

        for i in range(16):
            rospy.loginfo('GET OBJECT POSE')
            b_req = cartesian_calibration.srv.GetACoorRequest()
            b_req.data_set = "measured"
            b_req.object_idx = i
            b_res = self.pose_service.call(b_req)
            p = b_res.object_pose.pose
            # p.orientation.x = qt[0]
            # p.orientation.y = qt[1]
            # p.orientation.z = qt[2]
            # p.orientation.w = qt[3]

            object_global = t.convert_pose(p, self.camera_reference_frame,
                                           'global_xyz_link')
            # object_global.position.z = min(0.80,p.position.z)
            poses.append(object_global)

        m.move_to_named_pose('realsense', 'tool_change_position')
        m.move_to_named_pose('wrist_only', 'sucker')

        for i, p in enumerate(poses):

            if p.position.x == 0 and p.position.y == 0 and p.position.z == 0:
                rospy.logerr('Failed to find an object. %s' % i)
                # return 'failed'
                continue

            print("MOVE SUCKER TO ABOVE OBJECT")
            p = t.align_pose_orientation_to_frame(p, 'global_xyz_link',
                                                  'sucker_endpoint')

            t.publish_pose_as_transform(p, 'global_xyz_link', 'MOVE HERE', 0.5)
            # Apply small fixed offsets to the estimated pose before moving.
            res = m.move_to_global(p.position.x - 0.02,
                                   p.position.y + 0.01,
                                   p.position.z - 0.02,
                                   'sucker',
                                   orientation=p.orientation)

            rospy.sleep(4)
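
t.convert_pose here (and the bare convert_pose in the later examples) is a project helper, not a ROS built-in. A minimal sketch of what such a helper might look like, assuming a tf2_ros buffer is available and using the latest transform with a one-second timeout (both assumptions, not the project's actual code):

import rospy
import tf2_ros
import tf2_geometry_msgs  # registers PoseStamped with tf2's do_transform
from geometry_msgs.msg import PoseStamped

tf_buffer = tf2_ros.Buffer()
tf_listener = tf2_ros.TransformListener(tf_buffer)

def convert_pose(pose, from_frame, to_frame):
    # Stamp the bare Pose so tf2 knows which frame it is expressed in.
    stamped = PoseStamped()
    stamped.header.frame_id = from_frame
    stamped.pose = pose
    # Latest available transform; raises a tf2 exception if unavailable.
    transform = tf_buffer.lookup_transform(to_frame, from_frame,
                                           rospy.Time(0), rospy.Duration(1.0))
    return tf2_geometry_msgs.do_transform_pose(stamped, transform).pose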
Example #2
def command_callback(target_pose, target_quaternion):
    global SERVO
    global CURR_Z, MIN_Z
    global CURR_DEPTH
    global pose_averager
    global GOAL_Z
    global GRIP_WIDTH_MM
    global VELO_COV

    #CURR_DEPTH = msg.data[5]

    if SERVO:

        # d = list(msg.data)

        # PBVS Method.

        #if d[2] > 0.150:  # Min effective range of the realsense.

        # Convert width in pixels to mm.
        # 0.07 is distance from end effector (CURR_Z) to camera.
        # 0.1 is approx degrees per pixel for the realsense.
        # if d[2] > 0.25:
        #     GRIP_WIDTH_PX = msg.data[4]
        #     GRIP_WIDTH_MM = 2 * ((CURR_Z + 0.07)) * np.tan(0.1 * GRIP_WIDTH_PX / 2.0 / 180.0 * np.pi) * 1000

        # Construct the Pose in the frame of the camera.
        # gp = geometry_msgs.msg.Pose()
        # gp.position.x = d[0]
        # gp.position.y = d[1]
        # gp.position.z = d[2]
        # q = tft.quaternion_from_euler(0, 0, -1 * d[3])
        # gp.orientation.x = q[0]
        # gp.orientation.y = q[1]
        # gp.orientation.z = q[2]
        # gp.orientation.w = q[3]

        # Calculate Pose of Grasp in Robot Base Link Frame
        # Average over a few predicted poses to help combat noise.
        # x: 0.645162225459
        # y: -0.318927784776
        # z: 0.694286052858
        # w: -0.00420092196819

        # x: 0.81874143089
        # y: -0.506091943919
        # z: 0.374129838746

        # gp_base = convert_pose(gp, 'camera_link', 'j2n6s300_link_base')
        # print(gp_base.orientation)
        # print(gp_base.position)
        #gpbo = gp_base.orientation
        e = tft.euler_from_quaternion(target_quaternion)
        # Only really care about rotation about x (e[0]); update() returns the running mean.
        av = pose_averager.update(np.array(target_pose + [e[0], e[1], e[2]]))

        # else:
        #     gp_base = geometry_msgs.msg.Pose()
        #     av = pose_averager.evaluate()

        # Compute the end effector orientation relative to j2n6s300_link_base, as a quaternion and Euler angles.
        g_pose1 = geometry_msgs.msg.Pose()
        g_pose1.orientation.w = 1
        try:
            end_effector = convert_pose(g_pose1, 'j2n6s300_end_effector',
                                        'j2n6s300_link_base')
            end_effector_list = [
                end_effector.orientation.x, end_effector.orientation.y,
                end_effector.orientation.z, end_effector.orientation.w
            ]
        except Exception as ex:
            template = "An exception of type {0} occurred. Arguments:{1!r}"
            message = template.format(type(ex).__name__, ex.args)
            print(message)
            return
        e1 = tft.euler_from_quaternion(end_effector_list)

        # Average pick pose in base frame.
        gp_base = geometry_msgs.msg.Pose()
        gp_base.position.x = av[0]
        gp_base.position.y = av[1]
        gp_base.position.z = av[2]
        GOAL_Z = av[2]
        ang = av[3] - np.pi / 2  # We don't want to align, we want to grip.
        # Rotate about the fixed x/y/z axes of j2n6s300_link_base into the
        # upright pose: (np.pi, 0, np.pi/2) points left, (np.pi/2, 0, np.pi/2)
        # points forward, (np.pi, np.pi/2, np.pi/2) points down.
        q = tft.quaternion_from_euler(np.pi, 0, np.pi / 2)
        q1 = tft.quaternion_from_euler(np.pi / 2, 0, 0)  # Point the fingers upright.
        q = tft.quaternion_multiply(q1, q)
        gp_base.orientation.x = q[0]
        gp_base.orientation.y = q[1]
        gp_base.orientation.z = q[2]
        gp_base.orientation.w = q[3]

        # Get the Position of the End Effector in the frame of the Robot base Link
        g_pose = geometry_msgs.msg.Pose()
        g_pose.position.z = 0.03  # Offset from the end_effector frame to the actual position of the fingers.
        g_pose.orientation.w = 1
        p_gripper = geometry_msgs.msg.Pose()
        try:
            p_gripper = convert_pose(g_pose, 'j2n6s300_end_effector',
                                     'j2n6s300_link_base')
        except Exception as ex:
            template = "An exception of type {0} occurred. Arguments:{1!r}"
            message = template.format(type(ex).__name__, ex.args)
            print(message)
            return
        publish_pose_as_transform(gp_base, 'j2n6s300_link_base', 'G', 0.0)

        # Calculate position error: pick pose minus finger pose, in the base frame.
        dx = (gp_base.position.x - p_gripper.position.x)
        dy = (gp_base.position.y - p_gripper.position.y)
        dz = (gp_base.position.z - p_gripper.position.z)

        # Orientation velocity control is done in the frame of the gripper,
        #  so figure out the rotation offset in the end effector frame.
        end_effector_inverse = tft.quaternion_inverse(end_effector_list)
        pgo = tft.quaternion_multiply(q, end_effector_inverse)

        q1 = [pgo[0], pgo[1], pgo[2], pgo[3]]
        # q1 = [pgo.x, pgo.y, pgo.z, pgo.w]
        e = tft.euler_from_quaternion(q1)
        dr = 1 * e[0]
        dp = 1 * e[1]
        dyaw = 1 * e[2]

        vx = max(min(dx * 2.5, MAX_VELO_X), -1.0 * MAX_VELO_X)
        vy = max(min(dy * 2.5, MAX_VELO_Y), -1.0 * MAX_VELO_Y)
        vz = max(min(dz - 0.04, MAX_VELO_Z), -1.0 * MAX_VELO_Z)

        # Apply a nonlinearity to the velocity
        v = np.array([vx, vy, vz])
        vc = np.dot(v, VELO_COV)

        CURRENT_VELOCITY[0] = vc[0]
        CURRENT_VELOCITY[1] = vc[1]
        CURRENT_VELOCITY[2] = vc[2]

        CURRENT_VELOCITY[3] = 1 * dr  # x: end effector roll (self-rotation)
        CURRENT_VELOCITY[4] = 1 * dp  # y: pitch (up/down)
        # z: yaw (left/right); could be clamped as in Example #3:
        # max(min(1 * dyaw, MAX_ROTATION), -1 * MAX_ROTATION)
        CURRENT_VELOCITY[5] = 1 * dyaw
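
The rotational part of this servo loop reduces to one identity: the error quaternion is the goal orientation composed with the inverse of the current end effector orientation, and its Euler decomposition feeds the angular velocities. A standalone restatement of that step, with quaternions as [x, y, z, w] lists as tf.transformations expects:

import numpy as np
import tf.transformations as tft

def orientation_error(q_goal, q_current):
    # q_err rotates the current orientation onto the goal:
    # q_err = q_goal * q_current^-1
    q_err = tft.quaternion_multiply(q_goal, tft.quaternion_inverse(q_current))
    # Decompose into roll/pitch/yaw offsets for the velocity controller.
    return tft.euler_from_quaternion(q_err)

# A 10-degree yaw error comes back as roughly (0, 0, 0.175):
q_goal = tft.quaternion_from_euler(0, 0, np.deg2rad(10))
q_curr = tft.quaternion_from_euler(0, 0, 0)
print(orientation_error(q_goal, q_curr))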
Example #3
def command_callback(msg):
    global SERVO
    global CURR_Z, MIN_Z
    global CURR_DEPTH
    global pose_averager
    global GOAL_Z
    global GRIP_WIDTH_MM
    global VELO_COV

    CURR_DEPTH = msg.data[5]

    if SERVO:

        d = list(msg.data)

        # PBVS Method.

        if d[2] > 0.150:  # Min effective range of the realsense.

            # Convert width in pixels to mm.
            # 0.07 is distance from end effector (CURR_Z) to camera.
            # 0.1 is approx degrees per pixel for the realsense.
            if d[2] > 0.25:
                GRIP_WIDTH_PX = msg.data[4]
                GRIP_WIDTH_MM = 2 * ((CURR_Z + 0.07)) * np.tan(
                    0.1 * GRIP_WIDTH_PX / 2.0 / 180.0 * np.pi) * 1000

            # Construct the Pose in the frame of the camera.
            gp = geometry_msgs.msg.Pose()
            gp.position.x = d[0]
            gp.position.y = d[1]
            gp.position.z = d[2]
            q = tft.quaternion_from_euler(0, 0, -1 * d[3])
            gp.orientation.x = q[0]
            gp.orientation.y = q[1]
            gp.orientation.z = q[2]
            gp.orientation.w = q[3]

            # Calculate Pose of Grasp in Robot Base Link Frame
            # Average over a few predicted poses to help combat noise.
            gp_base = convert_pose(gp, 'camera_depth_frame',
                                   'j2n6s300_link_base')
            gpbo = gp_base.orientation
            e = tft.euler_from_quaternion([gpbo.x, gpbo.y, gpbo.z, gpbo.w])
            # Only really care about rotation about z (e[2]).
            av = pose_averager.update(
                np.array([
                    gp_base.position.x, gp_base.position.y, gp_base.position.z,
                    e[2]
                ]))

        else:
            gp_base = geometry_msgs.msg.Pose()
            av = pose_averager.evaluate()

        # Average pose in base frame.
        gp_base.position.x = av[0]
        gp_base.position.y = av[1]
        gp_base.position.z = av[2]
        GOAL_Z = av[2]
        ang = av[3] - np.pi / 2  # We don't want to align, we want to grip.
        q = tft.quaternion_from_euler(np.pi, 0, ang)
        gp_base.orientation.x = q[0]
        gp_base.orientation.y = q[1]
        gp_base.orientation.z = q[2]
        gp_base.orientation.w = q[3]

        # Get the Position of the End Effector in the frame of the Robot base Link
        g_pose = geometry_msgs.msg.Pose()
        g_pose.position.z = 0.03  # Offset from the end_effector frame to the actual position of the fingers.
        g_pose.orientation.w = 1
        p_gripper = convert_pose(g_pose, 'j2n6s300_end_effector',
                                 'j2n6s300_link_base')

        publish_pose_as_transform(gp_base, 'j2n6s300_link_base', 'G', 0.0)

        # Calculate Position Error.
        dx = (gp_base.position.x - p_gripper.position.x)
        dy = (gp_base.position.y - p_gripper.position.y)
        dz = (gp_base.position.z - p_gripper.position.z)

        # Orientation velocity control is done in the frame of the gripper,
        #  so figure out the rotation offset in the end effector frame.
        gp_gripper = convert_pose(gp_base, 'j2n6s300_link_base',
                                  'j2n6s300_end_effector')
        pgo = gp_gripper.orientation
        q1 = [pgo.x, pgo.y, pgo.z, pgo.w]
        e = tft.euler_from_quaternion(q1)
        dr = 1 * e[0]
        dp = 1 * e[1]
        dyaw = 1 * e[2]

        vx = max(min(dx * 2.5, MAX_VELO_X), -1.0 * MAX_VELO_X)
        vy = max(min(dy * 2.5, MAX_VELO_Y), -1.0 * MAX_VELO_Y)
        vz = max(min(dz - 0.04, MAX_VELO_Z), -1.0 * MAX_VELO_Z)

        # Apply a nonlinearity to the velocity
        v = np.array([vx, vy, vz])
        vc = np.dot(v, VELO_COV)

        CURRENT_VELOCITY[0] = vc[0]
        CURRENT_VELOCITY[1] = vc[1]
        CURRENT_VELOCITY[2] = vc[2]

        CURRENT_VELOCITY[3] = -1 * dp
        CURRENT_VELOCITY[4] = 1 * dr
        CURRENT_VELOCITY[5] = max(min(1 * dyaw, MAX_ROTATION),
                                  -1 * MAX_ROTATION)
        print(CURRENT_VELOCITY)
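
pose_averager is project code, but its role is clear from how it is called: update() pushes a new pose sample and returns the running mean, and evaluate() returns the mean without adding a sample. A minimal sliding-window version could look like this (the window size of ten is an assumption):

import numpy as np

class PoseAverager:
    def __init__(self, window=10):
        self.window = window
        self.samples = []

    def update(self, sample):
        # Push the newest pose sample (e.g. [x, y, z, yaw]) and return the mean.
        self.samples.append(np.asarray(sample, dtype=float))
        if len(self.samples) > self.window:
            self.samples.pop(0)
        return self.evaluate()

    def evaluate(self):
        # Mean of whatever is currently in the window. Note that a plain mean
        # of yaw angles misbehaves near the +/-pi wrap-around.
        return np.mean(self.samples, axis=0)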
Example #4
def execute_grasp():
    # Execute a grasp.
    global MOVING
    global CURR_Z
    global start_force_srv
    global stop_force_srv

    # Get the positions.
    msg = rospy.wait_for_message('/ggcnn/out/command',
                                 std_msgs.msg.Float32MultiArray)
    d = list(msg.data)

    # Calculate the gripper width.
    grip_width = d[4]
    # Convert width in pixels to mm.
    # 0.07 is distance from end effector (CURR_Z) to camera.
    # 0.1 is approx degrees per pixel for the realsense.
    #g_width = 2 * ((CURR_Z + 0.07)) * np.tan(0.1 * grip_width / 2.0 / 180.0 * np.pi) * 1000
    # Convert into motor positions.
    #g = min((1 - (min(g_width, 70)/70)) * (6800-4000) + 4000, 5500)

    #set_finger_positions([g, g])

    rospy.sleep(0.5)

    # Pose of the grasp (position only) in the camera frame.
    gp = geometry_msgs.msg.Pose()
    gp.position.x = d[0]
    gp.position.y = d[1]
    gp.position.z = d[2]
    gp.orientation.w = 1

    # Convert to base frame, add the angle in (ensures planar grasp, camera isn't guaranteed to be perpendicular).
    gp_base = convert_pose(gp, 'camera_depth_optical_frame',
                           'm1n6s200_link_base')

    q = tft.quaternion_from_euler(np.pi, 0, d[3])
    gp_base.orientation.x = q[0]
    gp_base.orientation.y = q[1]
    gp_base.orientation.z = q[2]
    gp_base.orientation.w = q[3]

    publish_pose_as_transform(gp_base, 'm1n6s200_link_base', 'G', 0.5)

    # Offset for initial pose.
    initial_offset = 0.20
    gp_base.position.z += initial_offset

    # Disable force control, makes the robot more accurate.
    stop_force_srv.call(kinova_msgs.srv.StopRequest())

    move_to_pose(gp_base)
    rospy.sleep(0.1)

    # Start force control, helps prevent bad collisions.
    start_force_srv.call(kinova_msgs.srv.StartRequest())

    rospy.sleep(0.25)

    # Reset the position
    gp_base.position.z -= initial_offset

    # Flag to check for collisions.
    MOVING = True

    # Generate a nonlinearity for the controller.
    cart_cov = generate_cartesian_covariance(0)

    # Move straight down under velocity control.
    velo_pub = rospy.Publisher('/m1n6s200_driver/in/cartesian_velocity',
                               kinova_msgs.msg.PoseVelocity,
                               queue_size=1)
    while MOVING and CURR_Z - 0.02 > gp_base.position.z:
        dz = gp_base.position.z - CURR_Z - 0.03  # Offset by a few cm for the fingertips.
        MAX_VELO_Z = 0.08
        dz = max(min(dz, MAX_VELO_Z), -1.0 * MAX_VELO_Z)

        v = np.array([0, 0, dz])
        vc = list(np.dot(v, cart_cov)) + [0, 0, 0]
        velo_pub.publish(kinova_msgs.msg.PoseVelocity(*vc))
        rospy.sleep(1 / 100.0)

    MOVING = False

    # close the fingers.
    rospy.sleep(0.1)
    set_finger_positions([8000, 8000])
    rospy.sleep(0.5)

    # Move back up to initial position.
    gp_base.position.z += initial_offset
    gp_base.orientation.x = 1
    gp_base.orientation.y = 0
    gp_base.orientation.z = 0
    gp_base.orientation.w = 0
    move_to_pose(gp_base)

    stop_force_srv.call(kinova_msgs.srv.StopRequest())

    return
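
The width conversion that Examples #2 to #4 carry (partly in comments) is plain trigonometry: treat each pixel as roughly 0.1 degrees of the RealSense's field of view, and scale by the camera-to-object distance (gripper height plus the 0.07 m camera offset). Worked through as a helper, with the constants taken from the comments above:

import numpy as np

def grip_width_mm(width_px, curr_z, cam_offset=0.07, deg_per_px=0.1):
    # Half-angle subtended by the detected width, in radians.
    half_angle = np.deg2rad(deg_per_px * width_px / 2.0)
    # Opposite side of the viewing triangle, doubled and scaled to mm.
    return 2 * (curr_z + cam_offset) * np.tan(half_angle) * 1000

# e.g. a 100 px detection seen from 0.33 m is about 70 mm wide:
print(grip_width_mm(100, 0.33))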
Example #5
    def execute(self, userdata):

        first_attempt = True
        label = userdata.data['item_to_pick']['label']
        suck_level = item_meta[label].get('suction_pressure', 3)
        set_suck_level(suck_level)

        USE_ANGLED = False

        position_limits = POSITION_LIMITS[
            userdata.data['picking_from']]['sucker']

        # Get the tote weight before the pick
        scales_topic = userdata.data['picking_from'] + '_scales/weight'
        if item_meta[label]['grasp_type'] != 'grip_suck':
            # If we've just tried sucking, then don't re-weigh
            try:
                weight = rospy.wait_for_message(scales_topic, Int16, 2.0)
                userdata.data['weight_before'] = weight.data
            except rospy.ROSException:
                rospy.logerr('Unable to read weight from %s' % scales_topic)
                userdata.data['weight_before'] = -1
                if item_meta[label]['grasp_point_type'] == 'rgb_centroid':
                    rospy.logerr(
                        "Aborting RGB Centroid pick because there aren't any scales."
                    )
                    return 'suck_failed'

        for object_global in userdata.data['item_to_pick']['grasp_poses']:

            USE_ANGLED = decide_angled(object_global, position_limits)

            object_global.position.x += 0.015
            object_global.position.y -= 0.03

            if USE_ANGLED:
                rospy.logerr('ANGLED PICK')

                # Make sure we're inside the tote.
                edge_offset = 0.0
                object_global.position.x = max(
                    object_global.position.x,
                    position_limits['x_min'] - edge_offset)
                object_global.position.x = min(
                    object_global.position.x,
                    position_limits['x_max'] + edge_offset)
                object_global.position.y = max(
                    object_global.position.y,
                    position_limits['y_min'] - edge_offset)
                object_global.position.y = min(
                    object_global.position.y,
                    position_limits['y_max'] + edge_offset)

                object_global.position.z -= 0.02  # The endpoint is half way through the suction cup, so move up slightly

                #t.publish_pose_as_transform(object_global, 'global_xyz_link', 'ORIGINAL', seconds=2)

                translation = [
                    object_global.position.x, object_global.position.y,
                    object_global.position.z
                ]
                qm = object_global.orientation
                q = (qm.x, qm.y, qm.z, qm.w)

                R = tft.quaternion_matrix(q)
                P = tft.projection_matrix([0, 0, 0], [0, 0, 1])  # XY plane
                pose_vec = R[:, 2]  # Z portion of matrix.
                proj_vec = np.dot(
                    P,
                    pose_vec)[:3]  # Projected onto XY plane as a unit vector.
                proj_vec_unit = tft.unit_vector(proj_vec)
                y_unit = [0, 1, 0]  # Unit vector in the y-direction.
                yaw_angle = np.arccos(np.dot(
                    proj_vec_unit, y_unit))  # Angle to the grasp around Z.
                pitch_angle = np.arcsin(
                    tft.vector_norm(proj_vec))  # Angle to tilt the suction cup
                if pitch_angle > 1.2:
                    pitch_angle = 1.2

                # arccos only gives the absolute angle; check the vector's direction for the sign.
                if R[0, 2] > 0:
                    yaw_angle = -1 * yaw_angle

                # Create a quaternion with the right angles (note rotations are applied in the rotated frame in zyx order)
                q = tft.quaternion_from_euler(yaw_angle, 0, -1 * pitch_angle,
                                              'rzyx')

                if yaw_angle > 1.571 or yaw_angle < -1.571:
                    # Rotate by 180 to normalise to the sucker workspace.
                    q2 = tft.quaternion_from_euler(0, 0, 3.1415)
                    q = tft.quaternion_multiply(q, q2)

                e = list(tft.euler_from_quaternion(q))
                if e[2] > 1.5:  # Stop gimbal lock and weird joint configurations.
                    e[2] = 1.5
                if e[2] < -1.5:
                    e[2] = -1.5
                q = tft.quaternion_from_euler(e[0], e[1], e[2])

                object_global.orientation.x = q[0]
                object_global.orientation.y = q[1]
                object_global.orientation.z = q[2]
                object_global.orientation.w = q[3]

                R = tft.quaternion_matrix(q)

                t.publish_tf_quaterion_as_transform(translation,
                                                    q,
                                                    'global_xyz_link',
                                                    'MOVE_HERE',
                                                    seconds=0.5)

                # Create a pre-grasp pose away from the object.
                T = tft.translation_matrix(translation)
                T2 = tft.translation_matrix((0, 0, -0.05))
                F = tft.concatenate_matrices(T, R, T2)
                pre_grasp_t = tft.translation_from_matrix(F)
                pre_grasp_q = tft.quaternion_from_matrix(F)

                #t.publish_tf_quaterion_as_transform(pre_grasp_t, pre_grasp_q, 'global_xyz_link', 'PRE', seconds=5.0)

                grasp_orientation = Quaternion()
                grasp_orientation.x = q[0]
                grasp_orientation.y = q[1]
                grasp_orientation.z = q[2]
                grasp_orientation.w = q[3]

                rospy.loginfo('MOVE TO TOOL CHANGE HEIGHT')

                if first_attempt:
                    # Move the whole wrist and then select the sucker.
                    res = m.move_to_global(
                        min(object_global.position.x, REALSENSE_MAX_GLOBAL_X),
                        max(
                            min(object_global.position.y,
                                SUCKER_MAX_Y_STRAIGHT) - 0.02,
                            SUCKER_MIN_GLOBAL_Y + 0.02) -
                        SUCKER_REALSENSE_Y_OFFSET,
                        TOOL_CHANGE_REALSENSE_GLOBAL_Z, 'realsense')
                    if not res:
                        return 'failed'
                    rospy.loginfo('SELECTING SUCKER')
                    res = m.move_to_named_pose('wrist_only', 'sucker')
                    if not res:
                        return 'failed'
                    first_attempt = False
                else:
                    # Straighten up the sucker from the last attempt and move into position
                    straight_q = Quaternion()
                    straight_q.x = 0
                    straight_q.y = 0
                    straight_q.z = 0
                    straight_q.w = 1
                    res = m.move_to_global(
                        min(object_global.position.x, SUCKER_MAX_GLOBAL_X),
                        max(
                            min(object_global.position.y,
                                SUCKER_MAX_Y_STRAIGHT) - 0.02,
                            SUCKER_MIN_GLOBAL_Y + 0.02),
                        TOOL_CHANGE_REALSENSE_GLOBAL_Z + 0.35,
                        'sucker',
                        orientation=straight_q)
                    if not res:
                        return 'failed'

                rospy.loginfo("MOVE TO PRE-GRASP")
                res = m.move_to_global(pre_grasp_t[0],
                                       pre_grasp_t[1],
                                       min(pre_grasp_t[2],
                                           SUCKER_MAX_GLOBAL_Z),
                                       'sucker',
                                       orientation=grasp_orientation,
                                       velo_scale=0.4)
                if not res:
                    return 'failed'

                rospy.loginfo("PUMP ON, MOVING TO OBJECT")
                suction.pump_on()
                (res, stopped,
                 bail) = m.move_to_global_monitor_weight_and_suction(
                     object_global.position.x,
                     object_global.position.y,
                     min(object_global.position.z, SUCKER_MAX_GLOBAL_Z),
                     'sucker',
                     scales_topic,
                     orientation=grasp_orientation,
                     velo_scale=0.3)
                if not res:
                    return 'failed'

            else:
                # Straight Down Suck
                rospy.logerr('STRAIGHT PICK')

                # Make sure we're inside the tote.
                edge_offset = 0.0
                object_global.position.x = max(
                    object_global.position.x,
                    position_limits['x_min'] - edge_offset)
                object_global.position.x = min(
                    object_global.position.x,
                    position_limits['x_max'] + edge_offset)
                object_global.position.y = max(
                    object_global.position.y,
                    position_limits['y_min'] - edge_offset)
                object_global.position.y = min(
                    object_global.position.y,
                    position_limits['y_max'] + edge_offset)
                object_global.position.z = min(object_global.position.z,
                                               position_limits['z_max'])

                t.publish_pose_as_transform(object_global,
                                            'global_xyz_link',
                                            'MOVE_HERE',
                                            seconds=0.5)
                rospy.loginfo('MOVE TO TOOL CHANGE HEIGHT')

                # We only need to select the sucker on the first attempt.
                # Switching controllers too quickly with no actual movement in
                # between can break them (they don't switch properly).
                # Not sure why; something to do with timing.
                if first_attempt:
                    res = m.move_to_global(
                        object_global.position.x,
                        max(
                            min(object_global.position.y,
                                SUCKER_MAX_Y_STRAIGHT),
                            SUCKER_MIN_Y_STRAIGHT) - SUCKER_REALSENSE_Y_OFFSET,
                        TOOL_CHANGE_REALSENSE_GLOBAL_Z, 'realsense')
                    if not res:
                        return 'failed'
                    rospy.loginfo('SELECTING SUCKER')
                    res = m.move_to_named_pose('wrist_only', 'sucker')
                    if not res:
                        return 'failed'
                    first_attempt = False
                    if item_meta[label]['grasp_type'] == 'grip_suck':
                        # Weird async movement bug.
                        rospy.sleep(1.0)
                else:
                    straight_q = Quaternion()
                    straight_q.x = 0
                    straight_q.y = 0
                    straight_q.z = 0
                    straight_q.w = 1
                    res = m.move_to_global(
                        object_global.position.x,
                        max(
                            min(object_global.position.y,
                                SUCKER_MAX_Y_STRAIGHT), SUCKER_MIN_Y_STRAIGHT),
                        TOOL_CHANGE_REALSENSE_GLOBAL_Z + 0.35,
                        'sucker',
                        orientation=straight_q)
                    if not res:
                        return 'failed'

                # Correct the rotation if required.
                pca_yaw = userdata.data['item_to_pick']['pca_yaw']
                q = (pca_yaw.x, pca_yaw.y, pca_yaw.z, pca_yaw.w)
                e = list(tft.euler_from_quaternion(q))
                if e[2] > 1.57:
                    e[2] -= 3.14
                if e[2] < -1.57:
                    e[2] += 3.14
                if e[2] > 1.5:  # Stop gimbal lock and weird joint configurations.
                    e[2] = 1.5
                if e[2] < -1.5:
                    e[2] = -1.5
                q = tft.quaternion_from_euler(0, 0, e[2])
                pca_yaw.x = q[0]
                pca_yaw.y = q[1]
                pca_yaw.z = q[2]
                pca_yaw.w = q[3]

                rospy.loginfo("PUMP ON, MOVING TO OBJECT")
                suction.pump_on()
                v_scale = 0.25
                if item_meta[label]['grasp_point_type'] == 'rgb_centroid':
                    v_scale = 0.1
                (res, stopped,
                 bail) = m.move_to_global_monitor_weight_and_suction(
                     object_global.position.x,
                     min(object_global.position.y, SUCKER_MAX_Y_STRAIGHT),
                     min(object_global.position.z, SUCKER_MAX_GLOBAL_Z),
                     'sucker',
                     scales_topic,
                     orientation=pca_yaw,
                     velo_scale=v_scale)
                if not res:
                    return 'failed'

            if bail:
                m.move_relative(0, 0, -0.02, 'sucker_endpoint', 'sucker')
                rospy.sleep(0.5)
            elif not suction.check_pressure_sensor():
                # If it's not already on, give it a sec.
                rospy.sleep(1.0)

            # Query the pressure sensor: has the object been sucked correctly?
            suction_state = suction.check_pressure_sensor()
            rospy.loginfo("PRESSURE SENSOR STATUS: %s" % suction_state)

            if not suction_state and not bail:
                set_suck_level(3)
                # Just move down a touch and double check
                move_amount = 0.02
                if USE_ANGLED:
                    move_amount = 0.035
                if object_global.position.z < (SUCKER_MAX_GLOBAL_Z):
                    move_amount = min(
                        move_amount,
                        SUCKER_MAX_GLOBAL_Z - object_global.position.z)
                    res = m.move_relative(0,
                                          0,
                                          move_amount,
                                          'sucker_endpoint',
                                          'sucker',
                                          plan_time=0.5)
                else:
                    res = True
                if res:
                    rospy.sleep(1.0)
                    suction_state = suction.check_pressure_sensor()

            if suction_state:
                #userdata.data['last_failed'] = None
                return 'succeeded'
            else:
                suction.pump_off()

        # Ran out of grasp poses.
        suction.pump_off()
        if item_meta[label]['grasp_type'] == 'suck_grip':
            return 'try_gripper'
        userdata.data['last_failed'][label] = 0
        return 'suck_failed'
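
The angled-pick geometry in this example is worth isolating: project the grasp frame's z-axis (the approach direction) onto the XY plane, then its heading relative to +y gives the yaw and its length gives the tilt. A condensed sketch of just that step, with the 1.2 rad pitch cap taken from the example and the straight-down degenerate case handled explicitly (an addition; the original never hits it because it bails into the straight pick instead):

import numpy as np
import tf.transformations as tft

def yaw_pitch_from_grasp_quaternion(q, max_pitch=1.2):
    R = tft.quaternion_matrix(q)
    z_axis = R[:3, 2]  # Approach direction of the grasp frame.
    proj = np.array([z_axis[0], z_axis[1], 0.0])  # Projected onto the XY plane.
    norm = np.linalg.norm(proj)
    if norm < 1e-6:
        return 0.0, 0.0  # Pointing straight down: no tilt, yaw is arbitrary.
    # Heading of the projection relative to +y; arccos loses the sign, so
    # recover it from the x-component, as the example does with R[0, 2].
    yaw = np.arccos(np.clip(np.dot(proj / norm, [0.0, 1.0, 0.0]), -1.0, 1.0))
    if z_axis[0] > 0:
        yaw = -yaw
    # Tilt of the suction cup away from vertical, capped for reachability.
    pitch = min(np.arcsin(min(norm, 1.0)), max_pitch)
    return yaw, pitch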
Example #6
    def execute(self, userdata):
        gripper.open_gripper()

        position_limits = POSITION_LIMITS[userdata.data['picking_from']]['gripper']

        # Get the weight before the pick
        scales_topic = userdata.data['picking_from'] + '_scales/weight'
        if item_meta[userdata.data['item_to_pick']['label']]['grasp_type'] != 'suck_grip':
            # If we've just tried sucking, then don't re-weigh
            try:
                weight = rospy.wait_for_message(scales_topic, Int16, 2.0)
                userdata.data['weight_before'] = weight.data
            except rospy.ROSException:
                rospy.logerr('Unable to read weight from %s' % scales_topic)
                userdata.data['weight_before'] = -1
                if item_meta[userdata.data['item_to_pick']['label']]['grasp_point_type'] == 'rgb_centroid':
                    rospy.logerr("Aborting RGB Centroid pick because there aren't any scales.")
                    return 'grip_failed'

        # No way to detect failure yet, so just use one grasp point.
        object_global = userdata.data['item_to_pick']['grasp_poses'][0]

        object_global.position.x += 0.015
        object_global.position.y -= 0.03

        object_global.position.x = max(object_global.position.x, position_limits['x_min'])
        object_global.position.x = min(object_global.position.x, position_limits['x_max'])
        object_global.position.y = max(object_global.position.y, position_limits['y_min'])
        object_global.position.y = min(object_global.position.y, position_limits['y_max'])
        object_global.position.z = min(object_global.position.z, position_limits['z_max'])

        t.publish_pose_as_transform(object_global, 'global_xyz_link', 'MOVE HERE', 0.5)

        rospy.loginfo('MOVE TO TOOL CHANGE HEIGHT')
        curr = t.current_robot_pose('global_xyz_link', 'realsense_endpoint')
        # res = m.move_to_global(curr.position.x, curr.position.y, TOOL_CHANGE_REALSENSE_GLOBAL_Z, 'realsense')
        res = m.move_to_global(min(object_global.position.x, REALSENSE_MAX_GLOBAL_X), max(min(object_global.position.y, SUCKER_MAX_Y_STRAIGHT) - 0.02, SUCKER_MIN_GLOBAL_Y + 0.02) - SUCKER_REALSENSE_Y_OFFSET, TOOL_CHANGE_REALSENSE_GLOBAL_Z, 'realsense')
        if not res:
            return 'failed'

        rospy.loginfo('SELECTING GRIPPER')
        res = m.move_to_named_pose('wrist_only', 'gripper')
        if not res:
            return 'failed'

        object_global.position.z += 0.015
        pca_yaw = userdata.data['item_to_pick']['pca_yaw']
        q = (pca_yaw.x, pca_yaw.y, pca_yaw.z, pca_yaw.w)
        e = list(tft.euler_from_quaternion(q))
        if e[2] > 1.57:
            e[2] -= 3.14
        if e[2] < -1.57:
            e[2] += 3.14
        if e[2] > 1.55: # Stop gimbal lock and weird joint configurations.
            e[2] = 1.55
        if e[2] < -1.55:
            e[2] = -1.55

        q = tft.quaternion_from_euler(0, 0, e[2])
        object_global.orientation.x = q[0]
        object_global.orientation.y = q[1]
        object_global.orientation.z = q[2]
        object_global.orientation.w = q[3]

        rospy.loginfo('MOVE GRIPPER TO ABOVE OBJECT')
        res = m.move_to_global(object_global.position.x, object_global.position.y, max(object_global.position.z - 0.2, GRIPPER_MIN_GLOBAL_Z), 'gripper', orientation=object_global.orientation)
        if not res:
            return 'failed'

        rospy.loginfo('MOVING TO OBJECT')
        v_scale = 0.1
        if item_meta[userdata.data['item_to_pick']['label']]['grasp_point_type'] == 'rgb_centroid':
            # Don't trust the depth.
            v_scale = 0.1
        if userdata.data['item_to_pick']['label'] == 'mesh_cup':
            v_scale = 0.05
        (res, stopped, bail) = m.move_to_global_monitor_weight(
            object_global.position.x,
            object_global.position.y,
            min(object_global.position.z, GRIPPER_MAX_GLOBAL_Z),
            'gripper',
            scales_topic,
            velo_scale=v_scale,
            orientation=object_global.orientation)
        if not res:
            return 'failed'

        if bail:
            m.move_relative(0, 0, -0.02, 'gripper_endpoint', 'gripper')
        # rospy.sleep(0.5)

        rospy.loginfo('CLOSE THE GRIPPER')
        gripper.close_gripper()
        rospy.sleep(0.5)

        return 'succeeded'
        # NOTE: everything below this return is unreachable in the original
        # snippet; it is kept as it appears in the source.
        rospy.loginfo(move_success + str(res))

        #MOVE TO STRAIGHT DROP POSITION

        curr = t.current_robot_pose('global_xyz_link', 'gripper_endpoint')
        pose = gmsg.Pose()
        qt = tf.transformations.quaternion_from_euler(0.0, 0.0, 1.57)
        pose.position.x = 0.05
        pose.position.y = curr.position.y
        pose.position.z = 1.0
        pose.orientation.x = qt[0]
        pose.orientation.y = qt[1]
        pose.orientation.z = qt[2]
        pose.orientation.w = qt[3]
        t.publish_pose_as_transform(pose,
                                    'global_xyz_link',
                                    'PINEAPPLE',
                                    seconds=5)
        rospy.loginfo('MOVE UP TO STRAIGHT DROP POSITION')
        res = m.move_to_global(pose.position.x,
                               curr.position.y,
                               pose.position.z,
                               'gripper',
                               orientation=pose.orientation)
        if not res:
            rospy.logerr(move_error)
        rospy.loginfo(move_success + str(res))

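
Examples #5 and #6 condition the PCA yaw the same way, so the logic could live in one helper: fold the yaw into [-pi/2, pi/2] (the tool is symmetric under a 180-degree flip), then clamp just inside that range to stay away from joint limits. A sketch, using exact pi where the examples use 3.14 and 1.57:

import numpy as np
import tf.transformations as tft

def conditioned_yaw_quaternion(q, limit=1.5):
    e = list(tft.euler_from_quaternion(q))
    # Fold into [-pi/2, pi/2]: the tool is symmetric under a half-turn.
    if e[2] > np.pi / 2:
        e[2] -= np.pi
    elif e[2] < -np.pi / 2:
        e[2] += np.pi
    # Clamp to stop gimbal lock and weird joint configurations.
    e[2] = max(min(e[2], limit), -limit)
    return tft.quaternion_from_euler(0, 0, e[2])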
Example #8
def execute_grasp():
    # Execute a grasp.
    global MOVING
    global CURR_Z
    global start_force_srv
    global stop_force_srv

    # Get the positions.
    msg = rospy.wait_for_message('/ggcnn/out/command',
                                 std_msgs.msg.Float32MultiArray)
    d = list(msg.data)

    # Calculate the gripper width.
    grip_width = d[4]
    # Convert width in pixels to mm.
    # 0.07 is the distance from the end effector (CURR_Z) to the camera.
    # Approximately 0.1 degrees per pixel for the RealSense.
    g_width = 2 * ((CURR_Z + 0.07)) * np.tan(
        0.1 * grip_width / 2.0 / 180.0 * np.pi) * 1000
    # Convert into motor positions.
    g = min((1 - (min(g_width, 70) / 70)) * (6800 - 4000) + 4000, 5500)
    set_finger_positions([g, g])

    rospy.sleep(0.5)

    # Pose of the grasp (position only) in the camera frame.
    gp = geometry_msgs.msg.Pose()
    gp.position.x = d[0]
    gp.position.y = d[1]
    gp.position.z = d[2]
    gp.orientation.w = 1

    # Convert to the base frame and add in the angle (ensures a planar grasp; the camera isn't guaranteed to be perpendicular).
    gp_base = convert_pose(gp, 'camera_depth_optical_frame',
                           'm1n6s200_link_base')

    q = tft.quaternion_from_euler(np.pi, 0, d[3])
    gp_base.orientation.x = q[0]
    gp_base.orientation.y = q[1]
    gp_base.orientation.z = q[2]
    gp_base.orientation.w = q[3]

    publish_pose_as_transform(gp_base, 'm1n6s200_link_base', 'G', 0.5)

    # Offset for the initial pose.
    initial_offset = 0.20
    gp_base.position.z += initial_offset

    # Disable force control; makes the robot more accurate.
    stop_force_srv.call(kinova_msgs.srv.StopRequest())

    move_to_pose(gp_base)
    rospy.sleep(0.1)

    # Start force control; helps prevent bad collisions.
    start_force_srv.call(kinova_msgs.srv.StartRequest())

    rospy.sleep(0.25)

    # Reset the position.
    gp_base.position.z -= initial_offset

    # Flag to check for collisions.
    MOVING = True

    # Generate a nonlinearity for the controller.
    cart_cov = generate_cartesian_covariance(0)

    # Move straight down under velocity control.
    velo_pub = rospy.Publisher('/m1n6s200_driver/in/cartesian_velocity',
                               kinova_msgs.msg.PoseVelocity,
                               queue_size=1)
    while MOVING and CURR_Z - 0.02 > gp_base.position.z:
        dz = gp_base.position.z - CURR_Z - 0.03  # Offset by a few cm for the fingertips.
        MAX_VELO_Z = 0.08
        dz = max(min(dz, MAX_VELO_Z), -1.0 * MAX_VELO_Z)

        v = np.array([0, 0, dz])
        vc = list(np.dot(v, cart_cov)) + [0, 0, 0]
        velo_pub.publish(kinova_msgs.msg.PoseVelocity(*vc))
        rospy.sleep(1 / 100.0)

    MOVING = False

    # Close the fingers.
    rospy.sleep(0.1)
    set_finger_positions([8000, 8000])
    rospy.sleep(0.5)

    # Move back up to the initial position.
    gp_base.position.z += initial_offset
    gp_base.orientation.x = 1
    gp_base.orientation.y = 0
    gp_base.orientation.z = 0
    gp_base.orientation.w = 0
    move_to_pose(gp_base)

    stop_force_srv.call(kinova_msgs.srv.StopRequest())

    return
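
The finger mapping in this example is linear in the measured width: 0 mm maps to 6800 motor counts, the 70 mm saturation point maps to 4000, and the result is capped at 5500 so the fingers stay partly open before the descent. Worked through as a helper, with the constants taken directly from the snippet:

def width_to_finger_counts(g_width_mm):
    # Widths saturate at 70 mm, the usable span in this setup.
    g_width_mm = min(g_width_mm, 70.0)
    # Linear map: 0 mm -> 6800 counts (closed), 70 mm -> 4000 counts (open).
    g = (1 - g_width_mm / 70.0) * (6800 - 4000) + 4000
    # Never pre-close past 5500 counts before the approach.
    return min(g, 5500)

# e.g. a 35 mm object pre-positions the fingers at 5400 counts:
print(width_to_finger_counts(35.0))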