Code example #1
File: mp_builder.py Project: weigao95/kplan-ros
def test_builder():
    import numpy as np
    from kplan.utils import SE3_utils
    # OptimizationProblemSpecification is assumed to live beside build_mug2rack;
    # OptimizationBuilderkPAM and solve_kpam come from the surrounding
    # kplan.kpam_opt code and are not shown in this snippet.
    from kplan.kpam_opt.optimization_spec import (
        OptimizationProblemSpecification, build_mug2rack, build_mug2shelf)
    optimization_spec = OptimizationProblemSpecification()
    optimization_spec.load_from_yaml(
        '/home/wei/catkin_ws/src/kplan_ros/kplan/kpam_opt/config/mug2rack.yaml'
    )
    # optimization_spec = build_mug2shelf()
    builder = OptimizationBuilderkPAM(optimization_spec)

    # Example keypoint locations: bottom, handle and top centers of the mug
    keypoint_loc = np.zeros(shape=(3, 3))
    keypoint_loc[0, :] = np.asarray(
        [0.6523959765381968, -0.0028259822512128746, -0.0077028763979177794])
    keypoint_loc[1, :] = np.asarray(
        [0.5961969831659355, -0.0020322277135196576, 0.04569443896150649])
    keypoint_loc[2, :] = np.asarray(
        [0.6477449683371657, -0.005210614473128802, 0.0852574313164387])
    problem = builder.build_optimization(keypoint_loc)

    # Try to solve it
    solve_kpam(problem)
    print(problem.T_action)

    # Transform the keypoints and print them
    print('The transformed bottom center is: ',
          SE3_utils.transform_point(problem.T_action, keypoint_loc[0, :]))
    print('The transformed handle center is: ',
          SE3_utils.transform_point(problem.T_action, keypoint_loc[1, :]))
    print('The transformed top center is: ',
          SE3_utils.transform_point(problem.T_action, keypoint_loc[2, :]))
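
For reference, SE3_utils.transform_point itself is not shown in these snippets. A minimal numpy sketch of the behavior the examples assume (applying a 4x4 homogeneous transform to a 3D point; transform_point_sketch is a hypothetical stand-in, not the kplan implementation):

import numpy as np

def transform_point_sketch(T, point):
    # Rotate by the upper-left 3x3 block of T, then translate.
    return np.dot(T[0:3, 0:3], point) + T[0:3, 3]

# The identity transform leaves the point unchanged.
assert np.allclose(
    transform_point_sketch(np.eye(4), np.array([1.0, 2.0, 3.0])),
    np.array([1.0, 2.0, 3.0]))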
Code example #2
def run_kpam_action_planning():
    import numpy as np
    import rospy
    from geometry_msgs.msg import Point
    # ActionPlanningkPAM and its Request/Response are the kplan_ros service
    # types; quaternion_matrix is the (w, x, y, z)-convention helper from the
    # transformations module used elsewhere in this project.

    # The ros service
    rospy.wait_for_service('plan_kpam_action')
    plan_kpam_action = rospy.ServiceProxy('plan_kpam_action',
                                          ActionPlanningkPAM)

    # Example keypoint locations: bottom, handle and top centers
    point_0 = Point()
    point_0.x = 0.7765539536119968
    point_0.y = 0.2988056134902533
    point_0.z = -0.011688694634577002

    point_1 = Point()
    point_1.x = 0.6836153987929412
    point_1.y = 0.24241428970220136
    point_1.z = 0.13068491379520353

    point_2 = Point()
    point_2.x = 0.7278681586707589
    point_2.y = 0.2742936891124349
    point_2.z = 0.025218065353234786

    # Construct the request and plan it
    request = ActionPlanningkPAMRequest()
    request.keypoint_world_frame = []
    request.keypoint_world_frame.extend([point_0, point_1, point_2])
    result = plan_kpam_action(request)  # type: ActionPlanningkPAMResponse

    # Recover the 4x4 transform T_action from the service response
    quat = [
        result.T_action.orientation.w, result.T_action.orientation.x,
        result.T_action.orientation.y, result.T_action.orientation.z
    ]
    T_action = quaternion_matrix(quat)
    T_action[0, 3] = result.T_action.position.x
    T_action[1, 3] = result.T_action.position.y
    T_action[2, 3] = result.T_action.position.z
    print('The result on client')
    print(T_action)

    # The same keypoints as a (3, 3) numpy array
    keypoint_loc = np.zeros(shape=(3, 3))
    keypoint_loc[0, :] = np.asarray(
        [0.7765539536119968, 0.2988056134902533, -0.011688694634577002])
    keypoint_loc[1, :] = np.asarray(
        [0.6836153987929412, 0.24241428970220136, 0.13068491379520353])
    keypoint_loc[2, :] = np.asarray(
        [0.7278681586707589, 0.2742936891124349, 0.025218065353234786])

    # Transform the keypoints
    from kplan.utils import SE3_utils
    print('The transformed bottom center is: ',
          SE3_utils.transform_point(T_action, keypoint_loc[0, :]))
    print('The transformed handle center is: ',
          SE3_utils.transform_point(T_action, keypoint_loc[1, :]))
    print('The transformed top center is: ',
          SE3_utils.transform_point(T_action, keypoint_loc[2, :]))
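
The quaternion above is unpacked in (w, x, y, z) order before calling quaternion_matrix. A self-contained sketch of the same pose-to-matrix conversion in plain numpy (pose_to_matrix_sketch is hypothetical, shown only to make the convention explicit):

import numpy as np

def pose_to_matrix_sketch(quat_wxyz, position):
    # Build a 4x4 rigid transform from a unit quaternion (w, x, y, z)
    # and a translation vector.
    w, x, y, z = quat_wxyz
    T = np.eye(4)
    T[0:3, 0:3] = np.array([
        [1 - 2 * (y * y + z * z), 2 * (x * y - w * z), 2 * (x * z + w * y)],
        [2 * (x * y + w * z), 1 - 2 * (x * x + z * z), 2 * (y * z - w * x)],
        [2 * (x * z - w * y), 2 * (y * z + w * x), 1 - 2 * (x * x + y * y)]])
    T[0:3, 3] = position
    return T

# The identity quaternion and zero translation give the identity transform.
assert np.allclose(
    pose_to_matrix_sketch([1.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0]), np.eye(4))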
Code example #3
File: mp_terms.py Project: weigao95/kplan-ros
def transform_point_symbolic(problem, from_keypoint, xyzrpy):
    # type: (OptimizationProblemkPAM, np.ndarray, np.ndarray) -> np.ndarray
    """
    Transform the point using symbolic expression
    :param problem:
    :param from_keypoint: np.ndarray with float
    :param xyzrpy: np.ndarray with symbolic variable
    :return:
    """
    T_eps = SE3_utils.xyzrpy_to_matrix_symbolic(xyzrpy)
    T = np.dot(problem.T_init, T_eps)
    return SE3_utils.transform_point(T, from_keypoint)
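
The symbolic path mirrors the numeric one: xyzrpy_to_matrix_symbolic builds a 4x4 transform whose entries are expressions in the six decision variables. A minimal sympy sketch of that pattern, assuming the common R = Rz(yaw) * Ry(pitch) * Rx(roll) convention (the actual convention of SE3_utils may differ):

import sympy as sp

def xyzrpy_to_matrix_sketch(xyzrpy):
    # 4x4 homogeneous transform from [x, y, z, roll, pitch, yaw].
    x, y, z, roll, pitch, yaw = xyzrpy
    Rx = sp.Matrix([[1, 0, 0],
                    [0, sp.cos(roll), -sp.sin(roll)],
                    [0, sp.sin(roll), sp.cos(roll)]])
    Ry = sp.Matrix([[sp.cos(pitch), 0, sp.sin(pitch)],
                    [0, 1, 0],
                    [-sp.sin(pitch), 0, sp.cos(pitch)]])
    Rz = sp.Matrix([[sp.cos(yaw), -sp.sin(yaw), 0],
                    [sp.sin(yaw), sp.cos(yaw), 0],
                    [0, 0, 1]])
    T = sp.eye(4)
    T[0:3, 0:3] = Rz * Ry * Rx
    T[0:3, 3] = sp.Matrix([x, y, z])
    return T

# Every entry of T is now an expression in the six symbols.
T = xyzrpy_to_matrix_sketch(sp.symbols('x y z roll pitch yaw'))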
Code example #4
File: mp_terms.py Project: weigao95/kplan-ros
def point2plane_cost(problem, from_keypoints, goal_keypoints, weight,
                     plane_normal, xyzrpy):
    # type: (OptimizationProblemkPAM, np.ndarray, np.ndarray, float, np.ndarray, np.ndarray) -> np.ndarray
    """
    Penalize the squared distance, measured along plane_normal, between each
    transformed keypoint and its goal.
    :param from_keypoints: (N, 3) np.ndarray with float
    :param goal_keypoints: (N, 3) np.ndarray with float
    :param weight: float scalar weight
    :param plane_normal: (3,) plane normal along which the error is measured
    :param xyzrpy: np.ndarray with potentially symbolic variables
    :return: The cost value
    """
    cost = 0.0
    T_eps = SE3_utils.xyzrpy_to_matrix(xyzrpy)
    T = np.dot(problem.T_init, T_eps)

    # The iteration on keypoints
    num_keypoints = from_keypoints.shape[0]
    for i in range(num_keypoints):
        q = SE3_utils.transform_point(T, from_keypoints[i, :])
        q_goal = goal_keypoints[i, :]
        delta = q - q_goal
        delta_normal = np.dot(delta, plane_normal)
        cost += np.dot(delta_normal, delta_normal)

    return weight * cost
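
Since the transform reduces to a rotation plus translation, the term above can be checked standalone. A runnable sketch with the transform passed in directly (point2plane_cost_sketch is hypothetical; it bypasses problem.T_init and xyzrpy):

import numpy as np

def point2plane_cost_sketch(from_keypoints, goal_keypoints, weight,
                            plane_normal, T):
    # Project each keypoint error onto the plane normal and accumulate
    # the squared normal distance, as in point2plane_cost above.
    cost = 0.0
    for i in range(from_keypoints.shape[0]):
        q = np.dot(T[0:3, 0:3], from_keypoints[i, :]) + T[0:3, 3]
        delta_normal = np.dot(q - goal_keypoints[i, :], plane_normal)
        cost += delta_normal * delta_normal
    return weight * cost

# In-plane error is ignored: the goals differ only along x, the normal
# is z, so the cost is exactly zero.
kp = np.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0]])
goal = kp + np.array([0.1, 0.0, 0.0])
assert point2plane_cost_sketch(
    kp, goal, 1.0, np.array([0.0, 0.0, 1.0]), np.eye(4)) == 0.0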
Code example #5
File: mp_terms.py Project: weigao95/kplan-ros
def keypoint_l2_cost(problem, from_keypoints, goal_keypoints, weight, xyzrpy):
    # type: (OptimizationProblemkPAM, np.ndarray, np.ndarray, float, np.ndarray) -> np.ndarray
    """
    :param from_keypoints: (N, 3) np.ndarray with float
    :param goal_keypoints: (N, 3) np.ndarray with float
    :param weight: float scalar weight
    :param xyzrpy: np.ndarray with potentially symbolic variable
    :return: The cost value
    """
    cost = 0.0
    T_eps = SE3_utils.xyzrpy_to_matrix(xyzrpy)
    T = np.dot(problem.T_init, T_eps)

    # The iteration on keypoints
    num_keypoints = from_keypoints.shape[0]
    for i in range(num_keypoints):
        q = SE3_utils.transform_point(T, from_keypoints[i, :])
        q_goal = goal_keypoints[i, :]
        delta = q - q_goal
        cost += np.dot(delta, delta)

    return weight * cost
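
The L2 term is the weighted sum of squared keypoint-to-goal distances. The same check in standalone form (keypoint_l2_cost_sketch is hypothetical and takes the transform directly):

import numpy as np

def keypoint_l2_cost_sketch(from_keypoints, goal_keypoints, weight, T):
    # Transform all keypoints at once, then sum the squared residuals.
    q = np.dot(from_keypoints, T[0:3, 0:3].T) + T[0:3, 3]
    return weight * np.sum((q - goal_keypoints) ** 2)

# A pure 0.1 m offset of two keypoints costs 2 * 0.1^2 = 0.02.
kp = np.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0]])
assert np.isclose(
    keypoint_l2_cost_sketch(kp, kp + [0.1, 0.0, 0.0], 1.0, np.eye(4)), 0.02)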
Code example #6
    def plan_with_geometry(shoe_keypoint_world, pc_in_world):
        # type: (np.ndarray, np.ndarray) -> (bool, np.ndarray)
        """
        Plan the grasp fingertip frame for a shoe from its keypoints and
        point cloud by cropping the cloud around the heel.
        :param shoe_keypoint_world: (6, 3) shoe keypoints in world frame
        :param pc_in_world: (N, 3) shoe point cloud expressed in world frame
        :return: (success, 4x4 np.ndarray of the gripper fingertip frame)
        """
        # Read the keypoints
        assert shoe_keypoint_world.shape[0] == 6
        assert shoe_keypoint_world.shape[1] == 3
        heel_bottom = shoe_keypoint_world[2, :]
        heel_top = shoe_keypoint_world[4, :]
        tongue_kp = shoe_keypoint_world[5, :]

        # Construct the grasping
        grasp_x_axis = np.array([0, 0, -1])
        heel_center = 1 / 2.0 * (heel_bottom + heel_top)
        grasp_y_axis = heel_center - tongue_kp
        grasp_y_axis[2] = 0
        grasp_y_axis = grasp_y_axis / np.linalg.norm(grasp_y_axis)
        grasp_z_axis = np.cross(grasp_x_axis, grasp_y_axis)

        # The origin of the crop frame (copied so heel_center is not mutated)
        crop_frame_origin = np.copy(heel_center)
        crop_frame_origin[2] = 1 / 2.0 * (heel_top[2] + tongue_kp[2])
        T_crop_box_frame_in_world = np.eye(4)
        T_crop_box_frame_in_world[0:3, 0] = grasp_x_axis
        T_crop_box_frame_in_world[0:3, 1] = grasp_y_axis
        T_crop_box_frame_in_world[0:3, 2] = grasp_z_axis
        T_crop_box_frame_in_world[0:3, 3] = crop_frame_origin
        T_world_to_crop_frame = transformations.inverse_matrix(
            T_crop_box_frame_in_world)

        # Transform the pc to crop frame and crop it
        pc_in_crop_frame = SE3_utils.transform_point_cloud(
            T_world_to_crop_frame, pc_in_world)

        # The cropping bbox
        bbox_min = np.array([-0.05, -0.02, -0.01])
        bbox_max = np.array([0.10, 0.1, 0.01])
        mask_x = np.logical_and(pc_in_crop_frame[:, 0] > bbox_min[0],
                                pc_in_crop_frame[:, 0] < bbox_max[0])
        mask_y = np.logical_and(pc_in_crop_frame[:, 1] > bbox_min[1],
                                pc_in_crop_frame[:, 1] < bbox_max[1])
        mask_z = np.logical_and(pc_in_crop_frame[:, 2] > bbox_min[2],
                                pc_in_crop_frame[:, 2] < bbox_max[2])
        mask_xy = np.logical_and(mask_x, mask_y)
        mask_xyz = np.logical_and(mask_xy, mask_z)
        cropped_pc_in_crop_frame = pc_in_crop_frame[
            mask_xyz]  # type: np.ndarray

        # Not enough points (.size counts scalars, not rows), just return
        if cropped_pc_in_crop_frame.size < 20:
            return False, np.ndarray(shape=(4, 4))  # uninitialized placeholder

        # The grasp center in cropped frame
        grasp_center_crop_frame = [0, 0, 0]
        grasp_center_crop_frame[0] = np.min(cropped_pc_in_crop_frame[:, 0])
        grasp_center_crop_frame[1] = np.average(cropped_pc_in_crop_frame[:, 1])
        grasp_center_crop_frame[1] -= 0.01  # to help us not hit the heel
        # grasp_center_crop_frame[1] = np.max(cropped_pc_in_crop_frame[:, 1])
        # grasp_center_crop_frame[1] -= 0.01  # to help us not hit the heel
        grasp_center_world_frame = SE3_utils.transform_point(
            T_crop_box_frame_in_world, np.asarray(grasp_center_crop_frame))

        # OK
        grasp_fingertip_in_world = np.eye(4)
        grasp_fingertip_in_world[0:3, 0] = grasp_x_axis
        grasp_fingertip_in_world[0:3, 1] = grasp_y_axis
        grasp_fingertip_in_world[0:3, 2] = grasp_z_axis
        grasp_fingertip_in_world[0:3, 3] = grasp_center_world_frame
        return True, grasp_fingertip_in_world
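
The crop step above follows a reusable pattern: express the cloud in a local frame, then keep the points inside an axis-aligned box. A standalone sketch of the masking part (crop_by_bbox_sketch is hypothetical):

import numpy as np

def crop_by_bbox_sketch(points, bbox_min, bbox_max):
    # Keep only the (N, 3) points strictly inside the axis-aligned box,
    # equivalent to the mask_x/mask_y/mask_z chain above.
    mask = np.logical_and(np.all(points > bbox_min, axis=1),
                          np.all(points < bbox_max, axis=1))
    return points[mask]

pc = np.array([[0.0, 0.0, 0.0], [1.0, 1.0, 1.0]])
half = np.array([0.5, 0.5, 0.5])
assert crop_by_bbox_sketch(pc, -half, half).shape == (1, 3)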
Code example #7
    def plan_with_geometry(mug_keypoint_world, pc_in_world):
        # type: (np.ndarray, np.ndarray) -> (bool, np.ndarray)
        """
        Plan the grasp frame of mugs that are placed upright on the table.
        The output is the grasp finger tip frame
        :param mug_keypoint_world: (N, 3) mug keypoints in the order of (bottom_center, handle_center, top_center)
        :param pc_in_world: (N, 3) mug point cloud expressed in world frame.
        :return: 4x4 np.ndarray of gripper fingertip frame
        """
        # Read the keypoint
        assert mug_keypoint_world.shape[0] == 3
        assert mug_keypoint_world.shape[1] == 3
        assert pc_in_world.shape[1] == 3
        bottom_center = mug_keypoint_world[0, :]
        handle_center = mug_keypoint_world[1, :]
        top_center = mug_keypoint_world[2, :]

        # The top center, with its xy averaged between the bottom and top detections
        center_avg = (bottom_center + top_center) / 2.0  # average of top and bottom keypoint detections
        top_center_avg = np.copy(center_avg)
        top_center_avg[2] = top_center[2]  # keep the height of the top rim

        # The orientation of the finger-tip frame
        grasp_x_axis = np.array([0, 0, -1])
        grasp_y_axis = center_avg - handle_center
        grasp_y_axis[2] = 0
        grasp_y_axis = grasp_y_axis / np.linalg.norm(grasp_y_axis)
        grasp_z_axis = np.cross(grasp_x_axis, grasp_y_axis)

        # The cropping box
        crop_frame_origin = top_center_avg
        T_crop_box_frame_in_world = np.eye(4)
        T_crop_box_frame_in_world[0:3, 0] = grasp_x_axis
        T_crop_box_frame_in_world[0:3, 1] = grasp_y_axis
        T_crop_box_frame_in_world[0:3, 2] = grasp_z_axis
        T_crop_box_frame_in_world[0:3, 3] = crop_frame_origin
        T_world_to_crop_frame = transformations.inverse_matrix(T_crop_box_frame_in_world)

        # Transform the pc to crop frame and crop it
        pc_in_crop_frame = SE3_utils.transform_point_cloud(T_world_to_crop_frame, pc_in_world)

        # The cropping bbox
        bbox_min = np.array([-0.02, 0.0, -0.01])
        bbox_max = np.array([0.02, 0.1, 0.01])
        mask_x = np.logical_and(pc_in_crop_frame[:, 0] > bbox_min[0], pc_in_crop_frame[:, 0] < bbox_max[0])
        mask_y = np.logical_and(pc_in_crop_frame[:, 1] > bbox_min[1], pc_in_crop_frame[:, 1] < bbox_max[1])
        mask_z = np.logical_and(pc_in_crop_frame[:, 2] > bbox_min[2], pc_in_crop_frame[:, 2] < bbox_max[2])
        mask_xy = np.logical_and(mask_x, mask_y)
        mask_xyz = np.logical_and(mask_xy, mask_z)
        cropped_pc_in_crop_frame = pc_in_crop_frame[mask_xyz]  # type: np.ndarray

        # Not enough points (.size counts scalars, not rows), just return
        if cropped_pc_in_crop_frame.size < 20:
            return False, np.ndarray(shape=(4, 4))

        # The grasp center in cropped frame
        grasp_center_crop_frame = [0, 0, 0]
        grasp_center_crop_frame[0] = np.min(cropped_pc_in_crop_frame[:, 0])
        grasp_center_crop_frame[1] = np.average(cropped_pc_in_crop_frame[:, 1])
        grasp_center_world_frame = SE3_utils.transform_point(
            T_crop_box_frame_in_world,
            np.asarray(grasp_center_crop_frame))

        # OK
        grasp_fingertip_in_world = np.eye(4)
        grasp_fingertip_in_world[0:3, 0] = grasp_x_axis
        grasp_fingertip_in_world[0:3, 1] = grasp_y_axis
        grasp_fingertip_in_world[0:3, 2] = grasp_z_axis
        grasp_fingertip_in_world[0:3, 3] = grasp_center_world_frame
        return True, grasp_fingertip_in_world
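
Both planners invert T_crop_box_frame_in_world via transformations.inverse_matrix. For a rigid transform this has a closed form, sketched below in plain numpy (inverse_rigid_transform_sketch is hypothetical, not the library routine, and assumes the upper-left block is a rotation):

import numpy as np

def inverse_rigid_transform_sketch(T):
    # Invert [R t; 0 1] as [R^T, -R^T t; 0 1].
    R, t = T[0:3, 0:3], T[0:3, 3]
    T_inv = np.eye(4)
    T_inv[0:3, 0:3] = R.T
    T_inv[0:3, 3] = -np.dot(R.T, t)
    return T_inv

T = np.eye(4)
T[0:3, 3] = [0.1, 0.2, 0.3]
assert np.allclose(np.dot(inverse_rigid_transform_sketch(T), T), np.eye(4))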