    def setup_class(cls):
        c30 = np.sqrt(3) / 2.0
        s30 = -0.5

        c60 = -s30
        s60 = -c30

        # create pose1 from transformation matrix
        m1 = np.eye(4)
        m1[0:3, 3] = np.asarray([0.8660254037844386, 0.5, 0.0])
        m1[0:2, 0:2] = np.asarray([[c30, -s30], [s30, c30]])
        cls.pose1 = Pose.from_transformation_matrix(m1)

        # create pose2 from transformation matrix
        m2 = np.eye(4)
        m2[0:3, 3] = np.asarray([0.25, -0.43301270189221935, 0.0])
        m2[0:2, 0:2] = np.asarray([[c60, -s60], [s60, c60]])
        cls.pose2 = Pose.from_transformation_matrix(m2)

        # create pose3 from transformation matrix
        cls.m3 = np.eye(4)
        cls.m3[0:3, 3] = np.asarray([0.8660254037844386, 0.0, 0.0])
        cls.m3[0:2, 0:2] = np.asarray([[0.0, 1.0], [-1.0, 0.0]])
        cls.pose3 = Pose.from_transformation_matrix(cls.m3)
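
        # Note: numerically, np.matmul(m1, m2) equals cls.m3 (composing the
        # -30 degree rotation of m1 with the -60 degree rotation of m2 gives
        # the -90 degree rotation of m3), so composing pose1 with pose2 is
        # expected to reproduce pose3.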

        # create pose4 from transformation matrix
        translation = np.asarray([0.22419, -0.78420, 0.945699])
        quaternion = Quaternion(x=0.6087561845779419,
                                y=0.7779673933982849,
                                z=0.047310106456279755,
                                w=0.1481364518404007)
        cls.pose4 = Pose(translation, quaternion)
def project_pixel_on_plane(pixel_point: np.ndarray, plane_params: np.ndarray,
                           camera_pose: Pose, camera_matrix: np.ndarray):
    """
    Find the 3D point on the plane that corresponds to a specific image pixel

    The assumption is that the imaged content lies on a plane

    Parameters
    ----------
    pixel_point : np.ndarray
        pixel point [x, y]
    plane_params : np.ndarray
        plane in general form ax + by + cz + d = 0
        as numpy array [a,b,c,d]
    camera_pose : xamla_motion.data_types.Pose
        pose of camera in world coordinates
    camera_matrix : np.ndarray
        3x3 camera matrix, see the OpenCV documentation for more information

    Returns
    -------
    np.ndarray
        intersection point of the pixel ray with the plane
    """
    cam_pose = camera_pose.transformation_matrix()

    # create ray in camera space
    htp = np.array([pixel_point[0], pixel_point[1], 1])
    ray = np.matmul(np.linalg.inv(camera_matrix), htp)

    # transform the ray from camera to world coordinates
    ray_world = np.matmul(cam_pose[0:3, 0:3], ray)
    eye = cam_pose[0:3, 3]
    at = eye + ray_world

    # compute intersection ray and plane
    t, hit = x3d.intersect_ray_plane(eye, at, plane_params)

    return hit
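
A minimal usage sketch, not taken from the original source: the camera matrix, plane and camera pose below are invented for illustration, and it assumes the x3d helper used above returns the ray-plane intersection point.

# Hypothetical values: a 640x480 pinhole camera at the origin looking along +z,
# and a plane 2 m in front of it.
camera_matrix = np.array([[800.0, 0.0, 320.0],
                          [0.0, 800.0, 240.0],
                          [0.0, 0.0, 1.0]])
plane_params = np.array([0.0, 0.0, 1.0, -2.0])   # the plane z = 2
camera_pose = Pose.identity()

hit = project_pixel_on_plane(np.array([320.0, 240.0]), plane_params,
                             camera_pose, camera_matrix)
print(hit)  # the ray through the principal point is expected to hit near [0, 0, 2]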
Example #3
    def setup_class(cls):
        cls.client = WorldViewClient()

        joint_set = JointSet('1,2,3')

        cls.joint_values_1 = JointValues(joint_set, 0.0)
        cls.joint_values_2 = JointValues(joint_set, [1, 2, 3])

        cls.pose_1 = Pose.identity()
        cls.pose_2 = Pose.identity().translate([0.1, 0.2, 1.8])
        cls.pose_3 = Pose.identity().translate([0.1, -0.2, 1.8])

        cls.cartesian_path_1 = CartesianPath([cls.pose_1, cls.pose_2])
        cls.cartesian_path_2 = CartesianPath([cls.pose_2, cls.pose_1])

        collision_box_1 = CollisionPrimitive.create_box(
            0.05,
            0.05,
            0.05,
            cls.pose_1
        )

        collision_box_2 = CollisionPrimitive.create_box(
            0.05,
            0.05,
            0.05,
            cls.pose_2
        )

        collision_box_3 = CollisionPrimitive.create_box(
            0.05,
            0.05,
            0.05,
            cls.pose_3
        )

        cls.collision_object_1 = CollisionObject([collision_box_1])
        cls.collision_object_2 = CollisionObject(
            [collision_box_2, collision_box_3])

        cls.folder_path = 'test/my_test'
        cls.joint_values_path = 'test_joint_values'
        cls.pose_path = 'test_pose'
        cls.cartesian_path = 'test_cartesian_path'
        cls.collision_object_path = 'test_collision_object'
Example #4
def main():
    # create move group instance
    move_group = MoveGroup()
    # get default endeffector of the movegroup
    end_effector = move_group.get_end_effector()

    # create cartesian path and equivalent joint path
    t1 = [0.502522, 0.2580, 0.3670]
    q1 = Quaternion(w=0.304389, x=0.5272, y=0.68704, z=0.39666)

    t2 = [0.23795, 0.46845, 0.44505]
    q2 = Quaternion(w=0.212097, x=0.470916, y=0.720915, z=0.462096)

    t3 = [0.6089578, 0.3406782, 0.208865]
    q3 = Quaternion(w=0.231852, x=0.33222, y=0.746109, z=0.528387)

    pose_1 = Pose(t1, q1)
    pose_2 = Pose(t2, q2)
    pose_3 = Pose(t3, q3)

    cartesian_path = CartesianPath([pose_1, pose_2, pose_3])

    plan = end_effector.move_cartesian(cartesian_path,
                                       collision_check=True).plan()

    stepped_client = plan.execute_supervised()

    robot_chat = RobotChatClient()

    robot_chat_stepped_motion = RobotChatSteppedMotion(robot_chat,
                                                       stepped_client,
                                                       move_group.name)

    loop = asyncio.get_event_loop()
    register_asyncio_shutdown_handler(loop)

    try:
        loop.run_until_complete(
            robot_chat_stepped_motion.handle_stepwise_motions())
    finally:
        loop.close()
Example #5
    def send_set_point(self, setPoint: Pose):
        """
        Jogging to Pose

        Parameters
        ----------
        setPoint : Pose
            The Pose to which the end effector should jog
        """

        pose_msg = setPoint.to_posestamped_msg()
        self._set_point_pub.publish(pose_msg)
Example #6
async def imitate_move(end_effector, move_group):
    input(
        "Press enter to set the current position as reference start point, and start to listen to poses from client"
    )
    start = end_effector.get_current_pose()
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        s.bind((HOST, PORT))
        s.listen()
        while True:
            conn = None
            try:
                conn, addr = s.accept()
                with conn:
                    while True:
                        data = conn.recv(1024)
                        if not data:
                            break

                        # eval() on raw socket data is unsafe; ast.literal_eval would be a safer alternative
                        pose_list = eval(data.decode("utf-8"))
                        # print(pose_list)
                        path = []
                        for pose in pose_list:
                            coordinate = pose[:3]

                            # The following line should be changed according to current pose and the frame of robot base
                            coordinate[0], coordinate[1], coordinate[2] = (
                                coordinate[2], coordinate[0], coordinate[1])
                            coordinate = np.array(
                                coordinate) + start.translation
                            quat_element = pose[3:7]

                            # The following line should be changed consistently with the coordinate remapping above
                            quat = Quaternion(quat_element[0], quat_element[3],
                                              quat_element[1], quat_element[2])
                            quat = quat * start.quaternion
                            pose = Pose(coordinate, quat)
                            path.append(pose)

                        cartesian_path = CartesianPath(path)
                        joint_path = end_effector.inverse_kinematics_many(
                            cartesian_path, False).path
                        move_joints = move_group.move_joints(joint_path)
                        move_joints = move_joints.with_velocity_scaling(0.1)
                        move_joints_plan = move_joints.plan()
                        await move_joints_plan.execute_async()

            except KeyboardInterrupt:
                if conn:
                    conn.close()
                break
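
The server above evaluates each received payload as a Python literal. A minimal client-side sketch, not part of the original example (HOST, PORT and the pose values are assumptions), sends one batch of 7-element poses [x, y, z, q0, q1, q2, q3] in that format:

import socket

HOST, PORT = "127.0.0.1", 42424   # hypothetical address; must match the server

poses = [
    [0.00, 0.00, 0.05, 1.0, 0.0, 0.0, 0.0],   # small offsets relative to the start pose
    [0.00, 0.02, 0.05, 1.0, 0.0, 0.0, 0.0],
]

with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
    s.connect((HOST, PORT))
    s.sendall(repr(poses).encode("utf-8"))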
Example #7
def plane_parameters_from_pose(pose: Pose):
    """
    Compute plane parameters in general form ax + by + cz + d = 0

    Parameters
    ----------
    pose : xamla_motion.data_types.Pose
        pose from which the plane is extracted; its z-axis is the plane normal

    Returns
    -------
    np.ndarray
        plane parameters [a, b, c, d]
    """

    pm = pose.transformation_matrix()

    z = normalize(pm[0:3, 2])
    d = np.matmul(z, pm[0:3, 3])
    return np.asarray([z[0], z[1], z[2], -d])
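
A short usage sketch with assumed values: the plane normal is the pose's z-axis, so an identity-oriented pose at z = 0.75 yields the plane z = 0.75.

table_pose = Pose.identity().translate([0.0, 0.0, 0.75])
plane_params = plane_parameters_from_pose(table_pose)
print(plane_params)   # expected: [0, 0, 1, -0.75]
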
    async def __call__(self,
                       crop_boxes: List[CropBox],
                       transform: Pose,
                       shutter_speed_in_ms: int = 50):
        # TODO: Add timer
        # with Timer('computeCropBoxStatistics', self.logger):
        goal = CropBoxStatisticsGoal()
        goal.crop_boxes = crop_boxes
        goal.cloud_transform = transform.to_pose_msg()
        goal.shutter_speed_in_ms = float(shutter_speed_in_ms)
        response = await self.client(goal)

        result = response.result()
        if not result.success:
            raise rospy.TransportException('Failed to get images')

        return result.number_of_points
def main(pose: Pose, xSize: int, ySize: int, xStepSize: float,
         yStepSize: float) -> List[Pose]:
    """
    This function uses a pose to calculate a grid of poses

    Parameters
    ----------
    pose : Pose
        Defines the position and orientation of the grid
    xSize : int
        Number of elements of the grid in x-direction
    ySize : int
        Number of elements of the grid in y-direction
    xStepSize : float
        The distance between the poses in x-direction
    yStepSize : float
        The distance between the poses in y-direction

    Returns
    -------
    List[Pose]
        A list of generated poses
    """

    # The pose defines the rotation, and its position is a corner of the grid
    rotation = pose.quaternion
    # Calculate the delta of each step in x- and y-direction
    # For that, we scale the unit vector pointing in x-direction/y-direction
    # and then rotate it by the rotation of the pose
    delta_x = rotation.rotate(np.array([xStepSize, 0.0, 0.0]))
    delta_y = rotation.rotate(np.array([0.0, yStepSize, 0.0]))
    poses = []  # type: List[Pose]
    for y_index in range(ySize):
        for x_index in range(xSize):
            translation = pose.translation + x_index * delta_x + y_index * delta_y
            poses.append(Pose(translation, rotation))
    return poses
Example #10
def test_cached_trajectories(tc_go_to: TaskTrajectoryCache,
                             tc_come_back: TaskTrajectoryCache):

    world_view_client = WorldViewClient()
    move_group = example_utils.get_move_group()
    end_effector = move_group.get_end_effector()

    world_view_folder = "example_08_trajectory_cache"

    add_generated_folder(world_view_client, world_view_folder)

    # Get start and end from world view
    start_jv = world_view_client.get_joint_values("start_jv",
                                                  world_view_folder)
    start_pose = end_effector.compute_pose(start_jv)
    end_pose = world_view_client.get_pose("end_pose", world_view_folder)

    # Get a SampleBox, which contains a Volume defined by translations and rotations
    end_box = get_sample_box(end_pose)

    # Show the grid in world view
    sample_positions = end_box.sample_positions
    for i in range(len(sample_positions[0])):
        translation = [
            sample_positions[0][i], sample_positions[1][i],
            sample_positions[2][i]
        ]
        pose = Pose.from_transformation_matrix(np.eye(4))
        pose = pose.translate(translation)
        pose = pose.rotate(end_box.quaternions[0])
        # show each sample position in world view, using the first rotation of the SampleBox
        world_view_client.add_pose(element_name="pose_{}".format(i),
                                   folder_path=world_view_folder +
                                   "/generated",
                                   pose=pose)

    # Move to start_jv
    loop.run_until_complete(move_group.move_joints_collision_free(start_jv))

    if tc_go_to is not None and tc_come_back is not None:
        world_view_client.add_pose(element_name="target_pose",
                                   folder_path=world_view_folder +
                                   "/generated",
                                   pose=end_pose)

        input(
            "Please move 'target_pose' in world view in folder 'example_08_trajectory_cache/generated', then press enter."
        )
        target_pose = world_view_client.get_pose(
            element_name="target_pose",
            folder_path=world_view_folder + "/generated")
        try:
            # This movement uses the trajectory which ends nearest to target_pose
            print("Move to nearest pose")
            execution = move_with_trajectory_cache(
                cache=tc_go_to,
                end_effector=end_effector,
                start_joint_values=start_jv,
                target_pose=target_pose,
                max_position_diff_radius=0.05)
            loop.run_until_complete(execution)

            # This movement uses the trajectory which begins nearest to target_pose
            print("Return home")
            execution = move_with_trajectory_cache(
                cache=tc_come_back,
                end_effector=end_effector,
                start_joint_values=None,
                target_pose=start_pose,
                max_position_diff_radius=0.05)
            loop.run_until_complete(execution)
        except RuntimeError as e:
            print(e)
            print(
                "The chosen pose must lie in the radius of one of the poses in the used SampleVolume "
            )
    else:
        print("Could not open serialized data. ")

    input("Press enter to clean up")
    world_view_client.remove_element("generated", world_view_folder)
Example #11
def main():
    # create move group instance
    move_group = MoveGroup()
    # get default endeffector of the movegroup
    end_effector = move_group.get_end_effector()

    # create cartesian path and equivalent joint path
    t1 = [0.502522, 0.2580, 0.3670]
    q1 = Quaternion(w=0.304389, x=0.5272, y=0.68704, z=0.39666)

    t2 = [0.23795, 0.46845, 0.44505]
    q2 = Quaternion(w=0.212097, x=0.470916, y=0.720915, z=0.462096)

    t3 = [0.6089578, 0.3406782, 0.208865]
    q3 = Quaternion(w=0.231852, x=0.33222, y=0.746109, z=0.528387)

    pose_1 = Pose(t1, q1)
    pose_2 = Pose(t2, q2)
    pose_3 = Pose(t3, q3)

    cartesian_path = CartesianPath([pose_1, pose_2, pose_3])

    joint_path = end_effector.inverse_kinematics_many(cartesian_path,
                                                      False).path

    loop = asyncio.get_event_loop()
    register_asyncio_shutdown_handler(loop)

    async def example_moves():
        print('test MoveGroup class')
        print('----------------          move joints                 -------------------')
        move_joints = move_group.move_joints(joint_path)
        move_joints = move_joints.with_velocity_scaling(0.1)

        move_joints_plan = move_joints.plan()

        await move_joints_plan.execute_async()

        print('----------------        move joints supervised        -------------------')
        move_joints = move_joints.with_velocity_scaling(0.5)
        stepped_motion_client = move_joints.plan().execute_supervised()
        await run_supervised(stepped_motion_client)

        print('----------------      move joints collision free      -------------------')
        move_joints_cf = MoveJointsCollisionFreeOperation(
            move_joints.to_args())

        await move_joints_cf.plan().execute_async()

        print('test EndEffector class')
        print('----------------          move cartesian               -------------------')
        move_cartesian = end_effector.move_cartesian(cartesian_path)
        move_cartesian = move_cartesian.with_velocity_scaling(0.4)

        move_cartesian_plan = move_cartesian.plan()

        await move_cartesian_plan.execute_async()

        print('----------------      move cartesian supervised        -------------------')
        move_cartesian = move_cartesian.with_velocity_scaling(0.8)
        stepped_motion_client = move_cartesian.plan().execute_supervised()
        await run_supervised(stepped_motion_client)

        print('----------------    move cartesian collision free      -------------------')
        move_cartesian_cf = MoveCartesianCollisionFreeOperation(
            move_cartesian.to_args()
        )

        await move_cartesian_cf.plan().execute_async()

    try:
        loop.run_until_complete(example_moves())
    finally:
        loop.close()
  def runTooltipCalib(self):

    print('Please move the robot to the capture pose. Then press \'Enter\' and wait.')
    sys.stdin.read(1)
    sys.stdin.read(1)

    if not os.path.isdir(self.storage_folder) :
      os.makedirs(self.storage_folder)

    image_left = None
    image_right = None
    if not self.offline :
      # capture images of calibration target and write them into storage folder
      images = self.capture_client(self.exposure_time)
      image_left = cv.cvtColor(images[self.left_serial], cv.COLOR_GRAY2RGB)
      image_right = cv.cvtColor(images[self.right_serial], cv.COLOR_GRAY2RGB)
      cv.imwrite(os.path.join(self.storage_folder, 'left.png'), image_left)
      cv.imwrite(os.path.join(self.storage_folder, 'right.png'), image_right)
    else :
      # offline version: read images of calibration target
      image_left = cv.imread(os.path.join(self.storage_folder, 'left.png'))
      image_right = cv.imread(os.path.join(self.storage_folder, 'right.png'))

    current_posture_1 = None
    current_pose_1 = None
    if not self.offline :
      # store robot poses into storage folder
      current_posture_1 = self.move_group.get_current_joint_positions()
      current_pose_1 = self.move_group.motion_service.query_pose(self.move_group.name, current_posture_1, self.link_name)
      with open(os.path.join(self.storage_folder, 'posture_1.p'), 'wb') as f:
          pickle.dump(current_posture_1, f)
      with open(os.path.join(self.storage_folder, 'pose_1.p'), 'wb') as f:
          pickle.dump(current_pose_1, f)
    else :
      # offline version: read posture and poses
      with open(os.path.join(self.storage_folder, 'posture_1.p'), 'rb') as f:
        current_posture_1 = pickle.load(f)
      with open(os.path.join(self.storage_folder, 'pose_1.p'), 'rb') as f:
        current_pose_1 = pickle.load(f)

    self.pattern_localizer.setStereoCalibration(self.stereocalib) # self.stereocalib_sensorhead_cams
    camPoseFinalList, circleGridPointsLeft, circleGridPointsRight, pointsInCamCoords = self.pattern_localizer.calcCamPoseViaPlaneFit(image_left, image_right, "left", False, True)
    if circleGridPointsLeft is None or circleGridPointsRight is None :
      print("Not all 4 patterns have been found in both camera images.")
      print("-> One of the patterns may lie too near to the image boundary, such that it is cropped after image rectification.")
      print("-> The images might be too dark, i.e. exposure time might be too low, ...")
      print("-> Run pattern search in debug mode to analyze problem:")
      camPoseFinalList, circleGridPointsLeft, circleGridPointsRight, pointsInCamCoords = self.pattern_localizer.calcCamPoseViaPlaneFit(image_left, image_right, "left", True, True)
      return False
    elif (len(circleGridPointsLeft) != 4 or len(circleGridPointsRight) != 4) :
      print("Not all 4 patterns have been found in both camera images.")
      print("-> One of the patterns may lie too near to the image boundary, such that it is cropped after image rectification.")
      print("-> The images might be too dark, i.e. exposure time might be too low, ...")
      print("-> Run pattern search in debug mode to analyze problem:")
      camPoseFinalList, circleGridPointsLeft, circleGridPointsRight, pointsInCamCoords = self.pattern_localizer.calcCamPoseViaPlaneFit(image_left, image_right, "left", True, True)
      return False

    Hc1 = camPoseFinalList["1"]
    Hc2 = camPoseFinalList["3"]
    Hc3 = camPoseFinalList["5"]
    Hc4 = camPoseFinalList["7"]
    print("camPoseFinalList[\"1\"]:")
    print(Hc1)
    print("camPoseFinalList[\"3\"]:")
    print(Hc2)
    print("camPoseFinalList[\"5\"]:")
    print(Hc3)
    print("camPoseFinalList[\"7\"]:")
    print(Hc4)
    print("\n")

    Hg_Pose = deepcopy(current_pose_1)
    H_Pose = deepcopy(self.hand_eye) # hand_eye is torso_eye (= torso_to_cam) here!
    H = deepcopy(H_Pose.transformation_matrix())
    Hg = deepcopy(Hg_Pose.transformation_matrix())
    print("Hg (torso pose):")
    print(Hg)
    print("H (torso_to_cam):")
    print(H)
    print("\n")

    t_mat1 = np.matmul(Hg, np.matmul(H, Hc1))
    t_mat2 = np.matmul(Hg, np.matmul(H, Hc2))
    t_mat3 = np.matmul(Hg, np.matmul(H, Hc3))
    t_mat4 = np.matmul(Hg, np.matmul(H, Hc4))
    print("Pose of pattern with id 1 in base coordinates:")
    print(t_mat1)
    print("Pose of pattern with id 3 in base coordinates:")
    print(t_mat2)
    print("Pose of pattern with id 5 in base coordinates:")
    print(t_mat3)
    print("Pose of pattern with id 7 in base coordinates:")
    print(t_mat4)
    print("\n")

    t1 = np.zeros(shape=2, dtype=np.float64)
    t2 = np.zeros(shape=2, dtype=np.float64)
    t3 = np.zeros(shape=2, dtype=np.float64)
    t4 = np.zeros(shape=2, dtype=np.float64)
    t1[0] = t_mat1[0][3]
    t1[1] = t_mat1[1][3]
    t2[0] = t_mat2[0][3]
    t2[1] = t_mat2[1][3]
    t3[0] = t_mat3[0][3]
    t3[1] = t_mat3[1][3]
    t4[0] = t_mat4[0][3]
    t4[1] = t_mat4[1][3]

    def calc_intersection(p1,p2,q1,q2) :
      # Intersect the 2D line through p1, p2 with the line through q1, q2.
      # The second line is parameterized as s = q1 + quotient * (q2 - q1);
      # 'zaehler' and 'nenner' are the numerator and denominator of that parameter.
      x1 = deepcopy(p1[0])
      y1 = deepcopy(p1[1])
      x2 = deepcopy(p2[0])
      y2 = deepcopy(p2[1])
      a1 = deepcopy(q1[0])
      b1 = deepcopy(q1[1])
      a2 = deepcopy(q2[0])
      b2 = deepcopy(q2[1])
      zaehler = -y1 + b1 - (((a1-x1)/(x2-x1)) * (y2-y1))
      nenner  = (((a2-a1)/(x2-x1)) * (y2-y1)) - (b2-b1)
      quotient = zaehler / nenner
      s1 = a1 + quotient * (a2-a1)
      s2 = b1 + quotient * (b2-b1)
      s = np.zeros(shape=2, dtype=np.float64)
      s[0] = s1
      s[1] = s2
      return s

    intersection = calc_intersection(t1,t3,t2,t4)
    
    print("=> Position of cross lines:")
    cross_pos = np.zeros(shape=3, dtype=np.float64)
    cross_pos[0] = intersection[0]
    cross_pos[1] = intersection[1]
    cross_pos[2] = 0.25 * (t_mat1[2][3] + t_mat2[2][3] + t_mat3[2][3] + t_mat4[2][3])
    print(cross_pos)
    print("\n")

    print('Please move tooltip straight down to cross lines. Then press \'Enter\'.')
    sys.stdin.read(1)

    current_posture_2 = None
    current_pose_2 = None
    if not self.offline :
      # store robot pose into storage folder
      current_posture_2 = self.move_group.get_current_joint_positions()
      current_pose_2 = self.move_group.motion_service.query_pose(self.move_group.name, current_posture_2, self.flange_link_name)
      with open(os.path.join(self.storage_folder, 'posture_2.p'), 'wb') as f:
          pickle.dump(current_posture_2, f)
      with open(os.path.join(self.storage_folder, 'pose_2.p'), 'wb') as f:
          pickle.dump(current_pose_2, f)
    else :
      # offline version: read posture and poses
      with open(os.path.join(self.storage_folder, 'posture_2.p'), 'rb') as f:
        current_posture_2 = pickle.load(f)
      with open(os.path.join(self.storage_folder, 'pose_2.p'), 'rb') as f:
        current_pose_2 = pickle.load(f)

    Hg = deepcopy(current_pose_2.transformation_matrix())
    cross_pose = deepcopy(current_pose_2.transformation_matrix())
    cross_pose[0][3] = cross_pos[0]
    cross_pose[1][3] = cross_pos[1]
    cross_pose[2][3] = cross_pos[2]
    print("cross_pose:")
    print(cross_pose)
    cross_pose_worldview = Pose.from_transformation_matrix(matrix=cross_pose, frame_id='world', normalize_rotation=False)
    self.addOrUpdatePose("cross_pose", "/Calibration", cross_pose_worldview)

    hand_tooltip = np.matmul(inv(Hg), cross_pose)

    print("Tooltip (grippertip) pose in flange coordinates (with same rotation as flange):")
    print(hand_tooltip)
    print("\n")
    np.save(os.path.join(self.storage_folder, 'tooltip_pose_in_flange_coordinates.npy'), hand_tooltip)

    print("To relocate the TCP in the 3D View, add a \'tcp_link\' to the file \'robotModel/main.xacro\' of your project folder.")
    print("As \'origin xyz\' of your tcp_link choose the translation vector of your calculated tooltip pose in flange coordinates.")
    print("More precisely, you have to add the following lines inside the <robot> </robot> environment: \n")
    print('<link name=\"tcp_link\" />')
    print('<joint name=\"tcp_joint_F\" type=\"fixed\">')
    print('  <child link=\"tcp_link\" />')
    print('  <parent link=\"e.g. arm_left_link_tool0\" />   <- adapt this to your endeffector link name')
    print('  <origin xyz=\"{:f} {:f} {:f}\" rpy=\"0 0 0\" />'.format(hand_tooltip[0][3], hand_tooltip[1][3], hand_tooltip[2][3]))
    print('</joint>')
    print("\n")
    print("Now, compile the new main.xacro.")
    print("Then, in the \'Configuration View\' once press compile to make the \'tcp_link\' selectable.")
    print("In the \'Configuration View\' under \'MotionPlanning\'->\'MoveIt!\'->\'groups\'->\'<group_name>\' select the new \'tcp_link\' as \'Tip link\'.")
    print("Moreover, under \'MotionPlanning\'->\'MoveIt!\'->\'endEffectors\'->\'<end effector name>\' select the new \'tcp_link\' as \'Parent link\'.")
    print("After compiling the new robot configuration, starting ROS, changing into the 'World View' and choosing the corresponding move group and end effector, the interactive marker appears at the new tcp link.")

    return True
    


if __name__ == '__main__':
    # Called when running this script standalone
    translation = [0.6089578, 0.3406782, 0.208865]
    rotation = Quaternion(w=0.231852, x=0.33222, y=0.746109, z=0.528387)
    pose = Pose(translation, rotation)
    poses = main(pose, 3, 3, 0.05, 0.05)
    print(poses)
def main(xSize: int, ySize: int, xStepSize: float, yStepSize: float):
    """
    The parameter for this function describes the size of the grid.
    To manipulate orientation and position, one can alter the Pose named "GridPose" 
    in the folder "example_02_palletizing" of the world view.

    Parameters
    ----------
    xSize : int
        Number of elements of the grid in x-direction
    ySize : int
        Number of elements of the grid in y-direction
    xStepSize : float
        The distance between the poses in x-direction
    yStepSize : float
        The distance between the poses in y-direction
    """

    world_view_folder = "example_02_palletizing"
    # Create move group instance targeting the right arm of the robot
    move_group = example_utils.get_move_group()

    # Get the gripper attached at the right arm
    wsg_gripper = example_utils.get_gripper(move_group)

    # Create an instance of WorldViewClient to get access to the ROSVITA world view
    world_view_client = WorldViewClient()

    # Generate the folders used by this example in world view
    generate_folders(world_view_client)

    # Get the pose of the position which defines the location and rotation of the grid
    pose = world_view_client.get_pose("GridPose", world_view_folder)
    jv_home = world_view_client.get_joint_values("Home", world_view_folder)


    # Get the target place poses
    poses = example_02_1_generate_grid.main(pose, xSize, ySize, xStepSize, yStepSize)
    rotation = pose.quaternion

    # Calculate the orthogonal vector of the plane we want to span
    # Since we use the [1,0,0] and [0,1,0] vectors to span the grid relative to the
    # pose orientation, [0,0,1] is orthogonal to the grid
    orthogonal = rotation.rotate(np.array([0,0,1]))

    # For visualization and possible collisions, add some boxes below the positions 
    # we want to visit
    getBoxPose = lambda pose: Pose(pose.translation + (orthogonal * 0.02), pose.quaternion)
    boxPoses = list(map(getBoxPose, poses))
    boxes = example_02_2_create_collision_boxes.main(boxPoses, (xStepSize*0.9, yStepSize*0.9, 0.01))
    world_view_client.add_collision_object("collision_matrix", 
                                    "/{}/generated/collision_objects".format(world_view_folder), 
                                    boxes)

    # Now calculate the pre place poses, which hover over the desired place poses
    # Since we want to access every pose in the grid "from above", we apply a 
    # translation orthogonal to the place poses for the pre place poses  
    #func = lambda pose : Pose(pose.translation + (orthogonal * (-0.1)), pose.quaternion) 
    func = lambda pose: Pose(pose.translation + (pose.quaternion.rotate(np.array([0,0,1])) * (-0.1)), pose.quaternion)
    pre_place_poses = list(map(func, poses))

    # Now that we have all the poses we want to visit, we should find the 
    # corresponding joint values
    pre_place_jvs = calculate_pre_place_joint_values(
        pre_place_poses, jv_home, move_group,
        world_view_client, world_view_folder)

    example_02_4_pick_place_poses.main(poses, pre_place_jvs, jv_home, move_group)

    world_view_client.remove_element("generated", world_view_folder)
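
A standalone invocation could look like the following sketch; the grid size and step sizes are arbitrary example values.

if __name__ == '__main__':
    main(3, 3, 0.1, 0.1)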
Example #15
    def runPatternCalibration(self):
        # determined by measuring using a ruler
        offset_to_edge = Pose([-0.0145, -0.0142, 0])
        edge_to_screw_center_table = Pose([0.045, 0.045, 0.02333])
        return offset_to_edge + edge_to_screw_center_table
    # Also read the names to associate the joint values with the poses
    names = list(map(lambda posix_path: posix_path.name, list(joint_values_dict.keys())))
    # Create new names for the shifted joint values
    new_names = list(map(lambda name: "shifted_joint_values_of_{}".format(name), names))


    # Read poses from world view 
    reference_pose = world_view_client.get_pose("Reference", 
                              "/{}/poses".format(world_view_folder))
    shift_pose = world_view_client.get_pose("Shift", 
                              "/{}/poses".format(world_view_folder))

    # Create a pose defining the shift from the reference pose's point of view
    # First undo translation and rotation of reference pose
    # then translate and rotate by the shift pose
    diff_pose = Pose( -reference_pose.translation, reference_pose.quaternion.inverse)\
      .translate(shift_pose.translation).rotate(shift_pose.quaternion)
    
    shifted_joint_values_list = main(joint_values_list, diff_pose)
    # Save the shifted joint values to world view
    print("Save the shifted joint values to world view")
    try:
        # delete folder if it already exists
        world_view_client.remove_element("generated", world_view_folder)
    except Exception:
        # the folder did not exist yet
        pass
    world_view_client.add_folder("generated", world_view_folder)
    for i in range(len(shifted_joint_values_list)):
        joint_values = shifted_joint_values_list[i]
        # Skip entries that are None
        if joint_values:
            name = new_names[i]
def main():
    world_view_client = WorldViewClient()

    input_var = input("Please type in the torch filename (with path) of the sensorhead stereo camera calibration (e.g. \"calibration/torso_cameras/stereo_cams_<serial1>_<serial2>.t7\" ): ")
    cam_calib_fn = str(input_var)
    stereocalib_sensorhead_cams = torchfile.load(cam_calib_fn)
    end_of_right_serial = cam_calib_fn.rfind(".")
    start_of_right_serial = cam_calib_fn.rfind("_")
    right_serial = cam_calib_fn[start_of_right_serial+1:end_of_right_serial]
    end_of_left_serial = start_of_right_serial
    start_of_left_serial = cam_calib_fn.rfind("_", 0, end_of_left_serial)
    left_serial = cam_calib_fn[start_of_left_serial+1:end_of_left_serial]
    print("right_serial:")
    print(right_serial)
    print("left_serial:")
    print(left_serial)
    print("Please type in the exposure time of the cameras [in microseconds],")
    input_var = input("or press \'enter\' to use 120000ms:")
    if input_var == "" :
      exposure_time = 120000
    else :
      exposure_time = int(input_var)
    print("exposure_time:")
    print(exposure_time)
    print(type(exposure_time))

    move_group_names = MotionService.query_available_move_groups()
    move_group_name = move_group_names[2].name # left_arm_torso
    flange_link_name = move_group_names[2].end_effector_link_names[0] # arm_left_link_tool0
    print("For which arm will the tooltip be calibrated? Please type l or r, then press \'Enter\'.")
    print("l: left arm")
    print("r: right arm")
    which = sys.stdin.read(1)
    if which == "r" :
      move_group_name = move_group_names[3].name # right_arm_torso
      flange_link_name = move_group_names[3].end_effector_link_names[0] # arm_right_link_tool0
    print("move_group_name:")
    print(move_group_name)
    move_group = MoveGroup(move_group_name)
    print("flange_link_name")
    print(flange_link_name)
    torso_link_name = "torso_link_b1"
    print("torso_link_name:")
    print(torso_link_name)

    last_slash = cam_calib_fn.rfind("/")
    torso_to_cam_fn = cam_calib_fn[:last_slash] + "/LeftCam_torso_joint_b1.t7"
    print("Please type in the name of the torchfile (with path) containing the transformation between torso joint and torso cameras,")
    input_var = input("or press \'enter\' to use " + torso_to_cam_fn)
    if input_var != "" :
      torso_to_cam_fn = str(input_var)
    print("torso_to_cam_fn:")
    print(torso_to_cam_fn)
    torso_to_cam_t7 = torchfile.load(torso_to_cam_fn)
    torso_to_cam = Pose.from_transformation_matrix(matrix=torso_to_cam_t7, frame_id=torso_link_name, normalize_rotation=False)

    pattern_localizer = patternlocalisation.PatternLocalisation()
    pattern_localizer.circleFinderParams.minArea = 50
    pattern_localizer.circleFinderParams.maxArea = 1000
    pattern_localizer.setPatternData(8, 11, 0.003)
    pattern_localizer.setStereoCalibration(stereocalib_sensorhead_cams)
    #pattern_localizer.setFindCirclesGridFlag(cv.CALIB_CB_ASYMMETRIC_GRID) # Don't use "cv.CALIB_CB_CLUSTERING", because it's much more sensitive to background clutter.
    pattern_localizer.setFindCirclesGridFlag(cv.CALIB_CB_ASYMMETRIC_GRID + cv.CALIB_CB_CLUSTERING)

    world_view_folder = '/Calibration'
    storage_folder = '/tmp/calibration/storage_tooltipcalib'

    offline = False

    tooltip_calib = TooltipCalibration(pattern_localizer, stereocalib_sensorhead_cams, left_serial, right_serial, exposure_time,
                                       world_view_client, world_view_folder, move_group, torso_to_cam, storage_folder, torso_link_name,
                                       flange_link_name, offline)

    tooltip_calib.runTooltipCalib()
def main():
    # create entities which should be added to and manipulated in world view
    joint_set1 = JointSet(['Joint1', 'Joint2', 'Joint3'])

    joint_values1 = JointValues(joint_set1, [1.0, 2.0, 3.0])

    joint_set2 = JointSet(['Joint1', 'Joint2'])
    joint_values2 = JointValues(joint_set2, [19.0, 20.0])

    joint_set3 = JointSet(['Joint1', 'Joint4'])
    joint_values3 = JointValues(joint_set3, [100.0, 30.0])

    t1 = [0.502522, 0.2580, 0.3670]
    q1 = Quaternion(w=0.304389, x=0.5272, y=0.68704, z=0.39666)

    t2 = [0.23795, 0.46845, 0.44505]
    q2 = Quaternion(w=0.212097, x=0.470916, y=0.720915, z=0.462096)

    t3 = [0.6089578, 0.3406782, 0.208865]
    q3 = Quaternion(w=0.231852, x=0.33222, y=0.746109, z=0.528387)

    pose1 = Pose(t1, q1, 'world')
    pose2 = Pose(t2, q2, 'world')
    pose3 = Pose(t3, q3, 'world')

    cartesian_path1 = CartesianPath([pose1, pose2, pose3])

    sphere = CollisionPrimitive.create_sphere(0.1, pose2)
    cylinder = CollisionPrimitive.create_cylinder(0.4, 0.05, pose2)
    box = CollisionPrimitive.create_box(0.2, 0.2, 0.1, pose1)

    plane = CollisionPrimitive.create_plane(1.0, 1.0, 1.0, 1.0, pose3)
    cone = CollisionPrimitive.create_cone(0.2, 0.2, pose3)

    collision_object1 = CollisionObject([box])
    collision_object2 = CollisionObject([cylinder])
    collision_object3 = CollisionObject([plane, cone])

    # create an instance of WorldViewClient to get access to the ROSVITA world view
    world_view_client = WorldViewClient()

    print('---------------- add folder --------------')
    # add folders under WorldView root/test
    world_view_client.add_folder('test/joint_values')

    world_view_client.add_folder('test/poses')

    world_view_client.add_folder('test/cartesian_paths')

    world_view_client.add_folder('test/collision_objects')

    print('---------------- add joint values --------------')
    world_view_client.add_joint_values(
        'test/joint_values/joint_values1',
        joint_values1
    )
    world_view_client.add_joint_values(
        'test/joint_values/test',
        joint_values2
    )

    print('---------------- update joint values --------------')
    world_view_client.add_joint_values(
        'test/joint_values/joint_values1',
        joint_values2
    )

    print('---------------- get joint values --------------')

    get_value = world_view_client.get_joint_values(
        'test/joint_values/joint_values1'
    )
    print('joint_values1 is: ' + str(get_value))

    print('---------------- query joint values --------------')
    # query all values which start with t under test/joint_values
    queried_values = world_view_client.query_joint_values(
        'test/joint_values',
        't'
    )

    for v in queried_values:
        print(str(v))

    print('---------------- add poses --------------')
    world_view_client.add_pose(
        'test/poses/pose1',
        pose1
    )
    world_view_client.add_pose(
        'test/poses/test',
        pose3
    )

    print('---------------- update pose --------------')
    world_view_client.add_pose(
        'test/poses/pose1',
        pose2
    )

    print('---------------- get pose --------------')

    get_value = world_view_client.get_pose(
        'test/poses/pose1'
    )
    print('pose1 is: ' + str(get_value))

    print('---------------- query poses --------------')
    # query all poses which start with t under test/poses
    queried_values = world_view_client.query_poses('test/poses', 't')

    for v in queried_values:
        print(str(v))

    print('---------------- add cartesian path --------------')
    world_view_client.add_cartesian_path(
        'test/cartesian_paths/cartesian_path1',
        cartesian_path1
    )
    world_view_client.add_cartesian_path(
        'test/cartesian_paths/test',
        cartesian_path1
    )

    print('---------------- update cartesian path --------------')
    world_view_client.add_cartesian_path(
        'test/cartesian_paths/cartesian_path1',
        cartesian_path1
    )

    print('---------------- get cartesian path --------------')

    get_value = world_view_client.get_cartesian_path(
        'test/cartesian_paths/cartesian_path1'
    )
    print('cartesian_path1 is: ' + str(get_value))

    print('---------------- query cartesian paths --------------')
    # query all cartesian_paths which start with t under test/cartesian_paths
    queried_values = world_view_client.query_cartesian_paths(
        'test/cartesian_paths',
        't'
    )

    for v in queried_values:
        print(str(v))

    print('---------------- add collision object --------------')
    world_view_client.add_collision_object(
        'test/collision_objects/collision_object1',
        collision_object1
    )
    world_view_client.add_collision_object(
        'test/collision_objects/test',
        collision_object1
    )

    print('---------------- update collision object --------------')
    world_view_client.add_collision_object(
        'test/collision_objects/collision_object1',
        collision_object2
    )

    print('---------------- get collision object --------------')

    get_value = world_view_client.get_collision_object(
        'test/collision_objects/collision_object1'
    )
    print('collision_object1 is: ' + str(get_value))

    print('---------------- query collision object --------------')
    # query all collision_objects which start with t under test/collision_objects
    queried_values = world_view_client.query_collision_objects(
        'test/collision_objects',
        't'
    )

    for v in queried_values:
        print(str(v))

    input('Press enter to clean up')

    print('----------------- remove folder test ---------------')
    world_view_client.remove_element('test', '')
    def test_rotate(self):
        m = self.m3.copy()
        m[0:3, 0:3] = np.eye(3)
        p = Pose.from_transformation_matrix(m)
        r_pose = p.rotate(self.m3[0:3, 0:3])
        assert r_pose.transformation_matrix() == pytest.approx(self.m3)