Code example #1
    def setup_class(cls):
        # cosine/sine pairs; the signs encode rotations of -30 and -60 degrees
        c30 = np.sqrt(3) / 2.0
        s30 = -0.5

        c60 = -s30
        s60 = -c30

        # create pose1 from transformation matrix
        m1 = np.eye(4)
        m1[0:3, 3] = np.asarray([0.8660254037844386, 0.5, 0.0])
        m1[0:2, 0:2] = np.asarray([[c30, -s30], [s30, c30]])
        cls.pose1 = Pose.from_transformation_matrix(m1)

        # create pose2 from transformation matrix
        m2 = np.eye(4)
        m2[0:3, 3] = np.asarray([0.25, -0.43301270189221935, 0.0])
        m2[0:2, 0:2] = np.asarray([[c60, -s60], [s60, c60]])
        cls.pose2 = Pose.from_transformation_matrix(m2)

        # create pose3 from transformation matrix
        cls.m3 = np.eye(4)
        cls.m3[0:3, 3] = np.asarray([0.8660254037844386, 0.0, 0.0])
        cls.m3[0:2, 0:2] = np.asarray([[0.0, 1.0], [-1.0, 0.0]])
        cls.pose3 = Pose.from_transformation_matrix(cls.m3)

        # create pose4 from transformation matrix
        translation = np.asarray([0.22419, -0.78420, 0.945699])
        quaternion = Quaternion(x=0.6087561845779419,
                                y=0.7779673933982849,
                                z=0.047310106456279755,
                                w=0.1481364518404007)
        cls.pose4 = Pose(translation, quaternion)
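
For reference, the rotation block written into m1 above is a planar rotation by -30 degrees (and m2's by -60 degrees); a minimal numpy check, independent of the Pose class:

import numpy as np

# the -30 degree rotation matrix matches [[c30, -s30], [s30, c30]]
theta = np.deg2rad(-30.0)
r = np.array([[np.cos(theta), -np.sin(theta)],
              [np.sin(theta),  np.cos(theta)]])
assert np.allclose(r, [[np.sqrt(3) / 2.0, 0.5], [-0.5, np.sqrt(3) / 2.0]])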
Code example #2
def test_cached_trajectories(tc_go_to: TaskTrajectoryCache,
                             tc_come_back: TaskTrajectoryCache):

    world_view_client = WorldViewClient()
    move_group = example_utils.get_move_group()
    end_effector = move_group.get_end_effector()

    world_view_folder = "example_08_trajectory_cache"

    add_generated_folder(world_view_client, world_view_folder)

    # Get start and end from world view
    start_jv = world_view_client.get_joint_values("start_jv",
                                                  world_view_folder)
    start_pose = end_effector.compute_pose(start_jv)
    end_pose = world_view_client.get_pose("end_pose", world_view_folder)

    # Get a SampleBox, which contains a Volume defined by translations and rotations
    end_box = get_sample_box(end_pose)

    # Show the grid in world view
    sample_positions = end_box.sample_positions
    for i in range(len(sample_positions[0])):
        translation = [
            sample_positions[0][i], sample_positions[1][i],
            sample_positions[2][i]
        ]
        pose = Pose.from_transformation_matrix(np.eye(4))
        pose = pose.translate(translation)
        pose = pose.rotate(end_box.quaternions[0])
        # each sampled position is shown with the first sampled rotation
        world_view_client.add_pose(element_name="pose_{}".format(i),
                                   folder_path=world_view_folder +
                                   "/generated",
                                   pose=pose)

    # Move to start_jv
    loop.run_until_complete(move_group.move_joints_collision_free(start_jv))

    if tc_go_to is not None and tc_come_back is not None:
        world_view_client.add_pose(element_name="target_pose",
                                   folder_path=world_view_folder +
                                   "/generated",
                                   pose=end_pose)

        input(
            "Please move 'target_pose' in world view in folder 'example_08_trajectory_cache/generated', then press enter."
        )
        target_pose = world_view_client.get_pose(
            element_name="target_pose",
            folder_path=world_view_folder + "/generated")
        try:
            # This movement uses the trajectory which ends nearest to target_pose
            print("Move to nearest pose")
            execution = move_with_trajectory_cache(
                cache=tc_go_to,
                end_effector=end_effector,
                start_joint_values=start_jv,
                target_pose=target_pose,
                max_position_diff_radius=0.05)
            loop.run_until_complete(execution)

            # This movement uses the trajectory which begins nearest to target_pose
            print("Return home")
            execution = move_with_trajectory_cache(
                cache=tc_come_back,
                end_effector=end_effector,
                start_joint_values=None,
                target_pose=start_pose,
                max_position_diff_radius=0.05)
            loop.run_until_complete(execution)
        except RuntimeError as e:
            print(e)
            print(
                "The chosen pose must lie within the radius of one of the poses in the sampled volume."
            )
    else:
        print("Could not open serialized data. ")

    input("Press enter to clean up")
    world_view_client.remove_element("generated", world_view_folder)
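
This example relies on a module-level asyncio event loop named loop (used via loop.run_until_complete above); a minimal sketch of that surrounding setup, with the cache construction elided and the final call shown only as an illustration:

import asyncio

loop = asyncio.get_event_loop()
# tc_go_to and tc_come_back would be TaskTrajectoryCache objects,
# e.g. deserialized from disk, before calling:
# test_cached_trajectories(tc_go_to, tc_come_back)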
Code example #3
def main():
    world_view_client = WorldViewClient()

    input_var = input("Please type in the torch filename (with path) of the sensorhead stereo camera calibration (e.g. \"calibration/torso_cameras/stereo_cams_<serial1>_<serial2>.t7\" ): ")
    cam_calib_fn = str(input_var)
    stereocalib_sensorhead_cams = torchfile.load(cam_calib_fn)
    end_of_right_serial = cam_calib_fn.rfind(".")
    start_of_right_serial = cam_calib_fn.rfind("_")
    right_serial = cam_calib_fn[start_of_right_serial+1:end_of_right_serial]
    end_of_left_serial = start_of_right_serial
    start_of_left_serial = cam_calib_fn.rfind("_", 0, end_of_left_serial)
    left_serial = cam_calib_fn[start_of_left_serial+1:end_of_left_serial]
    print("right_serial:")
    print(right_serial)
    print("left_serial:")
    print(left_serial)
    print("Please type in the exposure time of the cameras [in microseconds],")
    input_var = input("or press \'enter\' to use 120000ms:")
    if input_var == "" :
      exposure_time = 120000
    else :
      exposure_time = int(input_var)
    print("exposure_time:")
    print(exposure_time)
    print(type(exposure_time))

    move_group_names = MotionService.query_available_move_groups()
    move_group_name = move_group_names[2].name # left_arm_torso
    flange_link_name = move_group_names[2].end_effector_link_names[0] # arm_left_link_tool0
    print("For which arm will the tooltip be calibrated? Please type l or r, then press \'Enter\'.")
    print("l: left arm")
    print("r: right arm")
    which = sys.stdin.read(1)  # reads a single character; the trailing newline stays buffered
    if which == "r" :
      move_group_name = move_group_names[3].name # right_arm_torso
      flange_link_name = move_group_names[3].end_effector_link_names[0] # arm_right_link_tool0
    print("move_group_name:")
    print(move_group_name)
    move_group = MoveGroup(move_group_name)
    print("flange_link_name")
    print(flange_link_name)
    torso_link_name = "torso_link_b1"
    print("torso_link_name:")
    print(torso_link_name)

    last_slash = cam_calib_fn.rfind("/")
    torso_to_cam_fn = cam_calib_fn[:last_slash] + "/LeftCam_torso_joint_b1.t7"
    print("Please type in the name of the torchfile (with path) containing the transformation between torso joint and torso cameras,")
    input_var = input("or press \'enter\' to use " + torso_to_cam_fn)
    if input_var != "" :
      torso_to_cam_fn = str(input_var)
    print("torso_to_cam_fn:")
    print(torso_to_cam_fn)
    torso_to_cam_t7 = torchfile.load(torso_to_cam_fn)
    torso_to_cam = Pose.from_transformation_matrix(matrix=torso_to_cam_t7, frame_id=torso_link_name, normalize_rotation=False)

    pattern_localizer = patternlocalisation.PatternLocalisation()
    pattern_localizer.circleFinderParams.minArea = 50
    pattern_localizer.circleFinderParams.maxArea = 1000
    pattern_localizer.setPatternData(8, 11, 0.003)
    pattern_localizer.setStereoCalibration(stereocalib_sensorhead_cams)
    #pattern_localizer.setFindCirclesGridFlag(cv.CALIB_CB_ASYMMETRIC_GRID) # Don't use "cv.CALIB_CB_CLUSTERING", because it's much more sensitive to background clutter.
    pattern_localizer.setFindCirclesGridFlag(cv.CALIB_CB_ASYMMETRIC_GRID + cv.CALIB_CB_CLUSTERING)

    world_view_folder = '/Calibration'
    storage_folder = '/tmp/calibration/storage_tooltipcalib'

    offline = False

    tooltip_calib = TooltipCalibration(pattern_localizer, stereocalib_sensorhead_cams, left_serial, right_serial, exposure_time,
                                       world_view_client, world_view_folder, move_group, torso_to_cam, storage_folder, torso_link_name,
                                       flange_link_name, offline)

    tooltip_calib.runTooltipCalib()
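
The serial-number parsing above relies only on str.rfind; a standalone check against the filename pattern named in the prompt (the serial numbers here are made up):

fn = "calibration/torso_cameras/stereo_cams_4103130811_4103189394.t7"
end_right = fn.rfind(".")    # position of the ".t7" extension
start_right = fn.rfind("_")  # underscore in front of the right serial
right_serial = fn[start_right + 1:end_right]
left_serial = fn[fn.rfind("_", 0, start_right) + 1:start_right]
assert (left_serial, right_serial) == ("4103130811", "4103189394")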
Code example #4
  def runTooltipCalib(self):

    print('Please move robot to capture pose. Then press \'Enter\' and wait.')
    sys.stdin.read(1)  # consume the leftover newline from the earlier prompt
    sys.stdin.read(1)  # wait for the Enter key

    if not os.path.isdir(self.storage_folder) :
      os.makedirs(self.storage_folder)

    image_left = None
    image_right = None
    if not self.offline :
      # capture images of calibration target and write them into storage folder
      images = self.capture_client(self.exposure_time)
      image_left = cv.cvtColor(images[self.left_serial], cv.COLOR_GRAY2RGB)
      image_right = cv.cvtColor(images[self.right_serial], cv.COLOR_GRAY2RGB)
      cv.imwrite(os.path.join(self.storage_folder, 'left.png'), image_left)
      cv.imwrite(os.path.join(self.storage_folder, 'right.png'), image_right)
    else :
      # offline version: read images of calibration target
      image_left = cv.imread(os.path.join(self.storage_folder, 'left.png'))
      image_right = cv.imread(os.path.join(self.storage_folder, 'right.png'))

    current_posture_1 = None
    current_pose_1 = None
    if not self.offline :
      # store robot poses into storage folder
      current_posture_1 = self.move_group.get_current_joint_positions()
      current_pose_1 = self.move_group.motion_service.query_pose(self.move_group.name, current_posture_1, self.link_name)
      with open(os.path.join(self.storage_folder, 'posture_1.p'), 'wb') as f:
          pickle.dump(current_posture_1, f)
      with open(os.path.join(self.storage_folder, 'pose_1.p'), 'wb') as f:
          pickle.dump(current_pose_1, f)
    else :
      # offline version: read posture and poses
      with open(os.path.join(self.storage_folder, 'posture_1.p'), 'rb') as f:
        current_posture_1 = pickle.load(f)
      with open(os.path.join(self.storage_folder, 'pose_1.p'), 'rb') as f:
        current_pose_1 = pickle.load(f)

    self.pattern_localizer.setStereoCalibration(self.stereocalib) # self.stereocalib_sensorhead_cams
    camPoseFinalList, circleGridPointsLeft, circleGridPointsRight, pointsInCamCoords = self.pattern_localizer.calcCamPoseViaPlaneFit(image_left, image_right, "left", False, True)
    if (circleGridPointsLeft is None or circleGridPointsRight is None
        or len(circleGridPointsLeft) != 4 or len(circleGridPointsRight) != 4) :
      print("Not all 4 patterns have been found in both camera images.")
      print("-> One of the patterns may lie too close to the image boundary, such that it is cropped after image rectification.")
      print("-> The images might be too dark, i.e. the exposure time might be too low, ...")
      print("-> Run the pattern search in debug mode to analyze the problem:")
      camPoseFinalList, circleGridPointsLeft, circleGridPointsRight, pointsInCamCoords = self.pattern_localizer.calcCamPoseViaPlaneFit(image_left, image_right, "left", True, True)
      return False

    Hc1 = camPoseFinalList["1"]
    Hc2 = camPoseFinalList["3"]
    Hc3 = camPoseFinalList["5"]
    Hc4 = camPoseFinalList["7"]
    print("camPoseFinalList[\"1\"]:")
    print(Hc1)
    print("camPoseFinalList[\"3\"]:")
    print(Hc2)
    print("camPoseFinalList[\"5\"]:")
    print(Hc3)
    print("camPoseFinalList[\"7\"]:")
    print(Hc4)
    print("\n")

    Hg_Pose = deepcopy(current_pose_1)
    H_Pose = deepcopy(self.hand_eye) # hand_eye is torso_eye (= torso_to_cam) here!
    H = deepcopy(H_Pose.transformation_matrix())
    Hg = deepcopy(Hg_Pose.transformation_matrix())
    print("Hg (torso pose):")
    print(Hg)
    print("H (torso_to_cam):")
    print(H)
    print("\n")

    t_mat1 = np.matmul(Hg, np.matmul(H, Hc1))
    t_mat2 = np.matmul(Hg, np.matmul(H, Hc2))
    t_mat3 = np.matmul(Hg, np.matmul(H, Hc3))
    t_mat4 = np.matmul(Hg, np.matmul(H, Hc4))
    print("Pose of pattern with id 1 in base coordinates:")
    print(t_mat1)
    print("Pose of pattern with id 3 in base coordinates:")
    print(t_mat2)
    print("Pose of pattern with id 5 in base coordinates:")
    print(t_mat3)
    print("Pose of pattern with id 7 in base coordinates:")
    print(t_mat4)
    print("\n")

    t1 = np.zeros(shape=2, dtype=np.float64)
    t2 = np.zeros(shape=2, dtype=np.float64)
    t3 = np.zeros(shape=2, dtype=np.float64)
    t4 = np.zeros(shape=2, dtype=np.float64)
    t1[0] = t_mat1[0][3]
    t1[1] = t_mat1[1][3]
    t2[0] = t_mat2[0][3]
    t2[1] = t_mat2[1][3]
    t3[0] = t_mat3[0][3]
    t3[1] = t_mat3[1][3]
    t4[0] = t_mat4[0][3]
    t4[1] = t_mat4[1][3]

    def calc_intersection(p1, p2, q1, q2) :
      # Intersect the line through p1, p2 with the line through q1, q2 (in 2D).
      x1, y1 = p1[0], p1[1]
      x2, y2 = p2[0], p2[1]
      a1, b1 = q1[0], q1[1]
      a2, b2 = q2[0], q2[1]
      # zaehler/nenner (German): numerator/denominator of the line parameter
      zaehler = -y1 + b1 - (((a1-x1)/(x2-x1)) * (y2-y1))
      nenner  = (((a2-a1)/(x2-x1)) * (y2-y1)) - (b2-b1)
      quotient = zaehler / nenner
      s = np.zeros(shape=2, dtype=np.float64)
      s[0] = a1 + quotient * (a2-a1)
      s[1] = b1 + quotient * (b2-b1)
      return s

    intersection = calc_intersection(t1,t3,t2,t4)
    
    print("=> Position of cross lines:")
    cross_pos = np.zeros(shape=3, dtype=np.float64)
    cross_pos[0] = intersection[0]
    cross_pos[1] = intersection[1]
    cross_pos[2] = 0.25 * (t_mat1[2][3] + t_mat2[2][3] + t_mat3[2][3] + t_mat4[2][3])
    print(cross_pos)
    print("\n")

    print('Please move tooltip straight down to cross lines. Then press \'Enter\'.')
    sys.stdin.read(1)

    current_posture_2 = None
    current_pose_2 = None
    if not self.offline :
      # store robot pose into storage folder
      current_posture_2 = self.move_group.get_current_joint_positions()
      current_pose_2 = self.move_group.motion_service.query_pose(self.move_group.name, current_posture_2, self.flange_link_name)
      with open(os.path.join(self.storage_folder, 'posture_2.p'), 'wb') as f:
          pickle.dump(current_posture_2, f)
      with open(os.path.join(self.storage_folder, 'pose_2.p'), 'wb') as f:
          pickle.dump(current_pose_2, f)
    else :
      # offline version: read posture and poses
      with open(os.path.join(self.storage_folder, 'posture_2.p'), 'rb') as f:
        current_posture_2 = pickle.load(f)
      with open(os.path.join(self.storage_folder, 'pose_2.p'), 'rb') as f:
        current_pose_2 = pickle.load(f)

    Hg = deepcopy(current_pose_2.transformation_matrix())
    cross_pose = deepcopy(current_pose_2.transformation_matrix())
    cross_pose[0][3] = cross_pos[0]
    cross_pose[1][3] = cross_pos[1]
    cross_pose[2][3] = cross_pos[2]
    print("cross_pose:")
    print(cross_pose)
    cross_pose_worldview = Pose.from_transformation_matrix(matrix=cross_pose, frame_id='world', normalize_rotation=False)
    self.addOrUpdatePose("cross_pose", "/Calibration", cross_pose_worldview)

    hand_tooltip = np.matmul(inv(Hg), cross_pose)

    print("Tooltip (grippertip) pose in flange coordinates (with same rotation as flange):")
    print(hand_tooltip)
    print("\n")
    np.save(os.path.join(self.storage_folder, 'tooltip_pose_in_flange_coordinates.npy'), hand_tooltip)

    print("To relocate the TCP in the 3D View, add a \'tcp_link\' to the file \'robotModel/main.xacro\' of your project folder.")
    print("As \'origin xyz\' of your tcp_link choose the translation vector of your calculated tooltip pose in flange coordinates.")
    print("More precisely, you have to add the following lines inside the <robot> </robot> environment: \n")
    print('<link name=\"tcp_link\" />')
    print('<joint name=\"tcp_joint_F\" type=\"fixed\">')
    print('  <child link=\"tcp_link\" />')
    print('  <parent link=\"e.g. arm_left_link_tool0\" />   <- adapt this to your end effector link name')
    print('  <origin xyz=\"{:f} {:f} {:f}\" rpy=\"0 0 0\" />'.format(hand_tooltip[0][3], hand_tooltip[1][3], hand_tooltip[2][3]))
    print('</joint>')
    print("\n")
    print("Now, compile the new main.xacro.")
    print("Then, in the \'Configuration View\' once press compile to make the \'tcp_link\' selectable.")
    print("In the \'Configuration View\' under \'MotionPlanning\'->\'MoveIt!\'->\'groups\'->\'<group_name>\' select the new \'tcp_link\' as \'Tip link\'.")
    print("Moreover, under \'MotionPlanning\'->\'MoveIt!\'->\'endEffectors\'->\'<end effector name>\' select the new \'tcp_link\' as \'Parent link\'.")
    print("After compiling the new robot configuration, starting ROS, changing into the 'World View' and choosing the corresponding move group and end effector, the interactive marker appears at the new tcp link.")

    return True
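
The nested calc_intersection helper can be sanity-checked in isolation (assuming it is lifted to module scope): intersecting the two diagonals of the unit square must yield its center.

import numpy as np

p1 = np.array([0.0, 0.0]); p2 = np.array([1.0, 1.0])  # first diagonal
q1 = np.array([0.0, 1.0]); q2 = np.array([1.0, 0.0])  # second diagonal
s = calc_intersection(p1, p2, q1, q2)
assert np.allclose(s, [0.5, 0.5])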
    
Code example #5
    def test_rotate(self):
        m = self.m3.copy()
        m[0:3, 0:3] = np.eye(3)
        p = Pose.from_transformation_matrix(m)
        r_pose = p.rotate(self.m3[0:3, 0:3])
        assert r_pose.transformation_matrix() == pytest.approx(self.m3)
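
pytest.approx compares numpy arrays element-wise, which is what lets the whole 4x4 transformation matrix be asserted in a single line above; a standalone illustration:

import numpy as np
import pytest

a = np.eye(4)
b = a + 1e-13  # within approx's default absolute tolerance of 1e-12
assert a == pytest.approx(b)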