Example #1
def abb_irb6640_180_255_robot():
    """Return a Robot instance for the ABB IRB6640 180-255 robot"""

    x = np.array([1, 0, 0])
    y = np.array([0, 1, 0])
    z = np.array([0, 0, 1])
    a = np.array([0, 0, 0])

    H = np.array([z, y, y, x, y, x]).T
    P = np.array(
        [0.78 * z, 0.32 * x, 1.075 * z, 0.2 * z, 1.1425 * x, 0.2 * x, a]).T
    joint_type = [0, 0, 0, 0, 0, 0]
    joint_min = np.deg2rad(np.array([-170, -65, -180, -300, -120, -360]))
    joint_max = np.deg2rad(np.array([170, 85, 70, 300, 120, 360]))

    p_tool = np.array([0, 0, 0])
    R_tool = rox.rot([0, 1, 0], np.pi / 2.0)

    return rox.Robot(H,
                     P,
                     joint_type,
                     joint_min,
                     joint_max,
                     R_tool=R_tool,
                     p_tool=p_tool)
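
# Usage sketch (an addition, assuming numpy is imported as np and
# general_robotics_toolbox as rox): query the model with forward kinematics
# at the home configuration.
robot = abb_irb6640_180_255_robot()
pose0 = rox.fwdkin(robot, np.zeros(6))
print(pose0.R, pose0.p)  # end-effector rotation and position at q = 0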
Example #2
    def test_attached_collision_object(self):

        expected_scene_1 = rospy.wait_for_message("/expected_planning_scene_1",
                                                  PlanningScene,
                                                  timeout=10)
        scene_1 = rospy.wait_for_message('/planning_scene',
                                         PlanningScene,
                                         timeout=10)
        self._assert_planning_scene_equal(expected_scene_1, scene_1)

        rospy.sleep(rospy.Duration(1))

        update_pose_proxy = rospy.ServiceProxy("update_payload_pose",
                                               UpdatePayloadPose)

        payload2_to_gripper_target_tf = self.tf_listener.lookupTransform(
            "payload2", "payload2_gripper_target", rospy.Time(0))

        req = UpdatePayloadPoseRequest()
        req.header.frame_id = "vacuum_gripper_tool"
        req.name = "payload2"
        req.pose = rox_msg.transform2pose_msg(
            payload2_to_gripper_target_tf.inv())
        req.confidence = 0.5
        res = update_pose_proxy(req)
        assert res.success

        expected_scene_2 = rospy.wait_for_message("/expected_planning_scene_2",
                                                  PlanningScene,
                                                  timeout=10)
        scene_2 = rospy.wait_for_message('/planning_scene',
                                         PlanningScene,
                                         timeout=10)
        self._assert_planning_scene_equal(expected_scene_2, scene_2)

        world_to_fixture2_payload2_target_tf = self.tf_listener.lookupTransform(
            "world", "fixture2_payload2_target", rospy.Time(0))

        #Add in an offset to represent a final placement error
        fixture2_payload2_target_to_payload2_tf = rox.Transform(
            rox.rot([0, 0, 1], np.deg2rad(5)), [0.1, 0, 0],
            "fixture2_payload2_target", "payload2")

        req2 = UpdatePayloadPoseRequest()
        req2.header.frame_id = "world"
        req2.name = "payload2"
        req2.pose = rox_msg.transform2pose_msg(
            world_to_fixture2_payload2_target_tf *
            fixture2_payload2_target_to_payload2_tf)
        res2 = update_pose_proxy(req2)
        assert res2.success

        expected_scene_3 = rospy.wait_for_message("/expected_planning_scene_3",
                                                  PlanningScene,
                                                  timeout=10)
        scene_3 = rospy.wait_for_message('/planning_scene',
                                         PlanningScene,
                                         timeout=10)
        self._assert_planning_scene_equal(expected_scene_3, scene_3)
Example #3
def test_q2rot():
    rot_t = np.array([[-0.5057639, -0.1340537, 0.8521928],
                      [0.6456962, -0.7139224, 0.2709081],
                      [0.5720833, 0.6872731, 0.4476342]])

    q = np.array([0.2387194, 0.4360402, 0.2933459, 0.8165967])
    k, theta = rox.q2rot(q)
    np.testing.assert_allclose(rox.rot(k, theta), rot_t, atol=1e-6)
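
    # Round-trip sketch (an addition): rox.R2rot inverts rox.rot, so the
    # axis/angle recovered from the matrix must reproduce it.
    k1, theta1 = rox.R2rot(rot_t)
    np.testing.assert_allclose(rox.rot(k1, theta1), rot_t, atol=1e-5)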
Example #4
    def runTest(self):

        #TODO: other joint types

        #Home configuration (See Page 2-2 of Puma 260 manual)
        puma = puma260b_robot()
        pose = rox.fwdkin(puma, np.zeros(6))
        np.testing.assert_allclose(pose.R, np.identity(3))
        np.testing.assert_allclose(pose.p,
                                   np.array([10, -4.9, 4.25]) * in_2_m,
                                   atol=1e-6)

        #Another right-angle configuration
        joints2 = np.array([180, -90, -90, 90, 90, 90]) * np.pi / 180.0
        pose2 = rox.fwdkin(puma, joints2)
        np.testing.assert_allclose(pose2.R,
                                   rox.rot([0, 0, 1], np.pi).dot(
                                       rox.rot([0, 1, 0], -np.pi / 2)),
                                   atol=1e-6)
        np.testing.assert_allclose(pose2.p,
                                   np.array([-0.75, 4.9, 31]) * in_2_m)

        #Random configuration
        joints3 = np.array([50, -105, 31, 4, 126, -184]) * np.pi / 180
        pose3 = rox.fwdkin(puma, joints3)

        pose3_R_t = np.array([[0.4274, 0.8069, -0.4076],
                              [0.4455, -0.5804, -0.6817],
                              [-0.7866, 0.1097, -0.6076]])
        pose3_p_t = [0.2236, 0.0693, 0.4265]

        np.testing.assert_allclose(pose3.R, pose3_R_t, atol=1e-4)
        np.testing.assert_allclose(pose3.p, pose3_p_t, atol=1e-4)

        puma_tool = puma260b_robot_tool()

        pose4 = rox.fwdkin(puma_tool, joints3)
        pose4_R_t = np.array([[0.4076, 0.8069, 0.4274],
                              [0.6817, -0.5804, 0.4455],
                              [0.6076, 0.1097, -0.7866]])

        pose4_p_t = [0.2450, 0.0916, 0.3872]

        np.testing.assert_allclose(pose4.R, pose4_R_t, atol=1e-4)
        np.testing.assert_allclose(pose4.p, pose4_p_t, atol=1e-4)
Example #5
async def async_jog_cartesian_gamepad(P_axis, R_axis):
    move_distance = 0.01  # meters
    rotate_angle = np.deg2rad(5)  # radians

    global d, num_joints, joint_lower_limits, joint_upper_limits, joint_vel_limits
    global pose  # Get the Current Pose of the robot

    global is_gamepadaxisactive
    global is_gamepadbuttondown
    # print_div("here 0<br>")
    if (is_gamepadaxisactive or is_gamepadbuttondown):
        # Update joint angles
        d_q = await update_joint_info()  # Joint angles in radians as ndarray
        # Update the end effector pose info
        pose = await update_end_info()
        await update_state_flags()

        Rd = pose.R
        pd = pose.p

        # print_div("here 1<br>")

        if P_axis is not None:
            pd = pd + Rd.dot(move_distance * P_axis)
            # print_div("here 2<br>")

        if R_axis is not None:
            # R = rox.rot(np.array(([1.],[0.],[0.])), 0.261799)
            R = rox.rot(R_axis, rotate_angle)
            Rd = Rd.dot(R)  # Rotate
            # print_div("here 3<br>")

        try:
            # Update desired inverse kinematics info
            joint_angles, converged = update_ik_info(Rd, pd)
            if not converged:
                print_div("Inverse Kinematics Algo. Could not Converge<br>")
                raise ValueError("Inverse kinematics did not converge")
            elif not (joint_angles < joint_upper_limits).all() or not (
                    joint_angles > joint_lower_limits).all():
                print_div("Specified joints are out of range<br>")
                raise ValueError("Joint angles are out of range")
            else:
                await d.async_jog_joint(joint_angles, joint_vel_limits, False,
                                        True, None)
        except Exception:
            print_div("Specified joints might be out of range<br>")

    global is_jogging
    is_jogging = False
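
# Standalone sketch of the incremental jog math above (an addition, assuming
# only numpy as np and general_robotics_toolbox as rox): translation steps are
# taken along the tool axes, p_new = p + R @ (d * P_axis), and rotations
# post-multiply, R_new = R @ rot(R_axis, angle), so they are also about the
# tool frame.
def jog_step(pose_R, pose_p, P_axis=None, R_axis=None,
             move_distance=0.01, rotate_angle=np.deg2rad(5)):
    if P_axis is not None:
        pose_p = pose_p + pose_R.dot(move_distance * np.asarray(P_axis))
    if R_axis is not None:
        pose_R = pose_R.dot(rox.rot(R_axis, rotate_angle))
    return pose_R, pose_p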
Example #6
    def compute_step_gripper_target_pose(self,
                                         img,
                                         Kp,
                                         no_z=False,
                                         z_offset=0):

        fixed_marker_transform, payload_marker_transform, error_transform = self.compute_error_transform(
            img)

        if no_z:
            error_transform.p[2] = 0
        else:
            error_transform.p[2] -= z_offset

        gripper_to_camera_tf = self.tf_listener.lookupTransform(
            "vacuum_gripper_tool", "gripper_camera_2", rospy.Time(0))

        world_to_vacuum_gripper_tool_tf = self.tf_listener.lookupTransform(
            "world", "vacuum_gripper_tool", rospy.Time(0))

        #Scale by Kp
        k, theta = rox.R2rot(error_transform.R)
        r = np.multiply(k * theta, Kp[0:3])
        r_norm = np.linalg.norm(r)
        if (r_norm < 1e-6):
            error_transform2_R = np.eye(3)
        else:
            error_transform2_R = rox.rot(r / r_norm, r_norm)
        error_transform2_p = np.multiply(error_transform.p, (Kp[3:6]))

        error_transform2 = rox.Transform(error_transform2_R,
                                         error_transform2_p)

        gripper_to_fixed_marker_tf = gripper_to_camera_tf * fixed_marker_transform
        gripper_to_desired_fixed_marker_tf = gripper_to_fixed_marker_tf * error_transform2

        #print gripper_to_fixed_marker_tf

        ret = world_to_vacuum_gripper_tool_tf * (
            gripper_to_desired_fixed_marker_tf *
            gripper_to_fixed_marker_tf.inv()).inv()

        #print world_to_vacuum_gripper_tool_tf
        #print ret

        #print error_transform

        return ret, error_transform
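
    # Helper sketch (an addition): the "Scale by Kp" step above in isolation.
    # The rotation error is converted to an axis-angle vector k * theta,
    # scaled elementwise by the rotational gains, and converted back to a
    # rotation matrix; the translation error is scaled by the translational
    # gains.
    def _scale_error_transform(self, error_transform, Kp):
        k, theta = rox.R2rot(error_transform.R)
        r = np.multiply(k * theta, Kp[0:3])
        r_norm = np.linalg.norm(r)
        if r_norm < 1e-6:
            R2 = np.eye(3)
        else:
            R2 = rox.rot(r / r_norm, r_norm)
        p2 = np.multiply(error_transform.p, Kp[3:6])
        return rox.Transform(R2, p2)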
Example #7
def _R2rot_test(k1, theta1):
    R = rox.rot(k1, theta1)
    k2, theta2 = rox.R2rot(R)
    if abs(theta1 - theta2) > (theta1 + theta2):
        k2 = -k2
        theta2 = -theta2

    np.testing.assert_allclose(theta1, theta2, atol=1e-6)
    if (abs(theta1) < 1e-9):
        return

    if ((np.abs(theta1) - np.pi) < 1e-9):
        if np.linalg.norm(k1 + k2) < 1e-6:
            np.testing.assert_allclose(k1, -k2, atol=1e-6)
            return
        np.testing.assert_allclose(k1, k2, atol=1e-6)
        return

    np.testing.assert_allclose(k1, k2, atol=1e-6)
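
# Sketch of the ambiguity this test handles (an addition): (k, theta) and
# (-k, -theta) represent the same rotation, so R2rot may return either sign
# pair and the test flips the sign before comparing.
k = np.array([0, 0, 1])
theta = 0.5
np.testing.assert_allclose(rox.rot(k, theta), rox.rot(-k, -theta), atol=1e-12)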
Example #8
def compute_step_gripper_target_pose(img, fixed_marker, payload_marker, desired_transform, \
                 camera_info, aruco_dict, Kp, tf_listener):

    fixed_marker_transform, error_transform = compute_error_transform(img, fixed_marker, payload_marker, \
                                              desired_transform, camera_info, aruco_dict)

    gripper_to_camera_tf = tf_listener.lookupTransform("vacuum_gripper_tool",
                                                       "gripper_camera_2",
                                                       rospy.Time(0))

    world_to_vacuum_gripper_tool_tf = tf_listener.lookupTransform(
        "world", "vacuum_gripper_tool", rospy.Time(0))

    #Scale by Kp
    k, theta = rox.R2rot(error_transform.R)
    r = np.multiply(k * theta, Kp[0:3])
    r_norm = np.linalg.norm(r)
    if (r_norm < 1e-6):
        error_transform2_R = np.eye(3)
    else:
        error_transform2_R = rox.rot(r / r_norm, r_norm)
    error_transform2_p = np.multiply(error_transform.p, (Kp[3:6]))

    error_transform2 = rox.Transform(error_transform2_R, error_transform2_p)

    gripper_to_fixed_marker_tf = gripper_to_camera_tf * fixed_marker_transform
    gripper_to_desired_fixed_marker_tf = gripper_to_fixed_marker_tf * error_transform2

    #print gripper_to_fixed_marker_tf

    ret = world_to_vacuum_gripper_tool_tf * (
        gripper_to_desired_fixed_marker_tf *
        gripper_to_fixed_marker_tf.inv()).inv()

    #print world_to_vacuum_gripper_tool_tf
    #print ret

    print(error_transform)

    return ret
Example #9
def puma260b_robot_tool():

    robot = puma260b_robot()
    robot.R_tool = rox.rot([0, 1, 0], np.pi / 2.0)
    robot.p_tool = [0.05, 0, 0]
    return robot
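
# Consistency sketch (an addition, assuming rox.fwdkin composes the tool
# transform onto the flange pose): with the tool attached, the pose satisfies
# R_0T = R_06 @ R_tool and p_0T = p_06 + R_06 @ p_tool.
puma = puma260b_robot()
puma_tool = puma260b_robot_tool()
q = np.deg2rad([10., -30., 25., 5., 40., -15.])
pose = rox.fwdkin(puma, q)
pose_tool = rox.fwdkin(puma_tool, q)
np.testing.assert_allclose(pose_tool.R, pose.R.dot(puma_tool.R_tool),
                           atol=1e-8)
np.testing.assert_allclose(pose_tool.p,
                           pose.p + pose.R.dot(puma_tool.p_tool),
                           atol=1e-8)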
Example #10
    def _rot_test(self, k, theta, rot_t):
        rot = rox.rot(k, theta)
        np.testing.assert_allclose(rot, rot_t, atol=1e-5)
Example #11
    def runTest(self):
        x = [1, 0, 0]
        y = [0, 1, 0]
        z = [0, 0, 1]

        #subproblem0
        assert (rox.subproblem0(x, y, z) == np.pi / 2)

        #subproblem1
        k1 = (np.add(x, z)) / np.linalg.norm(np.add(x, z))
        k2 = (np.add(y, z)) / np.linalg.norm(np.add(y, z))

        assert (rox.subproblem1(k1, k2, z) == np.pi / 2)

        #subproblem2
        p2 = x
        q2 = np.add(x, np.add(y, z))
        q2 = q2 / np.linalg.norm(q2)

        a2 = rox.subproblem2(p2, q2, z, y)
        assert len(a2) == 2

        r1 = rox.rot(z, a2[0][0]).dot(rox.rot(y, a2[0][1]))[:, 0]
        r2 = rox.rot(z, a2[1][0]).dot(rox.rot(y, a2[1][1]))[:, 0]

        np.testing.assert_allclose(r1, q2, atol=1e-4)
        np.testing.assert_allclose(r2, q2, atol=1e-4)

        a3 = rox.subproblem2(x, z, z, y)
        assert len(a3) == 1

        r3 = rox.rot(z, a3[0][0]).dot(rox.rot(y, a3[0][1]))[:, 0]
        np.testing.assert_allclose(r3, z, atol=1e-4)

        #subproblem3
        p4 = [.5, 0, 0]
        q4 = [0, .75, 0]

        a4 = rox.subproblem3(p4, q4, z, .5)
        a5 = rox.subproblem3(p4, q4, z, 1.25)

        assert len(a4) == 2
        np.testing.assert_allclose(
            np.linalg.norm(np.add(q4,
                                  rox.rot(z, a4[0]).dot(p4))), 0.5)
        np.testing.assert_allclose(
            np.linalg.norm(np.add(q4,
                                  rox.rot(z, a4[1]).dot(p4))), 0.5)

        assert len(a5) == 1
        np.testing.assert_allclose(
            np.linalg.norm(np.add(q4,
                                  rox.rot(z, a5[0]).dot(p4))), 1.25)

        #subproblem4

        p6 = y
        q6 = [.8, .2, .5]
        d6 = .3

        a6 = rox.subproblem4(p6, q6, z, d6)

        np.testing.assert_allclose(np.dot(p6,
                                          rox.rot(z, a6[0]).dot(q6)),
                                   d6,
                                   atol=1e-4)
        np.testing.assert_allclose(np.dot(p6,
                                          rox.rot(z, a6[1]).dot(q6)),
                                   d6,
                                   atol=1e-4)
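
        # Summary of the Paden-Kahan subproblems exercised above (an
        # addition), stated in the forms checked by the assertions:
        #   subproblem0(p, q, k): theta with rot(k, theta) @ p = q,
        #                         p and q perpendicular to k
        #   subproblem1(p, q, k): theta with rot(k, theta) @ p = q
        #   subproblem2(p, q, k1, k2): (theta1, theta2) pairs with
        #                         rot(k1, theta1) @ rot(k2, theta2) @ p = q
        #   subproblem3(p, q, k, d): theta with ||q + rot(k, theta) @ p|| = d
        #   subproblem4(p, q, k, d): theta with p . (rot(k, theta) @ q) = d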
Example #12
    def joystick_state_cb(self, joy_state):
        with self._lock:

            group = self.jog_joints_joystick_group
            frame = self.jog_cartesian_joystick_frame
            if group < 0 and frame is None:
                return

            # Rate limit command sends
            now = time.time()

            have_command = False
            if now - self.jog_joints_joystick_last_enable_time > 0.2:
                self.jog_joints_joystick_group = -1
            else:
                have_command = True
            if now - self.jog_cartesian_joystick_last_enable_time > 0.2:
                self.jog_cartesian_joystick_frame = None
            else:
                have_command = True

            if not have_command:
                return

            if now - self.joystick_last_command_time < 0.05:
                return
            self.joystick_last_command_time = now
        try:
            joy_vals = joy_state.axes / 32767.0

            for i in range(len(joy_vals)):
                if joy_vals[i] > 0:
                    if joy_vals[i] < self.joystick_deadzone:
                        joy_vals[i] = 0
                    else:
                        joy_vals[i] = (joy_vals[i] - self.joystick_deadzone
                                       ) * (1 - self.joystick_deadzone)
                if joy_vals[i] < 0:
                    if -joy_vals[i] < self.joystick_deadzone:
                        joy_vals[i] = 0
                    else:
                        joy_vals[i] = (joy_vals[i] + self.joystick_deadzone
                                       ) * (1 - self.joystick_deadzone)

            if group >= 0:
                jog_command = np.zeros((self.num_joints, ), dtype=np.float64)

                if self.num_joints == 6:
                    if group == 0:
                        jog_command[0:3] = joy_vals[0:3]
                    else:
                        jog_command[3:6] = joy_vals[0:3]
                elif self.num_joints == 7:
                    if group == 0:
                        jog_command[0:4] = joy_vals[0:4]
                    else:
                        jog_command[4:7] = joy_vals[0:3]
                else:
                    return

                jog_command = 0.01 * self.jog_joints_joystick_speed_perc * np.multiply(
                    jog_command, self.joint_vel_limits) * 0.25
                self.jog_joints_with_limits2(jog_command, 0.2, False)
            elif frame is not None:
                if frame == "robot":
                    R_axis = joy_vals[3:6] * np.deg2rad(45)
                    P_axis = joy_vals[0:3] * 0.254

                    R_spacemouse = rox.rot([1, 0, 0], np.pi)
                    R_axis = R_spacemouse @ R_axis
                    P_axis = R_spacemouse @ P_axis

                    #TODO: Rotate frame about X 180 degrees
                    qdot = self.update_qdot2(
                        R_axis, P_axis, self.jog_cartesian_joystick_speed_perc)
                    self.robot.jog_joint(qdot, 0.2, False)
        except Exception:
            traceback.print_exc()
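
    # Vectorized sketch of the deadzone shaping above (an addition). Note that
    # the loop above multiplies the recentered value by (1 - deadzone);
    # dividing by (1 - deadzone) instead, as below, rescales the output back
    # to the full [-1, 1] range, which may be the intended behavior.
    def _apply_deadzone(self, joy_vals):
        shifted = joy_vals - np.sign(joy_vals) * self.joystick_deadzone
        out = np.where(np.abs(joy_vals) < self.joystick_deadzone, 0.0, shifted)
        return out / (1.0 - self.joystick_deadzone)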
Example #13
    def _do_grab_place_object_planar(self, robot_local_device_name,
                                     tool_local_device_name,
                                     robot_origin_calib_global_name,
                                     reference_pose_global_name, object_x,
                                     object_y, object_theta, z_offset_before,
                                     z_offset_grab, speed_perc, grab):

        var_storage = self.device_manager.get_device_client(
            "variable_storage", 0.1)

        robot = self.device_manager.get_device_client(robot_local_device_name)
        tool = self.device_manager.get_device_client(tool_local_device_name)

        reference_pose = var_storage.getf_variable_value(
            "globals", reference_pose_global_name)
        robot_origin_calib = var_storage.getf_variable_value(
            "globals", robot_origin_calib_global_name)

        T_robot = self._geom_util.named_pose_to_rox_transform(
            robot_origin_calib.data.pose)

        robot_info = robot.robot_info
        rox_robot = self._robot_util.robot_info_to_rox_robot(robot_info, 0)

        # The object's pose in world coordinates
        T_object = rox.Transform(rox.rot([0., 0., 1.], object_theta),
                                 [object_x, object_y, 0.])

        # The robot's reference pose in its own frame
        T_reference_pose_r = rox.fwdkin(rox_robot,
                                        np.deg2rad(reference_pose.data))

        # The robot's reference pose in world coordinates
        T_reference_pose = T_robot * T_reference_pose_r

        # The nominal grab pose copy
        T_grab_pose_n = copy.deepcopy(T_reference_pose)

        # Translate the reference pose in x and y to the nominal grab point
        T_grab_pose_n.p[0] = T_object.p[0]
        T_grab_pose_n.p[1] = T_object.p[1]

        # Rotate the reference pose to align with the object
        T_grab_pose_n.R = T_object.R @ T_grab_pose_n.R

        # Before pose
        T_grab_pose_before = copy.deepcopy(T_grab_pose_n)
        T_grab_pose_before.p[2] += z_offset_before
        # Transform to robot frame
        T_grab_pose_before_r = T_robot.inv() * T_grab_pose_before

        # Grab pose
        T_grab_pose = copy.deepcopy(T_grab_pose_n)
        T_grab_pose.p[2] += z_offset_grab
        # Transform to robot frame
        T_grab_pose_r = T_robot.inv() * T_grab_pose

        robot_state, _ = robot.robot_state.PeekInValue()
        q_current = robot_state.joint_position

        ## Compute inverse kinematics
        # q_grab_before, res = invkin.update_ik_info3(rox_robot, T_grab_pose_before_r, q_current)
        # assert res, "Invalid setpoint: invkin did not converge"
        # q_grab, res = invkin.update_ik_info3(rox_robot, T_grab_pose_r, q_current)
        # assert res, "Invalid setpoint: invkin did not converge"

        # print(f"q_grab_before: {q_grab_before}")
        # print(f"q_grab: {q_grab}")
        # print()

        final_seed = np.deg2rad(reference_pose.data)
        traj_before = self._generate_movel_trajectory_tool_j_range(
            robot, rox_robot, q_current, T_grab_pose_before_r, speed_perc,
            final_seed)

        q_init_grab = traj_before.waypoints[-1].joint_position
        traj_grab = self._generate_movel_trajectory_tool_j_range(
            robot, rox_robot, q_init_grab, T_grab_pose_r, speed_perc,
            q_init_grab)

        q_init_after = traj_grab.waypoints[-1].joint_position
        traj_after = self._generate_movel_trajectory_tool_j_range(
            robot, rox_robot, q_init_after, T_grab_pose_before_r, speed_perc,
            q_init_grab)

        gen = PickPlaceMotionGenerator(robot, rox_robot, tool, traj_before,
                                       traj_grab, traj_after, grab, self._node)

        # robot.jog_freespace(q_grab_before,max_velocity,True)
        # time.sleep(0.5)
        # robot.jog_freespace(q_grab,max_velocity*.25,True)
        # time.sleep(0.5)
        # robot.jog_freespace(q_grab_before,max_velocity*.25,True)
        return gen
Example #14
    def _do_aruco_detection(self, img, intrinsic_global_name,
                            extrinsic_global_name, aruco_dict_str, aruco_id,
                            aruco_markersize, roi):

        intrinsic_calib = None
        extrinsic_calib = None

        if intrinsic_global_name is not None and extrinsic_global_name is not None:
            var_storage = self.device_manager.get_device_client(
                "variable_storage", 0.1)
            intrinsic_calib = var_storage.getf_variable_value(
                "globals", intrinsic_global_name).data
            extrinsic_calib = var_storage.getf_variable_value(
                "globals", extrinsic_global_name).data

        display_img = img.copy()

        assert aruco_dict_str.startswith(
            "DICT_"), "Invalid aruco dictionary name"

        aruco_dict_i = getattr(
            aruco, aruco_dict_str)  # look up the cv2.aruco dictionary enum by name
        aruco_dict = cv2.aruco.Dictionary_get(aruco_dict_i)
        aruco_params = cv2.aruco.DetectorParameters_create()
        aruco_params.cornerRefinementMethod = cv2.aruco.CORNER_REFINE_SUBPIX

        corners1, ids1, rejected = cv2.aruco.detectMarkers(
            img, aruco_dict, parameters=aruco_params)

        if corners1 is None:
            corners1 = []
        if ids1 is None:
            ids1 = []

        if aruco_id < 0:
            corners2 = corners1
            ids2 = ids1
        else:
            corners2 = []
            ids2 = []
            for id2, corner2 in zip(ids1, corners1):
                if id2 == aruco_id:
                    corners2.append(corner2)
                    ids2.append(id2)
            ids2 = np.array(ids2)

        if roi is None:
            corners = corners2
            ids = ids2
        else:
            roi_x = roi.center.pose[0]["position"]["x"]
            roi_y = roi.center.pose[0]["position"]["y"]
            roi_theta = roi.center.pose[0]["orientation"]
            roi_w = roi.size[0]["width"]
            roi_h = roi.size[0]["height"]
            geom_roi1 = shapely.geometry.box(-roi_w / 2.,
                                             -roi_h / 2.,
                                             roi_w / 2.,
                                             roi_h / 2.,
                                             ccw=True)
            geom_roi2 = shapely.affinity.translate(geom_roi1,
                                                   xoff=roi_x,
                                                   yoff=roi_y)
            geom_roi = shapely.affinity.rotate(geom_roi2,
                                               roi_theta,
                                               origin='centroid',
                                               use_radians=True)

            corners = []
            ids = []

            for id3, corner3 in zip(ids2, corners2):
                centroid = shapely.geometry.Polygon([
                    shapely.geometry.Point(corner3[0, i, 0], corner3[0, i, 1])
                    for i in range(4)
                ])
                if geom_roi.contains(centroid):
                    corners.append(corner3)
                    ids.append(id3)
            ids = np.array(ids)

            roi_outline = np.array([geom_roi.exterior.coords], dtype=np.int32)
            display_img = cv2.polylines(display_img,
                                        roi_outline,
                                        True,
                                        color=(255, 255, 0))

        if len(ids) > 0:
            display_img = aruco.drawDetectedMarkers(display_img, corners, ids)

        poses = None
        if intrinsic_calib is not None and extrinsic_calib is not None:
            poses = []
            mtx = intrinsic_calib.K
            d = intrinsic_calib.distortion_info.data
            dist = np.array([d.k1, d.k2, d.p1, d.p2, d.k3])

            T_cam = self._geom_util.named_pose_to_rox_transform(
                extrinsic_calib.pose)

            for id4, corner4 in zip(ids, corners):
                rvec, tvec, markerPoints = cv2.aruco.estimatePoseSingleMarkers(
                    corner4, aruco_markersize, mtx, dist)

                display_img = cv2.aruco.drawAxis(display_img, mtx, dist, rvec,
                                                 tvec, 0.05)

                # R_marker1 = cv2.Rodrigues(rvec.flatten())[0]
                # TODO: 3D pose estimation from rvec is very inaccurate. Use
                # simple trigonometry to estimate the Z rotation of the tag

                # compute vectors from opposite corners
                v1 = corner4[0, 2, :] - corner4[0, 0, :]
                v2 = corner4[0, 3, :] - corner4[0, 1, :]

                # Use atan2 on each vector and average
                theta1 = (np.arctan2(v1[1], v1[0]) - np.deg2rad(45)) % (2. *
                                                                        np.pi)
                theta2 = (np.arctan2(v2[1], v2[0]) - np.deg2rad(135)) % (2. *
                                                                         np.pi)

                theta = (theta1 + theta2) / 2.

                R_marker1 = rox.rot([1., 0., 0.], np.pi) @ rox.rot(
                    [0., 0., -1.], theta)

                p_marker1 = tvec.flatten()

                T_marker1 = rox.Transform(R_marker1, p_marker1, "camera",
                                          f"aruco{int(id4)}")
                T_marker = T_cam * T_marker1
                rr_marker_pose = self._geom_util.rox_transform_to_named_pose(
                    T_marker)
                poses.append(rr_marker_pose)

        ret_markers = []
        if poses is None:
            poses = [None] * len(ids)
        for id_, corner, pose in zip(ids, corners, poses):
            m = self._detected_marker()
            m.marker_id = int(id_)
            m.marker_corners = np.zeros((4, ), dtype=self._point2d_dtype)
            for i in range(4):
                m.marker_corners[i] = self._geom_util.xy_to_point2d(
                    corner[0, i, :])

            m.marker_pose = pose

            ret_markers.append(m)

        ret = self._aruco_detection_result()
        ret.detected_markers = ret_markers
        ret.display_image = self._image_util.array_to_compressed_image_jpg(
            display_img, 70)
        return ret
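
    # Numeric check of the diagonal-angle trick above (an addition): for a
    # square marker with corners ordered TL, TR, BR, BL in image coordinates,
    # the 0->2 diagonal points at 45 deg and the 1->3 diagonal at 135 deg, so
    # removing those offsets and averaging the two estimates recovers the
    # in-plane rotation of the tag.
    @staticmethod
    def _marker_theta_from_corners(corner):
        v1 = corner[0, 2, :] - corner[0, 0, :]
        v2 = corner[0, 3, :] - corner[0, 1, :]
        theta1 = (np.arctan2(v1[1], v1[0]) - np.deg2rad(45)) % (2. * np.pi)
        theta2 = (np.arctan2(v2[1], v2[0]) - np.deg2rad(135)) % (2. * np.pi)
        return (theta1 + theta2) / 2.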
Example #15
def _rpy_to_rot(rpy):
    return rox.rot([0, 0, 1], rpy[2]).dot(rox.rot([0, 1, 0], rpy[1])) \
        .dot(rox.rot([1, 0, 0], rpy[0]))
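
# Quick check sketch for the ZYX (yaw-pitch-roll) convention above (an
# addition): a pure roll, pitch, or yaw reproduces the single-axis rotation.
np.testing.assert_allclose(_rpy_to_rot([0.3, 0, 0]), rox.rot([1, 0, 0], 0.3),
                           atol=1e-12)
np.testing.assert_allclose(_rpy_to_rot([0, 0.3, 0]), rox.rot([0, 1, 0], 0.3),
                           atol=1e-12)
np.testing.assert_allclose(_rpy_to_rot([0, 0, 0.3]), rox.rot([0, 0, 1], 0.3),
                           atol=1e-12)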
Example #16
def robot6_sphericalwrist_invkin(robot, desired_pose, last_joints=None):
    """
    Inverse kinematics for six axis articulated industrial robots
    with spherical wrists. Examples include the Puma 260, ABB IRB6640,
    Staubli TX40, etc. Note that this is not for Universal Robots
    wrist configurations.

    :type    robot: general_robotics_toolbox.Robot
    :param   robot: The robot object representing the geometry of the robot
    :type    desired_pose: general_robotics_toolbox.Transform
    :param   desired_pose: The desired pose of the robot
    :type    last_joints: list, tuple, or numpy.array
    :param   last_joints: The joint angles of the robot at the last timestep.
             If specified, the first returned joint configuration will be the
             closest to last_joints. Optional
    :rtype:  list of numpy.array
    :return: A list of zero or more joint angles that match the desired pose. An
             empty list means that the desired pose cannot be reached. If last_joints
             is specified, the first entry is the closest configuration to last_joints.
    """

    R06 = desired_pose.R
    p0T = desired_pose.p

    if robot.R_tool is not None and robot.p_tool is not None:
        R06 = R06.dot(np.transpose(robot.R_tool))
        p0T = p0T - R06.dot(robot.p_tool)

    H = robot.H
    # Copy P so the in-place wrist corrections below do not modify the robot model
    P = np.copy(robot.P)

    theta_v = []

    #Correct for spherical joint position vectors
    if not np.all(P[:, 4] == 0):
        P4_d = P[:, 4].dot(H[:, 3])
        assert np.all(P[:, 4] - P4_d * H[:, 3] == 0)
        P[:, 3] += P[:, 4]
        P[:, 4] = np.zeros(3)

    if not np.all(P[:, 5] == 0):
        P5_d = P[:, 5].dot(H[:, 5])
        # assert np.all(P[:,5] - P5_d*H[:,5] == 0)
        P[:, 6] += P[:, 5]
        P[:, 5] = np.zeros(3)

    d1 = np.dot(ey, P[:, 1] + P[:, 2] + P[:, 3])
    v1 = p0T - R06.dot(P[:, 6])
    p1 = ey

    Q1 = rox.subproblem4(p1, v1, -H[:, 0], d1)

    normalize = normalize_joints(robot, last_joints)

    for q1 in normalize(0, Q1):

        R01 = rox.rot(H[:, 0], q1)

        p26_q1 = R01.T.dot(p0T - R06.dot(P[:, 6])) - (P[:, 0] + P[:, 1])

        d3 = np.linalg.norm(p26_q1)
        v3 = P[:, 2]
        p3 = P[:, 3]
        Q3 = rox.subproblem3(p3, v3, H[:, 2], d3)

        for q3 in normalize(2, Q3):

            R23 = rox.rot(H[:, 2], q3)

            v2 = p26_q1
            p2 = P[:, 2] + R23.dot(P[:, 3])
            q2 = rox.subproblem1(p2, v2, H[:, 1])

            q2 = normalize(1, [q2])
            if len(q2) == 0:
                continue
            q2 = q2[0]

            R12 = rox.rot(H[:, 1], q2)

            R03 = R01.dot(R12).dot(R23)

            R36 = R03.T.dot(R06)

            v4 = R36.dot(H[:, 5])
            p4 = H[:, 5]

            Q4_Q5 = rox.subproblem2(p4, v4, H[:, 3], H[:, 4])

            for q4, q5 in normalize((3, 4), Q4_Q5):

                R35 = rox.rot(H[:, 3], q4).dot(rox.rot(H[:, 4], q5))
                R05 = R03.dot(R35)
                R56 = R05.T.dot(R06)

                p6 = H[:, 4]
                v6 = R56.dot(H[:, 4])

                q6 = rox.subproblem1(p6, v6, H[:, 5])

                q6 = normalize(5, [q6])
                if len(q6) == 0:
                    continue
                q6 = q6[0]

                theta_v.append(np.array([q1, q2, q3, q4, q5, q6]))
    if last_joints is not None:
        theta_dist = np.linalg.norm(np.subtract(theta_v, last_joints), axis=1)
        return [theta_v[i] for i in list(np.argsort(theta_dist))]
    else:
        return theta_v
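
# Usage sketch (an addition): a round trip through the ABB IRB 6640 model from
# Example #1. Since the desired pose comes from forward kinematics, the
# solution list must contain the original joint vector, and with last_joints
# given it is returned first.
robot = abb_irb6640_180_255_robot()
q = np.deg2rad([10., 20., -15., 30., 25., -40.])
T_des = rox.fwdkin(robot, q)
q_solutions = robot6_sphericalwrist_invkin(robot, T_des, last_joints=q)
np.testing.assert_allclose(q_solutions[0], q, atol=1e-6)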
Example #17
    def _generate_movel_trajectory(self, robot_client, rox_robot,
                                   initial_q_or_T, T_final, speed_perc,
                                   q_final_seed):

        JointTrajectoryWaypoint = RRN.GetStructureType(
            "com.robotraconteur.robotics.trajectory.JointTrajectoryWaypoint",
            robot_client)
        JointTrajectory = RRN.GetStructureType(
            "com.robotraconteur.robotics.trajectory.JointTrajectory",
            robot_client)

        dof = len(rox_robot.joint_type)

        if isinstance(initial_q_or_T, rox.Transform):
            T_initial = initial_q_or_T
            q_initial, res = invkin.update_ik_info3(rox_robot, initial_q_or_T,
                                                    np.random.randn(dof))
        else:
            q_initial = initial_q_or_T
            T_initial = rox.fwdkin(rox_robot, q_initial)

        p_diff = T_final.p - T_initial.p
        R_diff = np.transpose(T_initial.R) @ T_final.R
        k, theta = rox.R2rot(R_diff)

        N_samples_translate = np.linalg.norm(p_diff) * 100.0
        N_samples_rot = np.abs(theta) * 0.2 * 180 / np.pi

        N_samples = math.ceil(np.max((N_samples_translate, N_samples_rot, 20)))

        ss = np.linspace(0, 1, N_samples)

        if q_final_seed is None:
            q_final, res = invkin.update_ik_info3(rox_robot, T_final,
                                                  q_initial)
        else:
            q_final, res = invkin.update_ik_info3(rox_robot, T_final,
                                                  q_final_seed)
        #assert res, "Inverse kinematics failed"

        way_pts = np.zeros((N_samples, dof), dtype=np.float64)
        way_pts[0, :] = q_initial
        for i in range(1, N_samples):
            s = float(i) / (N_samples - 1)
            R_i = T_initial.R @ rox.rot(k, theta * s)
            p_i = (p_diff * s) + T_initial.p
            T_i = rox.Transform(R_i, p_i)
            #try:
            #    q, res = invkin.update_ik_info3(rox_robot, T_i, way_pts[i-1,:])
            #except AssertionError:
            ik_seed = (1.0 - s) * q_initial + s * q_final
            q, res = invkin.update_ik_info3(rox_robot, T_i, ik_seed)
            assert res, "Inverse kinematics failed"
            way_pts[i, :] = q

        vlims = rox_robot.joint_vel_limit
        alims = rox_robot.joint_acc_limit

        path = ta.SplineInterpolator(ss, way_pts)
        pc_vel = constraint.JointVelocityConstraint(vlims * 0.95 * speed_perc /
                                                    100.0)
        pc_acc = constraint.JointAccelerationConstraint(alims)

        instance = algo.TOPPRA([pc_vel, pc_acc],
                               path,
                               parametrizer="ParametrizeConstAccel")
        jnt_traj = instance.compute_trajectory()

        if jnt_traj is None:
            return None

        ts_sample = np.linspace(0, jnt_traj.duration, N_samples)
        qs_sample = jnt_traj(ts_sample)

        waypoints = []

        for i in range(N_samples):
            wp = JointTrajectoryWaypoint()
            wp.joint_position = qs_sample[i, :]
            wp.time_from_start = ts_sample[i]
            waypoints.append(wp)

        traj = JointTrajectory()
        traj.joint_names = rox_robot.joint_names
        traj.waypoints = waypoints

        return traj
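
    # Standalone sketch of the TOPPRA retiming step above (an addition,
    # assuming the toppra imports used in this example: "import toppra as ta",
    # "import toppra.constraint as constraint", "import toppra.algorithm as
    # algo"): retime a joint-space path under velocity/acceleration limits.
    @staticmethod
    def _retime_waypoints(ss, way_pts, vlims, alims, speed_perc=100.0):
        path = ta.SplineInterpolator(ss, way_pts)
        pc_vel = constraint.JointVelocityConstraint(
            vlims * 0.95 * speed_perc / 100.0)
        pc_acc = constraint.JointAccelerationConstraint(alims)
        instance = algo.TOPPRA([pc_vel, pc_acc],
                               path,
                               parametrizer="ParametrizeConstAccel")
        return instance.compute_trajectory()  # None if retiming fails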