def test_geometry_util_transform_types():
    node = RR.RobotRaconteurNode()
    node.SetLogLevelFromString("DEBUG")
    node.Init()

    try:
        RRC.RegisterStdRobDefServiceTypes(node)
        geom_util = GeometryUtil(node)

        _do_transform_test(geom_util.rox_transform_to_transform,
                           geom_util.transform_to_rox_transform, "Transform",
                           node)
        _do_transform_test(geom_util.rox_transform_to_pose,
                           geom_util.pose_to_rox_transform, "Pose", node)
        _do_named_transform_test(geom_util.rox_transform_to_named_transform,
                                 geom_util.named_transform_to_rox_transform,
                                 "NamedTransform", node)
        _do_named_transform_test(geom_util.rox_transform_to_named_pose,
                                 geom_util.named_pose_to_rox_transform,
                                 "NamedPose", node)

        _do_transform_test_xyz_rpy(geom_util.xyz_rpy_to_transform,
                                   geom_util.transform_to_xyz_rpy, "Transform",
                                   node)
        _do_transform_test_xyz_rpy(geom_util.xyz_rpy_to_pose,
                                   geom_util.pose_to_xyz_rpy, "Pose", node)
        _do_named_transform_test_xyz_rpy(geom_util.xyz_rpy_to_named_transform,
                                         geom_util.named_transform_to_xyz_rpy,
                                         "NamedTransform", node)
        _do_named_transform_test_xyz_rpy(geom_util.xyz_rpy_to_named_pose,
                                         geom_util.named_pose_to_xyz_rpy,
                                         "NamedPose", node)

    finally:
        node.Shutdown()
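# The _do_transform_test and related helpers called above are not shown in this
# example. A minimal sketch of the kind of round-trip check such a helper might
# perform is below, assuming `rox` is general_robotics_toolbox and `np` is numpy;
# the helper name, signature, and tolerances are assumptions.
def _do_transform_test(to_rr, to_rox, rr_type_name, node):
    # Build an arbitrary rox Transform, convert it to the Robot Raconteur type
    # and back, then check that rotation and translation survive the round trip.
    # (rr_type_name and node are unused in this simplified sketch.)
    T1 = rox.Transform(rox.rpy2R(np.deg2rad([10.0, 20.0, 30.0])),
                       np.array([0.1, -0.2, 0.3]))
    rr_value = to_rr(T1)
    T2 = to_rox(rr_value)
    assert np.allclose(T1.R, T2.R, atol=1e-8)
    assert np.allclose(T1.p, T2.p, atol=1e-8)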
Example #2
    def handle_create(self, *args):
        try:
            robot_local_device_name = self.vue["$data"].robot_selected
            intrinsic_calib = self.vue["$data"].camera_intrinsic_selected
            extrinsic_calib = self.vue["$data"].camera_extrinsic_selected
            image_sequence_global_name = self.vue[
                "$data"].image_sequence_selected
            aruco_dict = self.vue["$data"].aruco_dict_selected
            aruco_tag_id = int(self.vue["$data"].aruco_tag_id)
            aruco_tag_size = float(self.vue["$data"].aruco_tag_size)
            xyz = np.zeros((3, ), dtype=np.float64)
            rpy = np.zeros((3, ), dtype=np.float64)
            xyz[0] = float(self.vue["$data"].marker_pose_x)
            xyz[1] = float(self.vue["$data"].marker_pose_y)
            xyz[2] = float(self.vue["$data"].marker_pose_z)
            rpy[0] = float(self.vue["$data"].marker_pose_r_r)
            rpy[1] = float(self.vue["$data"].marker_pose_r_p)
            rpy[2] = float(self.vue["$data"].marker_pose_r_y)

            rpy = np.deg2rad(rpy)

            robot_calib = self.core.device_manager.get_device_subscription(
                "vision_robot_calibration").GetDefaultClient()
            geom_util = GeometryUtil(client_obj=robot_calib)
            marker_pose = geom_util.xyz_rpy_to_pose(xyz, rpy)

            self.core.create_task(
                do_calibration(robot_local_device_name, intrinsic_calib,
                               extrinsic_calib, image_sequence_global_name,
                               aruco_dict, aruco_tag_id, aruco_tag_size,
                               marker_pose, self.new_name, self.core))
        except:
            traceback.print_exc()
Example #3
    def __init__(self,
                 device_manager,
                 device_info=None,
                 node: RR.RobotRaconteurNode = None):
        if node is None:
            self._node = RR.RobotRaconteurNode.s
        else:
            self._node = node
        self.device_info = device_info

        self.service_path = None
        self.ctx = None

        self._detected_marker = self._node.GetStructureType(
            "tech.pyri.vision.aruco_detection.DetectedMarker")
        self._aruco_detection_result = self._node.GetStructureType(
            "tech.pyri.vision.aruco_detection.ArucoDetectionResult")
        self._pose2d_dtype = self._node.GetNamedArrayDType(
            "com.robotraconteur.geometry.Pose2D")
        self._point2d_dtype = self._node.GetNamedArrayDType(
            "com.robotraconteur.geometry.Point2D")

        self._image_util = ImageUtil(node=self._node)
        self._geom_util = GeometryUtil(node=self._node)

        self.device_manager = device_manager
        self.device_manager.connect_device_type(
            "tech.pyri.variable_storage.VariableStorage")
        self.device_manager.connect_device_type(
            "com.robotraconteur.imaging.Camera")
        self.device_manager.device_added += self._device_added
        self.device_manager.device_removed += self._device_removed
        self.device_manager.refresh_devices(5)
Example #4
def _calibrate_camera_intrinsic(images, calibration_target):
    ret, mtx, dist, rvecs, tvecs, mean_error, imgs = _calibrate_camera_intrinsic2(
        images, calibration_target)
    if not ret:
        raise RR.OperationFailedException(
            "Camera intrinsic calibration failed")

    geom_util = GeometryUtil()

    calib = RRN.NewStructure(
        "com.robotraconteur.imaging.camerainfo.CameraCalibration")
    calib.image_size = geom_util.wh_to_size2d(
        [images[0].image_info.width, images[0].image_info.height],
        dtype=np.int32)

    calib.K = mtx

    dist_rr = RRN.NewStructure(
        "com.robotraconteur.imaging.camerainfo.PlumbBobDistortionInfo")
    dist_rr.k1 = dist[0]
    dist_rr.k2 = dist[1]
    dist_rr.p1 = dist[2]
    dist_rr.p2 = dist[3]
    dist_rr.k3 = dist[4]

    calib.distortion_info = RR.VarValue(
        dist_rr,
        "com.robotraconteur.imaging.camerainfo.PlumbBobDistortionInfo")

    imgs2 = []
    for img in imgs:
        imgs2.append(_cv_img_to_rr_display_img(img))

    return calib, imgs2, mean_error
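# The _cv_img_to_rr_display_img helper used above is not shown in this example. A
# plausible minimal sketch, assuming it simply wraps ImageUtil JPEG compression
# (the quality value of 70 is an assumption borrowed from the other examples):
def _cv_img_to_rr_display_img(img):
    image_util = ImageUtil()
    return image_util.array_to_compressed_image_jpg(img, 70)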
Example #5
    def __init__(self, device_manager, device_info = None, node: RR.RobotRaconteurNode = None):
        if node is None:
            self._node = RR.RobotRaconteurNode.s
        else:
            self._node = node
        self.device_info = device_info
                
        self.service_path = None
        self.ctx = None

        self._matched_template_2d_type = self._node.GetStructureType("tech.pyri.vision.template_matching.MatchedTemplate2D")
        self._template_matching_result_2d_type = self._node.GetStructureType("tech.pyri.vision.template_matching.TemplateMatchingResult2D")
        self._template_matching_result_3d_type = self._node.GetStructureType("tech.pyri.vision.template_matching.TemplateMatchingResult3D")
        self._matched_template_3d_type = self._node.GetStructureType("tech.pyri.vision.template_matching.MatchedTemplate3D")
        self._named_pose_with_covariance_type = self._node.GetStructureType("com.robotraconteur.geometry.NamedPoseWithCovariance")
        self._pose2d_dtype = self._node.GetNamedArrayDType("com.robotraconteur.geometry.Pose2D")

        self._image_util = ImageUtil(node=self._node)
        self._geom_util = GeometryUtil(node=self._node)

        self.device_manager = device_manager
        self.device_manager.connect_device_type("tech.pyri.variable_storage.VariableStorage")
        self.device_manager.connect_device_type("com.robotraconteur.imaging.Camera")
        self.device_manager.device_added += self._device_added
        self.device_manager.device_removed += self._device_removed
        self.device_manager.refresh_devices(5)
Example #6
    def __init__(self,
                 device_manager,
                 device_info=None,
                 node: RR.RobotRaconteurNode = None):
        if node is None:
            self._node = RR.RobotRaconteurNode.s
        else:
            self._node = node
        self.device_info = device_info

        self.service_path = None
        self.ctx = None

        self._pose2d_dtype = self._node.GetNamedArrayDType(
            "com.robotraconteur.geometry.Pose2D")
        self._point2d_dtype = self._node.GetNamedArrayDType(
            "com.robotraconteur.geometry.Point2D")

        self._geom_util = GeometryUtil(node=self._node)
        self._robot_util = RobotUtil(node=self._node)

        self.device_manager = device_manager
        self.device_manager.connect_device_type(
            "com.robotraconteur.robotics.robot.Robot")
        self.device_manager.connect_device_type(
            "com.robotraconteur.robotics.tool.Tool")
        self.device_manager.connect_device_type(
            "tech.pyri.variable_storage.VariableStorage")
        self.device_manager.device_added += self._device_added
        self.device_manager.device_removed += self._device_removed
        self.device_manager.refresh_devices(5)
def geometry_pose_component_get(pose, component_name):
    """
    Get an XYZ-RPY component of a pose.

    Parameters:

    * pose (Pose): The pose
    * component_name (str): The component to get. May be `X`, `Y`, `Z`, `R_R`, `R_P`, or `R_Y`

    Return (float): The pose value
    """

    geom_util = GeometryUtil(node = PyriSandboxContext.node)
    xyz,rpy = geom_util.pose_to_xyz_rpy(_convert_to_pose(pose))
    rpy = np.rad2deg(rpy)

    if component_name == "X":
        return float(xyz[0])
    if component_name == "Y":
        return float(xyz[1])
    if component_name == "Z":
        return float(xyz[2])
    if component_name == "R_R":
        return float(rpy[0])
    if component_name == "R_P":
        return float(rpy[1])
    if component_name == "R_Y":
        return float(rpy[2])
    assert False, "Invalid pose component"
def geometry_pose_component_set(pose, component_name, value):
    """
    Set an XYZ-RPY component of a pose. This function does not modify in place.
    It returns a new pose.

    Parameters:

    * pose (Pose): The pose
    * component_name (str): The component to set. May be `X`, `Y`, `Z`, `R_R`, `R_P`, or `R_Y`
    * value (float): The new pose component value in meters or degrees

    Return (Pose): The new pose with updated value

    """
    geom_util = GeometryUtil(node = PyriSandboxContext.node)
    xyz,rpy = geom_util.pose_to_xyz_rpy(_convert_to_pose(pose))
    rpy = np.rad2deg(rpy)

    if component_name == "X":
        xyz[0] = value
    elif component_name == "Y":
        xyz[1] = value
    elif component_name == "Z":
        xyz[2] = value
    elif component_name == "R_R":
        rpy[0] = value
    elif component_name == "R_P":
        rpy[1] = value
    elif component_name == "R_Y":
        rpy[2] = value
    else:
        assert False, "Invalid pose component"

    rpy = np.deg2rad(rpy)
    return geom_util.xyz_rpy_to_pose(xyz,rpy)
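# The two helpers above wrap GeometryUtil's pose <-> XYZ-RPY conversions. A minimal
# standalone sketch of that round trip outside the PyRI sandbox, assuming
# RobotRaconteurCompanion is installed (values are arbitrary examples):
import numpy as np
import RobotRaconteur as RR
import RobotRaconteurCompanion as RRC
from RobotRaconteurCompanion.Util.GeometryUtil import GeometryUtil

node = RR.RobotRaconteurNode()
node.Init()
try:
    RRC.RegisterStdRobDefServiceTypes(node)
    geom_util = GeometryUtil(node)
    xyz = np.array([0.1, 0.2, 0.3], dtype=np.float64)
    rpy = np.deg2rad(np.array([10.0, 20.0, 30.0], dtype=np.float64))
    pose = geom_util.xyz_rpy_to_pose(xyz, rpy)
    xyz2, rpy2 = geom_util.pose_to_xyz_rpy(pose)
    assert np.allclose(xyz, xyz2) and np.allclose(rpy, rpy2)
finally:
    node.Shutdown()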
Example #9
def _calibrate_camera_extrinsic(intrinsic_calib, image, board,
                                camera_local_device_name):

    # TODO: verify calibration data

    mtx = intrinsic_calib.K
    dist_rr = intrinsic_calib.distortion_info.data
    dist = np.array(
        [dist_rr.k1, dist_rr.k2, dist_rr.p1, dist_rr.p2, dist_rr.k3],
        dtype=np.float64)

    image_util = ImageUtil()
    frame = image_util.compressed_image_to_array(image)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    if board == "chessboard":
        width = 7
        height = 6
        square_size = 0.03
    else:
        raise RR.InvalidOperationException(
            f"Invalid calibration board {board}")

    ret, corners = cv2.findChessboardCorners(gray, (width, height), None)
    assert ret, "Could not find calibration target"

    objp = np.zeros((height * width, 3), np.float32)
    objp[:, :2] = np.mgrid[0:width, 0:height].T.reshape(-1, 2)

    objp = objp * square_size

    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)

    corners2 = cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1), criteria)

    ret, rvecs, tvecs = cv2.solvePnP(objp, corners2, mtx, dist)

    R = cv2.Rodrigues(rvecs.flatten())[0]

    R_landmark = np.array([[0, 1, 0], [1, 0, 0], [0, 0, -1]], dtype=np.float64)

    R_cam1 = R.transpose()
    p_cam1 = -R.transpose() @ tvecs
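    # R_cam1/p_cam1 are the camera pose expressed in the calibration target frame,
    # i.e. the inverse (R^T, -R^T t) of the target-in-camera pose from solvePnP.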

    R_cam = R_landmark.transpose() @ R_cam1
    p_cam = R_landmark.transpose() @ p_cam1
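    # R_cam/p_cam re-express the camera pose in the world frame using the fixed
    # rotation R_landmark between the calibration target and world conventions.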

    cv_image2 = cv2.aruco.drawAxis(frame, mtx, dist,
                                   cv2.Rodrigues(R_cam.transpose())[0],
                                   -R_cam.transpose() @ p_cam, 0.1)

    T = rox.Transform(R_cam, p_cam, "world", camera_local_device_name)

    geom_util = GeometryUtil()
    cov = np.eye(6) * 1e-5
    return geom_util.rox_transform_to_named_pose(
        T), cov, image_util.array_to_compressed_image_jpg(cv_image2), 0.0
def do_show_new_camera_calibration_extrinsic_dialog2(new_name: str, camera_pose, display_image, core: "PyriWebUIBrowser"):
    try:
        camera_calib = core.device_manager.get_device_subscription("vision_camera_calibration").GetDefaultClient()

        dialog2_html = importlib_resources.read_text(__package__,"new_calibrate_extrinsic_dialog2.html")

        el = js.document.createElement('div')
        el.id = "new_calibrate_extrinsic_dialog2_wrapper"
        js.document.getElementById("wrapper").appendChild(el)

        def handle_hidden(*args):
            try:
                el.parentElement.removeChild(el)
            except:
                traceback.print_exc()
        geom_util = GeometryUtil(client_obj=camera_calib)
        xyz, rpy1, _, _ = geom_util.named_pose_to_xyz_rpy(camera_pose.pose)
        rpy = np.rad2deg(rpy1)
        
        x = f"{xyz[0]:4e}"
        y = f"{xyz[1]:4e}"
        z = f"{xyz[2]:4e}"
        r_r = f"{rpy[0]:4e}"
        r_p = f"{rpy[1]:4e}"
        r_y = f"{rpy[2]:4e}"

        i=0
        
        d_encoded = str(base64.b64encode(display_image.data))[2:-1]
        disp_img_src = "data:image/jpeg;base64," + d_encoded
        # TODO: check for png?
        
        dialog = js.Vue.new(js.python_to_js({
            "el": "#new_calibrate_extrinsic_dialog2_wrapper",
            "template": dialog2_html,
            "data":
            {
                "x": x,
                "y": y,
                "z": z,
                "r_r": r_r,
                "r_p": r_p,
                "r_y": r_y,
                "disp_img": disp_img_src
            },
            "methods":
            {
                "handle_hidden": handle_hidden
            }

        }))

        dialog["$bvModal"].show("new_vision_camera_calibrate_extrinsic2")
        
    except:
        traceback.print_exc()
def geometry_pose_inv(pose):
    """
    Invert a pose

    Parameters:

    pose (Pose): The pose to invert

    Return (Pose): The inverted pose
    """
    geom_util = GeometryUtil(node = PyriSandboxContext.node)
    T_rox = geom_util.pose_to_rox_transform(_convert_to_pose(pose))
    T_res = T_rox.inv()
    return geom_util.rox_transform_to_pose(T_res)
def geometry_pose_multiply(pose_a, pose_b):
    """
    Multiply one pose with another

    Parameters:

    * pose_a (Pose): The first pose
    * pose_b (Pose): The second pose

    Return (Pose): The result of the multiplication
    """


    geom_util = GeometryUtil(node = PyriSandboxContext.node)
    T_a = geom_util.pose_to_rox_transform(_convert_to_pose(pose_a))
    T_b = geom_util.pose_to_rox_transform(_convert_to_pose(pose_b))
    T_res = T_a * T_b
    return geom_util.rox_transform_to_pose(T_res)
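# Hypothetical sandbox usage of the two helpers above: composing a pose with its
# inverse should give approximately the identity pose (names and tolerances here
# are illustrative assumptions).
def _example_pose_inv_multiply(pose_a):
    pose_ident = geometry_pose_multiply(pose_a, geometry_pose_inv(pose_a))
    # the translation of the identity pose should be ~0
    assert abs(geometry_pose_component_get(pose_ident, "X")) < 1e-6
    assert abs(geometry_pose_component_get(pose_ident, "Y")) < 1e-6
    assert abs(geometry_pose_component_get(pose_ident, "Z")) < 1e-6
    return pose_ident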
Example #13
    def __init__(self, device_manager, device_info = None, node : RR.RobotRaconteurNode = None):
        if node is None:
            self._node = RR.RobotRaconteurNode.s
        else:
            self._node = node
        self.device_info = device_info
                
        self.service_path = None
        self.ctx = None

        self.device_manager = device_manager
        self.device_manager.connect_device_type("tech.pyri.variable_storage.VariableStorage")
        self.device_manager.connect_device_type("com.robotraconteur.robotics.robot.Robot")
        self.device_manager.device_added += self._device_added
        self.device_manager.device_removed += self._device_removed
        self.device_manager.refresh_devices(5)

        self.robot_util = RobotUtil(self._node)
        self.geom_util = GeometryUtil(self._node)
        self.image_util = ImageUtil(self._node)
def test_geometry_util_array_types():
    node = RR.RobotRaconteurNode()
    node.SetLogLevelFromString("DEBUG")
    node.Init()

    try:
        RRC.RegisterStdRobDefServiceTypes(node)
        geom_util = GeometryUtil(node)
        _do_array_test(geom_util.xy_to_vector2, geom_util.vector2_to_xy, (2, ),
                       "Vector2", node)
        _do_array_test(geom_util.xyz_to_vector3, geom_util.vector3_to_xyz,
                       (3, ), "Vector3", node)
        _do_array_test(geom_util.abgxyz_to_vector6,
                       geom_util.vector6_to_abgxyz, (6, ), "Vector6", node)
        _do_array_test(geom_util.xy_to_point2d, geom_util.point2d_to_xy, (2, ),
                       "Point2D", node)
        _do_array_test(geom_util.xyz_to_point, geom_util.point_to_xyz, (3, ),
                       "Point", node)
        _do_array_test(geom_util.wh_to_size2d, geom_util.size2d_to_wh, (2, ),
                       "Size2D", node)
        _do_array_test(geom_util.whd_to_size, geom_util.size_to_whd, (3, ),
                       "Size", node)
        _do_array_test(geom_util.q_to_quaternion, geom_util.quaternion_to_q,
                       (3, ), "Quaternion", node,
                       lambda a: rox.R2q(rox.rpy2R(a)))
        _do_array_test(geom_util.R_to_quaternion, geom_util.quaternion_to_R,
                       (3, ), "Quaternion", node, lambda a: rox.rpy2R(a))
        _do_array_test(geom_util.rpy_to_quaternion,
                       geom_util.quaternion_to_rpy, (3, ), "Quaternion", node)
        _do_array_test(geom_util.array_to_spatial_velocity,
                       geom_util.spatial_velocity_to_array, (6, ),
                       "SpatialVelocity", node)
        _do_array_test(geom_util.array_to_spatial_acceleration,
                       geom_util.spatial_acceleration_to_array, (6, ),
                       "SpatialAcceleration", node)
        _do_array_test(geom_util.array_to_wrench, geom_util.wrench_to_array,
                       (6, ), "Wrench", node)

    finally:
        node.Shutdown()
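# The _do_array_test helper called above is not shown in this example. A minimal
# sketch of the round-trip check it presumably performs (helper name, signature,
# and tolerance are assumptions):
def _do_array_test(to_rr, to_array, shape, rr_type_name, node, prep=None):
    a = np.random.rand(*shape)
    if prep is not None:
        # e.g. convert random RPY angles into a rotation matrix or quaternion first
        a = prep(a)
    rr_value = to_rr(a)
    a2 = to_array(rr_value)
    assert np.allclose(a, a2, atol=1e-6)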
Example #15
    def movel(self,
              robot_local_device_name,
              pose_final,
              frame,
              robot_origin_calib_global_name,
              speed_perc,
              final_seed=None):

        robot = self.device_manager.get_device_client(robot_local_device_name)
        geom_util = GeometryUtil(client_obj=robot)

        if frame.lower() == "world":
            var_storage = self.device_manager.get_device_client(
                "variable_storage")
            robot_origin_pose = var_storage.getf_variable_value(
                "globals", robot_origin_calib_global_name).data
            T_rob = geom_util.named_pose_to_rox_transform(
                robot_origin_pose.pose)
            T_des1 = geom_util.pose_to_rox_transform(pose_final)
            T_des = T_rob.inv() * T_des1
            pose_final = geom_util.rox_transform_to_pose(T_des)
        elif frame.lower() == "robot":
            T_des = geom_util.pose_to_rox_transform(pose_final)
        else:
            assert False, "Unknown parent frame for movel"

        robot_info = robot.robot_info
        rox_robot = self._robot_util.robot_info_to_rox_robot(robot_info, 0)

        robot_state = robot.robot_state.PeekInValue()[0]

        q_initial = robot_state.joint_position

        traj = self._generate_movel_trajectory(robot, rox_robot, q_initial,
                                               T_des, speed_perc, final_seed)

        if traj is None:
            return EmptyGenerator()

        return TrajectoryMoveGenerator(robot, rox_robot, traj, self._node)
Example #16
    def jog_joints_to_pose(self, pose, speed_perc):
        print("Jog Joints to Pose is called")
        # Similar to jog_joints_with_limits, but first converts the requested
        # pose to joint angles using inverse kinematics, then jogs to those
        # angles scaled by the requested speed percentage.
        robot = self.robot
        if robot is not None:
            robot_state, _ = self.robot.robot_state.PeekInValue()
            q_current = robot_state.joint_position

            geom_util = GeometryUtil(client_obj=robot)
            T_des = geom_util.pose_to_rox_transform(pose)

            q_des, res = invkin.update_ik_info3(self.robot_rox, T_des,
                                                q_current)
            assert res, "Inverse kinematics failed"

            self.jog_joints_with_limits(
                q_des,
                float(speed_perc) * 0.01 * self.joint_vel_limits, True)

        else:
            # Give an error message to show that the robot is not connected
            print("Robot is not connected to RoboticsJog service yet!")
def geometry_pose_new(x,y,z,r_r,r_p,r_y):
    """
    Create a new pose using XYZ-RPY format. Units are meters and degrees

    Parameters:

    * x (float): X position in meters
    * y (float): Y position in meters
    * z (float): Z position in meters
    * r_r (float): Roll in degrees
    * r_p (float): Pitch in degrees
    * r_y (float): Yaw in degrees

    Return (Pose): Pose named array
    """


    xyz = np.array([x,y,z],dtype=np.float64)
    rpy = np.deg2rad(np.array([r_r,r_p,r_y],dtype=np.float64))

    geom_util = GeometryUtil(node = PyriSandboxContext.node)

    return geom_util.xyz_rpy_to_pose(xyz,rpy)    
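# Hypothetical PyRI sandbox usage of geometry_pose_new together with the component
# helpers defined earlier on this page (the values are arbitrary examples):
def _example_pose_usage():
    pose = geometry_pose_new(0.5, 0.0, 0.2, 0.0, 180.0, 0.0)
    z = geometry_pose_component_get(pose, "Z")
    # raise the pose by 5 cm and return the updated copy
    return geometry_pose_component_set(pose, "Z", z + 0.05)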
def robot_get_end_pose(frame):
    """
    Returns the end pose of a robot. This end pose is reported by the
    robot driver. It is typically defined as the flange of the robot,
    but may be the tool if the driver is configured to report
    the tool pose.

    Parameters:

    * frame (str): The frame to return the pose in. May be `ROBOT` or `WORLD`.

    Return (Pose): The robot end pose in the requested frame
    """
    robot_name = _get_active_robot_name()

    device_manager = PyriSandboxContext.device_manager
    robot = device_manager.get_device_client(robot_name,1)
    robot_state, _ = robot.robot_state.PeekInValue()

    robot_util = RobotUtil(client_obj = robot)
    geom_util = GeometryUtil(client_obj = robot)

    # TODO: cache robot_info
    rox_robot = robot_util.robot_info_to_rox_robot(robot.robot_info,0)
    T1 = rox.fwdkin(rox_robot,robot_state.joint_position)

    if frame == "ROBOT":
        return geom_util.rox_transform_to_pose(T1)
    elif frame == "WORLD":
        var_storage = device_manager.get_device_client("variable_storage")
        # TODO: don't hard code robot origin
        robot_origin_pose = var_storage.getf_variable_value("globals",_get_robot_origin_calibration()).data
        T_rob = geom_util.named_pose_to_rox_transform(robot_origin_pose.pose)
        T2 = T_rob * T1
        return geom_util.rox_transform_to_pose(T2)
    else:
        assert False, "Invalid frame"
intrinsic_var = var_storage.getf_variable_value("globals", calib_name)
extrinsic_var = var_storage.getf_variable_value(
    "globals", "camera_calibration_extrinsic")
robot_var = var_storage.getf_variable_value("globals",
                                            "robot_origin_calibration")

calib = intrinsic_var.data

print("Camera Matrix")
print(calib.K)
d = calib.distortion_info.data
dist = np.array([d.k1, d.k2, d.p1, d.p2, d.k3])
print("Camera distortion")
print(dist)

geom_util = GeometryUtil(client_obj=var_storage)

T_cam = geom_util.named_pose_to_rox_transform(extrinsic_var.data.pose)
T_rob = geom_util.named_pose_to_rox_transform(robot_var.data.pose)

print("T_cam")
print(T_cam.R)
print(T_cam.p)

print("T_rob")
print(T_rob.R)
print(T_rob.p)

print("T")
T = (T_cam.inv() * T_rob).inv()
print(T.R)
Example #20
import time
from RobotRaconteur.Client import *  # provides RR and RRN used below
from RobotRaconteurCompanion.Util.ImageUtil import ImageUtil
from RobotRaconteurCompanion.Util.GeometryUtil import GeometryUtil
# DeviceManagerClient import path assumed from the PyRI project layout
from pyri.device_manager_client import DeviceManagerClient
import cv2
import numpy as np

d = DeviceManagerClient('rr+tcp://localhost:59902?service=device_manager',
                        autoconnect=False)

d.refresh_devices(1)

d.connect_device("robotics_motion")

c = d.get_device_client("robotics_motion", 1)

geom_util = GeometryUtil(client_obj=c)


def _run_grab(gen):
    while True:
        try:
            res = gen.Next()
            print(res)
        except RR.StopIterationException:
            break


for i in range(5):
    pose2d_dtype = RRN.GetNamedArrayDType("com.robotraconteur.geometry.Pose2D",
                                          c)
    obj_pose = np.zeros((1, ), dtype=pose2d_dtype)
import time
from RobotRaconteurCompanion.Util.ImageUtil import ImageUtil
from RobotRaconteurCompanion.Util.GeometryUtil import GeometryUtil
# DeviceManagerClient import path assumed from the PyRI project layout
from pyri.device_manager_client import DeviceManagerClient
import numpy as np
import cv2

d = DeviceManagerClient('rr+tcp://localhost:59902?service=device_manager',
                        autoconnect=False)

d.refresh_devices(1)

d.connect_device("vision_robot_calibration")

calibration_service = d.get_device_client("vision_robot_calibration", 1)

geom_util = GeometryUtil(client_obj=calibration_service)
marker_pose = geom_util.xyz_rpy_to_pose(
    np.array([0.0393, -0.0091, 0.055]), np.array(np.deg2rad([90.0, 0.0,
                                                             180.0])))

ret = calibration_service.calibrate_robot_origin(
    "robot", "camera_intrinsic_calibration", "camera_extrinsic_calibration",
    "robot_calib0", "DICT_6X6_250", 150, 0.0316, marker_pose,
    "")  # "robot_origin_calibration0"

image_util = ImageUtil(client_obj=calibration_service)
geom_util = GeometryUtil(client_obj=calibration_service)

T = geom_util.named_pose_to_rox_transform(ret.robot_pose.pose)
print(T)
print(ret)
Example #22
print(jog_service.device_info.device.name)

jog = jog_service.get_jog("robot")

jog.setf_jog_mode()

#for x in range(100):
#jog.jog_joints3(1,1)
#jog.setf_halt_mode()

robot = d.get_device_client("robot", 1)

robot_state, _ = robot.robot_state.PeekInValue()
q_current = robot_state.joint_position
robot_util = RobotUtil(client_obj=robot)
rox_robot = robot_util.robot_info_to_rox_robot(robot.robot_info, 0)
geom_util = GeometryUtil(client_obj=jog_service)
T = rox.fwdkin(rox_robot, q_current)
print(f"Current xyz = {T.p}, rpy = {np.rad2deg(rox.R2rpy(T.R))}")
T2 = copy.deepcopy(T)
T2.p[1] += 0.1
T3 = copy.deepcopy(T)
T3.p[1] -= 0.1
pose_des = geom_util.rox_transform_to_pose(T2)
pose_des2 = geom_util.rox_transform_to_pose(T3)

for i in range(10):
    jog.jog_joints_to_pose(pose_des, 50)
    jog.jog_joints_to_pose(pose_des2, 50)
def calibrate(images, joint_poses, aruco_dict, aruco_id, aruco_markersize,
              flange_to_marker, mtx, dist, cam_pose, rox_robot,
              robot_local_device_name):
    """ Apply extrinsic camera calibration operation for images in the given directory path 
    using opencv aruco marker detection, the extrinsic marker poses given in a json file, 
    and the given intrinsic camera parameters."""

    assert aruco_dict.startswith("DICT_"), "Invalid aruco dictionary name"

    aruco_dict = getattr(aruco, aruco_dict)  # look up the OpenCV aruco dictionary enum by name
    aruco_dict = cv2.aruco.Dictionary_get(aruco_dict)
    aruco_params = cv2.aruco.DetectorParameters_create()

    i = 0

    imgs_out = []

    geom_util = GeometryUtil()
    image_util = ImageUtil()

    object_points = []
    image_points = []

    for img, joints in zip(images, joint_poses):

        # Find the aruco tag corners
        # corners, ids, rejected = cv2.aruco.detectMarkers(img, aruco_dict, parameters=aruco_params,cameraMatrix=mtx, distCoeff=dist)
        corners, ids, rejected = cv2.aruco.detectMarkers(
            img, aruco_dict, parameters=aruco_params)

        # #debug
        # print(str(type(corners))) # <class 'list'>
        # print(str(corners))  # list of numpy arrays of corners
        # print(str(type(ids))) # <class 'numpy.ndarray'>
        # print(str(ids))

        if len(corners) > 0:
            # Only find the id that we desire
            indexes = np.flatnonzero(ids.flatten() == aruco_id).tolist()
            corners = [corners[index] for index in indexes]
            ids = np.asarray([ids[index] for index in indexes])

            # #debug
            # print(str(type(corners))) # <class 'list'>
            # print(str(corners))  # list of numpy arrays of corners
            # print(str(type(ids))) # <class 'numpy.ndarray'>
            # print(str(ids))

            if len(ids) > 0:  # if there exist at least one id that we desire
                # Select the first detected one, discard the others
                corners = corners[0]  # now corners is 1 by 4

                # # extract the marker corners (which are always returned
                # # in top-left, top-right, bottom-right, and bottom-left
                # # order)
                # corners = corners.reshape((4, 2))
                # (topLeft, topRight, bottomRight, bottomLeft) = corners

                # Estimate the pose of the detected marker in camera frame
                rvec, tvec, markerPoints = cv2.aruco.estimatePoseSingleMarkers(
                    corners, aruco_markersize, mtx, dist)

                # # Debug: Show the detected tag and axis in the image
                # # # cv2.aruco.drawDetectedMarkers(img, corners)  # Draw A square around the markers (Does not work)
                img1 = img.copy()
                img_out = cv2.aruco.drawAxis(img1, mtx, dist, rvec, tvec,
                                             aruco_markersize *
                                             0.75)  # Draw Axis
                imgs_out.append(img_out)

                transform_base_2_flange = rox.fwdkin(rox_robot, joints)
                transform_flange_2_marker = geom_util.pose_to_rox_transform(
                    flange_to_marker)
                transform_base_2_marker = transform_base_2_flange * transform_flange_2_marker
                transform_base_2_marker_corners = _marker_corner_poses(
                    transform_base_2_marker, aruco_markersize)
                # Collect matching 3D marker-corner positions (robot base frame)
                # and 2D image corners for the solvePnP call below
                for j in range(4):
                    object_points.append(transform_base_2_marker_corners[j].p)
                    image_points.append(corners[0, j])
                #pose_pairs_dict[i] = (transform_base_2_marker_corners, corners)
                i += 1

    object_points_np = np.array(object_points, dtype=np.float64)
    image_points_np = np.array(image_points, dtype=np.float32)

    # Finally execute the calibration
    retval, rvec, tvec = cv2.solvePnP(object_points_np, image_points_np, mtx,
                                      dist)
    R_cam2base = cv2.Rodrigues(rvec)[0]
    T_cam2base = tvec

    # Add another display image of marker at robot base
    img_out = cv2.aruco.drawAxis(img, mtx, dist,
                                 cv2.Rodrigues(R_cam2base)[0], T_cam2base,
                                 aruco_markersize * 0.75)  # Draw Axis
    imgs_out.append(img_out)

    rox_transform_cam2base = rox.Transform(R_cam2base, T_cam2base,
                                           cam_pose.parent_frame_id,
                                           robot_local_device_name)
    rox_transform_world2base = cam_pose * rox_transform_cam2base

    #R_base2cam = R_cam2base.T
    #T_base2cam = - R_base2cam @ T_cam2base

    R_base2cam = rox_transform_world2base.inv().R
    T_base2cam = rox_transform_world2base.inv().p

    #debug
    print("FINAL RESULTS: ")
    print("str(R_cam2base)")
    # print(str(type(R_cam2base)))
    print(str(R_cam2base))
    print("str(T_cam2base)")
    # print(str(type(T_cam2base.flatten())))
    print(str(T_cam2base))

    print("str(R_base2cam)")
    # print(str(type(R_base2cam)))
    print(str(R_base2cam))
    print("str(T_base2cam)")
    # print(str(type(T_base2cam.flatten())))
    print(str(T_base2cam))

    pose_res = geom_util.rox_transform_to_named_pose(rox_transform_world2base)
    cov = np.eye(6) * 1e-5

    imgs_out2 = [
        image_util.array_to_compressed_image_jpg(i, 70) for i in imgs_out
    ]

    return pose_res, cov, imgs_out2, 0.0
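# The _marker_corner_poses helper used above is not shown in this example. A
# plausible sketch, assuming the standard OpenCV aruco corner order (top-left,
# top-right, bottom-right, bottom-left) and a marker frame with x right, y up,
# and z out of the marker plane; the convention in the real helper may differ:
def _marker_corner_poses(transform_base_2_marker, marker_size):
    s = marker_size / 2.0
    corners_local = [
        np.array([-s, s, 0.0]),   # top-left
        np.array([s, s, 0.0]),    # top-right
        np.array([s, -s, 0.0]),   # bottom-right
        np.array([-s, -s, 0.0]),  # bottom-left
    ]
    T = transform_base_2_marker
    return [rox.Transform(T.R, T.R @ p + T.p) for p in corners_local]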
Example #24
def do_show_new_robot_origin_calibration_dialog2(new_name: str, robot_pose,
                                                 display_images,
                                                 core: "PyriWebUIBrowser"):
    try:
        dialog2_html = importlib_resources.read_text(
            __package__, "new_calibrate_robot_origin_dialog2.html")

        robot_calib = core.device_manager.get_device_subscription(
            "vision_robot_calibration").GetDefaultClient()
        geom_util = GeometryUtil(client_obj=robot_calib)
        marker_xyz, marker_rpy, _, _ = geom_util.named_pose_to_xyz_rpy(
            robot_pose.pose)

        el = js.document.createElement('div')
        el.id = "new_calibrate_robot_origin_dialog2_wrapper"
        js.document.getElementById("wrapper").appendChild(el)

        def handle_hidden(*args):
            try:
                el.parentElement.removeChild(el)
            except:
                traceback.print_exc()

        x = f"{marker_xyz[0]:4e}"
        y = f"{marker_xyz[1]:4e}"
        z = f"{marker_xyz[2]:4e}"
        r_r = f"{marker_rpy[0]:4e}"
        r_p = f"{marker_rpy[1]:4e}"
        r_y = f"{marker_rpy[2]:4e}"

        imgs = []
        i = 0
        for d in display_images:
            d_encoded = str(base64.b64encode(d.data))[2:-1]
            d2 = {
                "id": i,
                "caption": f"Calibration result {i+1}",
                "img": "data:image/jpeg;base64," + d_encoded
            }
            del d_encoded
            imgs.append(d2)
            i += 1
            #TODO: check for png?

        dialog = js.Vue.new(
            js.python_to_js({
                "el": "#new_calibrate_robot_origin_dialog2_wrapper",
                "template": dialog2_html,
                "data": {
                    "x": x,
                    "y": y,
                    "z": z,
                    "r_r": r_r,
                    "r_p": r_p,
                    "r_y": r_y,
                    "display_images": imgs
                },
                "methods": {
                    "handle_hidden": handle_hidden
                }
            }))

        dialog["$bvModal"].show("new_vision_camera_calibrate_robot_origin2")

    except:
        traceback.print_exc()