Example #1
def geometry_pose_component_get(pose, component_name):
    """
    Get an XYZ-RPY component of a pose.

    Parameters:

    * pose (Pose): The pose
    * component_name (str): The component to get. May be `X`, `Y`, `Z`, `R_R`, `R_P`, or `R_Y`

    Return (float): The pose component value, in meters or degrees
    """

    geom_util = GeometryUtil(node = PyriSandboxContext.node)
    xyz,rpy = geom_util.pose_to_xyz_rpy(_convert_to_pose(pose))
    rpy = np.rad2deg(rpy)

    if component_name == "X":
        return float(xyz[0])
    if component_name == "Y":
        return float(xyz[1])
    if component_name == "Z":
        return float(xyz[2])
    if component_name == "R_R":
        return float(rpy[0])
    if component_name == "R_P":
        return float(rpy[1])
    if component_name == "R_Y":
        return float(rpy[2])
    assert False, "Invalid pose component"
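A minimal usage sketch of the function above, assuming it runs inside a PyRI sandbox procedure where geometry_pose_new (Example #14) is also available; the numeric values are illustrative only.

# Usage sketch (hypothetical values; sandbox builtins assumed available)
pose = geometry_pose_new(0.5, 0.0, 0.2, 0.0, 0.0, 90.0)
x = geometry_pose_component_get(pose, "X")      # 0.5 (meters)
r_y = geometry_pose_component_get(pose, "R_Y")  # 90.0 (degrees)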
Example #2
    def __init__(self,
                 device_manager,
                 device_info=None,
                 node: RR.RobotRaconteurNode = None):
        if node is None:
            self._node = RR.RobotRaconteurNode.s
        else:
            self._node = node
        self.device_info = device_info

        self.service_path = None
        self.ctx = None

        self._pose2d_dtype = self._node.GetNamedArrayDType(
            "com.robotraconteur.geometry.Pose2D")
        self._point2d_dtype = self._node.GetNamedArrayDType(
            "com.robotraconteur.geometry.Point2D")

        self._geom_util = GeometryUtil(node=self._node)
        self._robot_util = RobotUtil(node=self._node)

        self.device_manager = device_manager
        self.device_manager.connect_device_type(
            "com.robotraconteur.robotics.robot.Robot")
        self.device_manager.connect_device_type(
            "com.robotraconteur.robotics.tool.Tool")
        self.device_manager.connect_device_type(
            "tech.pyri.variable_storage.VariableStorage")
        self.device_manager.device_added += self._device_added
        self.device_manager.device_removed += self._device_removed
        self.device_manager.refresh_devices(5)
Example #3
    def __init__(self,
                 device_manager,
                 device_info=None,
                 node: RR.RobotRaconteurNode = None):
        if node is None:
            self._node = RR.RobotRaconteurNode.s
        else:
            self._node = node
        self.device_info = device_info

        self.service_path = None
        self.ctx = None

        self._detected_marker = self._node.GetStructureType(
            "tech.pyri.vision.aruco_detection.DetectedMarker")
        self._aruco_detection_result = self._node.GetStructureType(
            "tech.pyri.vision.aruco_detection.ArucoDetectionResult")
        self._pose2d_dtype = self._node.GetNamedArrayDType(
            "com.robotraconteur.geometry.Pose2D")
        self._point2d_dtype = self._node.GetNamedArrayDType(
            "com.robotraconteur.geometry.Point2D")

        self._image_util = ImageUtil(node=self._node)
        self._geom_util = GeometryUtil(node=self._node)

        self.device_manager = device_manager
        self.device_manager.connect_device_type(
            "tech.pyri.variable_storage.VariableStorage")
        self.device_manager.connect_device_type(
            "com.robotraconteur.imaging.Camera")
        self.device_manager.device_added += self._device_added
        self.device_manager.device_removed += self._device_removed
        self.device_manager.refresh_devices(5)
Example #4
def _calibrate_camera_intrinsic(images, calibration_target):
    ret, mtx, dist, rvecs, tvecs, mean_error, imgs = _calibrate_camera_intrinsic2(
        images, calibration_target)
    if not ret:
        raise RR.OperationFailedException(
            "Camera intrinsic calibration failed")

    geom_util = GeometryUtil()

    calib = RRN.NewStructure(
        "com.robotraconteur.imaging.camerainfo.CameraCalibration")
    calib.image_size = geom_util.wh_to_size2d(
        [images[0].image_info.width, images[0].image_info.height],
        dtype=np.int32)

    calib.K = mtx

    dist_rr = RRN.NewStructure(
        "com.robotraconteur.imaging.camerainfo.PlumbBobDistortionInfo")
    dist_rr.k1 = dist[0]
    dist_rr.k2 = dist[1]
    dist_rr.p1 = dist[2]
    dist_rr.p2 = dist[3]
    dist_rr.k3 = dist[4]

    calib.distortion_info = RR.VarValue(
        dist_rr,
        "com.robotraconteur.imaging.camerainfo.PlumbBobDistortionInfo")

    imgs2 = []
    for img in imgs:
        imgs2.append(_cv_img_to_rr_display_img(img))

    return calib, imgs2, mean_error
Example #5
    def __init__(self, device_manager, device_info = None, node: RR.RobotRaconteurNode = None):
        if node is None:
            self._node = RR.RobotRaconteurNode.s
        else:
            self._node = node
        self.device_info = device_info
                
        self.service_path = None
        self.ctx = None

        self._matched_template_2d_type = self._node.GetStructureType("tech.pyri.vision.template_matching.MatchedTemplate2D")
        self._template_matching_result_2d_type = self._node.GetStructureType("tech.pyri.vision.template_matching.TemplateMatchingResult2D")
        self._template_matching_result_3d_type = self._node.GetStructureType("tech.pyri.vision.template_matching.TemplateMatchingResult3D")
        self._matched_template_3d_type = self._node.GetStructureType("tech.pyri.vision.template_matching.MatchedTemplate3D")
        self._named_pose_with_covariance_type = self._node.GetStructureType("com.robotraconteur.geometry.NamedPoseWithCovariance")
        self._pose2d_dtype = self._node.GetNamedArrayDType("com.robotraconteur.geometry.Pose2D")

        self._image_util = ImageUtil(node=self._node)
        self._geom_util = GeometryUtil(node=self._node)

        self.device_manager = device_manager
        self.device_manager.connect_device_type("tech.pyri.variable_storage.VariableStorage")
        self.device_manager.connect_device_type("com.robotraconteur.imaging.Camera")
        self.device_manager.device_added += self._device_added
        self.device_manager.device_removed += self._device_removed
        self.device_manager.refresh_devices(5)
Example #6
    def handle_create(self, *args):
        try:
            robot_local_device_name = self.vue["$data"].robot_selected
            intrinsic_calib = self.vue["$data"].camera_intrinsic_selected
            extrinsic_calib = self.vue["$data"].camera_extrinsic_selected
            image_sequence_global_name = self.vue[
                "$data"].image_sequence_selected
            aruco_dict = self.vue["$data"].aruco_dict_selected
            aruco_tag_id = int(self.vue["$data"].aruco_tag_id)
            aruco_tag_size = float(self.vue["$data"].aruco_tag_size)
            xyz = np.zeros((3, ), dtype=np.float64)
            rpy = np.zeros((3, ), dtype=np.float64)
            xyz[0] = float(self.vue["$data"].marker_pose_x)
            xyz[1] = float(self.vue["$data"].marker_pose_y)
            xyz[2] = float(self.vue["$data"].marker_pose_z)
            rpy[0] = float(self.vue["$data"].marker_pose_r_r)
            rpy[1] = float(self.vue["$data"].marker_pose_r_p)
            rpy[2] = float(self.vue["$data"].marker_pose_r_y)

            rpy = np.deg2rad(rpy)

            robot_calib = self.core.device_manager.get_device_subscription(
                "vision_robot_calibration").GetDefaultClient()
            geom_util = GeometryUtil(client_obj=robot_calib)
            marker_pose = geom_util.xyz_rpy_to_pose(xyz, rpy)

            self.core.create_task(do_calibration(robot_local_device_name,intrinsic_calib,extrinsic_calib,\
                image_sequence_global_name,aruco_dict,aruco_tag_id, aruco_tag_size, marker_pose, self.new_name,self.core))
        except:
            traceback.print_exc()
Example #7
def geometry_pose_component_set(pose, component_name, value):
    """
    Set an XYZ-RPY component of a pose. This function does not modify in place.
    It returns a new pose.

    Parameters:

    * pose (Pose): The pose
    * component_name (str): The component to set. May be `X`, `Y`, `Z`, `R_R`, `R_P`, or `R_Y`
    * value (float): The new pose component value in meters or degrees

    Return (Pose): The new pose with updated value

    """
    geom_util = GeometryUtil(node = PyriSandboxContext.node)
    xyz,rpy = geom_util.pose_to_xyz_rpy(_convert_to_pose(pose))
    rpy = np.rad2deg(rpy)

    if component_name == "X":
        xyz[0] = value
    elif component_name == "Y":
        xyz[1] = value
    elif component_name == "Z":
        xyz[2] = value
    elif component_name == "R_R":
        rpy[0] = value
    elif component_name == "R_P":
        rpy[1] = value
    elif component_name == "R_Y":
        rpy[2] = value
    else:
        assert False, "Invalid pose component"

    rpy = np.deg2rad(rpy)
    return geom_util.xyz_rpy_to_pose(xyz,rpy)
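A short sketch of the non-mutating semantics described in the docstring, under the same sandbox assumptions as the sketch after Example #1:

# geometry_pose_component_set returns a new pose; the input pose is unchanged
pose = geometry_pose_new(0.5, 0.0, 0.2, 0.0, 0.0, 0.0)
pose2 = geometry_pose_component_set(pose, "Z", 0.35)
geometry_pose_component_get(pose, "Z")   # still 0.2
geometry_pose_component_get(pose2, "Z")  # 0.35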
Example #8
def _calibrate_camera_extrinsic(intrinsic_calib, image, board,
                                camera_local_device_name):

    # TODO: verify calibration data

    mtx = intrinsic_calib.K
    dist_rr = intrinsic_calib.distortion_info.data
    dist = np.array(
        [dist_rr.k1, dist_rr.k2, dist_rr.p1, dist_rr.p2, dist_rr.k3],
        dtype=np.float64)

    image_util = ImageUtil()
    frame = image_util.compressed_image_to_array(image)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    if board == "chessboard":
        width = 7
        height = 6
        square_size = 0.03
    else:
        raise RR.InvalidOperationException(
            f"Invalid calibration board {board}")

    ret, corners = cv2.findChessboardCorners(gray, (width, height), None)
    assert ret, "Could not find calibration target"

    objp = np.zeros((height * width, 3), np.float32)
    objp[:, :2] = np.mgrid[0:width, 0:height].T.reshape(-1, 2)

    objp = objp * square_size

    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)

    corners2 = cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1), criteria)

    ret, rvecs, tvecs = cv2.solvePnP(objp, corners2, mtx, dist)

    R = cv2.Rodrigues(rvecs.flatten())[0]

    R_landmark = np.array([[0, 1, 0], [1, 0, 0], [0, 0, -1]], dtype=np.float64)

    R_cam1 = R.transpose()
    p_cam1 = -R.transpose() @ tvecs

    R_cam = R_landmark.transpose() @ R_cam1
    p_cam = R_landmark.transpose() @ p_cam1

    cv_image2 = cv2.aruco.drawAxis(frame, mtx, dist,
                                   cv2.Rodrigues(R_cam.transpose())[0],
                                   -R_cam.transpose() @ p_cam, 0.1)

    T = rox.Transform(R_cam, p_cam, "world", camera_local_device_name)

    geom_util = GeometryUtil()
    cov = np.eye(6) * 1e-5
    return geom_util.rox_transform_to_named_pose(
        T), cov, image_util.array_to_compressed_image_jpg(cv_image2), 0.0
Example #9
def do_show_new_camera_calibration_extrinsic_dialog2(new_name: str, camera_pose, display_image, core: "PyriWebUIBrowser"):
    try:
        camera_calib = core.device_manager.get_device_subscription("vision_camera_calibration").GetDefaultClient()

        dialog2_html = importlib_resources.read_text(__package__,"new_calibrate_extrinsic_dialog2.html")

        el = js.document.createElement('div')
        el.id = "new_calibrate_extrinsic_dialog2_wrapper"
        js.document.getElementById("wrapper").appendChild(el)

        def handle_hidden(*args):
            try:
                el.parentElement.removeChild(el)
            except:
                traceback.print_exc()
        geom_util = GeometryUtil(client_obj=camera_calib)
        xyz, rpy1, _, _ = geom_util.named_pose_to_xyz_rpy(camera_pose.pose)
        rpy = np.rad2deg(rpy1)
        
        x = f"{xyz[0]:4e}"
        y = f"{xyz[1]:4e}"
        z = f"{xyz[2]:4e}"
        r_r = f"{rpy[0]:4e}"
        r_p = f"{rpy[1]:4e}"
        r_y = f"{rpy[2]:4e}"

        d_encoded = str(base64.b64encode(display_image.data))[2:-1]
        disp_img_src = "data:image/jpeg;base64," + d_encoded
        # TODO: check for png?
        
        dialog = js.Vue.new(js.python_to_js({
            "el": "#new_calibrate_extrinsic_dialog2_wrapper",
            "template": dialog2_html,
            "data":
            {
                "x": x,
                "y": y,
                "z": z,
                "r_r": r_r,
                "r_p": r_p,
                "r_y": r_y,
                "disp_img": disp_img_src
            },
            "methods":
            {
                "handle_hidden": handle_hidden
            }

        }))

        dialog["$bvModal"].show("new_vision_camera_calibrate_extrinsic2")
        
    except:
        traceback.print_exc()
Example #10
def geometry_pose_inv(pose):
    """
    Invert a pose

    Parameters:

    * pose (Pose): The pose to invert

    Return (Pose): The inverted pose
    """
    geom_util = GeometryUtil(node = PyriSandboxContext.node)
    T_rox = geom_util.pose_to_rox_transform(_convert_to_pose(pose))
    T_res = T_rox.inv()
    return geom_util.rox_transform_to_pose(T_res)
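A hedged round-trip check: composing a pose with its inverse should yield approximately the identity pose. geometry_pose_multiply is shown in Example #12 below.

# Round-trip sketch (sandbox builtins assumed available)
pose = geometry_pose_new(0.5, 0.0, 0.2, 0.0, 0.0, 90.0)
identity = geometry_pose_multiply(pose, geometry_pose_inv(pose))
geometry_pose_component_get(identity, "X")  # approximately 0.0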
Example #11
def test_geometry_util_transform_types():
    node = RR.RobotRaconteurNode()
    node.SetLogLevelFromString("DEBUG")
    node.Init()

    try:
        RRC.RegisterStdRobDefServiceTypes(node)
        geom_util = GeometryUtil(node)

        _do_transform_test(geom_util.rox_transform_to_transform,
                           geom_util.transform_to_rox_transform, "Transform",
                           node)
        _do_transform_test(geom_util.rox_transform_to_pose,
                           geom_util.pose_to_rox_transform, "Pose", node)
        _do_named_transform_test(geom_util.rox_transform_to_named_transform,
                                 geom_util.named_transform_to_rox_transform,
                                 "NamedTransform", node)
        _do_named_transform_test(geom_util.rox_transform_to_named_pose,
                                 geom_util.named_pose_to_rox_transform,
                                 "NamedPose", node)

        _do_transform_test_xyz_rpy(geom_util.xyz_rpy_to_transform,
                                   geom_util.transform_to_xyz_rpy, "Transform",
                                   node)
        _do_transform_test_xyz_rpy(geom_util.xyz_rpy_to_pose,
                                   geom_util.pose_to_xyz_rpy, "Pose", node)
        _do_named_transform_test_xyz_rpy(geom_util.xyz_rpy_to_named_transform,
                                         geom_util.named_transform_to_xyz_rpy,
                                         "NamedTransform", node)
        _do_named_transform_test_xyz_rpy(geom_util.xyz_rpy_to_named_pose,
                                         geom_util.named_pose_to_xyz_rpy,
                                         "NamedPose", node)

    finally:
        node.Shutdown()
Example #12
def geometry_pose_multiply(pose_a, pose_b):
    """
    Multiply one pose with another

    Parameters:

    * pose_a (Pose): The first pose
    * pose_b (Pose): The second pose

    Return (Pose): The result of the multiplication
    """


    geom_util = GeometryUtil(node = PyriSandboxContext.node)
    T_a = geom_util.pose_to_rox_transform(_convert_to_pose(pose_a))
    T_b = geom_util.pose_to_rox_transform(_convert_to_pose(pose_b))
    T_res = T_a * T_b
    return geom_util.rox_transform_to_pose(T_res)
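Pose multiplication composes transforms: if pose_a maps world to camera and pose_b maps camera to object, the product maps world to object. A sketch with illustrative values:

# Frame composition sketch (sandbox builtins assumed available)
T_world_camera = geometry_pose_new(1.0, 0.0, 0.5, 180.0, 0.0, 0.0)
T_camera_object = geometry_pose_new(0.0, 0.0, 0.3, 0.0, 0.0, 0.0)
T_world_object = geometry_pose_multiply(T_world_camera, T_camera_object)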
Example #13
    def __init__(self, device_manager, device_info = None, node : RR.RobotRaconteurNode = None):
        if node is None:
            self._node = RR.RobotRaconteurNode.s
        else:
            self._node = node
        self.device_info = device_info
                
        self.service_path = None
        self.ctx = None

        self.device_manager = device_manager
        self.device_manager.connect_device_type("tech.pyri.variable_storage.VariableStorage")
        self.device_manager.connect_device_type("com.robotraconteur.robotics.robot.Robot")
        self.device_manager.device_added += self._device_added
        self.device_manager.device_removed += self._device_removed
        self.device_manager.refresh_devices(5)

        self.robot_util = RobotUtil(self._node)
        self.geom_util = GeometryUtil(self._node)
        self.image_util = ImageUtil(self._node)
Example #14
def geometry_pose_new(x,y,z,r_r,r_p,r_y):
    """
    Create a new pose using XYZ-RPY format. Units are meters and degrees

    Parameters:

    * x (float): X position in meters
    * y (float): Y position in meters
    * z (float): Z position in meters
    * r_r (float): Roll in degrees
    * r_p (float): Pitch in degrees
    * r_y (float): Yaw in degrees

    Return (Pose): Pose named array
    """


    xyz = np.array([x,y,z],dtype=np.float64)
    rpy = np.deg2rad(np.array([r_r,r_p,r_y],dtype=np.float64))

    geom_util = GeometryUtil(node = PyriSandboxContext.node)

    return geom_util.xyz_rpy_to_pose(xyz,rpy)    
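Outside the sandbox, the same conversion is available directly from GeometryUtil. A minimal sketch following the test setup of Example #11; it assumes the usual imports (import RobotRaconteur as RR, import RobotRaconteurCompanion as RRC, import numpy as np):

node = RR.RobotRaconteurNode()
node.Init()
try:
    RRC.RegisterStdRobDefServiceTypes(node)
    geom_util = GeometryUtil(node)
    pose = geom_util.xyz_rpy_to_pose(np.array([0.5, 0.0, 0.2]),
                                     np.deg2rad([0.0, 0.0, 90.0]))
finally:
    node.Shutdown()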
Example #15
    def jog_joints_to_pose(self, pose, speed_perc):
        print("Jog Joints to Pose is called")
        # Similar to jog_joints_with_limits, but first solves inverse kinematics
        # to find the joint angles that reach the specified pose
        robot = self.robot
        if robot is not None:
            robot_state, _ = self.robot.robot_state.PeekInValue()
            q_current = robot_state.joint_position

            geom_util = GeometryUtil(client_obj=robot)
            T_des = geom_util.pose_to_rox_transform(pose)

            q_des, res = invkin.update_ik_info3(self.robot_rox, T_des,
                                                q_current)
            assert res, "Inverse kinematics failed"

            self.jog_joints_with_limits(
                q_des,
                float(speed_perc) * 0.01 * self.joint_vel_limits, True)

        else:
            # Give an error message to show that the robot is not connected
            print("Robot is not connected to RoboticsJog service yet!")
Example #16
def robot_get_end_pose(frame):
    """
    Returns the end pose of a robot. This end pose is reported by the
    robot driver. It is typically defined as the flange of the robot,
    but may be the tool if the driver is configured to report
    the tool pose.

    Parameters:

    * frame (str): The frame to return the pose in. May be `ROBOT` or `WORLD`.

    Return (Pose): The robot end pose in the requested frame
    """
    robot_name = _get_active_robot_name()

    device_manager = PyriSandboxContext.device_manager
    robot = device_manager.get_device_client(robot_name,1)
    robot_state, _ = robot.robot_state.PeekInValue()

    robot_util = RobotUtil(client_obj = robot)
    geom_util = GeometryUtil(client_obj = robot)

    # TODO: cache robot_info
    rox_robot = robot_util.robot_info_to_rox_robot(robot.robot_info,0)
    T1 = rox.fwdkin(rox_robot,robot_state.joint_position)

    if frame == "ROBOT":
        return geom_util.rox_transform_to_pose(T1)
    elif frame == "WORLD":
        var_storage = device_manager.get_device_client("variable_storage")
        # TODO: don't hard code robot origin
        robot_origin_pose = var_storage.getf_variable_value("globals",_get_robot_origin_calibration()).data
        T_rob = geom_util.named_pose_to_rox_transform(robot_origin_pose.pose)
        T2 = T_rob * T1
        return geom_util.rox_transform_to_pose(T2)
    else:
        assert False, "Invalid frame"
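A sandbox usage sketch combining the functions above; it assumes an active robot has been selected and the robot origin calibration global exists:

# Read the end effector height in the world frame (meters)
end_pose = robot_get_end_pose("WORLD")
z = geometry_pose_component_get(end_pose, "Z")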
Example #17
def test_geometry_util_array_types():
    node = RR.RobotRaconteurNode()
    node.SetLogLevelFromString("DEBUG")
    node.Init()

    try:
        RRC.RegisterStdRobDefServiceTypes(node)
        geom_util = GeometryUtil(node)
        _do_array_test(geom_util.xy_to_vector2, geom_util.vector2_to_xy, (2, ),
                       "Vector2", node)
        _do_array_test(geom_util.xyz_to_vector3, geom_util.vector3_to_xyz,
                       (3, ), "Vector3", node)
        _do_array_test(geom_util.abgxyz_to_vector6,
                       geom_util.vector6_to_abgxyz, (6, ), "Vector6", node)
        _do_array_test(geom_util.xy_to_point2d, geom_util.point2d_to_xy, (2, ),
                       "Point2D", node)
        _do_array_test(geom_util.xyz_to_point, geom_util.point_to_xyz, (3, ),
                       "Point", node)
        _do_array_test(geom_util.wh_to_size2d, geom_util.size2d_to_wh, (2, ),
                       "Size2D", node)
        _do_array_test(geom_util.whd_to_size, geom_util.size_to_whd, (3, ),
                       "Size", node)
        _do_array_test(geom_util.q_to_quaternion, geom_util.quaternion_to_q,
                       (3, ), "Quaternion", node,
                       lambda a: rox.R2q(rox.rpy2R(a)))
        _do_array_test(geom_util.R_to_quaternion, geom_util.quaternion_to_R,
                       (3, ), "Quaternion", node, lambda a: rox.rpy2R(a))
        _do_array_test(geom_util.rpy_to_quaternion,
                       geom_util.quaternion_to_rpy, (3, ), "Quaternion", node)
        _do_array_test(geom_util.array_to_spatial_velocity,
                       geom_util.spatial_velocity_to_array, (6, ),
                       "SpatialVelocity", node)
        _do_array_test(geom_util.array_to_spatial_acceleration,
                       geom_util.spatial_acceleration_to_array, (6, ),
                       "SpatialAcceleration", node)
        _do_array_test(geom_util.array_to_wrench, geom_util.wrench_to_array,
                       (6, ), "Wrench", node)

    finally:
        node.Shutdown()
Example #18
    def movel(self,
              robot_local_device_name,
              pose_final,
              frame,
              robot_origin_calib_global_name,
              speed_perc,
              final_seed=None):

        robot = self.device_manager.get_device_client(robot_local_device_name)
        geom_util = GeometryUtil(client_obj=robot)

        if frame.lower() == "world":
            var_storage = self.device_manager.get_device_client(
                "variable_storage")
            robot_origin_pose = var_storage.getf_variable_value(
                "globals", robot_origin_calib_global_name).data
            T_rob = geom_util.named_pose_to_rox_transform(
                robot_origin_pose.pose)
            T_des1 = geom_util.pose_to_rox_transform(pose_final)
            T_des = T_rob.inv() * T_des1
            pose_final = geom_util.rox_transform_to_pose(T_des)
        elif frame.lower() == "robot":
            T_des = geom_util.pose_to_rox_transform(pose_final)
        else:
            assert False, "Unknown parent frame for movel"

        robot_info = robot.robot_info
        rox_robot = self._robot_util.robot_info_to_rox_robot(robot_info, 0)

        robot_state = robot.robot_state.PeekInValue()[0]

        q_initial = robot_state.joint_position

        traj = self._generate_movel_trajectory(robot, rox_robot, q_initial,
                                               T_des, speed_perc, final_seed)

        if traj is None:
            return EmptyGenerator()

        return TrajectoryMoveGenerator(robot, rox_robot, traj, self._node)
Example #19
d.connect_device("robot")

c = d.get_device_client("robotics_motion", 1)

#p_target = np.array([-np.random.uniform(0.4,0.8),np.random.uniform(-0.1,0.1),np.random.uniform(0.0,0.4)])
p_target = np.array([
    -np.random.uniform(-0.1, 0.1),
    np.random.uniform(-0.1, 0.1),
    np.random.uniform(0.0, 0.4)
])
rpy_target = np.random.randn(3) * 0.5
rpy_target[0] += np.pi
R_target = rox.rpy2R(rpy_target)
# p_target = np.array([-0.6, 0.0, 0.1])
# R_target = np.array([[0,1,0],[1,0,0],[0,0,-1]])
T_target = rox.Transform(R_target, p_target)

r = d.get_device_client("robot", 1)

geom_util = GeometryUtil(client_obj=r)
p_target = geom_util.rox_transform_to_pose(T_target)

print("Begin movel")
gen = c.movel("robot", p_target, "world", "robot_origin_calibration", 50)

while True:
    try:
        gen.Next()
    except RR.StopIterationException:
        break
print("End movel")
Example #20
class VisionTemplateMatching_impl(object):
    def __init__(self, device_manager, device_info = None, node: RR.RobotRaconteurNode = None):
        if node is None:
            self._node = RR.RobotRaconteurNode.s
        else:
            self._node = node
        self.device_info = device_info
                
        self.service_path = None
        self.ctx = None

        self._matched_template_2d_type = self._node.GetStructureType("tech.pyri.vision.template_matching.MatchedTemplate2D")
        self._template_matching_result_2d_type = self._node.GetStructureType("tech.pyri.vision.template_matching.TemplateMatchingResult2D")
        self._template_matching_result_3d_type = self._node.GetStructureType("tech.pyri.vision.template_matching.TemplateMatchingResult3D")
        self._matched_template_3d_type = self._node.GetStructureType("tech.pyri.vision.template_matching.MatchedTemplate3D")
        self._named_pose_with_covariance_type = self._node.GetStructureType("com.robotraconteur.geometry.NamedPoseWithCovariance")
        self._pose2d_dtype = self._node.GetNamedArrayDType("com.robotraconteur.geometry.Pose2D")

        self._image_util = ImageUtil(node=self._node)
        self._geom_util = GeometryUtil(node=self._node)

        self.device_manager = device_manager
        self.device_manager.connect_device_type("tech.pyri.variable_storage.VariableStorage")
        self.device_manager.connect_device_type("com.robotraconteur.imaging.Camera")
        self.device_manager.device_added += self._device_added
        self.device_manager.device_removed += self._device_removed
        self.device_manager.refresh_devices(5)
        
    def RRServiceObjectInit(self, ctx, service_path):
        self.service_path = service_path
        self.ctx = ctx
        
    def _device_added(self, local_device_name):
       pass 

    def _device_removed(self, local_device_name):
        pass

    def match_template_stored_image(self, image_global_name, template_global_name, roi):
       
        var_storage = self.device_manager.get_device_client("variable_storage",1)

        image_var = var_storage.getf_variable_value("globals", image_global_name)
        image = self._image_util.compressed_image_to_array(image_var.data)

        template_var = var_storage.getf_variable_value("globals", template_global_name)
        template = self._image_util.compressed_image_to_array(template_var.data)

        return self._do_template_match(template, image, roi)

    def match_template_camera_capture(self, camera_local_device_name, template_global_name, roi):
       
        var_storage = self.device_manager.get_device_client("variable_storage",1)
        
        template_var = var_storage.getf_variable_value("globals", template_global_name)
        template = self._image_util.compressed_image_to_array(template_var.data)

        camera_device = self.device_manager.get_device_client(camera_local_device_name)
        image_compressed = camera_device.capture_frame_compressed()
        image = self._image_util.compressed_image_to_array(image_compressed)

        return self._do_template_match(template, image, roi)

    def _do_template_match(self, template, image, roi):

        matcher_roi = None

        if roi is not None:
            roi_x = roi.center.pose[0]["position"]["x"]
            roi_y = roi.center.pose[0]["position"]["y"]
            roi_theta = roi.center.pose[0]["orientation"]
            roi_w = roi.size[0]["width"]
            roi_h = roi.size[0]["height"]
            matcher_roi = (roi_x, roi_y, roi_w, roi_h, -roi_theta)


        # execute the image detection using opencv
        matcher = TemplateMatchingMultiAngleWithROI(template,image,matcher_roi)
        # return_result_image = True
        
        match_center, template_wh, match_angle, detection_result_img = matcher.detect_object(True)
                
        # return the pose of the object
        print("the object is found..:")
        print("center coordinates in img frame(x,y): " + str(match_center))
        print("(w,h): " + str(template_wh))
        print("angle: " + str(match_angle))


        match_result = self._template_matching_result_2d_type()

        matched_template_result = self._matched_template_2d_type()
        centroid = np.zeros((1,),dtype=self._pose2d_dtype)
        centroid[0]["position"]["x"] = match_center[0]
        centroid[0]["position"]["y"] = match_center[1]
        centroid[0]["orientation"] = match_angle
        matched_template_result.match_centroid = centroid
        matched_template_result.template_size = self._geom_util.wh_to_size2d(template_wh,dtype=np.int32)
        matched_template_result.confidence = 0

        match_result.template_matches =[matched_template_result]

        match_result.display_image = self._image_util.array_to_compressed_image_jpg(detection_result_img, 70)

        return match_result

    def _do_match_with_pose(self,image, template, intrinsic_calib, extrinsic_calib, object_z, roi):
        
        matcher_roi = None

        if roi is not None:
            roi_x = roi.center.pose[0]["position"]["x"]
            roi_y = roi.center.pose[0]["position"]["y"]
            roi_theta = roi.center.pose[0]["orientation"]
            roi_w = roi.size[0]["width"]
            roi_h = roi.size[0]["height"]
            matcher_roi = (roi_x, roi_y, roi_w, roi_h, -roi_theta)


        # execute the image detection using opencv
        matcher = TemplateMatchingMultiAngleWithROI(template,image,matcher_roi)
        # return_result_image = True
        
        match_center, template_wh, match_angle, detection_result_img = matcher.detect_object(True)
           
        # detection_result.width
        # detection_result.height
        x = match_center[0]
        y = match_center[1]
        theta = match_angle
        src = np.asarray([x,y], dtype=np.float32)
        src = np.reshape(src,(-1,1,2)) # Reshape as opencv requires (N,1,2)
        # print(src)
        # Now that the detection result in the image frame is found, find the
        # detected pose in the camera frame using the given z distance to the camera.
        # To do that, first load the camera matrix and distortion coefficients
        # from the intrinsic calibration result.
        

        mtx = intrinsic_calib.K
        d = intrinsic_calib.distortion_info.data
        dist = np.array([d.k1,d.k2,d.p1,d.p2,d.k3])

        T_cam = self._geom_util.named_pose_to_rox_transform(extrinsic_calib.pose)

        #TODO: Figure out a better value for this
        object_z_cam_dist = abs(T_cam.p[2]) - object_z

        # Find the corresponding world pose of the detected pose in camera frame
        dst = cv2.undistortPoints(src,mtx,dist) # dst is Xc/Zc and Yc/Zc in the same shape of src
        dst = dst * float(object_z_cam_dist) * 1000.0 # Multiply by the given Zc distance to recover the coordinates; the factor of 1000 converts Zc from meters to the millimeters used by the other values
        dst = np.squeeze(dst) * 0.001 # Xc and Yc as vector

        # Finally the translation between the detected object center and the camera frame represented in camera frame is T = [Xc,Yc,Zc]
        Xc = dst[0]
        Yc = dst[1]
        Zc = float(object_z_cam_dist)
        T = np.asarray([Xc,Yc,Zc])

        # Now let's find the orientation of the detected object with respect to the camera.
        # We assume the +z axis points towards the camera and that the xy axes of the
        # object and the camera lie in parallel planes, so the rotation matrix is:
        theta = np.deg2rad(theta)  # convert theta from degrees to radians
        R_co = np.asarray([[math.cos(theta),-math.sin(theta),0],[-math.sin(theta),-math.cos(theta),0],[0,0,-1]])

        T_obj_cam_frame = rox.Transform(R_co, T, "camera", "object")

        T_obj = T_cam * T_obj_cam_frame
        
        # TODO: Better adjustment of Z height?
        T_obj.p[2] = object_z

        ret1 = self._matched_template_3d_type()
        ret1.pose = self._named_pose_with_covariance_type()
        ret1.pose.pose = self._geom_util.rox_transform_to_named_pose(T_obj)
        ret1.pose.covariance= np.zeros((6,6))
        ret1.confidence = 0

        ret = self._template_matching_result_3d_type()
        ret.template_matches = [ret1]
        ret.display_image = self._image_util.array_to_compressed_image_jpg(detection_result_img,70)

        return ret

    def _do_match_template_world_pose(self, image, template_global_name,
        camera_calibration_intrinsic, camera_calibration_extrinsic, object_z_height, roi, var_storage):

        template_var = var_storage.getf_variable_value("globals", template_global_name)
        template = self._image_util.compressed_image_to_array(template_var.data)

        intrinsic_calib = var_storage.getf_variable_value("globals", camera_calibration_intrinsic).data
        extrinsic_calib = var_storage.getf_variable_value("globals", camera_calibration_extrinsic).data

        return self._do_match_with_pose(image,template,intrinsic_calib,extrinsic_calib,object_z_height,roi)

    def match_template_world_pose_stored_image(self, image_global_name, template_global_name,
        camera_calibration_intrinsic, camera_calibration_extrinsic, object_z_height, roi):

        var_storage = self.device_manager.get_device_client("variable_storage",1)

        image_var = var_storage.getf_variable_value("globals", image_global_name)
        image = self._image_util.compressed_image_to_array(image_var.data)

        return self._do_match_template_world_pose(image, template_global_name, camera_calibration_intrinsic,
            camera_calibration_extrinsic, object_z_height, roi, var_storage)

    def match_template_world_pose_camera_capture(self, camera_local_device_name, template_global_name,
        camera_calibration_intrinsic, camera_calibration_extrinsic, object_z_height, roi):

        var_storage = self.device_manager.get_device_client("variable_storage",1)

        camera_device = self.device_manager.get_device_client(camera_local_device_name,1)
        img_rr = camera_device.capture_frame_compressed()
        image = self._image_util.compressed_image_to_array(img_rr)

        return self._do_match_template_world_pose(image, template_global_name, camera_calibration_intrinsic,
            camera_calibration_extrinsic, object_z_height, roi, var_storage)
Example #21
class CameraRobotCalibrationService_impl(object):
    def __init__(self, device_manager, device_info = None, node : RR.RobotRaconteurNode = None):
        if node is None:
            self._node = RR.RobotRaconteurNode.s
        else:
            self._node = node
        self.device_info = device_info
                
        self.service_path = None
        self.ctx = None

        self.device_manager = device_manager
        self.device_manager.connect_device_type("tech.pyri.variable_storage.VariableStorage")
        self.device_manager.connect_device_type("com.robotraconteur.robotics.robot.Robot")
        self.device_manager.device_added += self._device_added
        self.device_manager.device_removed += self._device_removed
        self.device_manager.refresh_devices(5)

        self.robot_util = RobotUtil(self._node)
        self.geom_util = GeometryUtil(self._node)
        self.image_util = ImageUtil(self._node)
        
    def RRServiceObjectInit(self, ctx, service_path):
        self.service_path = service_path
        self.ctx = ctx
        
    def _device_added(self, local_device_name):
       pass 

    def _device_removed(self, local_device_name):
        pass

    def calibrate_robot_origin(self, robot_local_device_name, camera_intrinsic_calibration_global_name, camera_extrinsic_calibration_global_name, \
        image_sequence_global_name, aruco_dict, aruco_id, aruco_markersize, flange_to_marker, output_global_name):
        

        var_storage = self.device_manager.get_device_client("variable_storage",1)

        var_consts = var_storage.RRGetNode().GetConstants('tech.pyri.variable_storage', var_storage)
        variable_persistence = var_consts["VariablePersistence"]
        variable_protection_level = var_consts["VariableProtectionLevel"]

        if len(output_global_name) > 0:
            if len(var_storage.filter_variables("globals",output_global_name,[])) > 0:
                raise RR.InvalidOperationException(f"Global {output_global_name} already exists")

        image_sequence = []
        joint_pos_sequence = []

        image_sequence_vars = var_storage.getf_variable_value("globals",image_sequence_global_name)
        for image_var in image_sequence_vars.data.splitlines():
            var2 = var_storage.getf_variable_value("globals",image_var)
            image_sequence.append(self.image_util.compressed_image_to_array(var2.data))
            var2_tags = var_storage.getf_variable_attributes("globals", image_var)
            var2_state_var_name = var2_tags["system_state"]
            var2_state = var_storage.getf_variable_value("globals", var2_state_var_name)
            joint_pos = None 
            for s in var2_state.data.devices_states[robot_local_device_name].state:
                if s.type == "com.robotraconteur.robotics.robot.RobotState":
                    joint_pos = s.state_data.data.joint_position
            assert joint_pos is not None, "Could not find joint position in state sequence"
            joint_pos_sequence.append(joint_pos)

        cam_intrinsic_calib = var_storage.getf_variable_value("globals",camera_intrinsic_calibration_global_name).data
        cam_extrinsic_calib = var_storage.getf_variable_value("globals",camera_extrinsic_calibration_global_name).data

        mtx = cam_intrinsic_calib.K
        dist_rr = cam_intrinsic_calib.distortion_info.data
        dist = np.array([dist_rr.k1, dist_rr.k2, dist_rr.p1, dist_rr.p2, dist_rr.k3],dtype=np.float64)

        cam_pose = self.geom_util.named_pose_to_rox_transform(cam_extrinsic_calib.pose)

        robot = self.device_manager.get_device_client(robot_local_device_name,1)
        robot_info = robot.robot_info
        rox_robot = self.robot_util.robot_info_to_rox_robot(robot_info,0)
        

        # Calibrate
        robot_pose1, robot_pose_cov, img, calibration_error = calibrator.calibrate(image_sequence, joint_pos_sequence, aruco_dict, aruco_id, aruco_markersize, flange_to_marker, mtx, dist, cam_pose, rox_robot, robot_local_device_name)
        
        robot_pose = self._node.NewStructure("com.robotraconteur.geometry.NamedPoseWithCovariance")
        robot_pose.pose = robot_pose1
        robot_pose.covariance = robot_pose_cov

        if len(output_global_name) > 0:
            var_storage.add_variable2("globals",output_global_name,"com.robotraconteur.geometry.NamedPoseWithCovariance", \
                RR.VarValue(robot_pose,"com.robotraconteur.geometry.NamedPoseWithCovariance"), ["robot_origin_pose_calibration"], 
                {"device": robot_local_device_name}, variable_persistence["const"], None, variable_protection_level["read_write"], \
                [], f"Robot \"{robot_local_device_name}\" origin pose calibration", False)

        ret = RRN.NewStructure("tech.pyri.vision.robot_calibration.CameraRobotBaseCalibrateResult")
        ret.robot_pose = robot_pose
        ret.display_images = img
        ret.calibration_error = calibration_error

        return ret
Example #22
import time
from RobotRaconteurCompanion.Util.ImageUtil import ImageUtil
from RobotRaconteurCompanion.Util.GeometryUtil import GeometryUtil
import numpy as np
import cv2

d = DeviceManagerClient('rr+tcp://localhost:59902?service=device_manager',
                        autoconnect=False)

d.refresh_devices(1)

d.connect_device("vision_robot_calibration")

calibration_service = d.get_device_client("vision_robot_calibration", 1)

geom_util = GeometryUtil(client_obj=calibration_service)
marker_pose = geom_util.xyz_rpy_to_pose(
    np.array([0.0393, -0.0091, 0.055]), np.array(np.deg2rad([90.0, 0.0,
                                                             180.0])))

ret = calibration_service.calibrate_robot_origin(
    "robot", "camera_intrinsic_calibration", "camera_extrinsic_calibration",
    "robot_calib0", "DICT_6X6_250", 150, 0.0316, marker_pose,
    "")  # "robot_origin_calibration0"

image_util = ImageUtil(client_obj=calibration_service)
geom_util = GeometryUtil(client_obj=calibration_service)

T = geom_util.named_pose_to_rox_transform(ret.robot_pose.pose)
print(T)
print(ret)
Example #23
import time
from RobotRaconteurCompanion.Util.ImageUtil import ImageUtil
from RobotRaconteurCompanion.Util.GeometryUtil import GeometryUtil
import cv2
import numpy as np

d = DeviceManagerClient('rr+tcp://localhost:59902?service=device_manager',
                        autoconnect=False)

d.refresh_devices(1)

d.connect_device("robotics_motion")

c = d.get_device_client("robotics_motion", 1)

geom_util = GeometryUtil(client_obj=c)


def _run_grab(gen):
    while True:
        try:
            res = gen.Next()
            print(res)
        except RR.StopIterationException:
            break


for i in range(5):
    pose2d_dtype = RRN.GetNamedArrayDType("com.robotraconteur.geometry.Pose2D",
                                          c)
    obj_pose = np.zeros((1, ), dtype=pose2d_dtype)
Example #24
print(jog_service.device_info.device.name)

jog = jog_service.get_jog("robot")

jog.setf_jog_mode()

#for x in range(100):
#jog.jog_joints3(1,1)
#jog.setf_halt_mode()

robot = d.get_device_client("robot", 1)

robot_state, _ = robot.robot_state.PeekInValue()
q_current = robot_state.joint_position
robot_util = RobotUtil(client_obj=robot)
rox_robot = robot_util.robot_info_to_rox_robot(robot.robot_info, 0)
geom_util = GeometryUtil(client_obj=jog_service)
T = rox.fwdkin(rox_robot, q_current)
print(f"Current xyz = {T.p}, rpy = {np.rad2deg(rox.R2rpy(T.R))}")
T2 = copy.deepcopy(T)
T2.p[1] += 0.1
T3 = copy.deepcopy(T)
T3.p[1] -= 0.1
pose_des = geom_util.rox_transform_to_pose(T2)
pose_des2 = geom_util.rox_transform_to_pose(T3)

for i in range(10):
    jog.jog_joints_to_pose(pose_des, 50)
    jog.jog_joints_to_pose(pose_des2, 50)
Example #25
import time
from RobotRaconteurCompanion.Util.ImageUtil import ImageUtil
from RobotRaconteurCompanion.Util.GeometryUtil import GeometryUtil
import cv2

d = DeviceManagerClient('rr+tcp://localhost:59902?service=device_manager',
                        autoconnect=False)

d.refresh_devices(1)

d.connect_device("vision_camera_calibration")

calibration_service = d.get_device_client("vision_camera_calibration", 1)

ret = calibration_service.calibrate_camera_extrinsic(
    "camera", "camera_intrinsic_calibration", "extrinsic_image0", "chessboard",
    "camera_extrinsic_calibration0")  # "camera_extrinsic_calibration1")

image_util = ImageUtil(client_obj=calibration_service)
geom_util = GeometryUtil(client_obj=calibration_service)

T = geom_util.named_pose_to_rox_transform(ret.camera_pose.pose)
print(T)
print(ret)

img = image_util.compressed_image_to_array(ret.display_image)
cv2.imshow("img", img)
cv2.waitKey()

cv2.destroyAllWindows()
Example #26
def do_show_new_robot_origin_calibration_dialog2(new_name: str, robot_pose,
                                                 display_images,
                                                 core: "PyriWebUIBrowser"):
    try:
        dialog2_html = importlib_resources.read_text(
            __package__, "new_calibrate_robot_origin_dialog2.html")

        robot_calib = core.device_manager.get_device_subscription(
            "vision_robot_calibration").GetDefaultClient()
        geom_util = GeometryUtil(client_obj=robot_calib)
        marker_xyz, marker_rpy, _, _ = geom_util.named_pose_to_xyz_rpy(
            robot_pose.pose)

        el = js.document.createElement('div')
        el.id = "new_calibrate_robot_origin_dialog2_wrapper"
        js.document.getElementById("wrapper").appendChild(el)

        def handle_hidden(*args):
            try:
                el.parentElement.removeChild(el)
            except:
                traceback.print_exc()

        x = f"{marker_xyz[0]:4e}"
        y = f"{marker_xyz[1]:4e}"
        z = f"{marker_xyz[2]:4e}"
        r_r = f"{marker_rpy[0]:4e}"
        r_p = f"{marker_rpy[1]:4e}"
        r_y = f"{marker_rpy[2]:4e}"

        imgs = []
        i = 0
        for d in display_images:
            d_encoded = str(base64.b64encode(d.data))[2:-1]
            d2 = {
                "id": i,
                "caption": f"Calibration result {i+1}",
                "img": "data:image/jpeg;base64," + d_encoded
            }
            del d_encoded
            imgs.append(d2)
            i += 1
            #TODO: check for png?

        dialog = js.Vue.new(
            js.python_to_js({
                "el": "#new_calibrate_robot_origin_dialog2_wrapper",
                "template": dialog2_html,
                "data": {
                    "x": x,
                    "y": y,
                    "z": z,
                    "r_r": r_r,
                    "r_p": r_p,
                    "r_y": r_y,
                    "display_images": imgs
                },
                "methods": {
                    "handle_hidden": handle_hidden
                }
            }))

        dialog["$bvModal"].show("new_vision_camera_calibrate_robot_origin2")

    except:
        traceback.print_exc()
Example #27
c = d.get_device_client("vision_template_matching", 1)

bounding_box2d_type = RRN.GetStructureType(
    'com.robotraconteur.geometry.BoundingBox2D', c)
named_pose2d_type = RRN.GetStructureType(
    'com.robotraconteur.geometry.NamedPose2D', c)
pose2d_dtype = RRN.GetNamedArrayDType('com.robotraconteur.geometry.Pose2D', c)

var_storage = d.get_device_client("variable_storage", 1)

roi = var_storage.getf_variable_value("globals", roi_name).data

#res = c.match_template_stored_image("extrinsic_image0", "test10", None)
res = c.match_template_world_pose_camera_capture(
    "camera", template_name, "camera_calibration_intrinsic",
    "camera_calibration_extrinsic", 0, None)

img_util = ImageUtil(client_obj=c)
res_img = img_util.compressed_image_to_array(res.display_image)

geom_util = GeometryUtil(client_obj=c)

xyz, rpy, _, _ = geom_util.named_pose_to_xyz_rpy(
    res.template_matches[0].pose.pose)

cv2.imshow("", res_img)
cv2.waitKey()
cv2.destroyAllWindows()

print(res)
Example #28
class VisionArucoDetection_impl(object):
    def __init__(self,
                 device_manager,
                 device_info=None,
                 node: RR.RobotRaconteurNode = None):
        if node is None:
            self._node = RR.RobotRaconteurNode.s
        else:
            self._node = node
        self.device_info = device_info

        self.service_path = None
        self.ctx = None

        self._detected_marker = self._node.GetStructureType(
            "tech.pyri.vision.aruco_detection.DetectedMarker")
        self._aruco_detection_result = self._node.GetStructureType(
            "tech.pyri.vision.aruco_detection.ArucoDetectionResult")
        self._pose2d_dtype = self._node.GetNamedArrayDType(
            "com.robotraconteur.geometry.Pose2D")
        self._point2d_dtype = self._node.GetNamedArrayDType(
            "com.robotraconteur.geometry.Point2D")

        self._image_util = ImageUtil(node=self._node)
        self._geom_util = GeometryUtil(node=self._node)

        self.device_manager = device_manager
        self.device_manager.connect_device_type(
            "tech.pyri.variable_storage.VariableStorage")
        self.device_manager.connect_device_type(
            "com.robotraconteur.imaging.Camera")
        self.device_manager.device_added += self._device_added
        self.device_manager.device_removed += self._device_removed
        self.device_manager.refresh_devices(5)

    def RRServiceObjectInit(self, ctx, service_path):
        self.service_path = service_path
        self.ctx = ctx

    def _device_added(self, local_device_name):
        pass

    def _device_removed(self, local_device_name):
        pass

    def _do_aruco_detection(self, img, intrinsic_global_name,
                            extrinsic_global_name, aruco_dict_str, aruco_id,
                            aruco_markersize, roi):

        intrinsic_calib = None
        extrinsic_calib = None

        if intrinsic_global_name is not None and extrinsic_global_name is not None:
            var_storage = self.device_manager.get_device_client(
                "variable_storage", 0.1)
            intrinsic_calib = var_storage.getf_variable_value(
                "globals", intrinsic_global_name).data
            extrinsic_calib = var_storage.getf_variable_value(
                "globals", extrinsic_global_name).data

        display_img = img.copy()

        assert aruco_dict_str.startswith(
            "DICT_"), "Invalid aruco dictionary name"

        aruco_dict_i = getattr(aruco, aruco_dict_str)  # look up the OpenCV aruco dictionary enum by name
        aruco_dict = cv2.aruco.Dictionary_get(aruco_dict_i)
        aruco_params = cv2.aruco.DetectorParameters_create()
        aruco_params.cornerRefinementMethod = cv2.aruco.CORNER_REFINE_SUBPIX

        corners1, ids1, rejected = cv2.aruco.detectMarkers(
            img, aruco_dict, parameters=aruco_params)

        if corners1 is None:
            corners1 = []
        if ids1 is None:
            ids1 = []

        if aruco_id < 0:
            corners2 = corners1
            ids2 = ids1
        else:
            corners2 = []
            ids2 = []
            for id2, corner2 in zip(ids1, corners1):
                if id2 == aruco_id:
                    corners2.append(corner2)
                    ids2.append(id2)
            ids2 = np.array(ids2)

        if roi is None:
            corners = corners2
            ids = ids2
        else:
            roi_x = roi.center.pose[0]["position"]["x"]
            roi_y = roi.center.pose[0]["position"]["y"]
            roi_theta = roi.center.pose[0]["orientation"]
            roi_w = roi.size[0]["width"]
            roi_h = roi.size[0]["height"]
            geom_roi1 = shapely.geometry.box(-roi_w / 2.,
                                             -roi_h / 2.,
                                             roi_w / 2.,
                                             roi_h / 2.,
                                             ccw=True)
            geom_roi2 = shapely.affinity.translate(geom_roi1,
                                                   xoff=roi_x,
                                                   yoff=roi_y)
            geom_roi = shapely.affinity.rotate(geom_roi2,
                                               roi_theta,
                                               origin='centroid',
                                               use_radians=True)

            corners = []
            ids = []

            for id3, corner3 in zip(ids2, corners2):
                centroid = shapely.geometry.Polygon([
                    shapely.geometry.Point(corner3[0, i, 0], corner3[0, i, 1])
                    for i in range(4)
                ])
                if geom_roi.contains(centroid):
                    corners.append(corner3)
                    ids.append(id3)
            ids = np.array(ids)

            roi_outline = np.array([geom_roi.exterior.coords], dtype=np.int32)
            display_img = cv2.polylines(display_img,
                                        roi_outline,
                                        True,
                                        color=(255, 255, 0))

        if len(ids) > 0:
            display_img = aruco.drawDetectedMarkers(display_img, corners, ids)

        poses = None
        if intrinsic_calib is not None and extrinsic_calib is not None:
            poses = []
            mtx = intrinsic_calib.K
            d = intrinsic_calib.distortion_info.data
            dist = np.array([d.k1, d.k2, d.p1, d.p2, d.k3])

            T_cam = self._geom_util.named_pose_to_rox_transform(
                extrinsic_calib.pose)

            for id4, corner4 in zip(ids, corners):
                rvec, tvec, markerPoints = cv2.aruco.estimatePoseSingleMarkers(
                    corner4, aruco_markersize, mtx, dist)

                display_img = cv2.aruco.drawAxis(display_img, mtx, dist, rvec,
                                                 tvec, 0.05)

                # R_marker1 = cv2.Rodrigues(rvec.flatten())[0]
                # TODO: 3D pose estimation from rvec is very inaccurate. Use simple
                # trigonometry to estimate the Z rotation of the tag instead

                # compute vectors from opposite corners
                v1 = corner4[0, 2, :] - corner4[0, 0, :]
                v2 = corner4[0, 3, :] - corner4[0, 1, :]

                # Use atan2 on each vector and average
                theta1 = (np.arctan2(v1[1], v1[0]) - np.deg2rad(45)) % (2. * np.pi)
                theta2 = (np.arctan2(v2[1], v2[0]) - np.deg2rad(135)) % (2. * np.pi)

                theta = (theta1 + theta2) / 2.

                R_marker1 = rox.rot([1., 0., 0.], np.pi) @ rox.rot(
                    [0., 0., -1.], theta)

                p_marker1 = tvec.flatten()

                T_marker1 = rox.Transform(R_marker1, p_marker1, "camera",
                                          f"aruco{int(id4)}")
                T_marker = T_cam * T_marker1
                rr_marker_pose = self._geom_util.rox_transform_to_named_pose(
                    T_marker)
                poses.append(rr_marker_pose)

        ret_markers = []
        if poses is None:
            poses = [None] * len(ids)
        for id_, corner, pose in zip(ids, corners, poses):
            m = self._detected_marker()
            m.marker_id = int(id_)
            m.marker_corners = np.zeros((4, ), dtype=self._point2d_dtype)
            for i in range(4):
                m.marker_corners[i] = self._geom_util.xy_to_point2d(
                    corner[0, i, :])

            m.marker_pose = pose

            ret_markers.append(m)

        ret = self._aruco_detection_result()
        ret.detected_markers = ret_markers
        ret.display_image = self._image_util.array_to_compressed_image_jpg(
            display_img, 70)
        return ret

    def detect_aruco_stored_image(self, image_global_name,
                                  camera_calibration_intrinsic,
                                  camera_calibration_extrinsic, aruco_dict,
                                  aruco_id, aruco_markersize, roi):

        if len(camera_calibration_intrinsic) == 0:
            camera_calibration_intrinsic = None

        if len(camera_calibration_extrinsic) == 0:
            camera_calibration_extrinsic = None

        var_storage = self.device_manager.get_device_client(
            "variable_storage", 0.1)

        img_rr = var_storage.getf_variable_value("globals", image_global_name)
        img = self._image_util.compressed_image_to_array(img_rr.data)

        return self._do_aruco_detection(img, camera_calibration_intrinsic,
                                        camera_calibration_extrinsic,
                                        aruco_dict, aruco_id, aruco_markersize,
                                        roi)

    def detect_aruco_camera_capture(self, camera_local_device_name,
                                    camera_calibration_intrinsic,
                                    camera_calibration_extrinsic, aruco_dict,
                                    aruco_id, aruco_markersize, roi):

        if len(camera_calibration_intrinsic) == 0:
            camera_calibration_intrinsic = None

        if len(camera_calibration_extrinsic) == 0:
            camera_calibration_extrinsic = None

        camera_device = self.device_manager.get_device_client(
            camera_local_device_name, 1)
        img_rr = camera_device.capture_frame_compressed()
        img = self._image_util.compressed_image_to_array(img_rr)

        return self._do_aruco_detection(img, camera_calibration_intrinsic,
                                        camera_calibration_extrinsic,
                                        aruco_dict, aruco_id, aruco_markersize,
                                        roi)
Example #29
def calibrate(images, joint_poses, aruco_dict, aruco_id, aruco_markersize,
              flange_to_marker, mtx, dist, cam_pose, rox_robot,
              robot_local_device_name):
    """ Apply extrinsic camera calibration operation for images in the given directory path 
    using opencv aruco marker detection, the extrinsic marker poses given in a json file, 
    and the given intrinsic camera parameters."""

    assert aruco_dict.startswith("DICT_"), "Invalid aruco dictionary name"

    aruco_dict = getattr(aruco, aruco_dict)  # look up the OpenCV aruco dictionary enum by name
    aruco_dict = cv2.aruco.Dictionary_get(aruco_dict)
    aruco_params = cv2.aruco.DetectorParameters_create()

    i = 0

    imgs_out = []

    geom_util = GeometryUtil()
    image_util = ImageUtil()

    object_points = []
    image_points = []

    for img, joints in zip(images, joint_poses):

        # Find the aruco tag corners
        # corners, ids, rejected = cv2.aruco.detectMarkers(img, aruco_dict, parameters=aruco_params,cameraMatrix=mtx, distCoeff=dist)
        corners, ids, rejected = cv2.aruco.detectMarkers(
            img, aruco_dict, parameters=aruco_params)

        # #debug
        # print(str(type(corners))) # <class 'list'>
        # print(str(corners))  # list of numpy arrays of corners
        # print(str(type(ids))) # <class 'numpy.ndarray'>
        # print(str(ids))

        if len(corners) > 0:
            # Only find the id that we desire
            indexes = np.flatnonzero(ids.flatten() == aruco_id).tolist()
            corners = [corners[index] for index in indexes]
            ids = np.asarray([ids[index] for index in indexes])

            # #debug
            # print(str(type(corners))) # <class 'list'>
            # print(str(corners))  # list of numpy arrays of corners
            # print(str(type(ids))) # <class 'numpy.ndarray'>
            # print(str(ids))

            if len(ids) > 0:  # if there exist at least one id that we desire
                # Select the first detected one, discard the others
                corners = corners[0]  # now corners is 1 by 4

                # # extract the marker corners (which are always returned
                # # in top-left, top-right, bottom-right, and bottom-left
                # # order)
                # corners = corners.reshape((4, 2))
                # (topLeft, topRight, bottomRight, bottomLeft) = corners

                # Estimate the pose of the detected marker in camera frame
                rvec, tvec, markerPoints = cv2.aruco.estimatePoseSingleMarkers(
                    corners, aruco_markersize, mtx, dist)

                # # Debug: Show the detected tag and axis in the image
                # # # cv2.aruco.drawDetectedMarkers(img, corners)  # Draw A square around the markers (Does not work)
                img1 = img.copy()
                img_out = cv2.aruco.drawAxis(img1, mtx, dist, rvec, tvec,
                                             aruco_markersize *
                                             0.75)  # Draw Axis
                imgs_out.append(img_out)

                transform_base_2_flange = rox.fwdkin(rox_robot, joints)
                transform_flange_2_marker = geom_util.pose_to_rox_transform(
                    flange_to_marker)
                transform_base_2_marker = transform_base_2_flange * transform_flange_2_marker
                transform_base_2_marker_corners = _marker_corner_poses(
                    transform_base_2_marker, aruco_markersize)
                # Structure of this dictionary is "filename":[[R_base2marker],[T_base2marker],[R_cam2marker],[T_cam2marker]]
                for j in range(4):
                    object_points.append(transform_base_2_marker_corners[j].p)
                    image_points.append(corners[0, j])
                #pose_pairs_dict[i] = (transform_base_2_marker_corners, corners)
                i += 1

    object_points_np = np.array(object_points, dtype=np.float64)
    image_points_np = np.array(image_points, dtype=np.float32)

    # Finally execute the calibration
    retval, rvec, tvec = cv2.solvePnP(object_points_np, image_points_np, mtx,
                                      dist)
    R_cam2base = cv2.Rodrigues(rvec)[0]
    T_cam2base = tvec

    # Add another display image of marker at robot base
    img_out = cv2.aruco.drawAxis(img, mtx, dist,
                                 cv2.Rodrigues(R_cam2base)[0], T_cam2base,
                                 aruco_markersize * 0.75)  # Draw Axis
    imgs_out.append(img_out)

    rox_transform_cam2base = rox.Transform(R_cam2base, T_cam2base,
                                           cam_pose.parent_frame_id,
                                           robot_local_device_name)
    rox_transform_world2base = cam_pose * rox_transform_cam2base

    #R_base2cam = R_cam2base.T
    #T_base2cam = - R_base2cam @ T_cam2base

    R_base2cam = rox_transform_world2base.inv().R
    T_base2cam = rox_transform_world2base.inv().p

    #debug
    print("FINAL RESULTS: ")
    print("str(R_cam2base)")
    # print(str(type(R_cam2base)))
    print(str(R_cam2base))
    print("str(T_cam2base)")
    # print(str(type(T_cam2base.flatten())))
    print(str(T_cam2base))

    print("str(R_base2cam)")
    # print(str(type(R_base2cam)))
    print(str(R_base2cam))
    print("str(T_base2cam)")
    # print(str(type(T_base2cam.flatten())))
    print(str(T_base2cam))

    pose_res = geom_util.rox_transform_to_named_pose(rox_transform_world2base)
    cov = np.eye(6) * 1e-5

    imgs_out2 = [
        image_util.array_to_compressed_image_jpg(i, 70) for i in imgs_out
    ]

    return pose_res, cov, imgs_out2, 0.0
Example #30
from RobotRaconteur.Client import *
import time
from RobotRaconteurCompanion.Util.ImageUtil import ImageUtil
from RobotRaconteurCompanion.Util.GeometryUtil import GeometryUtil
import cv2
import numpy as np

d = DeviceManagerClient('rr+tcp://localhost:59902?service=device_manager', autoconnect=False)

d.refresh_devices(1)

d.connect_device("vision_aruco_detection")

c = d.get_device_client("vision_aruco_detection",1)

geom_util = GeometryUtil(client_obj = c)

b = RRN.NewStructure("com.robotraconteur.geometry.BoundingBox2D", c)
center = RRN.NewStructure("com.robotraconteur.geometry.NamedPose2D", c)
pose2d_dtype = RRN.GetNamedArrayDType("com.robotraconteur.geometry.Pose2D", c)
size2d_dtype = RRN.GetNamedArrayDType("com.robotraconteur.geometry.Size2D", c)
center.pose = np.zeros((1,),dtype=pose2d_dtype)
center.pose[0]["position"]["x"] = 990
center.pose[0]["position"]["y"] = 64
center.pose[0]["orientation"] = np.deg2rad(10)
b.center = center
size = np.zeros((1,),dtype=size2d_dtype)
size[0]["width"] = 100
size[0]["height"] = 100
b.size = size
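Example #30 builds the ROI but stops before invoking the service. A plausible completion, assuming the detect_aruco_camera_capture signature from Example #28; the calibration global names, dictionary, and marker size below are illustrative (taken from Example #22):

# Hypothetical completion: detect all marker ids (-1) inside the ROI
res = c.detect_aruco_camera_capture("camera", "camera_intrinsic_calibration",
                                    "camera_extrinsic_calibration",
                                    "DICT_6X6_250", -1, 0.0316, b)
for m in res.detected_markers:
    xyz, rpy, _, _ = geom_util.named_pose_to_xyz_rpy(m.marker_pose)
    print(f"marker {m.marker_id}: xyz={xyz}, rpy={np.rad2deg(rpy)} deg")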