Code example #1
    def movel(self,
              robot_local_device_name,
              pose_final,
              frame,
              robot_origin_calib_global_name,
              speed_perc,
              final_seed=None):

        robot = self.device_manager.get_device_client(robot_local_device_name)
        geom_util = GeometryUtil(client_obj=robot)

        if frame.lower() == "world":
            var_storage = self.device_manager.get_device_client(
                "variable_storage")
            robot_origin_pose = var_storage.getf_variable_value(
                "globals", robot_origin_calib_global_name).data
            T_rob = geom_util.named_pose_to_rox_transform(
                robot_origin_pose.pose)
            T_des1 = geom_util.pose_to_rox_transform(pose_final)
            T_des = T_rob.inv() * T_des1
            pose_final = geom_util.rox_transform_to_pose(T_des)
        elif frame.lower() == "robot":
            T_des = geom_util.pose_to_rox_transform(pose_final)
        else:
            assert False, "Unknown parent frame for movel"

        robot_info = robot.robot_info
        rox_robot = self._robot_util.robot_info_to_rox_robot(robot_info, 0)

        robot_state = robot.robot_state.PeekInValue()[0]

        q_initial = robot_state.joint_position

        traj = self._generate_movel_trajectory(robot, rox_robot, q_initial,
                                               T_des, speed_perc, final_seed)

        if traj is None:
            return EmptyGenerator()

        return TrajectoryMoveGenerator(robot, rox_robot, traj, self._node)
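
The "world" branch above re-expresses the desired pose in the robot's base frame by premultiplying with the inverse of the robot origin calibration. A minimal sketch of that composition, assuming general_robotics_toolbox and illustrative transform values:

import numpy as np
import general_robotics_toolbox as rox

# Illustrative values, not taken from the service above
T_rob = rox.Transform(rox.rot([0., 0., 1.], np.pi / 2), np.array([1.0, 0.0, 0.0]))  # robot base in world
T_des_world = rox.Transform(np.eye(3), np.array([1.5, 0.5, 0.2]))  # target pose in world

# The same target expressed in the robot base frame, as in the "world" branch
T_des_robot = T_rob.inv() * T_des_world
print(T_des_robot.p)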
Code example #2
def robot_get_end_pose(frame):
    """
    Returns the end pose of a robot. This end pose is reported by the
    robot driver. It is typically defined as the flange of the robot,
    but may be the tool if the driver is configured to report
    the tool pose.

    Parameters:

    * frame (str): The frame to return the pose in. May be `robot` or `world`.

    Return (Pose): The robot end pose in the requested frame
    """
    robot_name = _get_active_robot_name()

    device_manager = PyriSandboxContext.device_manager
    robot = device_manager.get_device_client(robot_name,1)
    robot_state, _ = robot.robot_state.PeekInValue()

    robot_util = RobotUtil(client_obj = robot)
    geom_util = GeometryUtil(client_obj = robot)

    # TODO: cache robot_info
    rox_robot = robot_util.robot_info_to_rox_robot(robot.robot_info,0)
    T1 = rox.fwdkin(rox_robot,robot_state.joint_position)

    if frame.upper() == "ROBOT":
        return geom_util.rox_transform_to_pose(T1)
    elif frame.upper() == "WORLD":
        var_storage = device_manager.get_device_client("variable_storage")
        # TODO: don't hard code robot origin
        robot_origin_pose = var_storage.getf_variable_value("globals",_get_robot_origin_calibration()).data
        T_rob = geom_util.named_pose_to_rox_transform(robot_origin_pose.pose)
        T2 = T_rob * T1
        return geom_util.rox_transform_to_pose(T2)
    else:
        assert False, "Invalid frame"
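
A hedged usage sketch: robot_get_end_pose is a sandbox function, so inside a PyRI procedure it can be called directly with the frame strings from the docstring:

pose_r = robot_get_end_pose("robot")  # end pose in the robot base frame
pose_w = robot_get_end_pose("world")  # end pose in the world frame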
Code example #3
class VisionTemplateMatching_impl(object):
    def __init__(self, device_manager, device_info = None, node: RR.RobotRaconteurNode = None):
        if node is None:
            self._node = RR.RobotRaconteurNode.s
        else:
            self._node = node
        self.device_info = device_info
                
        self.service_path = None
        self.ctx = None

        self._matched_template_2d_type = self._node.GetStructureType("tech.pyri.vision.template_matching.MatchedTemplate2D")
        self._template_matching_result_2d_type = self._node.GetStructureType("tech.pyri.vision.template_matching.TemplateMatchingResult2D")
        self._template_matching_result_3d_type = self._node.GetStructureType("tech.pyri.vision.template_matching.TemplateMatchingResult3D")
        self._matched_template_3d_type = self._node.GetStructureType("tech.pyri.vision.template_matching.MatchedTemplate3D")
        self._named_pose_with_covariance_type = self._node.GetStructureType("com.robotraconteur.geometry.NamedPoseWithCovariance")
        self._pose2d_dtype = self._node.GetNamedArrayDType("com.robotraconteur.geometry.Pose2D")

        self._image_util = ImageUtil(node=self._node)
        self._geom_util = GeometryUtil(node=self._node)

        self.device_manager = device_manager
        self.device_manager.connect_device_type("tech.pyri.variable_storage.VariableStorage")
        self.device_manager.connect_device_type("com.robotraconteur.imaging.Camera")
        self.device_manager.device_added += self._device_added
        self.device_manager.device_removed += self._device_removed
        self.device_manager.refresh_devices(5)
        
    def RRServiceObjectInit(self, ctx, service_path):
        self.service_path = service_path
        self.ctx = ctx
        
    def _device_added(self, local_device_name):
       pass 

    def _device_removed(self, local_device_name):
        pass

    def match_template_stored_image(self, image_global_name, template_global_name, roi):
       
        var_storage = self.device_manager.get_device_client("variable_storage",1)

        image_var = var_storage.getf_variable_value("globals", image_global_name)
        image = self._image_util.compressed_image_to_array(image_var.data)

        template_var = var_storage.getf_variable_value("globals", template_global_name)
        template = self._image_util.compressed_image_to_array(template_var.data)

        return self._do_template_match(template, image, roi)

    def match_template_camera_capture(self, camera_local_device_name, template_global_name, roi):
       
        var_storage = self.device_manager.get_device_client("variable_storage",1)
        
        template_var = var_storage.getf_variable_value("globals", template_global_name)
        template = self._image_util.compressed_image_to_array(template_var.data)

        camera_device = self.device_manager.get_device_client(camera_local_device_name)
        image_compressed = camera_device.capture_frame_compressed()
        image = self._image_util.compressed_image_to_array(image_compressed)

        return self._do_template_match(template, image, roi)

    def _do_template_match(self, template, image, roi):

        matcher_roi = None

        if roi is not None:
            roi_x = roi.center.pose[0]["position"]["x"]
            roi_y = roi.center.pose[0]["position"]["y"]
            roi_theta = roi.center.pose[0]["orientation"]
            roi_w = roi.size[0]["width"]
            roi_h = roi.size[0]["height"]
            matcher_roi = (roi_x, roi_y, roi_w, roi_h, -roi_theta)


        # Execute the template matching using OpenCV
        matcher = TemplateMatchingMultiAngleWithROI(template, image, matcher_roi)

        # True: also return an annotated result image
        match_center, template_wh, match_angle, detection_result_img = matcher.detect_object(True)

        # Report the match location in the image frame
        print("The object is found:")
        print("center coordinates in image frame (x,y): " + str(match_center))
        print("(w,h): " + str(template_wh))
        print("angle: " + str(match_angle))


        match_result = self._template_matching_result_2d_type()

        matched_template_result = self._matched_template_2d_type()
        centroid = np.zeros((1,),dtype=self._pose2d_dtype)
        centroid[0]["position"]["x"] = match_center[0]
        centroid[0]["position"]["y"] = match_center[1]
        centroid[0]["orientation"] = match_angle
        matched_template_result.match_centroid = centroid
        matched_template_result.template_size = self._geom_util.wh_to_size2d(template_wh,dtype=np.int32)
        matched_template_result.confidence = 0

        match_result.template_matches =[matched_template_result]

        match_result.display_image = self._image_util.array_to_compressed_image_jpg(detection_result_img, 70)

        return match_result

    def _do_match_with_pose(self,image, template, intrinsic_calib, extrinsic_calib, object_z, roi):
        
        matcher_roi = None

        if roi is not None:
            roi_x = roi.center.pose[0]["position"]["x"]
            roi_y = roi.center.pose[0]["position"]["y"]
            roi_theta = roi.center.pose[0]["orientation"]
            roi_w = roi.size[0]["width"]
            roi_h = roi.size[0]["height"]
            matcher_roi = (roi_x, roi_y, roi_w, roi_h, -roi_theta)


        # Execute the template matching using OpenCV
        matcher = TemplateMatchingMultiAngleWithROI(template, image, matcher_roi)

        # True: also return an annotated result image
        match_center, template_wh, match_angle, detection_result_img = matcher.detect_object(True)
           
        x = match_center[0]
        y = match_center[1]
        theta = match_angle
        src = np.asarray([x, y], dtype=np.float32)
        src = np.reshape(src, (-1, 1, 2))  # reshape as OpenCV requires: (N,1,2)

        # The detection result in the image frame is now known. Next, find the
        # detected pose in the camera frame at the given z distance from the
        # camera. First, load the camera matrix and distortion coefficients
        # from the calibration result.
        

        mtx = intrinsic_calib.K
        d = intrinsic_calib.distortion_info.data
        dist = np.array([d.k1,d.k2,d.p1,d.p2,d.k3])

        T_cam = self._geom_util.named_pose_to_rox_transform(extrinsic_calib.pose)

        #TODO: Figure out a better value for this
        object_z_cam_dist = abs(T_cam.p[2]) - object_z

        # Find the corresponding camera-frame position of the detection
        dst = cv2.undistortPoints(src, mtx, dist)  # dst is (Xc/Zc, Yc/Zc) in the same shape as src
        dst = dst * float(object_z_cam_dist) * 1000.0  # scale by the known Zc; the factor 1000 converts Zc from meters to millimeters
        dst = np.squeeze(dst) * 0.001  # Xc and Yc as a vector, converted back to meters

        # Finally the translation between the detected object center and the camera frame represented in camera frame is T = [Xc,Yc,Zc]
        Xc = dst[0]
        Yc = dst[1]
        Zc = float(object_z_cam_dist)
        T = np.asarray([Xc,Yc,Zc])

        # Now find the orientation of the detected object with respect to the camera.
        # Assume the object's +z axis points toward the camera and that the xy planes
        # of the object and the camera are parallel, so the rotation matrix is:
        theta = np.deg2rad(theta)  # convert theta from degrees to radians
        R_co = np.asarray([[math.cos(theta), -math.sin(theta), 0], [-math.sin(theta), -math.cos(theta), 0], [0, 0, -1]])

        T_obj_cam_frame = rox.Transform(R_co, T, "camera", "object")

        T_obj = T_cam * T_obj_cam_frame
        
        # TODO: Better adjustment of Z height?
        T_obj.p[2] = object_z

        ret1 = self._matched_template_3d_type()
        ret1.pose = self._named_pose_with_covariance_type()
        ret1.pose.pose = self._geom_util.rox_transform_to_named_pose(T_obj)
        ret1.pose.covariance= np.zeros((6,6))
        ret1.confidence = 0

        ret = self._template_matching_result_3d_type()
        ret.template_matches = [ret1]
        ret.display_image = self._image_util.array_to_compressed_image_jpg(detection_result_img,70)

        return ret

    def _do_match_template_world_pose(self, image, template_global_name,
        camera_calibration_intrinsic, camera_calibration_extrinsic, object_z_height, roi, var_storage):

        template_var = var_storage.getf_variable_value("globals", template_global_name)
        template = self._image_util.compressed_image_to_array(template_var.data)

        intrinsic_calib = var_storage.getf_variable_value("globals", camera_calibration_intrinsic).data
        extrinsic_calib = var_storage.getf_variable_value("globals", camera_calibration_extrinsic).data

        return self._do_match_with_pose(image,template,intrinsic_calib,extrinsic_calib,object_z_height,roi)

    def match_template_world_pose_stored_image(self, image_global_name, template_global_name,
        camera_calibration_intrinsic, camera_calibration_extrinsic, object_z_height, roi):

        var_storage = self.device_manager.get_device_client("variable_storage",1)

        image_var = var_storage.getf_variable_value("globals", image_global_name)
        image = self._image_util.compressed_image_to_array(image_var.data)

        return self._do_match_template_world_pose(image, template_global_name, camera_calibration_intrinsic,
            camera_calibration_extrinsic, object_z_height, roi, var_storage)

    def match_template_world_pose_camera_capture(self, camera_local_device_name, template_global_name,
        camera_calibration_intrinsic, camera_calibration_extrinsic, object_z_height, roi):

        var_storage = self.device_manager.get_device_client("variable_storage",1)

        camera_device = self.device_manager.get_device_client(camera_local_device_name,1)
        img_rr = camera_device.capture_frame_compressed()
        image = self._image_util.compressed_image_to_array(img_rr)

        return self._do_match_template_world_pose(image, template_global_name, camera_calibration_intrinsic,
            camera_calibration_extrinsic, object_z_height, roi, var_storage)
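
The heart of _do_match_with_pose is the pixel-to-camera-frame conversion: cv2.undistortPoints returns normalized coordinates (Xc/Zc, Yc/Zc), which scale by a known Zc to a metric 3D point. A standalone sketch with made-up intrinsics:

import cv2
import numpy as np

K = np.array([[600., 0., 320.], [0., 600., 240.], [0., 0., 1.]])  # made-up camera matrix
dist = np.zeros(5)  # made-up (zero) distortion coefficients
Zc = 0.8  # assumed known camera-to-object distance in meters

src = np.array([[[400., 300.]]], dtype=np.float32)  # detected pixel, shape (N,1,2)
norm = np.squeeze(cv2.undistortPoints(src, K, dist))  # (Xc/Zc, Yc/Zc)
Xc, Yc = norm * Zc
print(Xc, Yc, Zc)  # 3D point of the detection in the camera frame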
Code example #4
# The imports and device manager connection are not part of this excerpt; the
# setup below mirrors Code example #5 and is an assumption.
import cv2
import numpy as np
from RobotRaconteurCompanion.Util.ImageUtil import ImageUtil
from RobotRaconteurCompanion.Util.GeometryUtil import GeometryUtil
from pyri.device_manager_client import DeviceManagerClient  # assumed import path

d = DeviceManagerClient('rr+tcp://localhost:59902?service=device_manager',
                        autoconnect=False)

d.refresh_devices(1)

d.connect_device("vision_robot_calibration")

calibration_service = d.get_device_client("vision_robot_calibration", 1)

geom_util = GeometryUtil(client_obj=calibration_service)
marker_pose = geom_util.xyz_rpy_to_pose(
    np.array([0.0393, -0.0091, 0.055]),
    np.deg2rad([90.0, 0.0, 180.0]))

ret = calibration_service.calibrate_robot_origin(
    "robot",                          # robot local device name
    "camera_intrinsic_calibration",   # intrinsic calibration global variable
    "camera_extrinsic_calibration",   # extrinsic calibration global variable
    "robot_calib0",                   # image sequence global variable
    "DICT_6X6_250",                   # aruco dictionary
    150,                              # aruco marker id
    0.0316,                           # aruco marker size (m)
    marker_pose,                      # flange-to-marker pose
    "")                               # output global name; "" skips storing (e.g. "robot_origin_calibration0")

image_util = ImageUtil(client_obj=calibration_service)

T = geom_util.named_pose_to_rox_transform(ret.robot_pose.pose)
print(T)
print(ret)

for img1 in ret.display_images:
    img = image_util.compressed_image_to_array(img1)
    cv2.imshow(f"img", img)
    cv2.waitKey()

cv2.destroyAllWindows()
Code example #5
import cv2
from RobotRaconteurCompanion.Util.ImageUtil import ImageUtil
from RobotRaconteurCompanion.Util.GeometryUtil import GeometryUtil
from pyri.device_manager_client import DeviceManagerClient  # assumed import path

d = DeviceManagerClient('rr+tcp://localhost:59902?service=device_manager',
                        autoconnect=False)

d.refresh_devices(1)

d.connect_device("vision_camera_calibration")

calibration_service = d.get_device_client("vision_camera_calibration", 1)

ret = calibration_service.calibrate_camera_extrinsic(
    "camera", "camera_intrinsic_calibration", "extrinsic_image0", "chessboard",
    "camera_extrinsic_calibration0")  # alternatively "camera_extrinsic_calibration1"

image_util = ImageUtil(client_obj=calibration_service)
geom_util = GeometryUtil(client_obj=calibration_service)

T = geom_util.named_pose_to_rox_transform(ret.camera_pose.pose)
print(T)
print(ret)

img = image_util.compressed_image_to_array(ret.display_image)
cv2.imshow(f"img", img)
cv2.waitKey()

cv2.destroyAllWindows()
Code example #6
class VisionArucoDetection_impl(object):
    def __init__(self,
                 device_manager,
                 device_info=None,
                 node: RR.RobotRaconteurNode = None):
        if node is None:
            self._node = RR.RobotRaconteurNode.s
        else:
            self._node = node
        self.device_info = device_info

        self.service_path = None
        self.ctx = None

        self._detected_marker = self._node.GetStructureType(
            "tech.pyri.vision.aruco_detection.DetectedMarker")
        self._aruco_detection_result = self._node.GetStructureType(
            "tech.pyri.vision.aruco_detection.ArucoDetectionResult")
        self._pose2d_dtype = self._node.GetNamedArrayDType(
            "com.robotraconteur.geometry.Pose2D")
        self._point2d_dtype = self._node.GetNamedArrayDType(
            "com.robotraconteur.geometry.Point2D")

        self._image_util = ImageUtil(node=self._node)
        self._geom_util = GeometryUtil(node=self._node)

        self.device_manager = device_manager
        self.device_manager.connect_device_type(
            "tech.pyri.variable_storage.VariableStorage")
        self.device_manager.connect_device_type(
            "com.robotraconteur.imaging.Camera")
        self.device_manager.device_added += self._device_added
        self.device_manager.device_removed += self._device_removed
        self.device_manager.refresh_devices(5)

    def RRServiceObjectInit(self, ctx, service_path):
        self.service_path = service_path
        self.ctx = ctx

    def _device_added(self, local_device_name):
        pass

    def _device_removed(self, local_device_name):
        pass

    def _do_aruco_detection(self, img, intrinsic_global_name,
                            extrinsic_global_name, aruco_dict_str, aruco_id,
                            aruco_markersize, roi):

        intrinsic_calib = None
        extrinsic_calib = None

        if intrinsic_global_name is not None and extrinsic_global_name is not None:
            var_storage = self.device_manager.get_device_client(
                "variable_storage", 0.1)
            intrinsic_calib = var_storage.getf_variable_value(
                "globals", intrinsic_global_name).data
            extrinsic_calib = var_storage.getf_variable_value(
                "globals", extrinsic_global_name).data

        display_img = img.copy()

        assert aruco_dict_str.startswith(
            "DICT_"), "Invalid aruco dictionary name"

        aruco_dict_i = getattr(aruco, aruco_dict_str)  # look up the OpenCV aruco dictionary id by name
        aruco_dict = cv2.aruco.Dictionary_get(aruco_dict_i)
        aruco_params = cv2.aruco.DetectorParameters_create()
        aruco_params.cornerRefinementMethod = cv2.aruco.CORNER_REFINE_SUBPIX

        corners1, ids1, rejected = cv2.aruco.detectMarkers(
            img, aruco_dict, parameters=aruco_params)

        if corners1 is None:
            corners1 = []
        if ids1 is None:
            ids1 = []

        if aruco_id < 0:
            corners2 = corners1
            ids2 = ids1
        else:
            corners2 = []
            ids2 = []
            for id2, corner2 in zip(ids1, corners1):
                if id2 == aruco_id:
                    corners2.append(corner2)
                    ids2.append(id2)
            ids2 = np.array(ids2)

        if roi is None:
            corners = corners2
            ids = ids2
        else:
            roi_x = roi.center.pose[0]["position"]["x"]
            roi_y = roi.center.pose[0]["position"]["y"]
            roi_theta = roi.center.pose[0]["orientation"]
            roi_w = roi.size[0]["width"]
            roi_h = roi.size[0]["height"]
            geom_roi1 = shapely.geometry.box(-roi_w / 2.,
                                             -roi_h / 2.,
                                             roi_w / 2.,
                                             roi_h / 2.,
                                             ccw=True)
            geom_roi2 = shapely.affinity.translate(geom_roi1,
                                                   xoff=roi_x,
                                                   yoff=roi_y)
            geom_roi = shapely.affinity.rotate(geom_roi2,
                                               roi_theta,
                                               origin='centroid',
                                               use_radians=True)

            corners = []
            ids = []

            for id3, corner3 in zip(ids2, corners2):
                marker_poly = shapely.geometry.Polygon([
                    shapely.geometry.Point(corner3[0, i, 0], corner3[0, i, 1])
                    for i in range(4)
                ])
                if geom_roi.contains(marker_poly):
                    corners.append(corner3)
                    ids.append(id3)
            ids = np.array(ids)

            roi_outline = np.array([geom_roi.exterior.coords], dtype=np.int32)
            display_img = cv2.polylines(display_img,
                                        roi_outline,
                                        True,
                                        color=(255, 255, 0))

        if len(ids) > 0:
            display_img = aruco.drawDetectedMarkers(display_img, corners, ids)

        poses = None
        if intrinsic_calib is not None and extrinsic_calib is not None:
            poses = []
            mtx = intrinsic_calib.K
            d = intrinsic_calib.distortion_info.data
            dist = np.array([d.k1, d.k2, d.p1, d.p2, d.k3])

            T_cam = self._geom_util.named_pose_to_rox_transform(
                extrinsic_calib.pose)

            for id4, corner4 in zip(ids, corners):
                rvec, tvec, markerPoints = cv2.aruco.estimatePoseSingleMarkers(
                    corner4, aruco_markersize, mtx, dist)

                display_img = cv2.aruco.drawAxis(display_img, mtx, dist, rvec,
                                                 tvec, 0.05)

                # R_marker1 = cv2.Rodrigues(rvec.flatten())[0]
                # TODO: 3D pose estimation from rvec is very inaccurate. Use simple
                # trigonometry to estimate the Z rotation of the tag instead.

                # Compute the two diagonal vectors between opposite corners
                v1 = corner4[0, 2, :] - corner4[0, 0, :]
                v2 = corner4[0, 3, :] - corner4[0, 1, :]

                # Use atan2 on each vector and average
                theta1 = (np.arctan2(v1[1], v1[0]) - np.deg2rad(45)) % (2. *
                                                                        np.pi)
                theta2 = (np.arctan2(v2[1], v2[0]) - np.deg2rad(135)) % (2. *
                                                                         np.pi)

                theta = (theta1 + theta2) / 2.

                R_marker1 = rox.rot([1., 0., 0.], np.pi) @ rox.rot(
                    [0., 0., -1.], theta)

                p_marker1 = tvec.flatten()

                T_marker1 = rox.Transform(R_marker1, p_marker1, "camera",
                                          f"aruco{int(id4)}")
                T_marker = T_cam * T_marker1
                rr_marker_pose = self._geom_util.rox_transform_to_named_pose(
                    T_marker)
                poses.append(rr_marker_pose)

        ret_markers = []
        if poses is None:
            poses = [None] * len(ids)
        for id_, corner, pose in zip(ids, corners, poses):
            m = self._detected_marker()
            m.marker_id = int(id_)
            m.marker_corners = np.zeros((4, ), dtype=self._point2d_dtype)
            for i in range(4):
                m.marker_corners[i] = self._geom_util.xy_to_point2d(
                    corner[0, i, :])

            m.marker_pose = pose

            ret_markers.append(m)

        ret = self._aruco_detection_result()
        ret.detected_markers = ret_markers
        ret.display_image = self._image_util.array_to_compressed_image_jpg(
            display_img, 70)
        return ret

    def detect_aruco_stored_image(self, image_global_name,
                                  camera_calibration_intrinsic,
                                  camera_calibration_extrinsic, aruco_dict,
                                  aruco_id, aruco_markersize, roi):

        if len(camera_calibration_intrinsic) == 0:
            camera_calibration_intrinsic = None

        if len(camera_calibration_extrinsic) == 0:
            camera_calibration_extrinsic = None

        var_storage = self.device_manager.get_device_client(
            "variable_storage", 0.1)

        img_rr = var_storage.getf_variable_value("globals", image_global_name)
        img = self._image_util.compressed_image_to_array(img_rr.data)

        return self._do_aruco_detection(img, camera_calibration_intrinsic,
                                        camera_calibration_extrinsic,
                                        aruco_dict, aruco_id, aruco_markersize,
                                        roi)

    def detect_aruco_camera_capture(self, camera_local_device_name,
                                    camera_calibration_intrinsic,
                                    camera_calibration_extrinsic, aruco_dict,
                                    aruco_id, aruco_markersize, roi):

        if len(camera_calibration_intrinsic) == 0:
            camera_calibration_intrinsic = None

        if len(camera_calibration_extrinsic) == 0:
            camera_calibration_extrinsic = None

        camera_device = self.device_manager.get_device_client(
            camera_local_device_name, 1)
        img_rr = camera_device.capture_frame_compressed()
        img = self._image_util.compressed_image_to_array(img_rr)

        return self._do_aruco_detection(img, camera_calibration_intrinsic,
                                        camera_calibration_extrinsic,
                                        aruco_dict, aruco_id, aruco_markersize,
                                        roi)
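
The Z-rotation trick in _do_aruco_detection can be checked numerically: both diagonals of the marker rotate with the tag, so subtracting their nominal angles (45 and 135 degrees) and averaging recovers the in-plane rotation. A synthetic sketch:

import numpy as np

theta_true = np.deg2rad(25.0)  # synthetic in-plane rotation
c, s = np.cos(theta_true), np.sin(theta_true)
R = np.array([[c, -s], [s, c]])

# Unit square corners in the OpenCV aruco order, rotated about the centroid
base = np.array([[0., 0.], [1., 0.], [1., 1.], [0., 1.]]) - 0.5
corner = (base @ R.T)[None, :, :]  # mimic the (1,4,2) corner array layout

v1 = corner[0, 2, :] - corner[0, 0, :]
v2 = corner[0, 3, :] - corner[0, 1, :]
theta1 = (np.arctan2(v1[1], v1[0]) - np.deg2rad(45)) % (2. * np.pi)
theta2 = (np.arctan2(v2[1], v2[0]) - np.deg2rad(135)) % (2. * np.pi)
print(np.rad2deg((theta1 + theta2) / 2.))  # ~25.0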
Code example #7
File: __main__.py Project: pyri-project/pyri-vision
class CameraRobotCalibrationService_impl(object):
    def __init__(self, device_manager, device_info = None, node : RR.RobotRaconteurNode = None):
        if node is None:
            self._node = RR.RobotRaconteurNode.s
        else:
            self._node = node
        self.device_info = device_info
                
        self.service_path = None
        self.ctx = None

        self.device_manager = device_manager
        self.device_manager.connect_device_type("tech.pyri.variable_storage.VariableStorage")
        self.device_manager.connect_device_type("com.robotraconteur.robotics.robot.Robot")
        self.device_manager.device_added += self._device_added
        self.device_manager.device_removed += self._device_removed
        self.device_manager.refresh_devices(5)

        self.robot_util = RobotUtil(self._node)
        self.geom_util = GeometryUtil(self._node)
        self.image_util = ImageUtil(self._node)
        
    def RRServiceObjectInit(self, ctx, service_path):
        self.service_path = service_path
        self.ctx = ctx
        
    def _device_added(self, local_device_name):
       pass 

    def _device_removed(self, local_device_name):
        pass

    def calibrate_robot_origin(self, robot_local_device_name, camera_intrinsic_calibration_global_name, camera_extrinsic_calibration_global_name, \
        image_sequence_global_name, aruco_dict, aruco_id, aruco_markersize, flange_to_marker, output_global_name):
        

        var_storage = self.device_manager.get_device_client("variable_storage",1)

        var_consts = var_storage.RRGetNode().GetConstants('tech.pyri.variable_storage', var_storage)
        variable_persistence = var_consts["VariablePersistence"]
        variable_protection_level = var_consts["VariableProtectionLevel"]

        if len(output_global_name) > 0:
            if len(var_storage.filter_variables("globals",output_global_name,[])) > 0:
                raise RR.InvalidOperationException(f"Global {output_global_name} already exists")

        image_sequence = []
        joint_pos_sequence = []

        image_sequence_vars = var_storage.getf_variable_value("globals",image_sequence_global_name)
        for image_var in image_sequence_vars.data.splitlines():
            var2 = var_storage.getf_variable_value("globals",image_var)
            image_sequence.append(self.image_util.compressed_image_to_array(var2.data))
            var2_tags = var_storage.getf_variable_attributes("globals", image_var)
            var2_state_var_name = var2_tags["system_state"]
            var2_state = var_storage.getf_variable_value("globals", var2_state_var_name)
            joint_pos = None 
            for s in var2_state.data.devices_states[robot_local_device_name].state:
                if s.type == "com.robotraconteur.robotics.robot.RobotState":
                    joint_pos = s.state_data.data.joint_position
            assert joint_pos is not None, "Could not find joint position in state sequence"
            joint_pos_sequence.append(joint_pos)

        cam_intrinsic_calib = var_storage.getf_variable_value("globals",camera_intrinsic_calibration_global_name).data
        cam_extrinsic_calib = var_storage.getf_variable_value("globals",camera_extrinsic_calibration_global_name).data

        mtx = cam_intrinsic_calib.K
        dist_rr = cam_intrinsic_calib.distortion_info.data
        dist = np.array([dist_rr.k1, dist_rr.k2, dist_rr.p1, dist_rr.p2, dist_rr.k3],dtype=np.float64)

        cam_pose = self.geom_util.named_pose_to_rox_transform(cam_extrinsic_calib.pose)

        robot = self.device_manager.get_device_client(robot_local_device_name,1)
        robot_info = robot.robot_info
        rox_robot = self.robot_util.robot_info_to_rox_robot(robot_info,0)
        

        # Calibrate. The "calibrator" object is created elsewhere in this file
        # and is not part of this excerpt.
        robot_pose1, robot_pose_cov, img, calibration_error = calibrator.calibrate(
            image_sequence, joint_pos_sequence, aruco_dict, aruco_id,
            aruco_markersize, flange_to_marker, mtx, dist, cam_pose, rox_robot,
            robot_local_device_name)
        
        robot_pose = self._node.NewStructure("com.robotraconteur.geometry.NamedPoseWithCovariance")
        robot_pose.pose = robot_pose1
        robot_pose.covariance = robot_pose_cov

        if len(output_global_name) > 0:
            var_storage.add_variable2("globals",output_global_name,"com.robotraconteur.geometry.NamedPoseWithCovariance", \
                RR.VarValue(robot_pose,"com.robotraconteur.geometry.NamedPoseWithCovariance"), ["robot_origin_pose_calibration"], 
                {"device": robot_local_device_name}, variable_persistence["const"], None, variable_protection_level["read_write"], \
                [], f"Robot \"{robot_local_device_name}\" origin pose calibration", False)

        ret = RRN.NewStructure("tech.pyri.vision.robot_calibration.CameraRobotBaseCalibrateResult")
        ret.robot_pose = robot_pose
        ret.display_images = img
        ret.calibration_error = calibration_error

        return ret
Code example #8
class RoboticsMotion_impl(object):
    def __init__(self,
                 device_manager,
                 device_info=None,
                 node: RR.RobotRaconteurNode = None):
        if node is None:
            self._node = RR.RobotRaconteurNode.s
        else:
            self._node = node
        self.device_info = device_info

        self.service_path = None
        self.ctx = None

        self._pose2d_dtype = self._node.GetNamedArrayDType(
            "com.robotraconteur.geometry.Pose2D")
        self._point2d_dtype = self._node.GetNamedArrayDType(
            "com.robotraconteur.geometry.Point2D")

        self._geom_util = GeometryUtil(node=self._node)
        self._robot_util = RobotUtil(node=self._node)

        self.device_manager = device_manager
        self.device_manager.connect_device_type(
            "com.robotraconteur.robotics.robot.Robot")
        self.device_manager.connect_device_type(
            "com.robotraconteur.robotics.tool.Tool")
        self.device_manager.connect_device_type(
            "tech.pyri.variable_storage.VariableStorage")
        self.device_manager.device_added += self._device_added
        self.device_manager.device_removed += self._device_removed
        self.device_manager.refresh_devices(5)

    def RRServiceObjectInit(self, ctx, service_path):
        self.service_path = service_path
        self.ctx = ctx

    def _device_added(self, local_device_name):
        pass

    def _device_removed(self, local_device_name):
        pass

    def _generate_movej_trajectory(self, robot_client, rox_robot, q_initial,
                                   q_final, speed_perc):

        JointTrajectoryWaypoint = RRN.GetStructureType(
            "com.robotraconteur.robotics.trajectory.JointTrajectoryWaypoint",
            robot_client)
        JointTrajectory = RRN.GetStructureType(
            "com.robotraconteur.robotics.trajectory.JointTrajectory",
            robot_client)

        dof = len(rox_robot.joint_type)

        N_samples = math.ceil(
            np.max(np.abs(q_final - q_initial)) * 0.2 * 180 / np.pi)
        N_samples = np.max((N_samples, 20))

        ss = np.linspace(0, 1, N_samples)

        way_pts = np.zeros((N_samples, dof), dtype=np.float64)
        for i in range(dof):
            way_pts[:, i] = np.linspace(q_initial[i], q_final[i], N_samples)

        vlims = rox_robot.joint_vel_limit
        alims = rox_robot.joint_acc_limit

        path = ta.SplineInterpolator(ss, way_pts)
        pc_vel = constraint.JointVelocityConstraint(vlims * 0.95 *
                                                    (speed_perc / 100.0))
        pc_acc = constraint.JointAccelerationConstraint(alims)

        instance = algo.TOPPRA([pc_vel, pc_acc],
                               path,
                               parametrizer="ParametrizeConstAccel")
        jnt_traj = instance.compute_trajectory()

        if jnt_traj is None:
            return None

        ts_sample = np.linspace(0, jnt_traj.duration, N_samples)
        qs_sample = jnt_traj(ts_sample)

        waypoints = []

        for i in range(N_samples):
            wp = JointTrajectoryWaypoint()
            wp.joint_position = qs_sample[i, :]
            wp.time_from_start = ts_sample[i]
            waypoints.append(wp)

        traj = JointTrajectory()
        traj.joint_names = rox_robot.joint_names
        traj.waypoints = waypoints

        return traj

    def movej(self, robot_local_device_name, q_final, speed_perc):

        # Convert to radians
        q_final = np.deg2rad(q_final)

        robot = self.device_manager.get_device_client(robot_local_device_name)
        robot_info = robot.robot_info
        rox_robot = self._robot_util.robot_info_to_rox_robot(robot_info, 0)

        robot_state = robot.robot_state.PeekInValue()[0]

        q_initial = robot_state.joint_position

        traj = self._generate_movej_trajectory(robot, rox_robot, q_initial,
                                               q_final, speed_perc)
        if traj is None:
            return EmptyGenerator()

        return TrajectoryMoveGenerator(robot, rox_robot, traj, self._node)

    def _generate_movel_trajectory(self, robot_client, rox_robot,
                                   initial_q_or_T, T_final, speed_perc,
                                   q_final_seed):

        JointTrajectoryWaypoint = RRN.GetStructureType(
            "com.robotraconteur.robotics.trajectory.JointTrajectoryWaypoint",
            robot_client)
        JointTrajectory = RRN.GetStructureType(
            "com.robotraconteur.robotics.trajectory.JointTrajectory",
            robot_client)

        dof = len(rox_robot.joint_type)

        if isinstance(initial_q_or_T, rox.Transform):
            T_initial = initial_q_or_T
            q_initial, res = invkin.update_ik_info3(rox_robot, T_initial,
                                                    np.random.randn(dof))
            assert res, "Inverse kinematics failed"
        else:
            q_initial = initial_q_or_T
            T_initial = rox.fwdkin(rox_robot, q_initial)

        p_diff = T_final.p - T_initial.p
        R_diff = np.transpose(T_initial.R) @ T_final.R
        k, theta = rox.R2rot(R_diff)

        N_samples_translate = np.linalg.norm(p_diff) * 100.0
        N_samples_rot = np.abs(theta) * 0.2 * 180 / np.pi

        N_samples = math.ceil(np.max((N_samples_translate, N_samples_rot, 20)))

        ss = np.linspace(0, 1, N_samples)

        if q_final_seed is None:
            q_final, res = invkin.update_ik_info3(rox_robot, T_final,
                                                  q_initial)
        else:
            q_final, res = invkin.update_ik_info3(rox_robot, T_final,
                                                  q_final_seed)
        #assert res, "Inverse kinematics failed"

        way_pts = np.zeros((N_samples, dof), dtype=np.float64)
        way_pts[0, :] = q_initial
        for i in range(1, N_samples):
            s = float(i) / (N_samples - 1)
            R_i = T_initial.R @ rox.rot(k, theta * s)
            p_i = (p_diff * s) + T_initial.p
            T_i = rox.Transform(R_i, p_i)
            # Seed the IK solver along the straight line between q_initial and q_final
            ik_seed = (1.0 - s) * q_initial + s * q_final
            q, res = invkin.update_ik_info3(rox_robot, T_i, ik_seed)
            assert res, "Inverse kinematics failed"
            way_pts[i, :] = q

        vlims = rox_robot.joint_vel_limit
        alims = rox_robot.joint_acc_limit

        path = ta.SplineInterpolator(ss, way_pts)
        pc_vel = constraint.JointVelocityConstraint(vlims * 0.95 * speed_perc /
                                                    100.0)
        pc_acc = constraint.JointAccelerationConstraint(alims)

        instance = algo.TOPPRA([pc_vel, pc_acc],
                               path,
                               parametrizer="ParametrizeConstAccel")
        jnt_traj = instance.compute_trajectory()

        if jnt_traj is None:
            return None

        ts_sample = np.linspace(0, jnt_traj.duration, N_samples)
        qs_sample = jnt_traj(ts_sample)

        waypoints = []

        for i in range(N_samples):
            wp = JointTrajectoryWaypoint()
            wp.joint_position = qs_sample[i, :]
            wp.time_from_start = ts_sample[i]
            waypoints.append(wp)

        traj = JointTrajectory()
        traj.joint_names = rox_robot.joint_names
        traj.waypoints = waypoints

        return traj

    def movel(self,
              robot_local_device_name,
              pose_final,
              frame,
              robot_origin_calib_global_name,
              speed_perc,
              final_seed=None):

        robot = self.device_manager.get_device_client(robot_local_device_name)
        geom_util = GeometryUtil(client_obj=robot)

        if frame.lower() == "world":
            var_storage = self.device_manager.get_device_client(
                "variable_storage")
            robot_origin_pose = var_storage.getf_variable_value(
                "globals", robot_origin_calib_global_name).data
            T_rob = geom_util.named_pose_to_rox_transform(
                robot_origin_pose.pose)
            T_des1 = geom_util.pose_to_rox_transform(pose_final)
            T_des = T_rob.inv() * T_des1
            pose_final = geom_util.rox_transform_to_pose(T_des)
        elif frame.lower() == "robot":
            T_des = geom_util.pose_to_rox_transform(pose_final)
        else:
            assert False, "Unknown parent frame for movel"

        robot_info = robot.robot_info
        rox_robot = self._robot_util.robot_info_to_rox_robot(robot_info, 0)

        robot_state = robot.robot_state.PeekInValue()[0]

        q_initial = robot_state.joint_position

        traj = self._generate_movel_trajectory(robot, rox_robot, q_initial,
                                               T_des, speed_perc, final_seed)

        if traj is None:
            return EmptyGenerator()

        return TrajectoryMoveGenerator(robot, rox_robot, traj, self._node)

    def _generate_movel_trajectory_tool_j_range(self, robot_client, rox_robot,
                                                initial_q_or_T, T_final,
                                                speed_perc, final_seed):

        # If inverse kinematics fail, rotate the last joint of the final seed
        # in 30 degree steps until the solution converges
        try:
            return self._generate_movel_trajectory(robot_client, rox_robot,
                                                   initial_q_or_T, T_final,
                                                   speed_perc, final_seed)
        except AssertionError:
            if not isinstance(initial_q_or_T, np.ndarray):
                raise
        p = np.array(final_seed, dtype=np.float64)  # copy so the caller's seed is not mutated
        p1 = float(p[-1])
        p2 = float(p[-1])
        while True:

            if (p1 > rox_robot.joint_upper_limit[-1]
                    and p2 < rox_robot.joint_lower_limit[-1]):
                assert False, "Could not solve inverse kinematics for grab or place operation"
            p1 = p1 + np.deg2rad(30)
            if p1 <= rox_robot.joint_upper_limit[-1]:
                p[-1] = p1
                try:
                    res = self._generate_movel_trajectory(
                        robot_client, rox_robot, initial_q_or_T, T_final,
                        speed_perc, p)
                    return res
                except AssertionError:
                    pass
            p2 = p2 - np.deg2rad(30)
            if p2 >= rox_robot.joint_lower_limit[-1]:
                p[-1] = p2
                try:
                    res = self._generate_movel_trajectory(
                        robot_client, rox_robot, initial_q_or_T, T_final,
                        speed_perc, p)
                    return res
                except AssertionError:
                    pass

    def _do_grab_place_object_planar(self, robot_local_device_name,
                                     tool_local_device_name,
                                     robot_origin_calib_global_name,
                                     reference_pose_global_name, object_x,
                                     object_y, object_theta, z_offset_before,
                                     z_offset_grab, speed_perc, grab):

        var_storage = self.device_manager.get_device_client(
            "variable_storage", 0.1)

        robot = self.device_manager.get_device_client(robot_local_device_name)
        tool = self.device_manager.get_device_client(tool_local_device_name)

        reference_pose = var_storage.getf_variable_value(
            "globals", reference_pose_global_name)
        robot_origin_calib = var_storage.getf_variable_value(
            "globals", robot_origin_calib_global_name)

        T_robot = self._geom_util.named_pose_to_rox_transform(
            robot_origin_calib.data.pose)

        robot_info = robot.robot_info
        rox_robot = self._robot_util.robot_info_to_rox_robot(robot_info, 0)

        # The object's pose in world coordinates
        T_object = rox.Transform(rox.rot([0., 0., 1.], object_theta),
                                 [object_x, object_y, 0.])

        # The robot's reference pose in its own frame
        T_reference_pose_r = rox.fwdkin(rox_robot,
                                        np.deg2rad(reference_pose.data))

        # The robot's reference pose in world coordinates
        T_reference_pose = T_robot * T_reference_pose_r

        # Start the nominal grab pose as a copy of the reference pose
        T_grab_pose_n = copy.deepcopy(T_reference_pose)

        # Translate the reference pose in x and y to the nominal grab point
        T_grab_pose_n.p[0] = T_object.p[0]
        T_grab_pose_n.p[1] = T_object.p[1]

        # Rotate the reference pose to align with the object
        T_grab_pose_n.R = T_object.R @ T_grab_pose_n.R

        # Before pose
        T_grab_pose_before = copy.deepcopy(T_grab_pose_n)
        T_grab_pose_before.p[2] += z_offset_before
        # Transform to robot frame
        T_grab_pose_before_r = T_robot.inv() * T_grab_pose_before

        # Grab pose
        T_grab_pose = copy.deepcopy(T_grab_pose_n)
        T_grab_pose.p[2] += z_offset_grab
        # Transform to robot frame
        T_grab_pose_r = T_robot.inv() * T_grab_pose

        robot_state, _ = robot.robot_state.PeekInValue()
        q_current = robot_state.joint_position

        ## Compute inverse kinematics
        # q_grab_before, res = invkin.update_ik_info3(rox_robot, T_grab_pose_before_r, q_current)
        # assert res, "Invalid setpoint: invkin did not converge"
        # q_grab, res = invkin.update_ik_info3(rox_robot, T_grab_pose_r, q_current)
        # assert res, "Invalid setpoint: invkin did not converge"

        # print(f"q_grab_before: {q_grab_before}")
        # print(f"q_grab: {q_grab}")
        # print()

        final_seed = np.deg2rad(reference_pose.data)
        traj_before = self._generate_movel_trajectory_tool_j_range(
            robot, rox_robot, q_current, T_grab_pose_before_r, speed_perc,
            final_seed)

        q_init_grab = traj_before.waypoints[-1].joint_position
        traj_grab = self._generate_movel_trajectory_tool_j_range(
            robot, rox_robot, q_init_grab, T_grab_pose_r, speed_perc,
            q_init_grab)

        q_init_after = traj_grab.waypoints[-1].joint_position
        traj_after = self._generate_movel_trajectory_tool_j_range(
            robot, rox_robot, q_init_after, T_grab_pose_before_r, speed_perc,
            q_init_grab)

        gen = PickPlaceMotionGenerator(robot, rox_robot, tool, traj_before,
                                       traj_grab, traj_after, grab, self._node)

        # robot.jog_freespace(q_grab_before,max_velocity,True)
        # time.sleep(0.5)
        # robot.jog_freespace(q_grab,max_velocity*.25,True)
        # time.sleep(0.5)
        # robot.jog_freespace(q_grab_before,max_velocity*.25,True)
        return gen

    def grab_object_planar(self, robot_local_device_name,
                           tool_local_device_name,
                           robot_origin_calib_global_name,
                           reference_pose_global_name, object_world_pose,
                           z_offset_before, z_offset_grab, speed_perc):

        object_x = object_world_pose["position"]["x"]
        object_y = object_world_pose["position"]["y"]
        object_theta = object_world_pose["orientation"]

        return self._do_grab_place_object_planar(
            robot_local_device_name, tool_local_device_name,
            robot_origin_calib_global_name, reference_pose_global_name,
            object_x, object_y, object_theta, z_offset_before, z_offset_grab,
            speed_perc, True)

    def grab_object_planar2(self, robot_local_device_name,
                            tool_local_device_name,
                            robot_origin_calib_global_name,
                            reference_pose_global_name, object_world_pose,
                            z_offset_before, z_offset_grab, speed_perc):

        object_x = object_world_pose["position"]["x"]
        object_y = object_world_pose["position"]["y"]
        object_rpy = self._geom_util.quaternion_to_rpy(
            object_world_pose["orientation"])
        assert np.abs(
            object_rpy[0]) < np.deg2rad(15), "Object is not flat on surface!"
        assert np.abs(
            object_rpy[1]) < np.deg2rad(15), "Object is not flat on surface!"
        object_theta = object_rpy[2]

        return self._do_grab_place_object_planar(
            robot_local_device_name, tool_local_device_name,
            robot_origin_calib_global_name, reference_pose_global_name,
            object_x, object_y, object_theta, z_offset_before, z_offset_grab,
            speed_perc, True)

    def place_object_planar(self, robot_local_device_name,
                            tool_local_device_name,
                            robot_origin_calib_global_name,
                            reference_pose_global_name, object_world_pose,
                            z_offset_before, z_offset_grab, speed_perc):

        object_x = object_world_pose["position"]["x"]
        object_y = object_world_pose["position"]["y"]
        object_theta = object_world_pose["orientation"]

        return self._do_grab_place_object_planar(
            robot_local_device_name, tool_local_device_name,
            robot_origin_calib_global_name, reference_pose_global_name,
            object_x, object_y, object_theta, z_offset_before, z_offset_grab,
            speed_perc, False)

    def place_object_planar2(self, robot_local_device_name,
                             tool_local_device_name,
                             robot_origin_calib_global_name,
                             reference_pose_global_name, object_world_pose,
                             z_offset_before, z_offset_grab, speed_perc):

        object_x = object_world_pose["position"]["x"]
        object_y = object_world_pose["position"]["y"]
        object_rpy = self._geom_util.quaternion_to_rpy(
            object_world_pose["orientation"])
        assert np.abs(object_rpy[0]) < np.deg2rad(
            5), "Target location is not flat on surface!"
        assert np.abs(object_rpy[1]) < np.deg2rad(
            5), "Target location is not flat on surface!"
        object_theta = object_rpy[2]

        return self._do_grab_place_object_planar(
            robot_local_device_name, tool_local_device_name,
            robot_origin_calib_global_name, reference_pose_global_name,
            object_x, object_y, object_theta, z_offset_before, z_offset_grab,
            speed_perc, False)

    def move_joint_trajectory(self, robot_local_device_name, trajectory,
                              speed_perc):

        if speed_perc != 100.0:
            t_scale = 100.0 / speed_perc
            traj2 = copy.deepcopy(trajectory)

            for wp in traj2.waypoints:
                wp.time_from_start *= t_scale

            trajectory = traj2

        robot = self.device_manager.get_device_client(robot_local_device_name)

        robot_info = robot.robot_info
        rox_robot = self._robot_util.robot_info_to_rox_robot(robot_info, 0)

        return TrajectoryMoveGenerator(robot, rox_robot, trajectory,
                                       self._node)
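
A hedged client-side sketch of driving this service: the device names ("robotics_motion", "robot"), the joint target, and the DeviceManagerClient import path are assumptions, not taken from the listing above. movej expects degrees and returns a Robot Raconteur generator that executes the trajectory step by step:

import numpy as np
import RobotRaconteur as RR
from pyri.device_manager_client import DeviceManagerClient  # assumed import path

d = DeviceManagerClient('rr+tcp://localhost:59902?service=device_manager',
                        autoconnect=False)
d.refresh_devices(1)
d.connect_device("robotics_motion")
motion = d.get_device_client("robotics_motion", 1)

# Joint target in degrees; 50% of the allowed speed
gen = motion.movej("robot", np.array([0., -30., 45., 0., 60., 0.]), 50.0)
try:
    while True:
        gen.Next()  # step the trajectory until the generator is exhausted
except RR.StopIterationException:
    pass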
Code example #9
intrinsic_var = var_storage.getf_variable_value(
    "globals", "camera_calibration_intrinsic")
extrinsic_var = var_storage.getf_variable_value(
    "globals", "camera_calibration_extrinsic")
robot_var = var_storage.getf_variable_value("globals",
                                            "robot_origin_calibration")

calib = intrinsic_var.data

print("Camera Matrix")
print(calib.K)
d = calib.distortion_info.data
dist = np.array([d.k1, d.k2, d.p1, d.p2, d.k3])
print("Camera distortion")
print(dist)

geom_util = GeometryUtil(client_obj=var_storage)

T_cam = geom_util.named_pose_to_rox_transform(extrinsic_var.data.pose)
T_rob = geom_util.named_pose_to_rox_transform(robot_var.data.pose)

print("T_cam")
print(T_cam.R)
print(T_cam.p)

print("T_rob")
print(T_rob.R)
print(T_rob.p)

print("T")
T = (T_cam.inv() * T_rob).inv()
print(T.R)
print(T.p)
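
A minimal check of the final composition, assuming general_robotics_toolbox: (T_cam.inv() * T_rob).inv() equals T_rob.inv() * T_cam, i.e. the camera pose expressed in the robot frame:

import numpy as np
import general_robotics_toolbox as rox

# Arbitrary example transforms
T_cam = rox.Transform(rox.rot([0., 0., 1.], 0.3), np.array([1.0, 0.2, 1.5]))
T_rob = rox.Transform(rox.rot([0., 1., 0.], -0.2), np.array([0.5, -0.4, 0.0]))

T_a = (T_cam.inv() * T_rob).inv()
T_b = T_rob.inv() * T_cam
assert np.allclose(T_a.R, T_b.R) and np.allclose(T_a.p, T_b.p)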