Example #1
    def __init__(self, device_manager, device_info=None, node: RR.RobotRaconteurNode = None):
        if node is None:
            self._node = RR.RobotRaconteurNode.s
        else:
            self._node = node
        self.device_info = device_info

        self.service_path = None
        self.ctx = None

        self._matched_template_2d_type = self._node.GetStructureType("tech.pyri.vision.template_matching.MatchedTemplate2D")
        self._template_matching_result_2d_type = self._node.GetStructureType("tech.pyri.vision.template_matching.TemplateMatchingResult2D")
        self._template_matching_result_3d_type = self._node.GetStructureType("tech.pyri.vision.template_matching.TemplateMatchingResult3D")
        self._matched_template_3d_type = self._node.GetStructureType("tech.pyri.vision.template_matching.MatchedTemplate3D")
        self._named_pose_with_covariance_type = self._node.GetStructureType("com.robotraconteur.geometry.NamedPoseWithCovariance")
        self._pose2d_dtype = self._node.GetNamedArrayDType("com.robotraconteur.geometry.Pose2D")

        self._image_util = ImageUtil(node=self._node)
        self._geom_util = GeometryUtil(node=self._node)

        self.device_manager = device_manager
        self.device_manager.connect_device_type("tech.pyri.variable_storage.VariableStorage")
        self.device_manager.connect_device_type("com.robotraconteur.imaging.Camera")
        self.device_manager.device_added += self._device_added
        self.device_manager.device_removed += self._device_removed
        self.device_manager.refresh_devices(5)
Example #2
def show_image(image):
    # var_storage is an already-connected Robot Raconteur client used by
    # ImageUtil to resolve the image structure types
    image_util = ImageUtil(client_obj=var_storage)
    cv_image = image_util.compressed_image_to_array(image)
    #cv_image=image.data.reshape([image.image_info.height, image.image_info.width, int(len(image.data)/(image.image_info.height*image.image_info.width))], order='C')

    cv2.imshow("image", cv_image)
    cv2.waitKey()
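
# A minimal usage sketch for show_image, assuming var_storage is an
# already-connected variable storage client and "test_image" is a stored
# CompressedImage global (both hypothetical names):
var = var_storage.getf_variable_value("globals", "test_image")
show_image(var.data)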
Example #3
    def __init__(self,
                 device_manager,
                 device_info=None,
                 node: RR.RobotRaconteurNode = None):
        if node is None:
            self._node = RR.RobotRaconteurNode.s
        else:
            self._node = node
        self.device_info = device_info

        self.service_path = None
        self.ctx = None

        self._detected_marker = self._node.GetStructureType(
            "tech.pyri.vision.aruco_detection.DetectedMarker")
        self._aruco_detection_result = self._node.GetStructureType(
            "tech.pyri.vision.aruco_detection.ArucoDetectionResult")
        self._pose2d_dtype = self._node.GetNamedArrayDType(
            "com.robotraconteur.geometry.Pose2D")
        self._point2d_dtype = self._node.GetNamedArrayDType(
            "com.robotraconteur.geometry.Point2D")

        self._image_util = ImageUtil(node=self._node)
        self._geom_util = GeometryUtil(node=self._node)

        self.device_manager = device_manager
        self.device_manager.connect_device_type(
            "tech.pyri.variable_storage.VariableStorage")
        self.device_manager.connect_device_type(
            "com.robotraconteur.imaging.Camera")
        self.device_manager.device_added += self._device_added
        self.device_manager.device_removed += self._device_removed
        self.device_manager.refresh_devices(5)
Example #4
def _calibrate_camera_extrinsic(intrinsic_calib, image, board,
                                camera_local_device_name):

    # TODO: verify calibration data

    mtx = intrinsic_calib.K
    dist_rr = intrinsic_calib.distortion_info.data
    dist = np.array(
        [dist_rr.k1, dist_rr.k2, dist_rr.p1, dist_rr.p2, dist_rr.k3],
        dtype=np.float64)

    image_util = ImageUtil()
    frame = image_util.compressed_image_to_array(image)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    if board == "chessboard":
        width = 7
        height = 6
        square_size = 0.03
    else:
        raise RR.InvalidOperationException(
            f"Invalid calibration board {board}")

    ret, corners = cv2.findChessboardCorners(gray, (width, height), None)
    assert ret, "Could not find calibration target"

    objp = np.zeros((height * width, 3), np.float32)
    objp[:, :2] = np.mgrid[0:width, 0:height].T.reshape(-1, 2)

    objp = objp * square_size

    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)

    corners2 = cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1), criteria)

    ret, rvecs, tvecs = cv2.solvePnP(objp, corners2, mtx, dist)

    # solvePnP returns the chessboard pose in the camera frame
    R = cv2.Rodrigues(rvecs.flatten())[0]

    # Fixed rotation aligning the chessboard frame with the world frame convention
    R_landmark = np.array([[0, 1, 0], [1, 0, 0], [0, 0, -1]], dtype=np.float64)

    # Invert to get the camera pose in the chessboard frame
    R_cam1 = R.transpose()
    p_cam1 = -R.transpose() @ tvecs

    # Re-express the camera pose in the world frame
    R_cam = R_landmark.transpose() @ R_cam1
    p_cam = R_landmark.transpose() @ p_cam1

    cv_image2 = cv2.aruco.drawAxis(frame, mtx, dist,
                                   cv2.Rodrigues(R_cam.transpose())[0],
                                   -R_cam.transpose() @ p_cam, 0.1)

    T = rox.Transform(R_cam, p_cam, "world", camera_local_device_name)

    geom_util = GeometryUtil()
    cov = np.eye(6) * 1e-5
    return geom_util.rox_transform_to_named_pose(
        T), cov, image_util.array_to_compressed_image_jpg(cv_image2), 0.0
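
# The inversion above uses the rigid-transform identity: if solvePnP returns
# (R, t) mapping chessboard coordinates into the camera frame, the camera pose
# in the chessboard frame is (R^T, -R^T t). A self-contained numpy check of
# that identity with arbitrary stand-in values:
import cv2
import numpy as np

R_chk = cv2.Rodrigues(np.array([0.1, -0.2, 0.3]))[0]
t_chk = np.array([[0.5], [0.0], [1.2]])

R_inv = R_chk.transpose()
p_inv = -R_chk.transpose() @ t_chk

# Composing the transform with its inverse recovers the identity
assert np.allclose(R_chk @ R_inv, np.eye(3))
assert np.allclose(R_chk @ p_inv + t_chk, np.zeros((3, 1)))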
Example #5
    def __init__(self, parent, camera_sub):
        self._lock = threading.Lock()
        self._current_img = None
        self._camera_sub = camera_sub
        self._camera_sub.ClientConnected += self._client_connected
        self._parent = parent
        self._image_util = ImageUtil()

        try:
            c = self._camera_sub.GetDefaultClient()
            c.start_streaming()
        except Exception:
            # The camera may not be connected yet; streaming will be started
            # by the ClientConnected handler
            traceback.print_exc()

        self._camera_sub_preview_sub = None
Example #6
def _cv_img_to_rr_display_img(img):
    height, width = img.shape[0:2]

    # Clamp image to 720p
    s1 = 1280.0 / width
    s2 = 720.0 / height
    s = min(s1, s2)
    if s < 1.0:
        width = int(width * s)
        height = int(height * s)
        img = cv2.resize(img, (width, height))

    img_util = ImageUtil()

    return img_util.array_to_compressed_image_jpg(img, 70)
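
# A quick exercise of the helper with a synthetic 1080p frame; the result
# should be a JPEG CompressedImage scaled to fit within 1280x720:
import numpy as np

test_img = np.zeros((1080, 1920, 3), dtype=np.uint8)
rr_img = _cv_img_to_rr_display_img(test_img)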
Example #7
    def __init__(self, device_manager, device_info=None, node: RR.RobotRaconteurNode = None):
        if node is None:
            self._node = RR.RobotRaconteurNode.s
        else:
            self._node = node
        self.device_info = device_info

        self.service_path = None
        self.ctx = None

        self.device_manager = device_manager
        self.device_manager.connect_device_type("tech.pyri.variable_storage.VariableStorage")
        self.device_manager.connect_device_type("com.robotraconteur.robotics.robot.Robot")
        self.device_manager.device_added += self._device_added
        self.device_manager.device_removed += self._device_removed
        self.device_manager.refresh_devices(5)

        self.robot_util = RobotUtil(self._node)
        self.geom_util = GeometryUtil(self._node)
        self.image_util = ImageUtil(self._node)
Example #8
d = DeviceManagerClient('rr+tcp://localhost:59902?service=device_manager',
                        autoconnect=False)

d.refresh_devices(1)

d.connect_device("vision_template_matching")
d.connect_device("variable_storage")

c = d.get_device_client("vision_template_matching", 1)

bounding_box2d_type = RRN.GetStructureType(
    'com.robotraconteur.geometry.BoundingBox2D', c)
named_pose2d_type = RRN.GetStructureType(
    'com.robotraconteur.geometry.NamedPose2D', c)
pose2d_dtype = RRN.GetNamedArrayDType('com.robotraconteur.geometry.Pose2D', c)

var_storage = d.get_device_client("variable_storage", 1)

# roi_name and template_name are defined earlier in the original script
roi = var_storage.getf_variable_value("globals", roi_name).data

#res = c.match_template_stored_image("extrinsic_image0", "test10", None)
res = c.match_template_camera_capture("camera", template_name, None)

img_util = ImageUtil(client_obj=c)
res_img = img_util.compressed_image_to_array(res.display_image)

cv2.imshow("", res_img)
cv2.waitKey()
cv2.destroyAllWindows()

print(res)
Example #9
class VisionTemplateMatching_impl(object):
    def __init__(self, device_manager, device_info=None, node: RR.RobotRaconteurNode = None):
        if node is None:
            self._node = RR.RobotRaconteurNode.s
        else:
            self._node = node
        self.device_info = device_info

        self.service_path = None
        self.ctx = None

        self._matched_template_2d_type = self._node.GetStructureType("tech.pyri.vision.template_matching.MatchedTemplate2D")
        self._template_matching_result_2d_type = self._node.GetStructureType("tech.pyri.vision.template_matching.TemplateMatchingResult2D")
        self._template_matching_result_3d_type = self._node.GetStructureType("tech.pyri.vision.template_matching.TemplateMatchingResult3D")
        self._matched_template_3d_type = self._node.GetStructureType("tech.pyri.vision.template_matching.MatchedTemplate3D")
        self._named_pose_with_covariance_type = self._node.GetStructureType("com.robotraconteur.geometry.NamedPoseWithCovariance")
        self._pose2d_dtype = self._node.GetNamedArrayDType("com.robotraconteur.geometry.Pose2D")

        self._image_util = ImageUtil(node=self._node)
        self._geom_util = GeometryUtil(node=self._node)

        self.device_manager = device_manager
        self.device_manager.connect_device_type("tech.pyri.variable_storage.VariableStorage")
        self.device_manager.connect_device_type("com.robotraconteur.imaging.Camera")
        self.device_manager.device_added += self._device_added
        self.device_manager.device_removed += self._device_removed
        self.device_manager.refresh_devices(5)
        
    def RRServiceObjectInit(self, ctx, service_path):
        self.service_path = service_path
        self.ctx = ctx
        
    def _device_added(self, local_device_name):
        pass

    def _device_removed(self, local_device_name):
        pass

    def match_template_stored_image(self, image_global_name, template_global_name, roi):

        var_storage = self.device_manager.get_device_client("variable_storage", 1)

        image_var = var_storage.getf_variable_value("globals", image_global_name)
        image = self._image_util.compressed_image_to_array(image_var.data)

        template_var = var_storage.getf_variable_value("globals", template_global_name)
        template = self._image_util.compressed_image_to_array(template_var.data)

        return self._do_template_match(template, image, roi)

    def match_template_camera_capture(self, camera_local_device_name, template_global_name, roi):

        var_storage = self.device_manager.get_device_client("variable_storage", 1)
        
        template_var = var_storage.getf_variable_value("globals", template_global_name)
        template = self._image_util.compressed_image_to_array(template_var.data)

        camera_device = self.device_manager.get_device_client(camera_local_device_name)
        image_compressed = camera_device.capture_frame_compressed()
        image = self._image_util.compressed_image_to_array(image_compressed)

        return self._do_template_match(template, image, roi)

    def _do_template_match(self, template, image, roi):

        matcher_roi = None

        if roi is not None:
            roi_x = roi.center.pose[0]["position"]["x"]
            roi_y = roi.center.pose[0]["position"]["y"]
            roi_theta = roi.center.pose[0]["orientation"]
            roi_w = roi.size[0]["width"]
            roi_h = roi.size[0]["height"]
            matcher_roi = (roi_x, roi_y, roi_w, roi_h, -roi_theta)


        # Execute the template matching using OpenCV
        matcher = TemplateMatchingMultiAngleWithROI(template, image, matcher_roi)

        # detect_object(True) also returns an annotated display image
        match_center, template_wh, match_angle, detection_result_img = matcher.detect_object(True)

        # Report the detected object location
        print("Object found:")
        print("center coordinates in image frame (x,y): " + str(match_center))
        print("(w,h): " + str(template_wh))
        print("angle: " + str(match_angle))


        match_result = self._template_matching_result_2d_type()

        matched_template_result = self._matched_template_2d_type()
        centroid = np.zeros((1,), dtype=self._pose2d_dtype)
        centroid[0]["position"]["x"] = match_center[0]
        centroid[0]["position"]["y"] = match_center[1]
        centroid[0]["orientation"] = match_angle
        matched_template_result.match_centroid = centroid
        matched_template_result.template_size = self._geom_util.wh_to_size2d(template_wh, dtype=np.int32)
        matched_template_result.confidence = 0

        match_result.template_matches = [matched_template_result]

        match_result.display_image = self._image_util.array_to_compressed_image_jpg(detection_result_img, 70)

        return match_result

    def _do_match_with_pose(self, image, template, intrinsic_calib, extrinsic_calib, object_z, roi):
        
        matcher_roi = None

        if roi is not None:
            roi_x = roi.center.pose[0]["position"]["x"]
            roi_y = roi.center.pose[0]["position"]["y"]
            roi_theta = roi.center.pose[0]["orientation"]
            roi_w = roi.size[0]["width"]
            roi_h = roi.size[0]["height"]
            matcher_roi = (roi_x, roi_y, roi_w, roi_h, -roi_theta)


        # Execute the template matching using OpenCV
        matcher = TemplateMatchingMultiAngleWithROI(template, image, matcher_roi)

        # detect_object(True) also returns an annotated display image
        match_center, template_wh, match_angle, detection_result_img = matcher.detect_object(True)
           
        x = match_center[0]
        y = match_center[1]
        theta = match_angle
        src = np.asarray([x, y], dtype=np.float32)
        src = np.reshape(src, (-1, 1, 2))  # Reshape as OpenCV requires (N,1,2)

        # The detection result is now known in the image frame. Next, find the
        # detected pose in the camera frame given the z distance to the camera.
        # This requires the camera matrix and distortion coefficients from the
        # intrinsic calibration result.

        mtx = intrinsic_calib.K
        d = intrinsic_calib.distortion_info.data
        dist = np.array([d.k1, d.k2, d.p1, d.p2, d.k3])

        T_cam = self._geom_util.named_pose_to_rox_transform(extrinsic_calib.pose)

        #TODO: Figure out a better value for this
        object_z_cam_dist = abs(T_cam.p[2]) - object_z

        # Find the corresponding position of the detection in the camera frame
        dst = cv2.undistortPoints(src, mtx, dist)  # dst is (Xc/Zc, Yc/Zc) in the same shape as src
        dst = dst * float(object_z_cam_dist) * 1000.0  # Scale by the given Zc distance; the factor of 1000 converts Zc from meters to millimeters to match the other coordinates
        dst = np.squeeze(dst) * 0.001  # Xc and Yc as a vector, converted back to meters

        # The translation from the camera frame to the detected object center, expressed in the camera frame, is T = [Xc, Yc, Zc]
        Xc = dst[0]
        Yc = dst[1]
        Zc = float(object_z_cam_dist)
        T = np.asarray([Xc, Yc, Zc])

        # Now find the orientation of the detected object with respect to the camera.
        # Assume the object's +z axis points toward the camera and the xy planes of
        # the object and the camera are parallel, so the rotation matrix is:
        theta = np.deg2rad(theta)  # convert theta from degrees to radians
        R_co = np.asarray([[math.cos(theta), -math.sin(theta), 0], [-math.sin(theta), -math.cos(theta), 0], [0, 0, -1]])

        T_obj_cam_frame = rox.Transform(R_co, T, "camera", "object")

        T_obj = T_cam * T_obj_cam_frame
        
        # TODO: Better adjustment of Z height?
        T_obj.p[2] = object_z

        ret1 = self._matched_template_3d_type()
        ret1.pose = self._named_pose_with_covariance_type()
        ret1.pose.pose = self._geom_util.rox_transform_to_named_pose(T_obj)
        ret1.pose.covariance = np.zeros((6, 6))
        ret1.confidence = 0

        ret = self._template_matching_result_3d_type()
        ret.template_matches = [ret1]
        ret.display_image = self._image_util.array_to_compressed_image_jpg(detection_result_img, 70)

        return ret

    def _do_match_template_world_pose(self, image, template_global_name,
        camera_calibration_intrinsic, camera_calibration_extrinsic, object_z_height, roi, var_storage):

        template_var = var_storage.getf_variable_value("globals", template_global_name)
        template = self._image_util.compressed_image_to_array(template_var.data)

        intrinsic_calib = var_storage.getf_variable_value("globals", camera_calibration_intrinsic).data
        extrinsic_calib = var_storage.getf_variable_value("globals", camera_calibration_extrinsic).data

        return self._do_match_with_pose(image, template, intrinsic_calib, extrinsic_calib, object_z_height, roi)

    def match_template_world_pose_stored_image(self, image_global_name, template_global_name,
        camera_calibration_intrinsic, camera_calibration_extrinsic, object_z_height, roi):

        var_storage = self.device_manager.get_device_client("variable_storage",1)

        image_var = var_storage.getf_variable_value("globals", image_global_name)
        image = self._image_util.compressed_image_to_array(image_var.data)

        return self._do_match_template_world_pose(image, template_global_name, camera_calibration_intrinsic,
            camera_calibration_extrinsic, object_z_height, roi, var_storage)

    def match_template_world_pose_camera_capture(self, camera_local_device_name, template_global_name,
        camera_calibration_intrinsic, camera_calibration_extrinsic, object_z_height, roi):

        var_storage = self.device_manager.get_device_client("variable_storage",1)

        camera_device = self.device_manager.get_device_client(camera_local_device_name,1)
        img_rr = camera_device.capture_frame_compressed()
        image = self._image_util.compressed_image_to_array(img_rr)

        return self._do_match_template_world_pose(image, template_global_name, camera_calibration_intrinsic,
            camera_calibration_extrinsic, object_z_height, roi, var_storage)
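
# The core of _do_match_with_pose is back-projecting a pixel onto a plane at a
# known camera-frame depth. A self-contained sketch of that step with assumed
# intrinsics (fx = fy = 1000, principal point (640, 360), no distortion):
import cv2
import numpy as np

mtx_chk = np.array([[1000.0, 0.0, 640.0],
                    [0.0, 1000.0, 360.0],
                    [0.0, 0.0, 1.0]])
dist_chk = np.zeros(5)

src_chk = np.array([[[740.0, 460.0]]], dtype=np.float32)  # pixel, shape (N,1,2)
Zc = 1.0  # assumed depth of the object plane in meters

norm = cv2.undistortPoints(src_chk, mtx_chk, dist_chk)  # (Xc/Zc, Yc/Zc)
Xc, Yc = np.squeeze(norm) * Zc
print(Xc, Yc, Zc)  # 0.1 0.1 1.0 -> the ray through (740, 460) at 1 m depth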
Example #10
def _calibrate_camera_intrinsic2(images, board):
    # opencv_camera_calibration.py:6

    # TODO: Don't hardcode the calibration pattern
    if board == "chessboard":
        width = 7
        height = 6
        square_size = 0.03
    else:
        raise RR.InvalidOperationException(
            f"Invalid calibration board {board}")

    # termination criteria
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)

    # prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(8,6,0)
    objp = np.zeros((height * width, 3), np.float32)
    objp[:, :2] = np.mgrid[0:width, 0:height].T.reshape(-1, 2)

    objp = objp * square_size

    # Arrays to store object points and image points from all the images.
    objpoints = []  # 3d point in real world space
    imgpoints = []  # 2d points in image plane.

    imgs = []

    image_util = ImageUtil()

    for image in images:

        frame = image_util.compressed_image_to_array(image)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        # Find the chess board corners
        ret, corners = cv2.findChessboardCorners(gray, (width, height), None)

        # If found, add object points, image points (after refining them)
        if ret:
            objpoints.append(objp)

            corners2 = cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1),
                                        criteria)
            imgpoints.append(corners2)

            # Draw and display the corners
            img = cv2.drawChessboardCorners(frame, (width, height), corners2,
                                            ret)
            imgs.append(img)
            # cv2.imshow("img", img)
            # cv2.waitKey(1000)

    # cv2.destroyAllWindows()

    ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints,
                                                       gray.shape[::-1], None,
                                                       None)

    mean_error = 0
    for i in range(len(objpoints)):
        imgpoints2, _ = cv2.projectPoints(objpoints[i], rvecs[i], tvecs[i],
                                          mtx, dist)
        error = cv2.norm(imgpoints[i], imgpoints2,
                         cv2.NORM_L2) / len(imgpoints2)
        mean_error += error
    print("total error: {}".format(mean_error / len(objpoints)))

    return ret, mtx, dist.flatten(), rvecs, tvecs, mean_error, imgs
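
# A sketch of driving this routine, assuming cv_frames is a hypothetical list
# of BGR numpy frames showing the 7x6 chessboard; the frames are wrapped as
# Robot Raconteur CompressedImage structures first:
image_util = ImageUtil()
rr_images = [image_util.array_to_compressed_image_png(f) for f in cv_frames]
ret, mtx, dist, rvecs, tvecs, mean_error, imgs = _calibrate_camera_intrinsic2(
    rr_images, "chessboard")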
Example #11
class CameraViewer_impl:
    def __init__(self, parent, camera_sub):
        self._lock = threading.Lock()
        self._current_img = None
        self._camera_sub = camera_sub
        self._camera_sub.ClientConnected += self._client_connected
        self._parent = parent
        self._image_util = ImageUtil()

        try:
            c = self._camera_sub.GetDefaultClient()
            c.start_streaming()
        except Exception:
            # The camera may not be connected yet; streaming will be started
            # by the ClientConnected handler
            traceback.print_exc()

        self._camera_sub_preview_sub = None

    def _client_connected(self, sub, subscription_id, c):
        try:
            c.start_streaming()
        except Exception:
            # Ignore errors if the newly connected camera cannot start streaming
            pass

    def RRServiceObjectInit(self, ctx, service_path):
        self.service_path = service_path
        self._downsampler = RR.BroadcastDownsampler(ctx)
        self._downsampler.AddPipeBroadcaster(self.preview_stream)

        self._camera_sub_preview_sub = self._camera_sub.SubscribePipe(
            "preview_stream")
        self._camera_sub_preview_sub.PipePacketReceived += self._preview_new_packets

    def _preview_new_packets(self, sub):
        img = None
        with self._lock:
            while sub.Available > 0:
                img = sub.ReceivePacket()
            if img is None:
                return

            self._current_img = img
            self.preview_stream.AsyncSendPacket(img, lambda: None)

    def capture_frame_preview(self):
        with self._lock:
            if self._current_img is not None:
                return self._current_img

        for i in range(5):
            time.sleep(0.005)
            with self._lock:
                if self._current_img is not None:
                    return self._current_img
        raise RR.InvalidOperationException("No image available")

    def save_to_globals(self, global_name, save_system_state):
        assert len(global_name) > 0
        var_storage = self._parent.device_manager.get_device_client(
            "variable_storage", 0.1)

        var_consts = var_storage.RRGetNode().GetConstants(
            'tech.pyri.variable_storage', var_storage)
        variable_persistence = var_consts["VariablePersistence"]
        variable_protection_level = var_consts["VariableProtectionLevel"]

        if len(var_storage.filter_variables("globals", global_name, [])) > 0:
            raise RR.InvalidOperationException(
                f"Global {global_name} already exists")
        if save_system_state:
            if len(
                    var_storage.filter_variables(
                        "globals", f"{global_name}_system_state", [])) > 0:
                raise RR.InvalidOperationException(
                    f"Global {global_name}_system_state already exists")

        img1 = self._camera_sub.GetDefaultClient().capture_frame()
        img2 = self._image_util.image_to_array(img1)
        img = self._image_util.array_to_compressed_image_png(img2)

        attrs = {}

        if save_system_state:
            attrs["system_state"] = f"{global_name}_system_state"
            devices_states_c = self._parent.device_manager.get_device_client(
                "devices_states", 0.1)
            dev_states, _ = devices_states_c.devices_states.PeekInValue()
            var_storage.add_variable2("globals",f"{global_name}_system_state","tech.pyri.devices_states.PyriDevicesStates", \
                RR.VarValue(dev_states,"tech.pyri.devices_states.PyriDevicesStates"), ["system_state"], attrs, variable_persistence["const"],
                None, variable_protection_level["read_write"], \
                [], "Captured system state", False)

        var_storage.add_variable2("globals",global_name,"com.robotraconteur.image.CompressedImage", \
            RR.VarValue(img,"com.robotraconteur.image.CompressedImage"), ["image"], attrs, variable_persistence["const"],
            None, variable_protection_level["read_write"], \
            [], "Captured image", False)

    def save_sequence_to_globals(self, global_name, save_system_state):
        return SaveSequenceToGlobalsGenerator(self,
                                              self._parent.device_manager,
                                              global_name, save_system_state)
Example #12

def calibrate(images, joint_poses, aruco_dict, aruco_id, aruco_markersize,
              flange_to_marker, mtx, dist, cam_pose, rox_robot,
              robot_local_device_name):
    """ Apply extrinsic camera calibration operation for images in the given directory path 
    using opencv aruco marker detection, the extrinsic marker poses given in a json file, 
    and the given intrinsic camera parameters."""

    assert aruco_dict.startswith("DICT_"), "Invalid aruco dictionary name"

    aruco_dict = getattr(aruco, aruco_dict)  # convert the name string to the OpenCV constant
    aruco_dict = cv2.aruco.Dictionary_get(aruco_dict)
    aruco_params = cv2.aruco.DetectorParameters_create()

    i = 0

    imgs_out = []

    geom_util = GeometryUtil()
    image_util = ImageUtil()

    object_points = []
    image_points = []

    for img, joints in zip(images, joint_poses):

        # Find the aruco tag corners
        # corners, ids, rejected = cv2.aruco.detectMarkers(img, aruco_dict, parameters=aruco_params,cameraMatrix=mtx, distCoeff=dist)
        corners, ids, rejected = cv2.aruco.detectMarkers(
            img, aruco_dict, parameters=aruco_params)

        # #debug
        # print(str(type(corners))) # <class 'list'>
        # print(str(corners))  # list of numpy arrays of corners
        # print(str(type(ids))) # <class 'numpy.ndarray'>
        # print(str(ids))

        if len(corners) > 0:
            # Only find the id that we desire
            indexes = np.flatnonzero(ids.flatten() == aruco_id).tolist()
            corners = [corners[index] for index in indexes]
            ids = np.asarray([ids[index] for index in indexes])

            # #debug
            # print(str(type(corners))) # <class 'list'>
            # print(str(corners))  # list of numpy arrays of corners
            # print(str(type(ids))) # <class 'numpy.ndarray'>
            # print(str(ids))

            if len(ids) > 0:  # at least one desired id was detected
                # Select the first detected marker and discard the others
                corners = corners[0]  # now corners is 1 by 4

                # # extract the marker corners (which are always returned
                # # in top-left, top-right, bottom-right, and bottom-left
                # # order)
                # corners = corners.reshape((4, 2))
                # (topLeft, topRight, bottomRight, bottomLeft) = corners

                # Estimate the pose of the detected marker in camera frame
                rvec, tvec, markerPoints = cv2.aruco.estimatePoseSingleMarkers(
                    corners, aruco_markersize, mtx, dist)

                # # Debug: Show the detected tag and axis in the image
                # # # cv2.aruco.drawDetectedMarkers(img, corners)  # Draw A square around the markers (Does not work)
                img1 = img.copy()
                img_out = cv2.aruco.drawAxis(img1, mtx, dist, rvec, tvec,
                                             aruco_markersize *
                                             0.75)  # Draw Axis
                imgs_out.append(img_out)

                transform_base_2_flange = rox.fwdkin(rox_robot, joints)
                transform_flange_2_marker = geom_util.pose_to_rox_transform(
                    flange_to_marker)
                transform_base_2_marker = transform_base_2_flange * transform_flange_2_marker
                transform_base_2_marker_corners = _marker_corner_poses(
                    transform_base_2_marker, aruco_markersize)
                # Append the 4 corner correspondences: base-frame corner positions and image points
                for j in range(4):
                    object_points.append(transform_base_2_marker_corners[j].p)
                    image_points.append(corners[0, j])
                #pose_pairs_dict[i] = (transform_base_2_marker_corners, corners)
                i += 1

    object_points_np = np.array(object_points, dtype=np.float64)
    image_points_np = np.array(image_points, dtype=np.float32)

    # Finally execute the calibration
    retval, rvec, tvec = cv2.solvePnP(object_points_np, image_points_np, mtx,
                                      dist)
    R_cam2base = cv2.Rodrigues(rvec)[0]
    T_cam2base = tvec

    # Add another display image of marker at robot base
    img_out = cv2.aruco.drawAxis(img, mtx, dist,
                                 cv2.Rodrigues(R_cam2base)[0], T_cam2base,
                                 aruco_markersize * 0.75)  # Draw Axis
    imgs_out.append(img_out)

    rox_transform_cam2base = rox.Transform(R_cam2base, T_cam2base,
                                           cam_pose.parent_frame_id,
                                           robot_local_device_name)
    rox_transform_world2base = cam_pose * rox_transform_cam2base

    #R_base2cam = R_cam2base.T
    #T_base2cam = - R_base2cam @ T_cam2base

    R_base2cam = rox_transform_world2base.inv().R
    T_base2cam = rox_transform_world2base.inv().p

    # debug
    print("FINAL RESULTS:")
    print("R_cam2base:")
    print(str(R_cam2base))
    print("T_cam2base:")
    print(str(T_cam2base))

    print("R_base2cam:")
    print(str(R_base2cam))
    print("T_base2cam:")
    print(str(T_base2cam))

    pose_res = geom_util.rox_transform_to_named_pose(rox_transform_world2base)
    cov = np.eye(6) * 1e-5

    imgs_out2 = [
        image_util.array_to_compressed_image_jpg(i, 70) for i in imgs_out
    ]

    return pose_res, cov, imgs_out2, 0.0
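
# For reference, the corner layout that _marker_corner_poses (not shown) is
# presumably built on: cv2.aruco returns corners in top-left, top-right,
# bottom-right, bottom-left order, and OpenCV places them at +/- markersize/2
# in the marker's own frame (z out of the tag). A minimal sketch:
import numpy as np

def marker_corner_offsets(markersize):
    h = markersize / 2.0
    # TL, TR, BR, BL in the marker frame
    return np.array([[-h, h, 0.0], [h, h, 0.0], [h, -h, 0.0], [-h, -h, 0.0]])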
Example #13
image_name="test14"
roi_name = "pick_roi4"


d = DeviceManagerClient('rr+tcp://localhost:59902?service=device_manager', autoconnect=False)

d.refresh_devices(1)

d.connect_device("variable_storage")

var_storage = d.get_device_client("variable_storage", 1)

var = var_storage.getf_variable_value("globals", image_name)

image_util = ImageUtil(client_obj=var_storage)
display_img = image_util.compressed_image_to_array(var.data)

roi = var_storage.getf_variable_value("globals", roi_name).data

roi_x = roi.center.pose[0]["position"]["x"]
roi_y = roi.center.pose[0]["position"]["y"]
roi_theta = roi.center.pose[0]["orientation"]
roi_w = roi.size[0]["width"]
roi_h = roi.size[0]["height"]
geom_roi1 = shapely.geometry.box(-roi_w/2., -roi_h/2., roi_w/2., roi_h/2., ccw=True)
geom_roi2 = shapely.affinity.translate(geom_roi1, xoff=roi_x, yoff=roi_y)
geom_roi = shapely.affinity.rotate(geom_roi2, roi_theta, origin='centroid', use_radians=True)

roi_outline = np.array([geom_roi.exterior.coords], dtype=np.int32)
display_img = cv2.polylines(display_img, roi_outline, True, color=(255, 255, 0))
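
# A quick self-check of the rotated-ROI construction above, with assumed
# values: a 100x50 box centered at (200, 150) rotated by 90 degrees.
import shapely.affinity
import shapely.geometry

b1 = shapely.geometry.box(-50., -25., 50., 25., ccw=True)
b2 = shapely.affinity.translate(b1, xoff=200., yoff=150.)
b3 = shapely.affinity.rotate(b2, 90.0, origin='centroid')  # degrees by default
assert b3.contains(shapely.geometry.Point(200., 150.))  # center stays inside
assert b3.contains(shapely.geometry.Point(220., 190.))  # inside after rotation
assert not b3.contains(shapely.geometry.Point(240., 150.))  # inside only before rotating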
Example #14
class VisionArucoDetection_impl(object):
    def __init__(self,
                 device_manager,
                 device_info=None,
                 node: RR.RobotRaconteurNode = None):
        if node is None:
            self._node = RR.RobotRaconteurNode.s
        else:
            self._node = node
        self.device_info = device_info

        self.service_path = None
        self.ctx = None

        self._detected_marker = self._node.GetStructureType(
            "tech.pyri.vision.aruco_detection.DetectedMarker")
        self._aruco_detection_result = self._node.GetStructureType(
            "tech.pyri.vision.aruco_detection.ArucoDetectionResult")
        self._pose2d_dtype = self._node.GetNamedArrayDType(
            "com.robotraconteur.geometry.Pose2D")
        self._point2d_dtype = self._node.GetNamedArrayDType(
            "com.robotraconteur.geometry.Point2D")

        self._image_util = ImageUtil(node=self._node)
        self._geom_util = GeometryUtil(node=self._node)

        self.device_manager = device_manager
        self.device_manager.connect_device_type(
            "tech.pyri.variable_storage.VariableStorage")
        self.device_manager.connect_device_type(
            "com.robotraconteur.imaging.Camera")
        self.device_manager.device_added += self._device_added
        self.device_manager.device_removed += self._device_removed
        self.device_manager.refresh_devices(5)

    def RRServiceObjectInit(self, ctx, service_path):
        self.service_path = service_path
        self.ctx = ctx

    def _device_added(self, local_device_name):
        pass

    def _device_removed(self, local_device_name):
        pass

    def _do_aruco_detection(self, img, intrinsic_global_name,
                            extrinsic_global_name, aruco_dict_str, aruco_id,
                            aruco_markersize, roi):

        intrinsic_calib = None
        extrinsic_calib = None

        if intrinsic_global_name is not None and extrinsic_global_name is not None:
            var_storage = self.device_manager.get_device_client(
                "variable_storage", 0.1)
            intrinsic_calib = var_storage.getf_variable_value(
                "globals", intrinsic_global_name).data
            extrinsic_calib = var_storage.getf_variable_value(
                "globals", extrinsic_global_name).data

        display_img = img.copy()

        assert aruco_dict_str.startswith(
            "DICT_"), "Invalid aruco dictionary name"

        aruco_dict_i = getattr(aruco,
                               aruco_dict_str)  # convert string to python
        aruco_dict = cv2.aruco.Dictionary_get(aruco_dict_i)
        aruco_params = cv2.aruco.DetectorParameters_create()
        aruco_params.cornerRefinementMethod = cv2.aruco.CORNER_REFINE_SUBPIX

        corners1, ids1, rejected = cv2.aruco.detectMarkers(
            img, aruco_dict, parameters=aruco_params)

        if corners1 is None:
            corners1 = []
        if ids1 is None:
            ids1 = []

        if aruco_id < 0:
            corners2 = corners1
            ids2 = ids1
        else:
            corners2 = []
            ids2 = []
            for id2, corner2 in zip(ids1, corners1):
                if id2 == aruco_id:
                    corners2.append(corner2)
                    ids2.append(id2)
            ids2 = np.array(ids2)

        if roi is None:
            corners = corners2
            ids = ids2
        else:
            roi_x = roi.center.pose[0]["position"]["x"]
            roi_y = roi.center.pose[0]["position"]["y"]
            roi_theta = roi.center.pose[0]["orientation"]
            roi_w = roi.size[0]["width"]
            roi_h = roi.size[0]["height"]
            geom_roi1 = shapely.geometry.box(-roi_w / 2.,
                                             -roi_h / 2.,
                                             roi_w / 2.,
                                             roi_h / 2.,
                                             ccw=True)
            geom_roi2 = shapely.affinity.translate(geom_roi1,
                                                   xoff=roi_x,
                                                   yoff=roi_y)
            geom_roi = shapely.affinity.rotate(geom_roi2,
                                               roi_theta,
                                               origin='centroid',
                                               use_radians=True)

            corners = []
            ids = []

            for id3, corner3 in zip(ids2, corners2):
                # Marker outline polygon; keep only markers fully inside the ROI
                marker_poly = shapely.geometry.Polygon([
                    shapely.geometry.Point(corner3[0, i, 0], corner3[0, i, 1])
                    for i in range(4)
                ])
                if geom_roi.contains(marker_poly):
                    corners.append(corner3)
                    ids.append(id3)
            ids = np.array(ids)

            roi_outline = np.array([geom_roi.exterior.coords], dtype=np.int32)
            display_img = cv2.polylines(display_img,
                                        roi_outline,
                                        True,
                                        color=(255, 255, 0))

        if len(ids) > 0:
            display_img = aruco.drawDetectedMarkers(display_img, corners, ids)

        poses = None
        if intrinsic_calib is not None and extrinsic_calib is not None:
            poses = []
            mtx = intrinsic_calib.K
            d = intrinsic_calib.distortion_info.data
            dist = np.array([d.k1, d.k2, d.p1, d.p2, d.k3])

            T_cam = self._geom_util.named_pose_to_rox_transform(
                extrinsic_calib.pose)

            for id4, corner4 in zip(ids, corners):
                rvec, tvec, markerPoints = cv2.aruco.estimatePoseSingleMarkers(
                    corner4, aruco_markersize, mtx, dist)

                display_img = cv2.aruco.drawAxis(display_img, mtx, dist, rvec,
                                                 tvec, 0.05)

                # R_marker1 = cv2.Rodrigues(rvec.flatten())[0]
                # TODO: 3D pose estimation from rvec is very inaccurate. Use simple
                # trigonometry to estimate the Z rotation of the tag instead.

                # compute vectors from opposite corners
                v1 = corner4[0, 2, :] - corner4[0, 0, :]
                v2 = corner4[0, 3, :] - corner4[0, 1, :]

                # Use atan2 on each vector and average
                theta1 = (np.arctan2(v1[1], v1[0]) - np.deg2rad(45)) % (2. *
                                                                        np.pi)
                theta2 = (np.arctan2(v2[1], v2[0]) - np.deg2rad(135)) % (2. *
                                                                         np.pi)

                theta = (theta1 + theta2) / 2.

                R_marker1 = rox.rot([1., 0., 0.], np.pi) @ rox.rot(
                    [0., 0., -1.], theta)

                p_marker1 = tvec.flatten()

                T_marker1 = rox.Transform(R_marker1, p_marker1, "camera",
                                          f"aruco{int(id4)}")
                T_marker = T_cam * T_marker1
                rr_marker_pose = self._geom_util.rox_transform_to_named_pose(
                    T_marker)
                poses.append(rr_marker_pose)

        ret_markers = []
        if poses is None:
            poses = [None] * len(ids)
        for id_, corner, pose in zip(ids, corners, poses):
            m = self._detected_marker()
            m.marker_id = int(id_)
            m.marker_corners = np.zeros((4, ), dtype=self._point2d_dtype)
            for i in range(4):
                m.marker_corners[i] = self._geom_util.xy_to_point2d(
                    corner[0, i, :])

            m.marker_pose = pose

            ret_markers.append(m)

        ret = self._aruco_detection_result()
        ret.detected_markers = ret_markers
        ret.display_image = self._image_util.array_to_compressed_image_jpg(
            display_img, 70)
        return ret

    def detect_aruco_stored_image(self, image_global_name,
                                  camera_calibration_intrinsic,
                                  camera_calibration_extrinsic, aruco_dict,
                                  aruco_id, aruco_markersize, roi):

        if len(camera_calibration_intrinsic) == 0:
            camera_calibration_intrinsic = None

        if len(camera_calibration_extrinsic) == 0:
            camera_calibration_extrinsic = None

        var_storage = self.device_manager.get_device_client(
            "variable_storage", 0.1)

        img_rr = var_storage.getf_variable_value("globals", image_global_name)
        img = self._image_util.compressed_image_to_array(img_rr.data)

        return self._do_aruco_detection(img, camera_calibration_intrinsic,
                                        camera_calibration_extrinsic,
                                        aruco_dict, aruco_id, aruco_markersize,
                                        roi)

    def detect_aruco_camera_capture(self, camera_local_device_name,
                                    camera_calibration_intrinsic,
                                    camera_calibration_extrinsic, aruco_dict,
                                    aruco_id, aruco_markersize, roi):

        if len(camera_calibration_intrinsic) == 0:
            camera_calibration_intrinsic = None

        if len(camera_calibration_extrinsic) == 0:
            camera_calibration_extrinsic = None

        camera_device = self.device_manager.get_device_client(
            camera_local_device_name, 1)
        img_rr = camera_device.capture_frame_compressed()
        img = self._image_util.compressed_image_to_array(img_rr)

        return self._do_aruco_detection(img, camera_calibration_intrinsic,
                                        camera_calibration_extrinsic,
                                        aruco_dict, aruco_id, aruco_markersize,
                                        roi)
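
# The diagonal-averaging trick in _do_aruco_detection can be checked in
# isolation: for an unrotated marker both corrected diagonal angles are zero.
# Corner layout assumed to match the aruco TL, TR, BR, BL ordering:
import numpy as np

corner_chk = np.array([[[0., 0.], [1., 0.], [1., 1.], [0., 1.]]])
v1 = corner_chk[0, 2, :] - corner_chk[0, 0, :]
v2 = corner_chk[0, 3, :] - corner_chk[0, 1, :]
theta1 = (np.arctan2(v1[1], v1[0]) - np.deg2rad(45)) % (2. * np.pi)
theta2 = (np.arctan2(v2[1], v2[0]) - np.deg2rad(135)) % (2. * np.pi)
assert np.isclose((theta1 + theta2) / 2., 0.0)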
Example #15

    imgs.append(cv2.imread(str(p)))  # truncated: the enclosing loop over the image paths is missing

# for img in imgs:
#     cv2.imshow("",img)
#     cv2.waitKey(250)

d = DeviceManagerClient('rr+tcp://localhost:59902?service=device_manager',
                        autoconnect=False)

d.refresh_devices(1)

d.connect_device("variable_storage")

var_storage = d.get_device_client("variable_storage", 1)

image_util = ImageUtil(client_obj=var_storage)

var_consts = var_storage.RRGetNode().GetConstants('tech.pyri.variable_storage',
                                                  var_storage)
variable_persistence = var_consts["VariablePersistence"]
variable_protection_level = var_consts["VariableProtectionLevel"]


def save_image(img, i):
    global_name = f"{seq_name}_{i}"

    if len(var_storage.filter_variables("globals", global_name, [])) > 0:
        raise RR.InvalidOperationException(
            f"Global {global_name} already exists")

    img = image_util.array_to_compressed_image_png(img)
Example #16

from pyri.device_manager_client import DeviceManagerClient
from RobotRaconteur.Client import *
from RobotRaconteurCompanion.Util.ImageUtil import ImageUtil
import time
import cv2

d = DeviceManagerClient('rr+tcp://localhost:59902?service=device_manager',
                        autoconnect=False)

d.refresh_devices(1)

d.connect_device("vision_camera_calibration")

calibration_service = d.get_device_client("vision_camera_calibration", 1)

ret = calibration_service.calibrate_camera_intrinsic(
    "camera", "intrinsic_calib_dataset0", "chessboard",
    "camera_intrinsic_calibration")

image_util = ImageUtil(client_obj=calibration_service)
i = 0
for c_img in ret.display_images:
    img = image_util.compressed_image_to_array(c_img)
    cv2.imshow(f"img {i}", img)
    cv2.waitKey()

    i += 1

cv2.destroyAllWindows()
Example #17
class CameraRobotCalibrationService_impl(object):
    def __init__(self, device_manager, device_info=None, node: RR.RobotRaconteurNode = None):
        if node is None:
            self._node = RR.RobotRaconteurNode.s
        else:
            self._node = node
        self.device_info = device_info

        self.service_path = None
        self.ctx = None

        self.device_manager = device_manager
        self.device_manager.connect_device_type("tech.pyri.variable_storage.VariableStorage")
        self.device_manager.connect_device_type("com.robotraconteur.robotics.robot.Robot")
        self.device_manager.device_added += self._device_added
        self.device_manager.device_removed += self._device_removed
        self.device_manager.refresh_devices(5)

        self.robot_util = RobotUtil(self._node)
        self.geom_util = GeometryUtil(self._node)
        self.image_util = ImageUtil(self._node)
        
    def RRServiceObjectInit(self, ctx, service_path):
        self.service_path = service_path
        self.ctx = ctx
        
    def _device_added(self, local_device_name):
        pass

    def _device_removed(self, local_device_name):
        pass

    def calibrate_robot_origin(self, robot_local_device_name, camera_intrinsic_calibration_global_name, camera_extrinsic_calibration_global_name, \
        image_sequence_global_name, aruco_dict, aruco_id, aruco_markersize, flange_to_marker, output_global_name):
        

        var_storage = self.device_manager.get_device_client("variable_storage", 1)

        var_consts = var_storage.RRGetNode().GetConstants('tech.pyri.variable_storage', var_storage)
        variable_persistence = var_consts["VariablePersistence"]
        variable_protection_level = var_consts["VariableProtectionLevel"]

        if len(output_global_name) > 0:
            if len(var_storage.filter_variables("globals", output_global_name, [])) > 0:
                raise RR.InvalidOperationException(f"Global {output_global_name} already exists")

        image_sequence = []
        joint_pos_sequence = []

        image_sequence_vars = var_storage.getf_variable_value("globals", image_sequence_global_name)
        for image_var in image_sequence_vars.data.splitlines():
            var2 = var_storage.getf_variable_value("globals", image_var)
            image_sequence.append(self.image_util.compressed_image_to_array(var2.data))
            var2_tags = var_storage.getf_variable_attributes("globals", image_var)
            var2_state_var_name = var2_tags["system_state"]
            var2_state = var_storage.getf_variable_value("globals", var2_state_var_name)
            joint_pos = None
            for s in var2_state.data.devices_states[robot_local_device_name].state:
                if s.type == "com.robotraconteur.robotics.robot.RobotState":
                    joint_pos = s.state_data.data.joint_position
            assert joint_pos is not None, "Could not find joint position in state sequence"
            joint_pos_sequence.append(joint_pos)

        cam_intrinsic_calib = var_storage.getf_variable_value("globals", camera_intrinsic_calibration_global_name).data
        cam_extrinsic_calib = var_storage.getf_variable_value("globals", camera_extrinsic_calibration_global_name).data

        mtx = cam_intrinsic_calib.K
        dist_rr = cam_intrinsic_calib.distortion_info.data
        dist = np.array([dist_rr.k1, dist_rr.k2, dist_rr.p1, dist_rr.p2, dist_rr.k3], dtype=np.float64)

        cam_pose = self.geom_util.named_pose_to_rox_transform(cam_extrinsic_calib.pose)

        robot = self.device_manager.get_device_client(robot_local_device_name, 1)
        robot_info = robot.robot_info
        rox_robot = self.robot_util.robot_info_to_rox_robot(robot_info, 0)

        # Calibrate
        robot_pose1, robot_pose_cov, img, calibration_error = calibrator.calibrate(
            image_sequence, joint_pos_sequence, aruco_dict, aruco_id, aruco_markersize,
            flange_to_marker, mtx, dist, cam_pose, rox_robot, robot_local_device_name)

        robot_pose = self._node.NewStructure("com.robotraconteur.geometry.NamedPoseWithCovariance")
        robot_pose.pose = robot_pose1
        robot_pose.covariance = robot_pose_cov

        if len(output_global_name) > 0:
            var_storage.add_variable2("globals",output_global_name,"com.robotraconteur.geometry.NamedPoseWithCovariance", \
                RR.VarValue(robot_pose,"com.robotraconteur.geometry.NamedPoseWithCovariance"), ["robot_origin_pose_calibration"], 
                {"device": robot_local_device_name}, variable_persistence["const"], None, variable_protection_level["read_write"], \
                [], f"Robot \"{robot_local_device_name}\" origin pose calibration", False)

        ret = self._node.NewStructure("tech.pyri.vision.robot_calibration.CameraRobotBaseCalibrateResult")
        ret.robot_pose = robot_pose
        ret.display_images = img
        ret.calibration_error = calibration_error

        return ret