def get_corners(camera, camera_frame, intrinsic, charuco_board):
    """
    Locate charuco-board corners for one camera and lift them to 3D.

    Parameters:
    -----------
    camera : key into camera_frame / intrinsic (device identifier)
    camera_frame : dict
        Per-camera dict of frames keyed by rs stream identifiers.
    intrinsic : dict
        Per-camera dict of stream intrinsics.
    charuco_board : the charuco board description used for detection.

    Returns:
    -----------
    corners3D : [IDs, found_corners, points3D, validPoints]
        points3D is a (3, total_corners) array indexed by corner ID;
        validPoints flags which IDs received a valid depth reading.
        When no corners are found, the middle two entries are None.
    """
    ir_frame = camera_frame[camera][(rs.stream.infrared, 1)]
    depth_frame = post_process_depth_frame(
        camera_frame[camera][rs.stream.depth])
    depth_intrinsics = intrinsic[camera][rs.stream.depth]
    list_found_corners, IDs = hf.cv_find_chessboard(depth_frame, ir_frame,
                                                    charuco_board)
    total_corners = charuco_board.chessboardCorners.shape[0]
    validPoints = [False] * total_corners
    corners3D = [IDs, None, None, validPoints]
    points3D = np.zeros((3, total_corners))
    if list_found_corners:
        found_corners = list_found_corners[0]
        for idx, raw_corner in enumerate(found_corners):
            corner_id = IDs[0][idx][0]
            pixel = raw_corner.flatten()
            depth = hf.get_depth_at_pixel(depth_frame, pixel[0], pixel[1])
            # Only corners with a usable depth reading become valid 3D points.
            if depth is not None and depth != 0:
                validPoints[corner_id] = True
                X, Y, Z = hf.convert_depth_pixel_to_metric_coordinate(
                    depth, pixel[0], pixel[1], depth_intrinsics)
                points3D[:, corner_id] = (X, Y, Z)
        corners3D = IDs, found_corners, points3D, validPoints
    return corners3D
# Example #2 (score: 0)
	def find_chessboard_boundary_for_depth_image(self):
		"""
		Compute, per device, the pixel-space bounding box of the detected
		chessboard corners in the infrared image.

		Returns:
		-----------
		boundary : dict
			keys: str
				Serial number of the device
			values: [minX, maxX, minY, maxY]
				Integer pixel bounds of the chessboard corners
		"""
		boundary = {}

		for (serial, frameset) in self.frames.items():

			depth_frame = post_process_depth_frame(frameset[rs.stream.depth])
			infrared_frame = frameset[(rs.stream.infrared, 1)]
			found_corners, points2D = cv_find_chessboard(depth_frame, infrared_frame, self.chessboard_params)
			# NOTE(review): found_corners is never checked here — if detection
			# fails, points2D may be unusable. TODO confirm callers only invoke
			# this when the chessboard is guaranteed visible on every device.
			# NOTE(review): both min and max bounds use np.floor; the max bound
			# arguably should use np.ceil to fully include the last corner —
			# confirm intent before changing.
			boundary[serial] = [np.floor(np.amin(points2D[0,:])).astype(int), np.floor(np.amax(points2D[0,:])).astype(int), np.floor(np.amin(points2D[1,:])).astype(int), np.floor(np.amax(points2D[1,:])).astype(int)]

		return boundary
# Example #3 (score: 0)
def calculate_cumulative_pointcloud(frames_devices, calibration_info_devices, roi_2d, depth_threshold = 0.01):
	"""
	Calculate the cumulative pointcloud from the multiple devices
	Parameters:
	-----------
	frames_devices : dict
		The frames from the different devices
		keys: Tuple of (serial, product-line)
			Serial number and product line of the device
		values: [frame]
			frame: rs.frame()
				The frameset obtained over the active pipeline from the realsense device

	calibration_info_devices : dict
		keys: str
			Serial number of the device
		values: [transformation_devices, intrinsics_devices]
			transformation_devices: Transformation object
				The transformation object containing the transformation information between the device and the world coordinate systems
			intrinsics_devices: rs.intrinsics
				The intrinsics of the depth_frame of the realsense device

	roi_2d : array
		The region of interest given in the following order [minX, maxX, minY, maxY]

	depth_threshold : double
		The threshold for the depth value (meters) in world-coordinates beyond which the point cloud information will not be used.
		Following the right-hand coordinate system, if the object is placed on the chessboard plane, the height of the object will increase along the negative Z-axis

	Return:
	----------
	point_cloud_cumulative : array
		(3, N) cumulative pointcloud accumulated over all devices
	"""
	# Seed with a single dummy column so np.column_stack always has a (3,)
	# base to stack onto; the dummy column is deleted before returning.
	point_cloud_cumulative = np.array([-1, -1, -1]).transpose()
	for (device_info, frame) in frames_devices.items() :
		# device_info is the (serial, product-line) key; only the serial is needed
		device = device_info[0]
		# Filter the depth_frame using the Temporal filter and get the corresponding pointcloud for each frame
		filtered_depth_frame = post_process_depth_frame(frame[rs.stream.depth], temporal_smooth_alpha=0.1, temporal_smooth_delta=80)
		point_cloud = convert_depth_frame_to_pointcloud( np.asarray( filtered_depth_frame.get_data()), calibration_info_devices[device][1][rs.stream.depth])
		point_cloud = np.asanyarray(point_cloud)

		# Get the point cloud in the world-coordinates using the transformation
		point_cloud = calibration_info_devices[device][0].apply_transformation(point_cloud)

		# Filter the point cloud based on the depth of the object
		# The object placed has its height in the negative direction of z-axis due to the right-hand coordinate system
		point_cloud = get_clipped_pointcloud(point_cloud, roi_2d)
		point_cloud = point_cloud[:,point_cloud[2,:]<-depth_threshold]
		point_cloud_cumulative = np.column_stack( ( point_cloud_cumulative, point_cloud ) )
	# Remove the dummy seed column added before the loop
	point_cloud_cumulative = np.delete(point_cloud_cumulative, 0, 1)
	return point_cloud_cumulative
# Example #4 (score: 0)
    def get_chessboard_corners_in3d(self):
        """
		Search the chessboard corners in the infrared image of every
		connected device and use the corresponding depth image to compute
		the 3D coordinates of the corners in each camera's coordinate
		system.

		Returns:
		-----------
		corners3D : dict
			keys: str
				Serial number of the device
			values: [found_corners, points2D, points3D, validPoints]
				found_corners: bool
					Indicates whether the chessboard was detected; when
					False the remaining three entries are None
				points2D: array
					Pixel coordinates of the detected corners (first axis
					indexes the coordinate, second indexes the corner)
				points3D: array
					(3, N) matrix with the coordinates of the chessboard
					corners in the coordinate system of the camera. N is
					the number of corners. Columns with invalid depth stay
					zero
				validPoints: [bool]
					Length-N sequence indicating which column of points3D
					has a valid depth value
		"""
        corners3D = {}
        for (info, frameset) in self.frames.items():
            serial = info[0]
            product_line = info[1]
            depth_frame = post_process_depth_frame(frameset[rs.stream.depth])
            # Product line "L500" exposes its infrared stream at index 0;
            # other devices use index 1.
            if product_line == "L500":
                infrared_frame = frameset[(rs.stream.infrared, 0)]
            else:
                infrared_frame = frameset[(rs.stream.infrared, 1)]
            depth_intrinsics = self.intrinsic[serial][rs.stream.depth]
            found_corners, points2D = cv_find_chessboard(
                depth_frame, infrared_frame, self.chessboard_params)
            corners3D[serial] = [found_corners, None, None, None]
            if found_corners:
                points3D = np.zeros((3, len(points2D[0])))
                validPoints = [False] * len(points2D[0])
                for index in range(len(points2D[0])):
                    corner = points2D[:, index].flatten()
                    depth = get_depth_at_pixel(depth_frame, corner[0],
                                               corner[1])
                    # Only corners with a usable depth reading are lifted to 3D.
                    if depth != 0 and depth is not None:
                        validPoints[index] = True
                        [X, Y, Z] = convert_depth_pixel_to_metric_coordinate(
                            depth, corner[0], corner[1], depth_intrinsics)
                        points3D[0, index] = X
                        points3D[1, index] = Y
                        points3D[2, index] = Z
                corners3D[
                    serial] = found_corners, points2D, points3D, validPoints
        return corners3D
def calculate_cumulative_pointcloud(frames_devices, calibration_info_devices, roi_2d, depth_threshold = 0.01):
	"""
	Calculate the cumulative pointcloud from the multiple devices
	Parameters:
	-----------
	frames_devices : dict
		The frames from the different devices
		keys: str
			Serial number of the device
		values: [frame]
			frame: rs.frame()
				The frameset obtained over the active pipeline from the realsense device

	calibration_info_devices : dict
		keys: str
			Serial number of the device
		values: [transformation_devices, intrinsics_devices]
			transformation_devices: Transformation object
				The transformation object containing the transformation information between the device and the world coordinate systems
			intrinsics_devices: rs.intrinsics
				The intrinsics of the depth_frame of the realsense device

	roi_2d : array
		The region of interest given in the following order [minX, maxX, minY, maxY]

	depth_threshold : double
		The threshold for the depth value (meters) in world-coordinates beyond which the point cloud information will not be used.
		Following the right-hand coordinate system, if the object is placed on the chessboard plane, the height of the object will increase along the negative Z-axis

	Return:
	----------
	point_cloud_cumulative : array
		(3, N) cumulative pointcloud accumulated over all devices
	"""
	# Seed with a single dummy column so np.column_stack always has a (3,)
	# base to stack onto; the dummy column is deleted before returning.
	point_cloud_cumulative = np.array([-1, -1, -1]).transpose()
	for (device, frame) in frames_devices.items() :
		# Filter the depth_frame using the Temporal filter and get the corresponding pointcloud for each frame
		filtered_depth_frame = post_process_depth_frame(frame[rs.stream.depth], temporal_smooth_alpha=0.1, temporal_smooth_delta=80)
		point_cloud = convert_depth_frame_to_pointcloud( np.asarray( filtered_depth_frame.get_data()), calibration_info_devices[device][1][rs.stream.depth])
		point_cloud = np.asanyarray(point_cloud)

		# Get the point cloud in the world-coordinates using the transformation
		point_cloud = calibration_info_devices[device][0].apply_transformation(point_cloud)

		# Filter the point cloud based on the depth of the object
		# The object placed has its height in the negative direction of z-axis due to the right-hand coordinate system
		point_cloud = get_clipped_pointcloud(point_cloud, roi_2d)
		point_cloud = point_cloud[:,point_cloud[2,:]<-depth_threshold]
		point_cloud_cumulative = np.column_stack( ( point_cloud_cumulative, point_cloud ) )
	# Remove the dummy seed column added before the loop
	point_cloud_cumulative = np.delete(point_cloud_cumulative, 0, 1)
	return point_cloud_cumulative
def get_transformation_matrix(frames_dict, camera_set, intrinsic,
                              charuco_board):
    """
    Estimate the rigid transformation between two cameras from charuco
    corners matched across multiple frames.

    Parameters:
    -----------
    frames_dict : dict
        Frame sets indexed by frame count; each entry maps camera -> frames.
    camera_set : sequence of two camera identifiers
        The transformation maps points from camera_set[1] ("from") into the
        coordinate system of camera_set[0] ("to").
    intrinsic : dict
        Per-camera stream intrinsics.
    charuco_board : charuco board description used for detection.

    Returns:
    -----------
    (Transformation, rmsd) on success, or (0, np.inf) when too few
    corner correspondences were collected.
    """
    # Seed rows so np.vstack has a (3,) base; they are sliced off ([1:, :])
    # before the Kabsch fit.
    from_camera = np.transpose(np.array([0, 0, 0]))
    to_camera = np.transpose(np.array([0, 0, 0]))
    for frame_count in range(len(frames_dict)):
        camera_frame = frames_dict[frame_count]
        corners3D = {}
        for camera in camera_set:
            ir_frame = camera_frame[camera][(rs.stream.infrared, 1)]
            depth_frame = post_process_depth_frame(
                camera_frame[camera][rs.stream.depth])
            depth_intrinsics = intrinsic[camera][rs.stream.depth]
            list_found_corners, IDs = cv_find_chessboard(
                depth_frame, ir_frame, charuco_board)
            validPoints = [False] * charuco_board.chessboardCorners.shape[0]
            corners3D[camera] = [IDs, None, None, validPoints]
            points3D = np.zeros((3, charuco_board.chessboardCorners.shape[0]))
            if list_found_corners:
                found_corners = list_found_corners[0]
                for index in range(len(found_corners)):
                    theID = IDs[0][index][0]
                    corner = found_corners[index].flatten()
                    depth = get_depth_at_pixel(depth_frame, corner[0],
                                               corner[1])
                    # Only corners with a usable depth reading become valid.
                    if depth != 0 and depth is not None:
                        validPoints[theID] = True
                        [X, Y, Z] = convert_depth_pixel_to_metric_coordinate(
                            depth, corner[0], corner[1], depth_intrinsics)
                        points3D[0, theID] = X
                        points3D[1, theID] = Y
                        points3D[2, theID] = Z
                corners3D[camera] = IDs, found_corners, points3D, validPoints
        # Keep only corner IDs that are valid (index 3) in BOTH cameras and
        # collect their 3D coordinates (index 2) as matched point pairs.
        for corner in range(len(charuco_board.nearestMarkerCorners)):
            if corners3D[camera_set[0]][3][corner] and corners3D[
                    camera_set[1]][3][corner]:
                to_camera = np.vstack(
                    (to_camera, np.array(corners3D[camera_set[0]][2][:,
                                                                     corner])))
                from_camera = np.vstack(
                    (from_camera,
                     np.array(corners3D[camera_set[1]][2][:, corner])))
    # size > 30 means more than 10 stacked rows of 3 values (including the
    # seed row), i.e. at least 10 matched corner pairs.
    if np.size(from_camera) > 30:
        rotation, translation, rmsd = calculate_transformation_kabsch(
            np.transpose(to_camera[1:, :]), np.transpose(from_camera[1:, :]))
        return Transformation(rotation, translation), rmsd

    else:
        return 0, np.inf
	def get_chessboard_corners_in3d(self):
		"""
		Search the chessboard corners in the infrared image of every
		connected device and use the corresponding depth image to compute
		the 3D coordinates of the corners in the camera's coordinate system.

		Returns:
		-----------
		corners3D : dict
			keys: str
				Serial number of the device
			values: [found_corners, points2D, points3D, validPoints]
				found_corners: bool
					Indicates whether the chessboard was detected; when
					False the remaining three entries are None
				points2D: array
					Pixel coordinates of the detected corners (first axis
					indexes the coordinate, second indexes the corner)
				points3D: array
					(3, N) matrix with the coordinates of the chessboard
					corners in the coordinate system of the camera. N is
					the number of corners. Columns with invalid depth stay
					zero
				validPoints: [bool]
					Length-N sequence indicating which column of points3D
					has a valid depth value
		"""
		corners3D = {}
		for (serial, frameset) in self.frames.items():
			depth_frame = post_process_depth_frame(frameset[rs.stream.depth])
			infrared_frame = frameset[(rs.stream.infrared, 1)]
			depth_intrinsics = self.intrinsic[serial][rs.stream.depth]
			found_corners, points2D = cv_find_chessboard(depth_frame, infrared_frame, self.chessboard_params)
			corners3D[serial] = [found_corners, None, None, None]
			if found_corners:
				points3D = np.zeros((3, len(points2D[0])))
				validPoints = [False] * len(points2D[0])
				for index in range(len(points2D[0])):
					corner = points2D[:,index].flatten()
					depth = get_depth_at_pixel(depth_frame, corner[0], corner[1])
					# Only corners with a usable depth reading are lifted to 3D.
					if depth != 0 and depth is not None:
						validPoints[index] = True
						[X,Y,Z] = convert_depth_pixel_to_metric_coordinate(depth, corner[0], corner[1], depth_intrinsics)
						points3D[0, index] = X
						points3D[1, index] = Y
						points3D[2, index] = Z
				corners3D[serial] = found_corners, points2D, points3D, validPoints
		return corners3D
def get_charuco_points(camera_frame, transform, intrinsic, charuco_board):
    """
    Detect charuco corners in one camera's frames and return their 3D
    coordinates transformed into the world frame.

    Parameters:
    -----------
    camera_frame : dict of frames keyed by rs stream identifiers
    transform : Transformation object mapping camera to world coordinates
    intrinsic : dict of stream intrinsics for this camera
    charuco_board : charuco board description used for detection

    Returns:
    -----------
    (3, N) array of detected corner coordinates in world space, or an empty
    array when no corners (or none with valid depth) are found.
    """
    ir_frame = camera_frame[(rs.stream.infrared, 1)]
    depth_frame = post_process_depth_frame(camera_frame[rs.stream.depth])
    depth_intrinsics = intrinsic[rs.stream.depth]
    list_found_corners, IDs = cv_find_chessboard(depth_frame, ir_frame,
                                                 charuco_board)
    # Seed row so np.vstack has a base; sliced off before returning.
    board_points = np.array([0, 0, 0])
    if list_found_corners:
        found_corners = list_found_corners[0]
        for idx in range(len(found_corners)):
            corner = found_corners[idx].flatten()
            depth = get_depth_at_pixel(depth_frame, corner[0], corner[1])
            # Fix: skip corners without a valid depth reading. A zero/None
            # depth would otherwise inject a bogus point near the camera
            # origin (the sibling helpers all apply this same guard).
            if depth is None or depth == 0:
                continue
            coords = convert_depth_pixel_to_metric_coordinate(
                depth, corner[0], corner[1], depth_intrinsics)
            board_points = np.vstack((board_points, coords))

        # If every detected corner lacked valid depth, nothing was stacked.
        if board_points.ndim == 1:
            return np.array([])
        return transform.apply_transformation(np.transpose(
            board_points[1:, :]))
    else:
        return np.array([])
def get_charuco_points_ID(camera_frame, transform, intrinsic, charuco_board):
    """
    Detect charuco corners in one camera's frames and return their 3D world
    coordinates arranged by corner ID, plus a validity mask.

    Parameters:
    -----------
    camera_frame : dict of frames keyed by rs stream identifiers
    transform : Transformation object mapping camera to world coordinates
    intrinsic : dict of stream intrinsics for this camera
    charuco_board : charuco board description used for detection

    Returns:
    -----------
    (board_points, validPoints)
        board_points : (3, total_corners) array of world coordinates indexed
            by corner ID (empty array when nothing was detected)
        validPoints : boolean array flagging which IDs were detected with a
            valid depth reading
    """
    ir_frame = camera_frame[(rs.stream.infrared, 1)]
    depth_frame = post_process_depth_frame(camera_frame[rs.stream.depth])
    depth_intrinsics = intrinsic[rs.stream.depth]
    list_found_corners, IDs = cv_find_chessboard(depth_frame, ir_frame,
                                                 charuco_board)
    board_points = np.array(
        np.zeros((3, charuco_board.chessboardCorners.shape[0])))
    validPoints = np.array([False] * charuco_board.chessboardCorners.shape[0])
    if list_found_corners:
        found_corners = list_found_corners[0]
        for idx in range(len(found_corners)):
            corner = found_corners[idx].flatten()
            ID = IDs[0][idx][0]
            depth = get_depth_at_pixel(depth_frame, corner[0], corner[1])
            # Fix: check depth validity BEFORE marking the ID valid. The
            # original marked validPoints[ID] unconditionally, so a zero/None
            # depth produced a "valid" but bogus coordinate (the sibling
            # helpers all guard on depth before trusting the point).
            if depth is None or depth == 0:
                continue
            validPoints[ID] = True
            coords = convert_depth_pixel_to_metric_coordinate(
                depth, corner[0], corner[1], depth_intrinsics)
            board_points[:, ID] = coords

        return transform.apply_transformation(board_points), validPoints
    else:
        return np.array([]), np.array([False] *
                                      charuco_board.chessboardCorners.shape[0])
# Example #10 (score: 0)
    def calibrate(self, amount_frames=150):
        """
        Calibrate the cameras by estimating a transformation matrix between
        every camera pair.

        Repeatedly captures `amount_frames` framesets from all devices,
        estimates pairwise transformations from the charuco board, and keeps
        the lowest-RMS estimate per pair. Loops until least_error_transfroms
        (shortest-path search over the RMS graph) can connect every camera to
        the camera with the lowest serial number; the function does not
        return until all matrices are found.

        Parameters:
        -----------
        amount_frames : int
            Number of framesets captured per calibration round.

        Returns:
        -----------
        transformation_devices : dict
            Transformation objects mapping each camera to the lowest-serial
            camera, keyed by the "from" camera's serial number.

        Author: Sindre Skaar
        """
        # Make a dict to store all images for calibration
        frame_dict = {}
        transform_dict = {}
        rms_dict = {}
        # Initialise the pairwise RMS table to infinity so any real estimate wins.
        for from_device in self.device_list:
            transform_dict[from_device] = {}
            rms_dict[from_device] = {}
            for to_device in self.device_list:
                transform_dict[from_device][to_device] = {}
                rms_dict[from_device][to_device] = np.inf
        print("Starting to take images in 5 seconds")
        time.sleep(5)
        devices_stitched = False

        while not devices_stitched:
            print("taking new set of  images")
            for frame_count in range(amount_frames):
                print("taking image")
                print(amount_frames - frame_count, "images left")
                frames = self.device_manager.poll_frames_keep()
                time.sleep(0.5)
                frame_dict[frame_count] = {}
                for device in self.device_list:
                    ir_frame = np.asanyarray(frames[device][(rs.stream.infrared, 1)].get_data())
                    depth_frame = np.asanyarray(
                        post_process_depth_frame(frames[device][rs.stream.depth]).get_data())
                    frame_dict[frame_count][device] = {'ir_frame': ir_frame, 'depth_frame': depth_frame}
                del frames

            # Make transfer matrices between all possible cameras
            for idx, from_device in enumerate(self.device_list[:-1]):
                for to_device in self.device_list[idx + 1:]:
                    if to_device != from_device:
                        temp_transform, temp_rms = get_transformation_matrix_wout_rsobject(frame_dict,
                                                                                           [from_device, to_device],
                                                                                           self.intrinsics_devices,
                                                                                           self.charuco_boards['charuco_board'])
                        # Keep only the best (lowest RMS) estimate per pair,
                        # storing both directions.
                        if temp_rms < rms_dict[from_device][to_device]:
                            rms_dict[from_device][to_device] = temp_rms
                            rms_dict[to_device][from_device] = temp_rms
                            transform_dict[from_device][to_device] = temp_transform
                            transform_dict[to_device][from_device] = temp_transform.inverse()

            # Use Dijkstra to find shortest path and check if all cameras are connected
            transformation_matrices = least_error_transfroms(transform_dict, rms_dict)
            if transformation_matrices != 0:
                devices_stitched = True
            # Prints rms error between camera transforms
            test = matrix_viewer(rms_dict)
            print(test)

        # Turns transformation matrices into Transfomation objects
        transformation_devices = {}
        for matrix in transformation_matrices:
            the_matirx = transformation_matrices[matrix]
            transformation_devices[matrix] = Transformation(the_matirx[:3, :3], the_matirx[:3, 3])

        self.transformation_devices = transformation_devices

        save_calibration = input('Press "y" if you want to save calibration \n')
        if save_calibration == "y":
            saved = False
            while not saved:
                name = input("Type in name of file to save. remember to end name with '.npy' \n")
                try:
                    np.save(name, transformation_matrices)
                    saved = True
                # Fix: narrowed from a bare `except:` so KeyboardInterrupt /
                # SystemExit are no longer swallowed while retrying.
                except Exception:
                    print("could not save, try another name and remember '.npy' in the end")

        frame_dict.clear()
        self.cameras_calibrated = True
        return self.transformation_devices
def main():
    """
    Entry point: stream from all connected RealSense devices, calibrate the
    cameras against a charuco board (or load a previously saved calibration),
    then loop over user-selected visualisations (RMS error, chessboard,
    pointcloud, robot-to-camera calibration). The final loop runs until the
    process is interrupted.
    """

    # First we set up the cameras to start streaming
    # Define some constants
    resolution_width = 1280  # pixels
    resolution_height = 720  # pixels
    frame_rate = 30  # fps
    dispose_frames_for_stablisation = 30  # frames

    # Enable the streams from all the intel realsense devices
    rs_config = rs.config()
    rs_config.enable_stream(rs.stream.depth, resolution_width,
                            resolution_height, rs.format.z16, frame_rate)
    rs_config.enable_stream(rs.stream.infrared, 1, resolution_width,
                            resolution_height, rs.format.y8, frame_rate)
    rs_config.enable_stream(rs.stream.color, resolution_width,
                            resolution_height, rs.format.bgr8, frame_rate)

    # Use the device manager class to enable the devices and get the frames
    device_manager = DeviceManager(rs.context(), rs_config)
    device_manager.enable_all_devices()
    device_manager._available_devices.sort()
    device_list = device_manager._available_devices

    # Allow some frames for the auto-exposure controller to stablise
    for frame in range(dispose_frames_for_stablisation):
        frames = device_manager.poll_frames_keep()

    assert (len(device_manager._available_devices) > 0)

    # Then we calibrate the images

    # Get the intrinsics of the realsense device
    intrinsics_devices = device_manager.get_device_intrinsics(frames)

    # Set the charuco board parameters for calibration
    aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_250)
    charuco_width = 8
    charuco_height = 5
    square_length = 0.03425
    marker_length = .026

    coordinate_dimentions = 3
    charuco_board = aruco.CharucoBoard_create(charuco_width, charuco_height,
                                              square_length, marker_length,
                                              aruco_dict)
    # Set the charuco board parameters for robot
    aruco_dict_r = aruco.Dictionary_get(aruco.DICT_4X4_250)
    charuco_width_r = 3
    charuco_height_r = 3
    square_length_r = 0.0311
    marker_length_r = .0247

    charuco_board_robot = aruco.CharucoBoard_create(charuco_width_r,
                                                    charuco_height_r,
                                                    square_length_r,
                                                    marker_length_r,
                                                    aruco_dict_r)

    # Set the charuco board parameters for robot
    aruco_dict_ro = aruco.Dictionary_get(aruco.DICT_5X5_100)
    charuco_width_ro = 3
    charuco_height_ro = 3
    square_length_ro = 0.0311
    marker_length_ro = .0247

    charuco_board_robot_2 = aruco.CharucoBoard_create(charuco_width_ro,
                                                      charuco_height_ro,
                                                      square_length_ro,
                                                      marker_length_ro,
                                                      aruco_dict_ro)

    # Decide if you want to use previous calibration or make a new one
    calibration_decision = input(
        'To use previous calibration, press "y". For new calibration press any key \n'
    )
    if calibration_decision != 'y':
        # Choose amount of frames to average
        correct_input = False
        while not correct_input:
            the_input = input(
                "Write amount of frames you want to use to calibrate \n")
            try:
                amount_frames = int(the_input)
                if amount_frames > 0:
                    correct_input = True
                else:
                    print("Frame count has to be positive")
            except ValueError:
                print('Input has to be int')

        # Make a dict to store all images for calibration
        frame_dict = {}
        transform_dict = {}
        rms_dict = {}
        # Initialise the pairwise RMS table to infinity so any real estimate wins.
        for from_device in device_list:
            transform_dict[from_device] = {}
            rms_dict[from_device] = {}
            for to_device in device_list:
                transform_dict[from_device][to_device] = {}
                rms_dict[from_device][to_device] = np.inf
        print("Starting to take images in 5 seconds")
        time.sleep(5)
        devices_stitched = False

        while not devices_stitched:
            print("taking new set of  images")
            for frame_count in range(amount_frames):
                print("taking image")
                print(amount_frames - frame_count, "images left")
                frames = device_manager.poll_frames_keep()
                time.sleep(0.5)
                frame_dict[frame_count] = {}
                for device in device_list:
                    ir_frame = np.asanyarray(
                        frames[device][(rs.stream.infrared, 1)].get_data())
                    depth_frame = np.asanyarray(
                        post_process_depth_frame(
                            frames[device][rs.stream.depth]).get_data())
                    frame_dict[frame_count][device] = {
                        'ir_frame': ir_frame,
                        'depth_frame': depth_frame
                    }
                del frames

            # Make transfer matrices between all possible cameras
            for idx, from_device in enumerate(device_list[:-1]):
                for to_device in device_list[idx + 1:]:
                    if to_device != from_device:
                        temp_transform, temp_rms = get_transformation_matrix_wout_rsobject(
                            frame_dict, [from_device, to_device],
                            intrinsics_devices, charuco_board)
                        # Keep only the best (lowest RMS) estimate per pair,
                        # storing both directions.
                        if temp_rms < rms_dict[from_device][to_device]:
                            rms_dict[from_device][to_device] = temp_rms
                            rms_dict[to_device][from_device] = temp_rms
                            transform_dict[from_device][
                                to_device] = temp_transform
                            transform_dict[to_device][
                                from_device] = temp_transform.inverse()

            # Use Dijkstra's to find shortest path and check if all cameras are connected
            transformation_matrices = least_error_transfroms(
                transform_dict, rms_dict)
            if transformation_matrices != 0:
                devices_stitched = True
            # Prints rms error between camera transforms
            test = matrix_viewer(rms_dict)
            print(test)

        # Turns transformation matrices into Transfomation objects
        transformation_devices = {}
        for matrix in transformation_matrices:
            the_matirx = transformation_matrices[matrix]
            transformation_devices[matrix] = Transformation(
                the_matirx[:3, :3], the_matirx[:3, 3])

        # Saves calibration transforms if the user wants to
        save_calibration = input(
            'Press "y" if you want to save calibration \n')
        if save_calibration == "y":
            # Fix: removed the dead `calibration_name = input` line, which
            # assigned the input *function* (missing parentheses) and was
            # never used.
            saved = False
            while not saved:
                name = input(
                    "Type in name of file to save. remember to end name with '.npy' \n"
                )
                try:
                    np.save(name, transformation_matrices)
                    saved = True
                # Fix: narrowed from a bare `except:` so KeyboardInterrupt /
                # SystemExit are no longer swallowed while retrying.
                except Exception:
                    print(
                        "could not save, try another name and remember '.npy' in the end"
                    )

        frame_dict.clear()
    else:
        correct_filename = False
        while not correct_filename:
            file_name = input('Type in calibration file name \n')
            try:
                transfer_matirices_save = np.load(file_name, allow_pickle=True)
                # np.save wraps the dict in a 0-d object array; [()] unwraps it.
                transfer_matrices = transfer_matirices_save[()]
                correct_filename = True
            # Fix: narrowed from a bare `except:` so KeyboardInterrupt /
            # SystemExit are no longer swallowed while retrying.
            except Exception:
                print('No such file in directory: "', file_name, '"')
        transformation_devices = {}
        for matrix in transfer_matrices:
            the_matrix = transfer_matrices[matrix]
            transformation_devices[matrix] = Transformation(
                the_matrix[:3, :3], the_matrix[:3, 3])

    print("Calibration completed...")

    # Enable the emitter of the devices and extract serial numbers to identify cameras
    device_manager.enable_emitter(True)
    key_list = device_manager.poll_frames().keys()

    # Interactive visualisation loop; runs until the process is interrupted.
    while True:
        visualisation = input(
            'Presss "1" for RMS error, "2" for chessboard visualisation and "3" for 3d pointcloud and "4" for robot to camea calibration\n'
        )

        if visualisation == '1':

            calculate_avg_rms_error(device_list, device_manager,
                                    transformation_devices, charuco_board,
                                    intrinsics_devices)

        elif visualisation == '2':

            visualise_chessboard(device_manager, device_list,
                                 intrinsics_devices, charuco_board,
                                 transformation_devices)

        elif visualisation == '3':

            visualise_point_cloud(key_list, resolution_height,
                                  resolution_width, device_manager,
                                  coordinate_dimentions,
                                  transformation_devices)
        elif visualisation == '4':
            robot_calibration = CameraRobotCalibration(transformation_devices,
                                                       device_manager,
                                                       charuco_board_robot_2,
                                                       charuco_board_robot)
            input("Set target in position for calibration\n")
            test = robot_calibration.test_functions()
            print(test)

        else:
            print("Input not recognised")