def get_transform_short(to_camera, from_camera):
    """Fit a rigid transform between two stacks of corresponding 3-D points.

    Both arguments are Nx3 arrays whose first row is a placeholder seed
    row (skipped before fitting).

    Returns:
        (Transformation, rmsd) on success, or (0, np.inf) when the point
        stacks contain 30 or fewer elements (i.e. 10 or fewer 3-D points).
    """
    # Not enough correspondences to trust a Kabsch fit.
    if np.size(from_camera) <= 30:
        return 0, np.inf

    # Drop the seed row and transpose to 3xN, as Kabsch expects.
    dst_points = np.transpose(to_camera[1:, :])
    src_points = np.transpose(from_camera[1:, :])
    rotation, translation, rmsd = calculate_transformation_kabsch(
        dst_points, src_points)
    return Transformation(rotation, translation), rmsd
def get_transformation_matrix_wout_rsobject(frames_dict, camera_set, intrinsic,
                                            charuco_board):
    """Estimate the rigid transform between two cameras from stored numpy frames.

    Works on pre-extracted numpy IR/depth images (no live realsense objects),
    locating ChArUco corners in both cameras' frames, deprojecting them to 3-D,
    and fitting a Kabsch transform over all frames' shared valid corners.

    Args:
        frames_dict: dict indexed 0..N-1; each entry maps camera serial ->
            {'ir_frame': np.ndarray, 'depth_frame': np.ndarray}.
        camera_set: two-element list [to_camera_serial, from_camera_serial].
        intrinsic: per-camera intrinsics dict, keyed by rs.stream.depth.
        charuco_board: the aruco CharucoBoard used for detection.

    Returns:
        (Transformation, rmsd) on success, or (0, np.inf) when 90 or fewer
        point elements (30 or fewer 3-D points) were matched.
    """
    # Seed rows so np.vstack has something to stack onto; stripped before Kabsch.
    from_camera = np.transpose(np.array([0, 0, 0]))
    to_camera = np.transpose(np.array([0, 0, 0]))
    for frame_count in range(len(frames_dict)):
        camera_frame = frames_dict[frame_count]
        corners3D = {}
        for camera in camera_set:
            ir_frame = camera_frame[camera]['ir_frame']
            depth_frame = camera_frame[camera]['depth_frame']
            depth_intrinsics = intrinsic[camera][rs.stream.depth]
            list_found_corners, IDs = cv_find_chessboard_np(
                depth_frame, ir_frame, charuco_board)
            validPoints = [False] * charuco_board.chessboardCorners.shape[0]
            # Placeholder entry in case no corners are found this frame.
            corners3D[camera] = [IDs, None, None, validPoints]
            points3D = np.zeros((3, charuco_board.chessboardCorners.shape[0]))
            if list_found_corners:
                found_corners = list_found_corners[0]
                for index in range(len(found_corners)):
                    theID = IDs[0][index][0]
                    corner = found_corners[index].flatten()
                    # Bugfix: a raw integer depth read (divided by 1000) was
                    # computed here and immediately overwritten; only the
                    # subpixel-interpolated depth is actually used.
                    depth = subpixel_depth(depth_frame, corner[1], corner[0])
                    if depth != 0 and depth is not None:
                        validPoints[theID] = True
                        [X, Y, Z] = convert_depth_pixel_to_metric_coordinate(
                            depth, corner[0], corner[1], depth_intrinsics)
                        points3D[0, theID] = X
                        points3D[1, theID] = Y
                        points3D[2, theID] = Z
                corners3D[camera] = IDs, found_corners, points3D, validPoints
        # Keep only corners seen (with valid depth) by BOTH cameras.
        for corner in range(len(charuco_board.nearestMarkerCorners)):
            if corners3D[camera_set[0]][3][corner] and corners3D[
                    camera_set[1]][3][corner]:
                to_camera = np.vstack(
                    (to_camera, np.array(corners3D[camera_set[0]][2][:, corner])))
                from_camera = np.vstack(
                    (from_camera, np.array(corners3D[camera_set[1]][2][:, corner])))
    # Require more than 30 shared 3-D points (90 array elements) to fit.
    if np.size(from_camera) > 90:
        rotation, translation, rmsd = calculate_transformation_kabsch(
            np.transpose(to_camera[1:, :]), np.transpose(from_camera[1:, :]))
        print(np.size(from_camera), 'points found. TRANSFORM MATRIX MADE')
        return Transformation(rotation, translation), rmsd
    else:
        print("Only", np.size(from_camera), 'points found. COMPARISON DUMPED')
        return 0, np.inf
def get_transformation_matrix(frames_dict, camera_set, intrinsic,
                              charuco_board):
    """Estimate the rigid transform between two cameras from live rs frames.

    Same algorithm as get_transformation_matrix_wout_rsobject, but operates on
    pyrealsense2 frame objects (post-processing the depth frame on the fly)
    rather than pre-extracted numpy arrays.

    Args:
        frames_dict: dict indexed 0..N-1 of per-frame {camera: stream frames}.
        camera_set: two-element list [to_camera_serial, from_camera_serial].
        intrinsic: per-camera intrinsics dict, keyed by rs.stream.depth.
        charuco_board: the aruco CharucoBoard used for detection.

    Returns:
        (Transformation, rmsd) on success, or (0, np.inf) when 30 or fewer
        point elements were matched across all frames.
    """
    # Seed rows so np.vstack has something to stack onto; stripped before Kabsch.
    from_camera = np.transpose(np.array([0, 0, 0]))
    to_camera = np.transpose(np.array([0, 0, 0]))
    for frame_count in range(len(frames_dict)):
        camera_frame = frames_dict[frame_count]
        corners3D = {}
        for camera in camera_set:
            ir_frame = camera_frame[camera][(rs.stream.infrared, 1)]
            depth_frame = post_process_depth_frame(
                camera_frame[camera][rs.stream.depth])
            depth_intrinsics = intrinsic[camera][rs.stream.depth]
            list_found_corners, IDs = cv_find_chessboard(
                depth_frame, ir_frame, charuco_board)
            validPoints = [False] * charuco_board.chessboardCorners.shape[0]
            # Placeholder entry in case no corners are found this frame.
            corners3D[camera] = [IDs, None, None, validPoints]
            points3D = np.zeros((3, charuco_board.chessboardCorners.shape[0]))
            if list_found_corners:
                found_corners = list_found_corners[0]
                for index in range(len(found_corners)):
                    theID = IDs[0][index][0]
                    corner = found_corners[index].flatten()
                    depth = get_depth_at_pixel(depth_frame, corner[0],
                                               corner[1])
                    if depth != 0 and depth is not None:
                        validPoints[theID] = True
                        # Deproject pixel + depth to metric 3-D, stored per
                        # corner ID so both cameras' columns line up.
                        [X, Y, Z] = convert_depth_pixel_to_metric_coordinate(
                            depth, corner[0], corner[1], depth_intrinsics)
                        points3D[0, theID] = X
                        points3D[1, theID] = Y
                        points3D[2, theID] = Z
                corners3D[camera] = IDs, found_corners, points3D, validPoints
        # Keep only corners seen (with valid depth) by BOTH cameras.
        for corner in range(len(charuco_board.nearestMarkerCorners)):
            if corners3D[camera_set[0]][3][corner] and corners3D[
                    camera_set[1]][3][corner]:
                to_camera = np.vstack(
                    (to_camera, np.array(corners3D[camera_set[0]][2][:, corner])))
                from_camera = np.vstack(
                    (from_camera, np.array(corners3D[camera_set[1]][2][:, corner])))
    # Require more than 10 shared 3-D points (30 array elements) to fit.
    if np.size(from_camera) > 30:
        rotation, translation, rmsd = calculate_transformation_kabsch(
            np.transpose(to_camera[1:, :]), np.transpose(from_camera[1:, :]))
        return Transformation(rotation, translation), rmsd
    else:
        return 0, np.inf
def main():
    """Calibrate all connected realsense cameras against each other, then
    stream a stitched, down-sampled point cloud in an Open3D window."""
    # First we set up the cameras to start streaming
    # Define some constants
    resolution_width = 1280  # pixels
    resolution_height = 720  # pixels
    frame_rate = 30  # fps
    dispose_frames_for_stablisation = 30  # frames

    # Enable the streams from all the intel realsense devices
    rs_config = rs.config()
    rs_config.enable_stream(rs.stream.depth, resolution_width,
                            resolution_height, rs.format.z16, frame_rate)
    rs_config.enable_stream(rs.stream.infrared, 1, resolution_width,
                            resolution_height, rs.format.y8, frame_rate)
    rs_config.enable_stream(rs.stream.color, resolution_width,
                            resolution_height, rs.format.bgr8, frame_rate)

    # Use the device manager class to enable the devices and get the frames
    device_manager = DeviceManager(rs.context(), rs_config)
    device_manager.enable_all_devices()
    device_manager._available_devices.sort()

    # Allow some frames for the auto-exposure controller to stablise
    for frame in range(dispose_frames_for_stablisation):
        frames = device_manager.poll_frames()

    assert (len(device_manager._available_devices) > 0)

    # Then we calibrate the images
    # Get the intrinsics of the realsense device
    intrinsics_devices = device_manager.get_device_intrinsics(frames)

    # Set the charuco board parameters for calibration
    aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_250)
    charuco_width = 8
    charuco_height = 5
    square_length = 0.03425
    marker_length = .026
    coordinate_dimentions = 3
    charuco_board = aruco.CharucoBoard_create(charuco_width, charuco_height,
                                              square_length, marker_length,
                                              aruco_dict)

    # Estimate the pose of the cameras compared to the first camera in the list
    amount_devices = len(device_manager._available_devices)
    transformation_matrix = {}
    rms_matrix = {}
    for device in device_manager._available_devices:
        transformation_matrix[device] = {}
        rms_matrix[device] = {}
        for device2 in device_manager._available_devices:
            rms_matrix[device][device2] = np.inf
    devices_stitched = False
    # Keep re-estimating poses until every camera is connected to the graph.
    while not devices_stitched:
        frames = device_manager.poll_frames()
        pose_estimator = PoseEstimation(frames, intrinsics_devices,
                                        charuco_board)
        transformation_result_kabsch = pose_estimator.perform_pose_estimation()
        object_point = pose_estimator.get_chessboard_corners_in3d()
        calibrated_device_count = 0
        for device in device_manager._available_devices:
            if not transformation_result_kabsch[device][0]:
                print("Device", device, "needs to be calibrated")
            else:
                # If this is the first camera in the list
                if calibrated_device_count == 0:
                    source_matrix = transformation_result_kabsch[device][1]
                    source_device = device
                    source_rms = transformation_result_kabsch[device][3]
                else:
                    # If new estimate is better than previous
                    if source_rms + transformation_result_kabsch[device][
                            3] < rms_matrix[device][source_device]:
                        # Record the combined rms symmetrically for the pair.
                        rms_matrix[source_device][
                            device] = source_rms + transformation_result_kabsch[
                                device][3]
                        rms_matrix[device][
                            source_device] = source_rms + transformation_result_kabsch[
                                device][3]
                        slave_transfrom = transformation_result_kabsch[device][
                            1].inverse()
                        multiplied_transform = np.matmul(
                            source_matrix.pose_mat, slave_transfrom.pose_mat)
                        Multiplied_transform = Transformation(
                            multiplied_transform[:3, :3],
                            multiplied_transform[:3, 3])
                        transformation_matrix[device][
                            source_device] = Multiplied_transform
                        # NOTE(review): this repeats the identical matmul
                        # computed just above; the result only differs by the
                        # .inverse() applied below — could reuse
                        # multiplied_transform.
                        temp_inverted_matrix = np.matmul(
                            source_matrix.pose_mat, slave_transfrom.pose_mat)
                        inverted_transform = Transformation(
                            temp_inverted_matrix[:3, :3],
                            temp_inverted_matrix[:3, 3])
                        transformation_matrix[source_device][
                            device] = inverted_transform.inverse()
                calibrated_device_count += 1

        # Check if all devices are stitched together
        transformation_devices = least_error_transfroms(
            transformation_matrix, rms_matrix)
        if transformation_devices != 0:
            devices_stitched = True
        test = matrix_viewer(rms_matrix)
        print(test)
    print("Calibration completed...\n")

    # Enable the emitter of the devices and extract serial numbers to identify cameras
    device_manager.enable_emitter(True)
    key_list = device_manager.poll_frames().keys()

    pcd = o3d.geometry.PointCloud()
    # enable visualiser
    vis = o3d.visualization.Visualizer()
    vis.create_window()
    first_image = True

    # Stitch together all the different camera pointclouds from different cameras
    pcs = {}
    for camera in key_list:
        pcs[camera] = rs.pointcloud()
    pixels = resolution_width * resolution_height
    total_pixels = pixels * len(key_list)
    cloud = np.zeros((3, total_pixels))
    transformed_pixels = np.ones((4, pixels))
    # Fixed random permutation used every frame to subsample the cloud.
    idxe = np.random.permutation(cloud.shape[1])

    while True:
        start = time.time()
        the_frames = device_manager.poll_frames()
        for idx, camera in enumerate(key_list):
            pc = pcs[camera]
            frame = the_frames[camera][rs.stream.depth]
            points = pc.calculate(frame)
            vert = np.transpose(np.asanyarray(points.get_vertices(2)))
            # Homogeneous coordinates: rows 0..2 are XYZ, row 3 stays 1.
            transformed_pixels[:coordinate_dimentions, :] = vert
            calibrated_points = np.matmul(transformation_devices[camera],
                                          transformed_pixels)
            cloud[:, pixels * idx:pixels *
                  (idx + 1)] = calibrated_points[:coordinate_dimentions, :]

        # Reduces rendered points and removes points with extreme z values
        keep_ratio = 0.01
        cloud_filtered = cloud[:, idxe[0:math.floor(cloud.shape[1] *
                                                    keep_ratio)]]
        #cloud_filtered = cloud_filtered - np.min(cloud_filtered[2, :])
        dist_thresh = 3
        # Negated so the cloud renders right-side-up in the viewer.
        cloud_filtered = -cloud_filtered[:, cloud_filtered[2, :] < dist_thresh]
        # cloud_filtered = cloud_filtered[:, cloud_filtered[2, :] > -1]
        # cloud_filtered = cloud_filtered[:, np.invert(np.any(cloud_filtered > dist_thresh, axis=0))]
        # cloud_filtered = cloud_filtered[:, np.invert(np.any(cloud_filtered > dist_thresh, axis=0))]

        # renders points from all different cameras
        #mlab.points3d( cloud_filtered[0, :], cloud_filtered[1, :], cloud_filtered[2, :], scale_factor=0.1)
        #mlab.show()
        pcd.points = o3d.utility.Vector3dVector(np.transpose(cloud_filtered))
        if first_image:
            vis.add_geometry(pcd)
            first_image = False
        # NOTE(review): newer open3d versions require update_geometry(pcd);
        # the zero-argument form only works on older releases — confirm the
        # pinned open3d version.
        vis.update_geometry()
        vis.poll_events()
        vis.update_renderer()
        end = time.time()
        #txt = input()
        #print(1 / (end - start))

    device_manager.disable_streams()
    vis.destroy_window()
def __init__(self, init_file_camera=None):
    """Set up all connected realsense cameras and optionally load a saved
    calibration.

    Args:
        init_file_camera: optional path to a .npy file (saved by calibrate())
            holding per-camera 4x4 transfer matrices. When it loads
            successfully, cameras_calibrated is set True and
            transformation_devices is populated; otherwise the rig starts
            uncalibrated.
    """
    self.resolution_width = 1280  # pixels
    self.resolution_height = 720  # pixels
    self.frame_rate = 30  # fps
    dispose_frames_for_stablisation = 30  # frames

    self.coordinate_dimentions = 3
    self.cameras_calibrated = False
    self.charuco_boards = {}
    self.transformation_devices = {}

    # Enable the streams from all the intel realsense devices
    rs_config = rs.config()
    rs_config.enable_stream(rs.stream.depth, self.resolution_width,
                            self.resolution_height, rs.format.z16,
                            self.frame_rate)
    rs_config.enable_stream(rs.stream.infrared, 1, self.resolution_width,
                            self.resolution_height, rs.format.y8,
                            self.frame_rate)
    rs_config.enable_stream(rs.stream.color, self.resolution_width,
                            self.resolution_height, rs.format.bgr8,
                            self.frame_rate)

    # Use the device manager class to enable the devices and get the frames
    self.device_manager = DeviceManager(rs.context(), rs_config)
    self.device_manager.enable_all_devices()
    self.device_manager._available_devices.sort()
    self.device_list = self.device_manager._available_devices

    # Initialize pointcloud depth image matrices (reused every frame)
    self.pcs = {}
    for camera in self.device_list:
        self.pcs[camera] = rs.pointcloud()
    self.pixels = self.resolution_width * self.resolution_height
    self.total_pixels = self.pixels * len(self.device_list)
    self.cloud = np.zeros((3, self.total_pixels))
    self.transformed_pixels = np.ones((4, self.pixels))

    # Allow some frames for the auto-exposure controller to stablise
    for frame in range(dispose_frames_for_stablisation):
        frames = self.device_manager.poll_frames_keep()

    assert (len(self.device_manager._available_devices) > 0)

    # Then we define the charuco boards used in the rig
    self.set_charuco()

    # Get the intrinsics of the realsense device
    self.intrinsics_devices = self.device_manager.get_device_intrinsics(frames)

    # Best-effort load of a previous calibration; any failure (missing file,
    # bad pickle, wrong shape) leaves the rig uncalibrated.
    try:
        transfer_matirices_save = np.load(init_file_camera, allow_pickle=True)
        transfer_matrices = transfer_matirices_save[()]
        transformation_devices = {}
        for matrix in transfer_matrices:
            the_matrix = transfer_matrices[matrix]
            transformation_devices[matrix] = Transformation(
                the_matrix[:3, :3], the_matrix[:3, 3])
        self.cameras_calibrated = True
        self.transformation_devices = transformation_devices
    except Exception:
        # Was a bare `except:` — narrowed so Ctrl-C/SystemExit still propagate.
        print('No such file in directory: "', init_file_camera, '"')
        print("Rig not calibrated\n")
        return
    print("Calibration completed...\n")
    return
def calibrate(self, amount_frames=150):
    """Calibrate the cameras by estimating transformation matrices between them.

    Takes batches of `amount_frames` image sets and estimates pairwise
    transforms; calibration repeats until every camera has a transformation
    to the camera with the lowest serial number. The function does not
    return until all matrices are found. Optionally saves the raw matrices
    to a .npy file chosen interactively.

    Args:
        amount_frames: number of image sets to capture per calibration round.

    Returns:
        dict: Transformation objects for every camera, keyed by the serial
        number of the "from" camera, mapping onto the lowest-serial camera.
    """
    # Dicts to store all images and pairwise results for calibration
    frame_dict = {}
    transform_dict = {}
    rms_dict = {}
    for from_device in self.device_list:
        transform_dict[from_device] = {}
        rms_dict[from_device] = {}
        for to_device in self.device_list:
            transform_dict[from_device][to_device] = {}
            rms_dict[from_device][to_device] = np.inf

    print("Starting to take images in 5 seconds")
    time.sleep(5)
    devices_stitched = False
    while not devices_stitched:
        print("taking new set of images")
        for frame_count in range(amount_frames):
            print("taking image")
            print(amount_frames - frame_count, "images left")
            frames = self.device_manager.poll_frames_keep()
            time.sleep(0.5)
            frame_dict[frame_count] = {}
            # Store plain numpy copies so the rs frame objects can be freed.
            for device in self.device_list:
                ir_frame = np.asanyarray(
                    frames[device][(rs.stream.infrared, 1)].get_data())
                depth_frame = np.asanyarray(
                    post_process_depth_frame(
                        frames[device][rs.stream.depth]).get_data())
                frame_dict[frame_count][device] = {
                    'ir_frame': ir_frame,
                    'depth_frame': depth_frame
                }
            del frames

        # Make transfer matrices between all possible camera pairs, keeping
        # the lowest-rms estimate seen so far for each pair.
        for idx, from_device in enumerate(self.device_list[:-1]):
            for to_device in self.device_list[idx + 1:]:
                if to_device != from_device:
                    temp_transform, temp_rms = get_transformation_matrix_wout_rsobject(
                        frame_dict, [from_device, to_device],
                        self.intrinsics_devices,
                        self.charuco_boards['charuco_board'])
                    if temp_rms < rms_dict[from_device][to_device]:
                        rms_dict[from_device][to_device] = temp_rms
                        rms_dict[to_device][from_device] = temp_rms
                        transform_dict[from_device][to_device] = temp_transform
                        transform_dict[to_device][
                            from_device] = temp_transform.inverse()

        # Use Dijkstra to find shortest path and check if all cameras are
        # connected; returns 0 while the graph is still disconnected.
        transformation_matrices = least_error_transfroms(
            transform_dict, rms_dict)
        if transformation_matrices != 0:
            devices_stitched = True
        # Prints rms error between camera transforms
        test = matrix_viewer(rms_dict)
        print(test)

    # Turn raw 4x4 matrices into Transformation objects
    transformation_devices = {}
    for matrix in transformation_matrices:
        the_matirx = transformation_matrices[matrix]
        transformation_devices[matrix] = Transformation(
            the_matirx[:3, :3], the_matirx[:3, 3])
    self.transformation_devices = transformation_devices

    save_calibration = input('Press "y" if you want to save calibration \n')
    if save_calibration == "y":
        saved = False
        while not saved:
            name = input(
                "Type in name of file to save. remember to end name with '.npy' \n"
            )
            try:
                np.save(name, transformation_matrices)
                saved = True
            except Exception:
                # Was a bare `except:` — narrowed so Ctrl-C still propagates.
                print(
                    "could not save, try another name and remember '.npy' in the end"
                )

    frame_dict.clear()
    self.cameras_calibrated = True
    return self.transformation_devices
def get_transform_short(to_camera, from_camera):
    """Fit a rigid transform mapping from_camera points onto to_camera points.

    Both inputs are Nx3 point stacks whose first row is a placeholder seed
    row; it is stripped before the Kabsch fit. Unlike the guarded variant,
    this one always attempts the fit.

    Returns:
        (Transformation, rmsd) from the Kabsch algorithm.
    """
    dst_points = np.transpose(to_camera[1:, :])
    src_points = np.transpose(from_camera[1:, :])
    rotation, translation, rmsd = cc.calculate_transformation_kabsch(
        dst_points, src_points)
    return Transformation(rotation, translation), rmsd
def main():
    """Calibrate all connected realsense cameras pairwise via ChArUco frames,
    then live-visualise detected chessboard corners in a shared frame."""
    # First we set up the cameras to start streaming
    # Define some constants
    resolution_width = 1280  # pixels
    resolution_height = 720  # pixels
    frame_rate = 30  # fps
    dispose_frames_for_stablisation = 30  # frames

    # Enable the streams from all the intel realsense devices
    rs_config = rs.config()
    rs_config.enable_stream(rs.stream.depth, resolution_width,
                            resolution_height, rs.format.z16, frame_rate)
    rs_config.enable_stream(rs.stream.infrared, 1, resolution_width,
                            resolution_height, rs.format.y8, frame_rate)
    rs_config.enable_stream(rs.stream.color, resolution_width,
                            resolution_height, rs.format.bgr8, frame_rate)

    # Use the device manager class to enable the devices and get the frames
    device_manager = DeviceManager(rs.context(), rs_config)
    device_manager.enable_all_devices()
    device_manager._available_devices.sort()
    device_list = device_manager._available_devices

    # Allow some frames for the auto-exposure controller to stablise
    for frame in range(dispose_frames_for_stablisation):
        frames = device_manager.poll_frames()

    assert (len(device_manager._available_devices) > 0)

    # Then we calibrate the images
    # Get the intrinsics of the realsense device
    intrinsics_devices = device_manager.get_device_intrinsics(frames)

    # Set the charuco board parameters for calibration
    aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_250)
    charuco_width = 8
    charuco_height = 5
    square_length = 0.03425
    marker_length = .026
    coordinate_dimentions = 3
    charuco_board = aruco.CharucoBoard_create(charuco_width, charuco_height,
                                              square_length, marker_length,
                                              aruco_dict)

    # Choose amount of frames to average
    amount_frames = 12
    frame_dict = {}
    transform_dict = {}
    rms_dict = {}
    for from_device in device_list:
        transform_dict[from_device] = {}
        rms_dict[from_device] = {}
        for to_device in device_list:
            transform_dict[from_device][to_device] = {}
            rms_dict[from_device][to_device] = np.inf
    devices_stitched = False
    while not devices_stitched:
        print("taking new set of images")
        for frame_count in range(amount_frames):
            print("taking image")
            print(amount_frames - frame_count, "images left")
            frames = device_manager.poll_frames()
            print("Next image in 1 seconds")
            time.sleep(1)
            frame_dict[frame_count] = frames
        # Estimate transforms for every camera pair, keeping the lowest rms.
        for idx, from_device in enumerate(device_list[:-1]):
            for to_device in device_list[idx + 1:]:
                if to_device != from_device:
                    temp_transform, temp_rms = get_transformation_matrix(
                        frame_dict, [from_device, to_device],
                        intrinsics_devices, charuco_board)
                    if temp_rms < rms_dict[from_device][to_device]:
                        rms_dict[from_device][to_device] = temp_rms
                        rms_dict[to_device][from_device] = temp_rms
                        transform_dict[from_device][to_device] = temp_transform
                        transform_dict[to_device][
                            from_device] = temp_transform.inverse()
        test = matrix_viewer(rms_dict)
        print(test)
        # Stitched only if each camera links to its predecessor in the sorted
        # list (chain connectivity, not full graph connectivity).
        devices_stitched = True
        for idx, from_device in enumerate(device_list[1:]):
            if rms_dict[from_device][device_list[idx]] == np.inf:
                devices_stitched = False

    # Chain the pairwise transforms so every camera maps into the first
    # camera's coordinate frame (first camera gets the identity).
    transformation_devices = {}
    identity = np.identity(4)
    transformation_devices[device_list[0]] = Transformation(
        identity[:3, :3], identity[:3, 3])
    for idx, from_device in enumerate(device_list[1:]):
        temp_transform = np.matmul(
            transformation_devices[device_list[idx]].pose_mat,
            transform_dict[from_device][device_list[idx]].pose_mat)
        transformation_devices[from_device] = Transformation(
            temp_transform[:3, :3], temp_transform[:3, 3])

    # Printing
    print("Calibration completed...\n")

    # Enable the emitter of the devices and extract serial numbers to identify cameras
    device_manager.enable_emitter(True)
    key_list = device_manager.poll_frames().keys()

    pcd = o3d.geometry.PointCloud()
    # enable visualiser
    vis = o3d.visualization.Visualizer()
    vis.create_window()
    first_image = True
    while True:
        frames = device_manager.poll_frames()
        # Seed rows; detected corners from each camera are stacked below.
        displayed_points = np.zeros((10, 3))
        for camera in device_list:
            added_points = get_charuco_points(frames[camera],
                                              transformation_devices[camera],
                                              intrinsics_devices[camera],
                                              charuco_board)
            if added_points.any():
                displayed_points = np.vstack(
                    (displayed_points, np.transpose(added_points)))
        pcd.points = o3d.utility.Vector3dVector(displayed_points)
        if first_image:
            vis.add_geometry(pcd)
            first_image = False
        # NOTE(review): newer open3d versions require update_geometry(pcd) —
        # confirm the pinned open3d version.
        vis.update_geometry()
        vis.poll_events()
        vis.update_renderer()

    device_manager.disable_streams()
    vis.destroy_window()
def main():
    """Calibrate (or reload a saved calibration of) all connected realsense
    cameras, then offer an interactive menu of visualisations and a
    robot-to-camera calibration routine."""
    # First we set up the cameras to start streaming
    # Define some constants
    resolution_width = 1280  # pixels
    resolution_height = 720  # pixels
    frame_rate = 30  # fps
    dispose_frames_for_stablisation = 30  # frames

    # Enable the streams from all the intel realsense devices
    rs_config = rs.config()
    rs_config.enable_stream(rs.stream.depth, resolution_width,
                            resolution_height, rs.format.z16, frame_rate)
    rs_config.enable_stream(rs.stream.infrared, 1, resolution_width,
                            resolution_height, rs.format.y8, frame_rate)
    rs_config.enable_stream(rs.stream.color, resolution_width,
                            resolution_height, rs.format.bgr8, frame_rate)

    # Use the device manager class to enable the devices and get the frames
    device_manager = DeviceManager(rs.context(), rs_config)
    device_manager.enable_all_devices()
    device_manager._available_devices.sort()
    device_list = device_manager._available_devices

    # Allow some frames for the auto-exposure controller to stablise
    for frame in range(dispose_frames_for_stablisation):
        frames = device_manager.poll_frames_keep()

    assert (len(device_manager._available_devices) > 0)

    # Then we calibrate the images
    # Get the intrinsics of the realsense device
    intrinsics_devices = device_manager.get_device_intrinsics(frames)

    # Set the charuco board parameters for calibration
    aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_250)
    charuco_width = 8
    charuco_height = 5
    square_length = 0.03425
    marker_length = .026
    coordinate_dimentions = 3
    charuco_board = aruco.CharucoBoard_create(charuco_width, charuco_height,
                                              square_length, marker_length,
                                              aruco_dict)

    # Set the charuco board parameters for robot
    aruco_dict_r = aruco.Dictionary_get(aruco.DICT_4X4_250)
    charuco_width_r = 3
    charuco_height_r = 3
    square_length_r = 0.0311
    marker_length_r = .0247
    charuco_board_robot = aruco.CharucoBoard_create(charuco_width_r,
                                                    charuco_height_r,
                                                    square_length_r,
                                                    marker_length_r,
                                                    aruco_dict_r)

    # Set the charuco board parameters for robot (second, 5x5 dictionary)
    aruco_dict_ro = aruco.Dictionary_get(aruco.DICT_5X5_100)
    charuco_width_ro = 3
    charuco_height_ro = 3
    square_length_ro = 0.0311
    marker_length_ro = .0247
    charuco_board_robot_2 = aruco.CharucoBoard_create(charuco_width_ro,
                                                      charuco_height_ro,
                                                      square_length_ro,
                                                      marker_length_ro,
                                                      aruco_dict_ro)

    # Decide if you want to use previous calibration or make a new one
    calibration_decision = input(
        'To use previous calibration, press "y". For new calibration press any key \n'
    )
    if calibration_decision != 'y':
        # Choose amount of frames to average
        correct_input = False
        while not correct_input:
            the_input = input(
                "Write amount of frames you want to use to calibrate \n")
            try:
                amount_frames = int(the_input)
                if amount_frames > 0:
                    correct_input = True
                else:
                    print("Frame count has to be positive")
            except ValueError:
                print('Input has to be int')

        # Make a dict to store all images for calibration
        frame_dict = {}
        transform_dict = {}
        rms_dict = {}
        for from_device in device_list:
            transform_dict[from_device] = {}
            rms_dict[from_device] = {}
            for to_device in device_list:
                transform_dict[from_device][to_device] = {}
                rms_dict[from_device][to_device] = np.inf
        print("Starting to take images in 5 seconds")
        time.sleep(5)
        devices_stitched = False
        while not devices_stitched:
            print("taking new set of images")
            for frame_count in range(amount_frames):
                print("taking image")
                print(amount_frames - frame_count, "images left")
                frames = device_manager.poll_frames_keep()
                time.sleep(0.5)
                frame_dict[frame_count] = {}
                # Store plain numpy copies so the rs frame objects can be freed.
                for device in device_list:
                    ir_frame = np.asanyarray(
                        frames[device][(rs.stream.infrared, 1)].get_data())
                    depth_frame = np.asanyarray(
                        post_process_depth_frame(
                            frames[device][rs.stream.depth]).get_data())
                    frame_dict[frame_count][device] = {
                        'ir_frame': ir_frame,
                        'depth_frame': depth_frame
                    }
                del frames

            # Make transfer matrices between all possible cameras
            for idx, from_device in enumerate(device_list[:-1]):
                for to_device in device_list[idx + 1:]:
                    if to_device != from_device:
                        temp_transform, temp_rms = get_transformation_matrix_wout_rsobject(
                            frame_dict, [from_device, to_device],
                            intrinsics_devices, charuco_board)
                        if temp_rms < rms_dict[from_device][to_device]:
                            rms_dict[from_device][to_device] = temp_rms
                            rms_dict[to_device][from_device] = temp_rms
                            transform_dict[from_device][
                                to_device] = temp_transform
                            transform_dict[to_device][
                                from_device] = temp_transform.inverse()

            # Use Djikstra's to fin shortest path and check if all cameras are connected
            transformation_matrices = least_error_transfroms(
                transform_dict, rms_dict)
            if transformation_matrices != 0:
                devices_stitched = True
            # Prints rms error between camera transforms
            test = matrix_viewer(rms_dict)
            print(test)

        # Turns transformation matrices into Transfomation objects
        transformation_devices = {}
        for matrix in transformation_matrices:
            the_matirx = transformation_matrices[matrix]
            transformation_devices[matrix] = Transformation(
                the_matirx[:3, :3], the_matirx[:3, 3])

        # Saves calibration transforms if the user wants to
        save_calibration = input(
            'Press "y" if you want to save calibration \n')
        if save_calibration == "y":
            # NOTE(review): this assigns the builtin `input` function itself
            # and the variable is never used — dead statement, candidate for
            # removal.
            calibration_name = input
            saved = False
            while not saved:
                name = input(
                    "Type in name of file to save. remember to end name with '.npy' \n"
                )
                try:
                    np.save(name, transformation_matrices)
                    saved = True
                except:
                    print(
                        "could not save, try another name and remember '.npy' in the end"
                    )
        frame_dict.clear()
    else:
        # Load a previously saved calibration file, reprompting until a
        # loadable file name is given.
        correct_filename = False
        while not correct_filename:
            file_name = input('Type in calibration file name \n')
            try:
                transfer_matirices_save = np.load(file_name, allow_pickle=True)
                transfer_matrices = transfer_matirices_save[()]
                correct_filename = True
            except:
                print('No such file in directory: "', file_name, '"')
        transformation_devices = {}
        for matrix in transfer_matrices:
            the_matrix = transfer_matrices[matrix]
            transformation_devices[matrix] = Transformation(
                the_matrix[:3, :3], the_matrix[:3, 3])

    print("Calibration completed...")

    # Enable the emitter of the devices and extract serial numbers to identify cameras
    device_manager.enable_emitter(True)
    key_list = device_manager.poll_frames().keys()

    # Interactive menu: loops forever offering the available visualisations.
    while True:
        visualisation = input(
            'Presss "1" for RMS error, "2" for chessboard visualisation and "3" for 3d pointcloud and "4" for robot to camea calibration\n'
        )
        if visualisation == '1':
            calculate_avg_rms_error(device_list, device_manager,
                                    transformation_devices, charuco_board,
                                    intrinsics_devices)
        elif visualisation == '2':
            visualise_chessboard(device_manager, device_list,
                                 intrinsics_devices, charuco_board,
                                 transformation_devices)
        elif visualisation == '3':
            visualise_point_cloud(key_list, resolution_height,
                                  resolution_width, device_manager,
                                  coordinate_dimentions,
                                  transformation_devices)
        elif visualisation == '4':
            robot_calibration = CameraRobotCalibration(transformation_devices,
                                                       device_manager,
                                                       charuco_board_robot_2,
                                                       charuco_board_robot)
            input("Set target in position for calibration\n")
            test = robot_calibration.test_functions()
            print(test)
        else:
            print("Input not recognised")