def get_device_fy(config):
    """Return the color-stream focal length (fy) of the camera with the given serial number."""
    from realsense_device_manager import DeviceManager

    device_manager = DeviceManager(rs.context(), config)
    device_manager.enable_all_devices()

    # Allow some frames for the auto-exposure controller to stabilise
    for frame in range(30):
        frames = device_manager.poll_frames()
    assert len(device_manager._available_devices) > 0

    intrinsics_devices = device_manager.get_device_intrinsics(frames)
    fy = intrinsics_devices['908212070032'][rs.stream.color].fy
    device_manager.disable_streams()
    return fy
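# Usage sketch (assumption, not part of the original file): build an rs.config
# matching the streams enabled elsewhere in this module and query the focal
# length. Assumes pyrealsense2 is imported as rs and the hard-coded serial
# number inside get_device_fy is connected.
def example_get_fy():
    example_config = rs.config()
    example_config.enable_stream(rs.stream.depth, 1280, 720, rs.format.z16, 30)
    example_config.enable_stream(rs.stream.color, 1280, 720, rs.format.bgr8, 30)
    print("Color-stream fy (pixels):", get_device_fy(example_config))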
def main():
    # First we set up the cameras to start streaming
    # Define some constants
    resolution_width = 1280  # pixels
    resolution_height = 720  # pixels
    frame_rate = 30  # fps
    dispose_frames_for_stablisation = 30  # frames

    # Enable the streams from all the Intel RealSense devices
    rs_config = rs.config()
    rs_config.enable_stream(rs.stream.depth, resolution_width, resolution_height, rs.format.z16, frame_rate)
    rs_config.enable_stream(rs.stream.infrared, 1, resolution_width, resolution_height, rs.format.y8, frame_rate)
    rs_config.enable_stream(rs.stream.color, resolution_width, resolution_height, rs.format.bgr8, frame_rate)

    # Use the device manager class to enable the devices and get the frames
    device_manager = DeviceManager(rs.context(), rs_config)
    device_manager.enable_all_devices()
    device_manager._available_devices.sort()

    # Allow some frames for the auto-exposure controller to stabilise
    for frame in range(dispose_frames_for_stablisation):
        frames = device_manager.poll_frames()
    assert len(device_manager._available_devices) > 0

    # Then we calibrate the images
    # Get the intrinsics of the RealSense devices
    intrinsics_devices = device_manager.get_device_intrinsics(frames)

    # Set the charuco board parameters for calibration
    aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_250)
    charuco_width = 8
    charuco_height = 5
    square_length = 0.03425
    marker_length = 0.026
    coordinate_dimentions = 3
    charuco_board = aruco.CharucoBoard_create(charuco_width, charuco_height, square_length, marker_length, aruco_dict)

    # Estimate the pose of each camera relative to the first camera in the list
    amount_devices = len(device_manager._available_devices)
    transformation_matrix = {}
    rms_matrix = {}
    for device in device_manager._available_devices:
        transformation_matrix[device] = {}
        rms_matrix[device] = {}
        for device2 in device_manager._available_devices:
            rms_matrix[device][device2] = np.inf

    devices_stitched = False
    while not devices_stitched:
        frames = device_manager.poll_frames()
        pose_estimator = PoseEstimation(frames, intrinsics_devices, charuco_board)
        transformation_result_kabsch = pose_estimator.perform_pose_estimation()
        object_point = pose_estimator.get_chessboard_corners_in3d()
        calibrated_device_count = 0
        for device in device_manager._available_devices:
            if not transformation_result_kabsch[device][0]:
                print("Device", device, "needs to be calibrated")
            else:
                # If this is the first camera in the list
                if calibrated_device_count == 0:
                    source_matrix = transformation_result_kabsch[device][1]
                    source_device = device
                    source_rms = transformation_result_kabsch[device][3]
                else:
                    # If the new estimate is better than the previous one
                    if source_rms + transformation_result_kabsch[device][3] < rms_matrix[device][source_device]:
                        combined_rms = source_rms + transformation_result_kabsch[device][3]
                        rms_matrix[source_device][device] = combined_rms
                        rms_matrix[device][source_device] = combined_rms
                        # Transform from this device to the source device, and its inverse
                        slave_transform = transformation_result_kabsch[device][1].inverse()
                        multiplied_transform = np.matmul(source_matrix.pose_mat, slave_transform.pose_mat)
                        device_to_source = Transformation(multiplied_transform[:3, :3], multiplied_transform[:3, 3])
                        transformation_matrix[device][source_device] = device_to_source
                        transformation_matrix[source_device][device] = device_to_source.inverse()
                calibrated_device_count += 1

        # Check if all devices are stitched together
        transformation_devices = least_error_transfroms(transformation_matrix, rms_matrix)
        if transformation_devices != 0:
            devices_stitched = True
        test = matrix_viewer(rms_matrix)
        print(test)
    print("Calibration completed...\n")

    # Enable the emitter of the devices and extract serial numbers to identify cameras
    device_manager.enable_emitter(True)
    key_list = device_manager.poll_frames().keys()

    pcd = o3d.geometry.PointCloud()

    # Enable the visualiser
    vis = o3d.visualization.Visualizer()
    vis.create_window()
    first_image = True

    # Stitch together the pointclouds from the different cameras
    pcs = {}
    for camera in key_list:
        pcs[camera] = rs.pointcloud()
    pixels = resolution_width * resolution_height
    total_pixels = pixels * len(key_list)
    cloud = np.zeros((3, total_pixels))
    transformed_pixels = np.ones((4, pixels))
    idxe = np.random.permutation(cloud.shape[1])

    while True:
        start = time.time()
        the_frames = device_manager.poll_frames()
        for idx, camera in enumerate(key_list):
            pc = pcs[camera]
            frame = the_frames[camera][rs.stream.depth]
            points = pc.calculate(frame)
            vert = np.transpose(np.asanyarray(points.get_vertices(2)))
            transformed_pixels[:coordinate_dimentions, :] = vert
            calibrated_points = np.matmul(transformation_devices[camera], transformed_pixels)
            cloud[:, pixels * idx:pixels * (idx + 1)] = calibrated_points[:coordinate_dimentions, :]

        # Reduce the number of rendered points and remove points with extreme z values
        keep_ratio = 0.01
        cloud_filtered = cloud[:, idxe[0:math.floor(cloud.shape[1] * keep_ratio)]]
        dist_thresh = 3
        cloud_filtered = -cloud_filtered[:, cloud_filtered[2, :] < dist_thresh]

        # Render the points from all the different cameras
        pcd.points = o3d.utility.Vector3dVector(np.transpose(cloud_filtered))
        if first_image:
            vis.add_geometry(pcd)
            first_image = False
        vis.update_geometry()
        vis.poll_events()
        vis.update_renderer()
        end = time.time()
        # print(1 / (end - start))  # uncomment to print the frame rate

    device_manager.disable_streams()
    vis.destroy_window()
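# Illustrative helper (not in the original code): the stitching loop above pads
# (N, 3) vertices with a row of ones so a single 4x4 matmul applies rotation and
# translation at once. A minimal, self-contained sketch of that pattern:
def apply_homogeneous_transform(transform_4x4, vertices_n_by_3):
    """Apply a 4x4 homogeneous transform to an (N, 3) array of points."""
    import numpy as np
    homogeneous = np.ones((4, vertices_n_by_3.shape[0]))  # fourth row of ones
    homogeneous[:3, :] = vertices_n_by_3.T                # columns are points
    transformed = np.matmul(transform_4x4, homogeneous)   # rotate + translate in one step
    return transformed[:3, :].T                           # back to (N, 3)
# e.g. with T = identity plus translation (1, 2, 3):
#   apply_homogeneous_transform(T, np.zeros((1, 3))) -> [[1., 2., 3.]]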
class Camera:
    """
    ...................................................................................................
    "                                                                                                 "
    "  Initialises a Camera object. If a calibration file is received, it will use the saved file     "
    "  to calibrate the cameras.                                                                      "
    "                                                                                                 "
    "  Returns: none                                                                                  "
    "                                                                                                 "
    "  Sindre Skaar                                                                                   "
    "                                                                                                 "
    ...................................................................................................
    """

    def __init__(self, init_file_camera=None):
        self.resolution_width = 1280  # pixels
        self.resolution_height = 720  # pixels
        self.frame_rate = 30  # fps
        dispose_frames_for_stablisation = 30  # frames
        self.coordinate_dimentions = 3
        self.cameras_calibrated = False
        self.charuco_boards = {}
        self.transformation_devices = {}

        # Enable the streams from all the Intel RealSense devices
        rs_config = rs.config()
        rs_config.enable_stream(rs.stream.depth, self.resolution_width, self.resolution_height, rs.format.z16, self.frame_rate)
        rs_config.enable_stream(rs.stream.infrared, 1, self.resolution_width, self.resolution_height, rs.format.y8, self.frame_rate)
        rs_config.enable_stream(rs.stream.color, self.resolution_width, self.resolution_height, rs.format.bgr8, self.frame_rate)

        # Use the device manager class to enable the devices and get the frames
        self.device_manager = DeviceManager(rs.context(), rs_config)
        self.device_manager.enable_all_devices()
        self.device_manager._available_devices.sort()
        self.device_list = self.device_manager._available_devices

        # Initialise the pointcloud and depth-image matrices
        self.pcs = {}
        for camera in self.device_list:
            self.pcs[camera] = rs.pointcloud()
        self.pixels = self.resolution_width * self.resolution_height
        self.total_pixels = self.pixels * len(self.device_list)
        self.cloud = np.zeros((3, self.total_pixels))
        self.transformed_pixels = np.ones((4, self.pixels))

        # Allow some frames for the auto-exposure controller to stabilise
        for frame in range(dispose_frames_for_stablisation):
            frames = self.device_manager.poll_frames_keep()
        assert len(self.device_manager._available_devices) > 0

        # Then we define the charuco boards used in the rig
        self.set_charuco()

        # Get the intrinsics of the RealSense devices
        self.intrinsics_devices = self.device_manager.get_device_intrinsics(frames)

        # Load the saved calibration file, if one was given and it exists
        try:
            transfer_matirices_save = np.load(init_file_camera, allow_pickle=True)
            transfer_matrices = transfer_matirices_save[()]
            transformation_devices = {}
            for matrix in transfer_matrices:
                the_matrix = transfer_matrices[matrix]
                transformation_devices[matrix] = Transformation(the_matrix[:3, :3], the_matrix[:3, 3])
            self.cameras_calibrated = True
            self.transformation_devices = transformation_devices
        except Exception:
            print('No such file in directory: "', init_file_camera, '"')
            print("Rig not calibrated\n")
            return
        print("Calibration completed...\n")
        return

    def set_charuco(self):
        """
        ...................................................................................................
        "                                                                                                 "
        "  Creates the charuco boards that will be used during the experiment.                            "
        "  All charuco boards made are saved in the class within the self.charuco_boards dict.            "
        "                                                                                                 "
        "  Returns: None                                                                                  "
        "                                                                                                 "
        "  Sindre Skaar                                                                                   "
        "                                                                                                 "
        ...................................................................................................
""" charuco_boards = {} # Set the charuco board parameters for calibration aruco_dict = aruco.Dictionary_get(aruco.DICT_5X5_100) charuco_width = 8 charuco_height = 5 square_length = 0.0392 marker_length = square_length*0.8 charuco_boards['charuco_board'] = aruco.CharucoBoard_create(charuco_width, charuco_height, square_length, marker_length, aruco_dict) # Set the charuco board parameters for robot aruco_dict_r = aruco.Dictionary_get(aruco.DICT_4X4_250) charuco_width_r = 3 charuco_height_r = 3 square_length_r = 0.0311 marker_length_r = .0247 charuco_boards['charuco_robot'] = aruco.CharucoBoard_create(charuco_width_r, charuco_height_r, square_length_r, marker_length_r, aruco_dict_r) # Set the charuco board parameters for robot aruco_dict_ro = aruco.Dictionary_get(aruco.DICT_5X5_100) charuco_width_ro = 3 charuco_height_ro = 3 square_length_ro = 0.0311 marker_length_ro = .0247 charuco_boards['charuco_target'] = aruco.CharucoBoard_create(charuco_width_ro, charuco_height_ro, square_length_ro, marker_length_ro, aruco_dict_ro) self.charuco_boards = charuco_boards def calibrate(self, amount_frames=150): """ ................................................................................................... " " " " Calibrates the cameras by creating a transformation matrix between the different cameras. " " The calibration will continue until all cameras has a transformation matrix to the camera with " " the lowest serial number. The function will not return if it cannot find all matrices " " " " Returns: transformation_devices, A dict containing all transfer_matrces from al cameras " to lowest serial number camera. The key in the dict is the serial number in the "from" camera " " " Sindre Skaar " " " " .................................................................................................. 
""" # Make a dict to store all images for calibration frame_dict = {} transform_dict = {} rms_dict = {} for from_device in self.device_list: transform_dict[from_device] = {} rms_dict[from_device] = {} for to_device in self.device_list: transform_dict[from_device][to_device] = {} rms_dict[from_device][to_device] = np.inf print("Starting to take images in 5 seconds") time.sleep(5) devices_stitched = False while not devices_stitched: print("taking new set of images") for frame_count in range(amount_frames): print("taking image") print(amount_frames - frame_count, "images left") frames = self.device_manager.poll_frames_keep() time.sleep(0.5) frame_dict[frame_count] = {} for device in self.device_list: ir_frame = np.asanyarray(frames[device][(rs.stream.infrared, 1)].get_data()) depth_frame = np.asanyarray( post_process_depth_frame(frames[device][rs.stream.depth]).get_data()) frame_dict[frame_count][device] = {'ir_frame': ir_frame, 'depth_frame': depth_frame} del frames # Make transfer matrices between all possible cameras for idx, from_device in enumerate(self.device_list[:-1]): for to_device in self.device_list[idx + 1:]: if to_device != from_device: temp_transform, temp_rms = get_transformation_matrix_wout_rsobject(frame_dict, [from_device, to_device], self.intrinsics_devices, self.charuco_boards['charuco_board']) if temp_rms < rms_dict[from_device][to_device]: rms_dict[from_device][to_device] = temp_rms rms_dict[to_device][from_device] = temp_rms transform_dict[from_device][to_device] = temp_transform transform_dict[to_device][from_device] = temp_transform.inverse() # Use Dijkstra to find shortest path and check if all cameras are connected transformation_matrices = least_error_transfroms(transform_dict, rms_dict) if transformation_matrices != 0: devices_stitched = True # Prints rms error between camera transforms test = matrix_viewer(rms_dict) print(test) # Turns transformation matrices into Transfomation objects transformation_devices = {} for matrix in transformation_matrices: the_matirx = transformation_matrices[matrix] transformation_devices[matrix] = Transformation(the_matirx[:3, :3], the_matirx[:3, 3]) self.transformation_devices = transformation_devices save_calibration = input('Press "y" if you want to save calibration \n') if save_calibration == "y": saved = False while not saved: name = input("Type in name of file to save. remember to end name with '.npy' \n") try: np.save(name, transformation_matrices) saved = True except: print("could not save, try another name and remember '.npy' in the end") frame_dict.clear() self.cameras_calibrated = True return self.transformation_devices def visualise(self): """ ................................................................................................... " " " " A functions used to easily visualise the errors, by calculating the rms errors on a charuco board, " " visualising the chess board points, visualising point cloud stream or an rgbd image. " " all functions are made as infinite while loops and are thus, just for visualisation and does not " " return anything. " " " " Returns: None " " " " Sindre Skaar " " " " .................................................................................................. 
""" if not self.cameras_calibrated: print("Cameras not calibrated") return self.device_manager.enable_emitter(True) key_list = self.device_manager.poll_frames().keys() while True: visualisation = input('Presss "1" for RMS error, "2" for chessboard visualisation and "3" for 3d pointcloud and "4" for robot to camea calibration\n') if visualisation == '1': calculate_avg_rms_error(self.device_list, self.device_manager, self.transformation_devices, self.charuco_boards['charuco_board'], self.intrinsics_devices) elif visualisation == '2': visualise_chessboard(self.device_manager, self.device_list, self.intrinsics_devices, self.transformation_devices['charuco_board'], self.transformation_devices) elif visualisation == '3': visualise_point_cloud(key_list, self.resolution_height, self.resolution_width, self.device_manager, self.coordinate_dimentions, self.transformation_devices) elif visualisation == '4': visualise_rgbd_cloud(key_list, self.resolution_height, self.resolution_width, self.device_manager, self.coordinate_dimentions, self.transformation_devices) elif visualisation == '5': create_point_cloud(key_list, self.resolution_height, self.resolution_width, self.device_manager, self.coordinate_dimentions, self.transformation_devices) else: print("Input not recognised") def get_robot_data(self): """Returns data necessary for the robot calibration""" return self.transformation_devices, self.device_manager,\ self.charuco_boards['charuco_target'], self.charuco_boards['charuco_robot'] def get_transform_matrix(self): """Returns the transfer matrices used to go from all cameras to lowest serial camera. Returns -1 if the rig is not calibrated""" if self.cameras_calibrated: return self.transformation_devices else: print('Cameras not calibrated') return -1 def poll_depth_frame(self): """ ................................................................................................... " " " " Polls the depth frames form all connected cameras, transforms the point clouds to lowest " " serial number cameras coordinate system and concatenates them together. If rig is not calibrated " " it returns -1 " " " " Returns: cloud, a point cloud form all cameras stitched together into one point cloud. " " Or returns -1 if cameras are not calibrated " " " " " " Sindre Skaar " " " " .................................................................................................. """ if not self.cameras_calibrated: print("Cameras not calibrated") return -1 the_frames = self.device_manager.poll_frames() for idx, camera in enumerate(self.device_list): pc = self.pcs[camera] frame = the_frames[camera][rs.stream.depth] points = pc.calculate(frame) vert = np.transpose(np.asanyarray(points.get_vertices(2))) self.transformed_pixels[:3, :] = vert calibrated_points = np.matmul(self.transformation_devices[camera].pose_mat, self.transformed_pixels) self.cloud[:, self.pixels * idx: self.pixels * (idx + 1)] = calibrated_points[:3, :] return self.cloud
def run_demo():
    # Define some constants
    resolution_width = 1280  # pixels
    resolution_height = 720  # pixels
    frame_rate = 15  # fps
    dispose_frames_for_stablisation = 30  # frames

    # chessboard_width = 6   # squares
    # chessboard_height = 9  # squares
    # square_size = 0.0253   # meters

    try:
        # Enable the streams from all the Intel RealSense devices
        rs_config = rs.config()
        rs_config.enable_stream(rs.stream.depth, resolution_width, resolution_height, rs.format.z16, frame_rate)
        rs_config.enable_stream(rs.stream.infrared, 1, resolution_width, resolution_height, rs.format.y8, frame_rate)
        rs_config.enable_stream(rs.stream.color, resolution_width, resolution_height, rs.format.bgr8, frame_rate)

        # Use the device manager class to enable the devices and get the frames
        device_manager = DeviceManager(rs.context(), rs_config)
        device_manager.enable_all_devices()

        # Allow some frames for the auto-exposure controller to stabilise
        for frame in range(dispose_frames_for_stablisation):
            frames = device_manager.poll_frames()
        assert len(device_manager._available_devices) > 0

        """
        1: Calibration
        Calibrate all the available devices to the world co-ordinates.
        For this purpose, a chessboard printout for use with the opencv based calibration process is needed.
        """
        # Get the intrinsics of the RealSense devices
        intrinsics_devices = device_manager.get_device_intrinsics(frames)

        # Set the chessboard parameters for calibration
        # chessboard_params = [chessboard_height, chessboard_width, square_size]

        # Estimate the pose of the chessboard in the world coordinate using the Kabsch method.
        # NOTE: this whole calibration block is commented out, so transformation_devices and
        # roi_2D (used in phase 2 below) are left undefined; re-enable it before running.
        # calibrated_device_count = 0
        # while calibrated_device_count < len(device_manager._available_devices):
        #     frames = device_manager.poll_frames()
        #     pose_estimator = PoseEstimation(frames, intrinsics_devices, chessboard_params)
        #     transformation_result_kabsch = pose_estimator.perform_pose_estimation()
        #     object_point = pose_estimator.get_chessboard_corners_in3d()
        #     calibrated_device_count = 0
        #     for device in device_manager._available_devices:
        #         if not transformation_result_kabsch[device][0]:
        #             print("Place the chessboard on the plane where the object needs to be detected..")
        #         else:
        #             calibrated_device_count += 1

        # Save the transformation object for all devices in an array to use for measurements
        # transformation_devices = {}
        # chessboard_points_cumulative_3d = np.array([-1, -1, -1]).transpose()
        # for device in device_manager._available_devices:
        #     transformation_devices[device] = transformation_result_kabsch[device][1].inverse()
        #     points3D = object_point[device][2][:, object_point[device][3]]
        #     points3D = transformation_devices[device].apply_transformation(points3D)
        #     chessboard_points_cumulative_3d = np.column_stack((chessboard_points_cumulative_3d, points3D))

        # Extract the bounds between which the object's dimensions are needed;
        # it is necessary for this demo that the object's length and breadth are
        # smaller than those of the chessboard.
        # chessboard_points_cumulative_3d = np.delete(chessboard_points_cumulative_3d, 0, 1)
        # roi_2D = get_boundary_corners_2D(chessboard_points_cumulative_3d)

        print("Calibration completed...\nPlace the box in the field of view of the devices...")
        """
        2: Measurement and display
        Measure the dimension of the object using depth maps from multiple RealSense devices.
        The information from Phase 1 will be used here.
        """
        # Enable the emitter of the devices
        device_manager.enable_emitter(True)

        # Load the JSON settings file in order to enable the High Accuracy preset for the RealSense
        device_manager.load_settings_json("./HighResHighAccuracyPreset.json")

        # Get the extrinsics of the device to be used later
        extrinsics_devices = device_manager.get_depth_to_color_extrinsics(frames)

        # Get the calibration info as a dictionary to help with display of the measurements
        # onto the color image instead of the infrared image
        calibration_info_devices = defaultdict(list)
        for calibration_info in (transformation_devices, intrinsics_devices, extrinsics_devices):
            for key, value in calibration_info.items():
                calibration_info_devices[key].append(value)

        # Continue acquisition until terminated with Ctrl+C by the user
        while 1:
            # Get the frames from all the devices
            frames_devices = device_manager.poll_frames()

            # Calculate the pointcloud using the depth frames from all the devices
            point_cloud = calculate_cumulative_pointcloud(frames_devices, calibration_info_devices, roi_2D)

            # Get the bounding box for the pointcloud in image coordinates of the color imager
            bounding_box_points_color_image, length, width, height = calculate_boundingbox_points(
                point_cloud, calibration_info_devices)

            # Draw the bounding box points on the color image and visualise the results
            visualise_measurements(frames_devices, bounding_box_points_color_image, length, width, height)

    except KeyboardInterrupt:
        print("The program was interrupted by the user. Closing the program...")

    finally:
        device_manager.disable_streams()
        cv2.destroyAllWindows()
def run():
    try:
        rs_config = rs.config()
        rs_config.enable_stream(rs.stream.depth, resolution_width, resolution_height, rs.format.z16, frame_rate)
        rs_config.enable_stream(rs.stream.infrared, 1, resolution_width, resolution_height, rs.format.y8, frame_rate)
        rs_config.enable_stream(rs.stream.color, resolution_width, resolution_height, rs.format.bgr8, frame_rate)

        device_manager = DeviceManager(rs.context(), rs_config)
        device_manager.enable_all_devices()
        # device_manager.enable_device(u'819612070850', False)

        print('Cam Init...')
        # Allow some frames for the auto-exposure controller to stabilise
        for frame in range(dispose_frames_for_stablisation):
            frames = device_manager.poll_frames()
        intrinsics_devices = device_manager.get_device_intrinsics(frames)

        t0 = time.time()
        q = cv2.waitKey(1) & 0xFF
        imgs_fanuc, imgs_ur = [], []
        times = []
        vels = []
        ur_poses = []
        i = 0
        t_pub = time.time()
        save_img = False

        while q != 27:  # loop until Esc is pressed
            frames = device_manager.poll_frames()
            if not frames or len(frames) != 2:
                continue
            if q == ord('s'):
                save_img = True

            start_time = time.time()
            for serial in device_manager._enabled_devices:
                color_img = np.array(frames[serial][rs.stream.color].get_data())
                cv2.imshow(win_serial[serial], color_img)
                if serial == u'818312071299':
                    img_ur = color_img
                    time_ur = time.time()
                elif serial == u'819612070850':
                    img_fanuc = color_img
                    time_fanuc = time.time()

            q = cv2.waitKey(10) & 0xFF
            if save_img:
                imgs_fanuc.append(img_fanuc)
                imgs_ur.append(img_ur)
                times.append(time_fanuc - start_time)
                ur_poses.append(get_frame('tool0'))

            t0 = time.time()
            if time.time() - t_pub > 0.15:
                ret, img, corner = detect_grid(img_fanuc)
                if ret:
                    rvec, tvec, error = pose_estimation(corner)
                    tvec = tvec / 1000.  # millimetres to metres
                    Pose2_H = XYZRodrigues_to_Hmatrix(tvec.flatten().tolist() + rvec.flatten().tolist())
                else:
                    print("Detection of chess point is wrong.")
                    continue

                # Chain the hand-eye transforms to get the UR5 target pose
                temp = np.dot(ur2fanuc_H, fanuc_hand_to_eye_H)
                temp = np.dot(temp, Pose2_H)
                temp = np.dot(temp, Track_H)
                target_H = np.dot(temp, np.linalg.inv(ur5_hand_in_eye_H))
                target_Q = quaternion_from_matrix(target_H)

                tt = time.time()
                current_pose = get_frame('tool0')

                rvec, _ = cv2.Rodrigues(target_H[:3, :3])
                rvec = rvec.flatten()
                tvec = target_H[:3, 3]
                target_pose = tvec.tolist() + rvec.tolist()
                diff_pose = np.array(target_pose) - np.array(current_pose)

                # Lower the translational gain until no velocity component exceeds 0.5
                vv_t, vv_r = 3, 0.5
                while np.any(diff_pose[:3] * vv_t > 0.5):
                    vv_t -= 0.1
                diff_pose_vel = (diff_pose[:3] * vv_t).tolist() + (diff_pose[3:] * vv_r).tolist()

                diff_vel_msg = "speedl([%5.2f, %5.2f, %5.2f, %5.2f, %5.2f, %5.2f], %5.2f, %5.2f)\n" % (
                    diff_pose_vel[0], diff_pose_vel[1], diff_pose_vel[2],
                    diff_pose_vel[3], diff_pose_vel[4], diff_pose_vel[5], 1.2, 0.5)
                vels.append(diff_pose_vel)
                pub.publish(diff_vel_msg)
                print(time.time() - t_pub)
                i += 1
                t_pub = time.time()

        # Plot the commanded velocity components over time
        xx, yy, zz, rr, pp, qq = [], [], [], [], [], []
        for vel in vels:
            x, y, z, r, p, q = vel
            xx.append(x)
            yy.append(y)
            zz.append(z)
            rr.append(r)
            pp.append(p)
            qq.append(q)
        plt.plot(xx, 'r')
        plt.plot(yy, 'b')
        plt.plot(zz, 'g')
        plt.plot(rr, 'c')
        plt.plot(pp, 'k')
        plt.plot(qq, 'w')
        label = ['x', 'y', 'z', 'rx', 'ry', 'rz']
        plt.ylim(-0.02, 0.02)
        plt.legend(label)
        plt.show()

        # Save the recorded images, timestamps and poses to an HDF5 file
        outfile_name = datetime.datetime.now().strftime("%y%m%d_%H%M%S")
        with h5py.File(outfile_name + '_fanuc_ur_cam.hdf5', 'w') as f:
            f.create_dataset('fanuc', data=np.array(imgs_fanuc))
            f.create_dataset('ur', data=np.array(imgs_ur))
            f.create_dataset('time', data=np.array(times))
            f.create_dataset('pose', data=np.array(ur_poses))

    except KeyboardInterrupt:
        print("The program was interrupted by the user. Closing the program...")

    finally:
        device_manager.disable_streams()
        cv2.destroyAllWindows()
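# Readback sketch (assumption, not part of the original file): how the datasets
# written above can be loaded again. The file name is illustrative.
def load_recording(path='200101_120000_fanuc_ur_cam.hdf5'):
    import h5py
    import numpy as np
    with h5py.File(path, 'r') as f:
        imgs_fanuc = np.array(f['fanuc'])  # stacked color images from the Fanuc camera
        imgs_ur = np.array(f['ur'])        # stacked color images from the UR camera
        times = np.array(f['time'])        # per-frame capture latencies
        poses = np.array(f['pose'])        # recorded tool0 poses
    return imgs_fanuc, imgs_ur, times, poses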
def main():
    # First we set up the cameras to start streaming
    # Define some constants
    resolution_width = 1280  # pixels
    resolution_height = 720  # pixels
    frame_rate = 30  # fps
    dispose_frames_for_stablisation = 30  # frames

    # Enable the streams from all the Intel RealSense devices
    rs_config = rs.config()
    rs_config.enable_stream(rs.stream.depth, resolution_width, resolution_height, rs.format.z16, frame_rate)
    rs_config.enable_stream(rs.stream.infrared, 1, resolution_width, resolution_height, rs.format.y8, frame_rate)
    rs_config.enable_stream(rs.stream.color, resolution_width, resolution_height, rs.format.bgr8, frame_rate)

    # Use the device manager class to enable the devices and get the frames
    device_manager = DeviceManager(rs.context(), rs_config)
    device_manager.enable_all_devices()
    device_manager._available_devices.sort()
    device_list = device_manager._available_devices

    # Allow some frames for the auto-exposure controller to stabilise
    for frame in range(dispose_frames_for_stablisation):
        frames = device_manager.poll_frames()
    assert len(device_manager._available_devices) > 0

    # Then we calibrate the images
    # Get the intrinsics of the RealSense devices
    intrinsics_devices = device_manager.get_device_intrinsics(frames)

    # Set the charuco board parameters for calibration
    aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_250)
    charuco_width = 8
    charuco_height = 5
    square_length = 0.03425
    marker_length = 0.026
    coordinate_dimentions = 3
    charuco_board = aruco.CharucoBoard_create(charuco_width, charuco_height, square_length, marker_length, aruco_dict)

    # Find the starting position of the charuco board
    starting_points_found = False
    while not starting_points_found:
        frames = device_manager.poll_frames()
        corners = get_corners(device_list[0], frames, intrinsics_devices, charuco_board)
        print("Show the camera the charuco board")
        time.sleep(1)
        if all(corners[3]):
            starting_points = corners
            starting_points_found = True
            print("Charuco board found")

    device_manager.enable_emitter(True)
    key_list = device_manager.poll_frames().keys()

    pcd = o3d.geometry.PointCloud()

    # Enable the visualiser
    vis = o3d.visualization.Visualizer()
    vis.create_window()
    first_image = True

    visualised_cloud = np.transpose(np.array([[0, 0, 0], [0, 0, 0]]))
    last_used = time.perf_counter()  # time.clock() was removed in Python 3.8
    prev_transform = [cc.Transformation(np.eye(3), np.array([0, 0, 0])), 0]

    while True:
        if first_image:
            time.sleep(1)
        corners_new_point = get_corners(device_list[0], frames, intrinsics_devices, charuco_board)
        if time.perf_counter() - last_used > 2 or first_image:
            print("Starting imaging")
            from_camera = np.transpose(np.array([0, 0, 0]))
            to_camera = np.transpose(np.array([0, 0, 0]))
            frames = device_manager.poll_frames()
            # Collect the board corners seen both at the start and now
            for corner in range(len(charuco_board.nearestMarkerCorners)):
                if starting_points[3][corner] and corners_new_point[3][corner]:
                    to_camera = np.vstack((to_camera, np.array(starting_points[2][:, corner])))
                    from_camera = np.vstack((from_camera, np.array(corners_new_point[2][:, corner])))
            if np.size(to_camera) > 25:
                print("update")
                last_used = time.perf_counter()
                transformation = get_transform_short(from_camera, to_camera)
                difference = np.sum(np.absolute(transformation[0].pose_mat - prev_transform[0].pose_mat))
                if difference < 0.1:
                    start = time.time()
                    the_frames = device_manager.poll_frames()
                    frame = frames[device_list[0]][rs.stream.depth]
                    pc = rs.pointcloud()
                    points = pc.calculate(frame)
                    vert = np.transpose(np.asanyarray(points.get_vertices(2)))
                    calibrated_points = transformation[0].apply_transformation(vert)
                    cloud = calibrated_points
                    idxe = np.random.permutation(cloud.shape[1])

                    # Reduce the number of rendered points and remove points with extreme z values
                    keep_ratio = 0.01
                    cloud_filtered = cloud[:, idxe[0:math.floor(cloud.shape[1] * keep_ratio)]]
                    dist_thresh = 3
                    # cloud_filtered = -cloud_filtered[:, cloud_filtered[2, :] < dist_thresh]
                    visualised_cloud = np.hstack((visualised_cloud, cloud_filtered))

                    # Render the accumulated points
                    pcd.points = o3d.utility.Vector3dVector(np.transpose(visualised_cloud))
                    if first_image:
                        vis.add_geometry(pcd)
                        first_image = False
                    vis.update_geometry()
                    vis.poll_events()
                    vis.update_renderer()
                    prev_transform = transformation

        if not first_image:
            vis.update_geometry()
            vis.poll_events()
            vis.update_renderer()

'''
Pseudo code:
- Take an image to get the starting position.
while true:
    get the new position from the camera
    extract a pointcloud from that position and transform it to the starting position
'''
def main():
    # First we set up the cameras to start streaming
    # Define some constants
    resolution_width = 1280  # pixels
    resolution_height = 720  # pixels
    frame_rate = 30  # fps
    dispose_frames_for_stablisation = 30  # frames

    # Enable the streams from all the Intel RealSense devices
    rs_config = rs.config()
    rs_config.enable_stream(rs.stream.depth, resolution_width, resolution_height, rs.format.z16, frame_rate)
    rs_config.enable_stream(rs.stream.infrared, 1, resolution_width, resolution_height, rs.format.y8, frame_rate)
    rs_config.enable_stream(rs.stream.color, resolution_width, resolution_height, rs.format.bgr8, frame_rate)

    # Use the device manager class to enable the devices and get the frames
    device_manager = DeviceManager(rs.context(), rs_config)
    device_manager.enable_all_devices()
    device_manager._available_devices.sort()
    device_list = device_manager._available_devices

    # Allow some frames for the auto-exposure controller to stabilise
    for frame in range(dispose_frames_for_stablisation):
        frames = device_manager.poll_frames()
    assert len(device_manager._available_devices) > 0

    # Then we calibrate the images
    # Get the intrinsics of the RealSense devices
    intrinsics_devices = device_manager.get_device_intrinsics(frames)

    # Set the charuco board parameters for calibration
    aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_250)
    charuco_width = 8
    charuco_height = 5
    square_length = 0.03425
    marker_length = 0.026
    coordinate_dimentions = 3
    charuco_board = aruco.CharucoBoard_create(charuco_width, charuco_height, square_length, marker_length, aruco_dict)

    # Choose the number of frames to average over
    amount_frames = 12
    frame_dict = {}
    transform_dict = {}
    rms_dict = {}
    for from_device in device_list:
        transform_dict[from_device] = {}
        rms_dict[from_device] = {}
        for to_device in device_list:
            transform_dict[from_device][to_device] = {}
            rms_dict[from_device][to_device] = np.inf

    devices_stitched = False
    while not devices_stitched:
        print("Taking a new set of images")
        for frame_count in range(amount_frames):
            print("Taking image")
            print(amount_frames - frame_count, "images left")
            frames = device_manager.poll_frames()
            print("Next image in 1 second")
            time.sleep(1)
            frame_dict[frame_count] = frames

        # Make transfer matrices between all possible camera pairs
        for idx, from_device in enumerate(device_list[:-1]):
            for to_device in device_list[idx + 1:]:
                if to_device != from_device:
                    temp_transform, temp_rms = get_transformation_matrix(
                        frame_dict, [from_device, to_device], intrinsics_devices, charuco_board)
                    if temp_rms < rms_dict[from_device][to_device]:
                        rms_dict[from_device][to_device] = temp_rms
                        rms_dict[to_device][from_device] = temp_rms
                        transform_dict[from_device][to_device] = temp_transform
                        transform_dict[to_device][from_device] = temp_transform.inverse()

        test = matrix_viewer(rms_dict)
        print(test)

        # Check that every camera is connected to its predecessor in the list
        devices_stitched = True
        for idx, from_device in enumerate(device_list[1:]):
            if rms_dict[from_device][device_list[idx]] == np.inf:
                devices_stitched = False

    # Chain the pairwise transforms so every camera maps to the first camera in the list
    transformation_devices = {}
    identity = np.identity(4)
    transformation_devices[device_list[0]] = Transformation(identity[:3, :3], identity[:3, 3])
    for idx, from_device in enumerate(device_list[1:]):
        temp_transform = np.matmul(transformation_devices[device_list[idx]].pose_mat,
                                   transform_dict[from_device][device_list[idx]].pose_mat)
        transformation_devices[from_device] = Transformation(temp_transform[:3, :3], temp_transform[:3, 3])

    print("Calibration completed...\n")
\n") # Enable the emitter of the devices and extract serial numbers to identify cameras device_manager.enable_emitter(True) key_list = device_manager.poll_frames().keys() pcd = o3d.geometry.PointCloud() # enable visualiser vis = o3d.visualization.Visualizer() vis.create_window() first_image = True while True: frames = device_manager.poll_frames() displayed_points = np.zeros((10, 3)) for camera in device_list: added_points = get_charuco_points(frames[camera], transformation_devices[camera], intrinsics_devices[camera], charuco_board) if added_points.any(): displayed_points = np.vstack( (displayed_points, np.transpose(added_points))) pcd.points = o3d.utility.Vector3dVector(displayed_points) if first_image: vis.add_geometry(pcd) first_image = False vis.update_geometry() vis.poll_events() vis.update_renderer() device_manager.disable_streams() vis.destroy_window()
def run_calibration(self):
    try:
        while True:
            if world.device_manager is None:
                device_manager = DeviceManager()
            else:
                device_manager = world.device_manager
            device_manager.enable_device()
            print(">CALIBRATION STARTING")

            # Allow some frames for the auto-exposure controller to stabilise
            for i in range(world.stablisation):
                frames = device_manager.poll_frames()
            assert device_manager._enabled_devices is not None
            intrinsic = device_manager.get_device_intrinsics(frames)

            calibrated = False
            cv.namedWindow("CALIBRATE", cv.WINDOW_AUTOSIZE)
            print(">SETTING IMAGE")

            # Preview the chessboard detection until the user presses 'q'
            while True:
                frames = device_manager.poll_frames()
                img = np.asanyarray(frames[rs.stream.color].get_data())
                gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
                found, corners = cv.findChessboardCorners(gray, (world.chessboard_width, world.chessboard_height))
                if found:
                    cv.drawChessboardCorners(img, (world.chessboard_width, world.chessboard_height), corners, found)
                cv.imshow("CALIBRATE", img)
                key = cv.waitKey(1)
                if key == ord('q'):
                    break

            # Estimate the pose of the chessboard
            while not calibrated:
                frames = device_manager.poll_frames()
                pose_estimator = PoseEstimation(frames, intrinsic, world.chessboard_params)
                result = pose_estimator.perform_pose_estimation()
                object_point = pose_estimator.get_chessboard_corners_in3d()
                if not result[0]:
                    print("Place the chessboard on the plane where the object needs to be detected..")
                else:
                    calibrated = True
                img = np.asanyarray(frames[rs.stream.color].get_data())
                cv.imshow("CALIBRATE", img)
                key = cv.waitKey(1)
                if key == ord('q'):
                    device_manager.disable_streams()
                    cv.destroyAllWindows()
                    return

            if world.calibrate_debug:
                print("matrix is: \n", result[1])
            trans = result[1]

            # Project the world axes into the image for a visual sanity check
            points3d = np.array([[0.0, 0.3, 0, 0],
                                 [0.0, 0, 0.3, 0],
                                 [0.0, 0, 0, -0.1]], dtype="float32")
            if world.calibrate_debug:
                print("world axis is:")
                print(points3d)
            points3d = trans.apply_transformation(points3d)
            x, y = convert_pointcloud_to_depth(points3d, intrinsic[rs.stream.depth])
            if world.calibrate_debug:
                print("camera axis is")
                print(x, y)
                print("Image axis is:")
            x, y = x.astype("int32"), y.astype("int32")
            if world.calibrate_debug:
                print(x, '\n', y)
                print(object_point[2][:, object_point[3]][:, :10])
                print("Chess corners are (in camera):")
                print(trans.inv.apply_transformation(object_point[2][:, object_point[3]]).T[:10])

            # Plot the axes
            while True:
                color = [(255, 0, 0), (0, 255, 0), (0, 0, 255)]
                axises = ["x", "y", "z"]
                frames = device_manager.poll_frames()
                img = np.asanyarray(frames[rs.stream.color].get_data())
                for i in range(3):
                    cv.line(img, (x[0], y[0]), (x[i + 1], y[i + 1]), color[i], 2)
                    cv.putText(img, axises[i], (x[i + 1], y[i + 1]), cv.FONT_HERSHEY_PLAIN, 1, (0, 0, 0), 2)
                cv.imshow("CALIBRATE", img)
                key = cv.waitKey(1)
                if key == ord('q'):
                    print("Calibration completed... \nPlace the objects in the field of view of the devices...")
                    world.world2camera = trans
                    self.xyAxis = np.vstack((x, y))
                    return
                elif key == ord('r'):
                    break
                elif key == ord('t'):
                    cv.imwrite("./photos/calibrate_" + world.now, img)
    finally:
        device_manager.disable_streams()
        cv.destroyAllWindows()
def main():
    # First we set up the cameras to start streaming
    # Define some constants
    resolution_width = 1280  # pixels
    resolution_height = 720  # pixels
    frame_rate = 30  # fps
    dispose_frames_for_stablisation = 30  # frames

    # Enable the streams from all the Intel RealSense devices
    rs_config = rs.config()
    rs_config.enable_stream(rs.stream.depth, resolution_width, resolution_height, rs.format.z16, frame_rate)
    rs_config.enable_stream(rs.stream.infrared, 1, resolution_width, resolution_height, rs.format.y8, frame_rate)
    rs_config.enable_stream(rs.stream.color, resolution_width, resolution_height, rs.format.bgr8, frame_rate)

    # Use the device manager class to enable the devices and get the frames
    device_manager = DeviceManager(rs.context(), rs_config)
    device_manager.enable_all_devices()
    device_manager._available_devices.sort()
    device_list = device_manager._available_devices

    # Allow some frames for the auto-exposure controller to stabilise
    for frame in range(dispose_frames_for_stablisation):
        frames = device_manager.poll_frames_keep()
    assert len(device_manager._available_devices) > 0

    # Then we calibrate the images
    # Get the intrinsics of the RealSense devices
    intrinsics_devices = device_manager.get_device_intrinsics(frames)

    # Set the charuco board parameters for calibration
    aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_250)
    charuco_width = 8
    charuco_height = 5
    square_length = 0.03425
    marker_length = 0.026
    coordinate_dimentions = 3
    charuco_board = aruco.CharucoBoard_create(charuco_width, charuco_height,
                                              square_length, marker_length, aruco_dict)

    # Set the charuco board parameters for the robot
    aruco_dict_r = aruco.Dictionary_get(aruco.DICT_4X4_250)
    charuco_width_r = 3
    charuco_height_r = 3
    square_length_r = 0.0311
    marker_length_r = 0.0247
    charuco_board_robot = aruco.CharucoBoard_create(charuco_width_r, charuco_height_r,
                                                    square_length_r, marker_length_r, aruco_dict_r)

    # Set the charuco board parameters for the robot target
    aruco_dict_ro = aruco.Dictionary_get(aruco.DICT_5X5_100)
    charuco_width_ro = 3
    charuco_height_ro = 3
    square_length_ro = 0.0311
    marker_length_ro = 0.0247
    charuco_board_robot_2 = aruco.CharucoBoard_create(charuco_width_ro, charuco_height_ro,
                                                      square_length_ro, marker_length_ro, aruco_dict_ro)

    # Decide whether to use a previous calibration or make a new one
    calibration_decision = input('To use the previous calibration, press "y". For a new calibration press any key\n')
    if calibration_decision != 'y':
        # Choose the number of frames to average over
        correct_input = False
        while not correct_input:
            the_input = input("Write the number of frames you want to use for calibration\n")
            try:
                amount_frames = int(the_input)
                if amount_frames > 0:
                    correct_input = True
                else:
                    print("The frame count has to be positive")
            except ValueError:
                print("Input has to be an int")

        # Make a dict to store all images for calibration
        frame_dict = {}
        transform_dict = {}
        rms_dict = {}
        for from_device in device_list:
            transform_dict[from_device] = {}
            rms_dict[from_device] = {}
            for to_device in device_list:
                transform_dict[from_device][to_device] = {}
                rms_dict[from_device][to_device] = np.inf

        print("Starting to take images in 5 seconds")
        time.sleep(5)
        devices_stitched = False
        while not devices_stitched:
            print("Taking a new set of images")
            for frame_count in range(amount_frames):
                print("Taking image")
                print(amount_frames - frame_count, "images left")
                frames = device_manager.poll_frames_keep()
                time.sleep(0.5)
                frame_dict[frame_count] = {}
                for device in device_list:
                    ir_frame = np.asanyarray(frames[device][(rs.stream.infrared, 1)].get_data())
                    depth_frame = np.asanyarray(post_process_depth_frame(frames[device][rs.stream.depth]).get_data())
                    frame_dict[frame_count][device] = {'ir_frame': ir_frame, 'depth_frame': depth_frame}
                del frames

            # Make transfer matrices between all possible camera pairs
            for idx, from_device in enumerate(device_list[:-1]):
                for to_device in device_list[idx + 1:]:
                    if to_device != from_device:
                        temp_transform, temp_rms = get_transformation_matrix_wout_rsobject(
                            frame_dict, [from_device, to_device], intrinsics_devices, charuco_board)
                        if temp_rms < rms_dict[from_device][to_device]:
                            rms_dict[from_device][to_device] = temp_rms
                            rms_dict[to_device][from_device] = temp_rms
                            transform_dict[from_device][to_device] = temp_transform
                            transform_dict[to_device][from_device] = temp_transform.inverse()

            # Use Dijkstra to find the shortest path and check if all cameras are connected
            transformation_matrices = least_error_transfroms(transform_dict, rms_dict)
            if transformation_matrices != 0:
                devices_stitched = True
            # Print the rms error between camera transforms
            test = matrix_viewer(rms_dict)
            print(test)

        # Turn the transformation matrices into Transformation objects
        transformation_devices = {}
        for matrix in transformation_matrices:
            the_matrix = transformation_matrices[matrix]
            transformation_devices[matrix] = Transformation(the_matrix[:3, :3], the_matrix[:3, 3])

        # Save the calibration transforms if the user wants to
        save_calibration = input('Press "y" if you want to save the calibration\n')
        if save_calibration == "y":
            saved = False
            while not saved:
                name = input("Type in the name of the file to save. Remember to end the name with '.npy'\n")
                try:
                    np.save(name, transformation_matrices)
                    saved = True
                except OSError:
                    print("Could not save; try another name and remember '.npy' at the end")
        frame_dict.clear()
    else:
        correct_filename = False
        while not correct_filename:
            file_name = input('Type in the calibration file name\n')
            try:
                transfer_matirices_save = np.load(file_name, allow_pickle=True)
                transfer_matrices = transfer_matirices_save[()]
                correct_filename = True
            except IOError:
                print('No such file in directory: "', file_name, '"')
        transformation_devices = {}
        for matrix in transfer_matrices:
            the_matrix = transfer_matrices[matrix]
            transformation_devices[matrix] = Transformation(the_matrix[:3, :3], the_matrix[:3, 3])

    print("Calibration completed...")

    # Enable the emitter of the devices and extract serial numbers to identify cameras
    device_manager.enable_emitter(True)
    key_list = device_manager.poll_frames().keys()

    while True:
        visualisation = input('Press "1" for RMS error, "2" for chessboard visualisation, '
                              '"3" for 3D pointcloud, or "4" for robot-to-camera calibration\n')
        if visualisation == '1':
            calculate_avg_rms_error(device_list, device_manager, transformation_devices,
                                    charuco_board, intrinsics_devices)
        elif visualisation == '2':
            visualise_chessboard(device_manager, device_list, intrinsics_devices,
                                 charuco_board, transformation_devices)
        elif visualisation == '3':
            visualise_point_cloud(key_list, resolution_height, resolution_width,
                                  device_manager, coordinate_dimentions, transformation_devices)
        elif visualisation == '4':
            robot_calibration = CameraRobotCalibration(transformation_devices, device_manager,
                                                       charuco_board_robot_2, charuco_board_robot)
            input("Set the target in position for calibration\n")
            test = robot_calibration.test_functions()
            print(test)
        else:
            print("Input not recognised")
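# Round-trip sketch (assumption, not part of the original file): np.save pickles
# the dict of 4x4 matrices into a 0-d object array, and indexing the loaded
# array with [()] unwraps the dict again, exactly as the load branch above does.
def npy_round_trip_example():
    import numpy as np
    matrices = {'serial_a': np.eye(4), 'serial_b': np.eye(4)}  # illustrative serials
    np.save('calibration_example.npy', matrices)
    restored = np.load('calibration_example.npy', allow_pickle=True)[()]
    assert set(restored) == set(matrices)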
def run_demo():
    # Define some constants
    resolution_width = 1280  # pixels
    resolution_height = 720  # pixels
    frame_rate = 15  # fps
    dispose_frames_for_stablisation = 30  # frames

    chessboard_width = 6   # squares
    chessboard_height = 9  # squares
    square_size = 0.0253   # meters

    try:
        # Enable the streams from all the Intel RealSense devices
        rs_config = rs.config()
        rs_config.enable_stream(rs.stream.depth, resolution_width, resolution_height, rs.format.z16, frame_rate)
        rs_config.enable_stream(rs.stream.infrared, 1, resolution_width, resolution_height, rs.format.y8, frame_rate)
        rs_config.enable_stream(rs.stream.color, resolution_width, resolution_height, rs.format.bgr8, frame_rate)

        # Use the device manager class to enable the devices and get the frames
        device_manager = DeviceManager(rs.context(), rs_config)
        device_manager.enable_all_devices()

        # Allow some frames for the auto-exposure controller to stabilise
        for frame in range(dispose_frames_for_stablisation):
            frames = device_manager.poll_frames()
        assert len(device_manager._available_devices) > 0

        """
        1: Calibration
        Calibrate all the available devices to the world co-ordinates.
        For this purpose, a chessboard printout for use with the opencv based calibration process is needed.
        """
        # Get the intrinsics of the RealSense devices
        intrinsics_devices = device_manager.get_device_intrinsics(frames)

        # Set the chessboard parameters for calibration
        chessboard_params = [chessboard_height, chessboard_width, square_size]

        # Estimate the pose of the chessboard in the world coordinate using the Kabsch method
        calibrated_device_count = 0
        while calibrated_device_count < len(device_manager._available_devices):
            frames = device_manager.poll_frames()
            pose_estimator = PoseEstimation(frames, intrinsics_devices, chessboard_params)
            transformation_result_kabsch = pose_estimator.perform_pose_estimation()
            object_point = pose_estimator.get_chessboard_corners_in3d()
            calibrated_device_count = 0
            for device in device_manager._available_devices:
                if not transformation_result_kabsch[device][0]:
                    print("Place the chessboard on the plane where the object needs to be detected..")
                else:
                    calibrated_device_count += 1

        # Save the transformation object for all devices in an array to use for measurements
        transformation_devices = {}
        chessboard_points_cumulative_3d = np.array([-1, -1, -1]).transpose()
        for device in device_manager._available_devices:
            transformation_devices[device] = transformation_result_kabsch[device][1].inverse()
            points3D = object_point[device][2][:, object_point[device][3]]
            points3D = transformation_devices[device].apply_transformation(points3D)
            chessboard_points_cumulative_3d = np.column_stack((chessboard_points_cumulative_3d, points3D))

        # Extract the bounds between which the object's dimensions are needed;
        # it is necessary for this demo that the object's length and breadth are
        # smaller than those of the chessboard.
        chessboard_points_cumulative_3d = np.delete(chessboard_points_cumulative_3d, 0, 1)
        roi_2D = get_boundary_corners_2D(chessboard_points_cumulative_3d)

        print("Calibration completed...\nPlace the box in the field of view of the devices...")
        """
        2: Measurement and display
        Measure the dimension of the object using depth maps from multiple RealSense devices.
        The information from Phase 1 will be used here.
        """
        # Enable the emitter of the devices
        device_manager.enable_emitter(True)

        # Load the JSON settings file in order to enable the High Accuracy preset for the RealSense
        device_manager.load_settings_json("./HighResHighAccuracyPreset.json")

        # Get the extrinsics of the device to be used later
        extrinsics_devices = device_manager.get_depth_to_color_extrinsics(frames)

        # Get the calibration info as a dictionary to help with display of the measurements
        # onto the color image instead of the infrared image
        calibration_info_devices = defaultdict(list)
        for calibration_info in (transformation_devices, intrinsics_devices, extrinsics_devices):
            for key, value in calibration_info.items():
                calibration_info_devices[key].append(value)

        # Continue acquisition until terminated with Ctrl+C by the user
        while 1:
            # Get the frames from all the devices
            frames_devices = device_manager.poll_frames()

            # Calculate the pointcloud using the depth frames from all the devices
            point_cloud = calculate_cumulative_pointcloud(frames_devices, calibration_info_devices, roi_2D)

            # Get the bounding box for the pointcloud in image coordinates of the color imager
            bounding_box_points_color_image, length, width, height = calculate_boundingbox_points(
                point_cloud, calibration_info_devices)

            # Draw the bounding box points on the color image and visualise the results
            visualise_measurements(frames_devices, bounding_box_points_color_image, length, width, height)

    except KeyboardInterrupt:
        print("The program was interrupted by the user. Closing the program...")

    finally:
        device_manager.disable_streams()
        cv2.destroyAllWindows()
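# Entry-point sketch (assumption): run the demo when the script is executed directly.
if __name__ == "__main__":
    run_demo()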
class RealSense:
    def __init__(self):
        self.WIN_NAME = 'RealSense'
        self.pitch, self.yaw = math.radians(-10), math.radians(-15)
        self.translation = np.array([0, 0, -1], dtype=np.float32)
        self.distance = 2
        self.paused = False
        self.decimate = 1
        self.scale = True
        self.color = True

        # Define some constants
        self.resolution_width = 1280  # pixels
        self.resolution_height = 720  # pixels
        self.frame_rate = 15  # fps
        self.dispose_frames_for_stablisation = 25  # frames
        self.chessboard_width = 6   # squares
        self.chessboard_height = 9  # squares
        self.square_size = 0.0253   # meters

        self.intrinsics_devices = None
        self.chessboard_params = None

        self.rs_config = rs.config()
        self.rs_config.enable_stream(rs.stream.depth, self.resolution_width, self.resolution_height, rs.format.z16, self.frame_rate)
        self.rs_config.enable_stream(rs.stream.infrared, 1, self.resolution_width, self.resolution_height, rs.format.y8, self.frame_rate)
        self.rs_config.enable_stream(rs.stream.color, self.resolution_width, self.resolution_height, rs.format.bgr8, self.frame_rate)

        # Use the device manager class to enable the devices and get the frames
        self.device_manager = DeviceManager(rs.context(), self.rs_config)
        self.device_manager.enable_all_devices()

        # Allow some frames for the auto-exposure controller to stabilise
        for self.frame in range(self.dispose_frames_for_stablisation):
            self.frames = self.device_manager.poll_frames()
        # assert len(self.device_manager._available_devices) > 0
        print('realsense initialized!!')

    def calibaration(self):
        self.intrinsics_devices = self.device_manager.get_device_intrinsics(self.frames)

        # Set the chessboard parameters for calibration
        self.chessboard_params = [self.chessboard_height, self.chessboard_width, self.square_size]

        # Estimate the pose of the chessboard in the world coordinate using the Kabsch method
        calibrated_device_count = 0
        while calibrated_device_count < len(self.device_manager._available_devices):
            self.frames = self.device_manager.poll_frames()
            pose_estimator = PoseEstimation(self.frames, self.intrinsics_devices, self.chessboard_params)
            transformation_result_kabsch = pose_estimator.perform_pose_estimation()
            object_point = pose_estimator.get_chessboard_corners_in3d()
            calibrated_device_count = 0
            for device in self.device_manager._available_devices:
                if not transformation_result_kabsch[device][0]:
                    print("Place the chessboard on the plane where the object needs to be detected..")
                else:
                    calibrated_device_count += 1
            print('calibrated_device_count =', calibrated_device_count)

        # Save the transformation object for all devices in an array to use for measurements
        self.transformation_devices = {}
        chessboard_points_cumulative_3d = np.array([-1, -1, -1]).transpose()
        for device in self.device_manager._available_devices:
            self.transformation_devices[device] = transformation_result_kabsch[device][1].inverse()
            points3D = object_point[device][2][:, object_point[device][3]]
            points3D = self.transformation_devices[device].apply_transformation(points3D)
            chessboard_points_cumulative_3d = np.column_stack((chessboard_points_cumulative_3d, points3D))

        # Extract the bounds between which the object's dimensions are needed;
        # it is necessary for this demo that the object's length and breadth are
        # smaller than those of the chessboard.
        chessboard_points_cumulative_3d = np.delete(chessboard_points_cumulative_3d, 0, 1)
        self.roi_2D = get_boundary_corners_2D(chessboard_points_cumulative_3d)

        print("Calibration completed...\nPlace the box in the field of view of the devices...")
    def setEmitter(self):
        # Enable the emitter of the devices
        self.device_manager.enable_emitter(True)

        # Load the JSON settings file in order to enable the High Accuracy preset for the RealSense
        # self.device_manager.load_settings_json("HighResHighAccuracyPreset.json")

        # Get the extrinsics of the device to be used later
        extrinsics_devices = self.device_manager.get_depth_to_color_extrinsics(self.frames)

        # Get the calibration info as a dictionary to help with display of the measurements
        # onto the color image instead of the infrared image
        self.calibration_info_devices = defaultdict(list)
        for calibration_info in (self.transformation_devices, self.intrinsics_devices, extrinsics_devices):
            for key, value in calibration_info.items():
                self.calibration_info_devices[key].append(value)
        print("setEmitter completed...")

    def processing(self):
        frames_devices = self.device_manager.poll_frames()
        print('got frames')

        # Calculate the pointcloud using the depth frames from all the devices
        point_cloud = calculate_cumulative_pointcloud(frames_devices, self.calibration_info_devices, self.roi_2D)

        # Get the bounding box for the pointcloud in image coordinates of the color imager
        bounding_box_points_color_image, length, width, height = calculate_boundingbox_points(
            point_cloud, self.calibration_info_devices)
        print('length =', length, ' width =', width, ' height =', height)

        # Draw the bounding box points on the color image and visualise the results
        # visualise_measurements(frames_devices, bounding_box_points_color_image, length, width, height)

    def closeSense(self):
        self.device_manager.disable_streams()
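# Usage sketch (assumption, not part of the original file): a hypothetical
# driver loop for the RealSense class above.
if __name__ == "__main__":
    sensor = RealSense()      # enables all devices and discards warm-up frames
    sensor.calibaration()     # blocks until every camera sees the chessboard
    sensor.setEmitter()       # enables the emitters and collects calibration info
    try:
        while True:
            sensor.processing()  # prints the measured length / width / height
    except KeyboardInterrupt:
        sensor.closeSense()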