def process_new_snapshot(snapshot_index):
    global num_sensors, depth_images_folder, session_name, snapshots_processed
    snapshots_processed.add(snapshot_index)

    # load depth scans and combine to snapshot model:
    snapshot_pointcloud = None
    for sensor_index in range(num_sensors):
        depth_image_filename = '{}/{}/{:04d}_{:02d}.npy'.format(
            depth_images_folder, session_name, snapshot_index, sensor_index)
        print('Opening image {}...'.format(depth_image_filename))
        depth_image = np.load(depth_image_filename)
        pointcloud = utils.depth_image_to_pointcloud(depth_image, filter=filter)
        if pointcloud is None:
            return None
        pointcloud.paint_uniform_color(
            utils.item_in_range_color(snapshot_index, 10, True))
        transformation = transformations[transformations.files[sensor_index]]
        pointcloud.transform(transformation)
        snapshot_pointcloud = utils.merge_pointclouds(snapshot_pointcloud, pointcloud)
    return snapshot_pointcloud
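For orientation, here is a minimal sketch of how process_new_snapshot might be driven. The snapshot count and the driving loop are assumptions; process_new_snapshot, snapshots_processed and utils.merge_pointclouds come from the code above, and the calibration matrices must already be loaded into transformations, as done in the last script below.

# Sketch: build a combined model from a batch of snapshots.
num_snapshots = 10  # assumed; matches the color range used in process_new_snapshot
snapshots_processed = set()
model = None
for snapshot_index in range(num_snapshots):
    snapshot_pointcloud = process_new_snapshot(snapshot_index)
    if snapshot_pointcloud is None:
        continue  # the crop filter removed every point of this snapshot
    model = utils.merge_pointclouds(model, snapshot_pointcloud)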
session_name = 'cali'
snapshot_number = 0
num_sensors = 2
depth_images_folder = '../data/depth_scans'
filter = [[0, 4], [-1, 1], [-1, 1.5]]

# script
# Load one depth scan per sensor and convert it to a cropped point cloud.
pointclouds = []
for sensor_index in range(num_sensors):
    depth_image_filename = '{}/{}/{:04d}_{:02d}.npy'.format(
        depth_images_folder, session_name, snapshot_number, sensor_index)
    depth_image = np.load(depth_image_filename)
    pointcloud = utils.depth_image_to_pointcloud(depth_image, filter=filter)
    if pointcloud is None:
        exit('No points in pointcloud left after filtering, try increasing the crop size.')
    pointclouds.append(pointcloud)

# Downsample both clouds, compute FPFH features and run fast global
# registration (FGR) to align the two sensors.
voxel_size = 0.01
source, target, source_down, target_down, source_fpfh, target_fpfh = fgr.prepare_dataset(
    voxel_size, pointclouds[0], pointclouds[1])

start = time.time()
result_fast = fgr.execute_fast_global_registration(source_down, target_down,
                                                    source_fpfh, target_fpfh,
                                                    voxel_size)
print("Fast global registration took %.3f sec.\n" % (time.time() - start))
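result_fast is not used further in this excerpt. The sketch below shows one plausible way to persist it as the per-sensor calibration that the next script loads; the folder path, the choice of sensor 1 as the reference frame and the positional np.savez keys are assumptions rather than the author's confirmed convention.

# Persist the calibration (sketch; assumptions noted above). FGR aligns
# source (sensor 0) to target (sensor 1), so sensor 1 keeps the identity.
calibration_matrices_folder = '../data/calibration_matrices'  # assumed path
calibration_matrices_filename = '{}/{}.npz'.format(
    calibration_matrices_folder, session_name)
np.savez(calibration_matrices_filename,
         result_fast.transformation,  # maps sensor 0 into the reference frame
         np.identity(4))              # sensor 1 is the reference frame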
filter = [None, None, None]

# script
# Load the calibration matrices saved for this session.
calibration_matrices_filename = '{}/{}.npz'.format(calibration_matrices_folder, session_name)
transformations = np.load(calibration_matrices_filename)

scans = []
model = None
for sensor_index in range(num_sensors):
    depth_image_filename = '{}/{}/{:04d}_{:02d}.npy'.format(
        depth_images_folder, session_name, snapshot_number, sensor_index)
    depth_image = np.load(depth_image_filename)
    pointcloud = utils.depth_image_to_pointcloud(depth_image,
                                                 filter=filter,
                                                 estimate_normals=False)
    if pointcloud is None:
        exit('No points in pointcloud left after filtering, try increasing the crop size.')
    pointcloud.paint_uniform_color(
        utils.item_in_range_color(sensor_index, num_sensors, True))
    transformation = transformations[transformations.files[sensor_index]]
    # pointcloud.transform(transformation)
    # Apply the 4x4 calibration matrix manually: append a homogeneous coordinate,
    # multiply by the transposed matrix and drop the homogeneous column again.
    points = np.asarray(pointcloud.points)
    points_transformed = np.matmul(
        np.hstack((points, np.ones((points.shape[0], 1)))),
        transformation.transpose())[:, :3]
    pointcloud.points = open3d.Vector3dVector(points_transformed)
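    # Assumed continuation (not part of the original excerpt): keep the
    # transformed scan and fold it into the combined model, reusing the same
    # utils.merge_pointclouds helper as process_new_snapshot above.
    scans.append(pointcloud)
    model = utils.merge_pointclouds(model, pointcloud)

# Visualize the fused result. The top-level draw_geometries call assumes the
# same flat pre-0.8 Open3D namespace already used above (open3d.Vector3dVector).
open3d.draw_geometries([model])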