def multiscale_icp(source, target, voxel_size, max_iter, config,
                   init_transformation=np.identity(4)):
    current_transformation = init_transformation
    for i, scale in enumerate(range(len(max_iter))):  # multi-scale approach
        iter = max_iter[scale]
        distance_threshold = config["voxel_size"] * 1.4
        print("voxel_size {}".format(voxel_size[scale]))
        source_down = voxel_down_sample(source, voxel_size[scale])
        target_down = voxel_down_sample(target, voxel_size[scale])
        if config["icp_method"] == "point_to_point":
            result_icp = o3d.registration.registration_icp(
                source_down, target_down, distance_threshold,
                current_transformation,
                o3d.registration.TransformationEstimationPointToPoint(),
                o3d.registration.ICPConvergenceCriteria(max_iteration=iter))
        else:
            # point-to-plane and colored ICP both need normals on the downsampled clouds
            estimate_normals(
                source_down,
                o3d.geometry.KDTreeSearchParamHybrid(
                    radius=voxel_size[scale] * 2.0, max_nn=30))
            estimate_normals(
                target_down,
                o3d.geometry.KDTreeSearchParamHybrid(
                    radius=voxel_size[scale] * 2.0, max_nn=30))
            if config["icp_method"] == "point_to_plane":
                result_icp = o3d.registration.registration_icp(
                    source_down, target_down, distance_threshold,
                    current_transformation,
                    o3d.registration.TransformationEstimationPointToPlane(),
                    o3d.registration.ICPConvergenceCriteria(max_iteration=iter))
            if config["icp_method"] == "color":
                result_icp = o3d.registration.registration_colored_icp(
                    source_down, target_down, voxel_size[scale],
                    current_transformation,
                    o3d.registration.TransformationEstimationForColoredICP(),
                    o3d.registration.ICPConvergenceCriteria(
                        relative_fitness=1e-6,
                        relative_rmse=1e-6,
                        max_iteration=iter))
        current_transformation = result_icp.transformation
        if i == len(max_iter) - 1:
            # the information matrix is only computed at the finest scale
            information_matrix = o3d.registration.get_information_matrix_from_point_clouds(
                source_down, target_down, voxel_size[scale] * 1.4,
                result_icp.transformation)
    if config["debug_mode"]:
        draw_registration_result_original_color(source, target,
                                                result_icp.transformation)
    return (result_icp.transformation, information_matrix)
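# Minimal usage sketch for multiscale_icp (not from the original source): it assumes
# source/target are Open3D point clouds, that coarse-to-fine voxel sizes and per-scale
# iteration counts are passed as equal-length lists, and that the config dict carries the
# keys the function reads ("voxel_size", "icp_method", "debug_mode"). The file names and
# numeric values below are placeholders.
source = o3d.io.read_point_cloud("fragment_000.ply")   # hypothetical input files
target = o3d.io.read_point_cloud("fragment_001.ply")
config = {"voxel_size": 0.05, "icp_method": "point_to_plane", "debug_mode": False}
transformation, information = multiscale_icp(
    source, target,
    voxel_size=[0.05, 0.025, 0.0125],   # coarse to fine
    max_iter=[50, 30, 14],              # one iteration budget per scale
    config=config)
print(transformation)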
def make_lidar_map(self):
    # Stack all point clouds
    points_all = np.vstack(self.point_clouds)
    # Voxel grid downsampling
    pcd = utils.numpy_to_open3d(points_all)
    self.lidar_map = voxel_down_sample(pcd, voxel_size=0.05)
    print(len(self.lidar_map.points))
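# The utils.numpy_to_open3d helper referenced above is not shown in this snippet; a minimal
# sketch of what such a converter typically looks like (an assumption, not the project's
# actual implementation):
def numpy_to_open3d(points_xyz):
    # points_xyz: (N, 3) float array of XYZ coordinates
    pcd = o3d.geometry.PointCloud()
    pcd.points = o3d.utility.Vector3dVector(points_xyz)
    return pcd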
def main(self):
    '''
    Main function: Creates the user window and side bar with a button.
    The button is used to toggle between the normal point cloud and the
    downsampled point cloud.
    '''
    pangolin.CreateWindowAndBind('Sample Toggle App', 1280, 960)  # Set window size and name
    gl.glEnable(gl.GL_DEPTH_TEST)  # Enabled so the user can move/rotate the 3D image with the mouse

    # Define render object
    SampleToggling.scam = pangolin.OpenGlRenderState(
        pangolin.ProjectionMatrix(640, 480, 420, 420, 320, 240, 0.1, 1000),
        pangolin.ModelViewLookAt(0, 0.5, -3, 0, 0, 0, pangolin.AxisDirection.AxisY))
    handler3d = pangolin.Handler3D(SampleToggling.scam)

    # Add viewpoint object
    SampleToggling.dcam = pangolin.CreateDisplay()
    SampleToggling.dcam.SetBounds(0.0, 1.0, 180 / 640., 1.0, -640.0 / 480.0)
    SampleToggling.dcam.SetHandler(pangolin.Handler3D(SampleToggling.scam))

    # Create the side panel
    panel = pangolin.CreatePanel('buttonPanel')
    panel.SetBounds(0.0, 1.0, 0.0, 180 / 640.)

    gl.glPointSize(1)  # Set the size of each point in the point cloud
    button = pangolin.VarBool('buttonPanel.Sampling Toggle Button',
                              value=False, toggle=False)  # Create a simple button

    # Read the .ply file and convert it into a numpy array so it can be understood by pangolin
    pointCloudOrigional = o3d.io.read_point_cloud("bunny.ply")
    pointCloudOrigionalArray = np.asarray(pointCloudOrigional.points)

    # Downsample the point cloud and convert it into a numpy array as well
    pointCloudDownSampled = voxel_down_sample(pointCloudOrigional, voxel_size=0.008)
    pointCloudDownSampledArray = np.asarray(pointCloudDownSampled.points)

    # Scale (zoom) the point clouds so they are easy for the user to see
    pointCloudOrigionalZoomed = pointCloudOrigionalArray * self.zoomCoefficient
    pointCloudSampledZoomed = pointCloudDownSampledArray * self.zoomCoefficient

    # Run a loop until the program is quit, toggling the image each time the button is pressed
    while not pangolin.ShouldQuit():
        self.drawPoints(pointCloudOrigionalZoomed)
        if pangolin.Pushed(button):
            while not pangolin.Pushed(button):
                self.drawPoints(pointCloudSampledZoomed)
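# self.drawPoints is called above but not defined in this snippet; a plausible sketch of the
# missing helper using the same pangolin/PyOpenGL bindings (an assumption, not the project's
# actual code). It would live on the SampleToggling class alongside main.
def drawPoints(self, points):
    # Clear the frame, activate the main view, draw the (N, 3) point array, and swap buffers
    gl.glClear(gl.GL_COLOR_BUFFER_BIT | gl.GL_DEPTH_BUFFER_BIT)
    gl.glClearColor(1.0, 1.0, 1.0, 1.0)
    SampleToggling.dcam.Activate(SampleToggling.scam)
    pangolin.DrawPoints(points)
    pangolin.FinishFrame()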
def preprocess_point_cloud(pcd, config):
    voxel_size = config["voxel_size"]
    pcd_down = voxel_down_sample(pcd, voxel_size)
    estimate_normals(
        pcd_down,
        o3d.geometry.KDTreeSearchParamHybrid(radius=voxel_size * 2.0, max_nn=30))
    pcd_fpfh = o3d.registration.compute_fpfh_feature(
        pcd_down,
        o3d.geometry.KDTreeSearchParamHybrid(radius=voxel_size * 5.0, max_nn=100))
    return (pcd_down, pcd_fpfh)
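# Example call for preprocess_point_cloud (a sketch, not part of the original code): the
# config only needs a "voxel_size" entry, and "cloud.ply" is a placeholder file name.
config = {"voxel_size": 0.05}
pcd = o3d.io.read_point_cloud("cloud.ply")
pcd_down, pcd_fpfh = preprocess_point_cloud(pcd, config)
print(len(pcd_down.points), pcd_fpfh.dimension())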
def visualize_edge_and_planes(self, edge_points, plane_points, cloud):
    pcd = o3d.geometry.PointCloud()
    pcd.points = o3d.utility.Vector3dVector(cloud[:, :3])
    downpcd = voxel_down_sample(pcd, voxel_size=.5)
    downpcd.paint_uniform_color([1, 1, 0])

    pcd_edge = o3d.geometry.PointCloud()
    pcd_edge.points = o3d.utility.Vector3dVector(edge_points)
    pcd_edge.paint_uniform_color([0, 0, 0])

    pcd_plane = o3d.geometry.PointCloud()
    # pcd_plane.points = o3d.utility.Vector3dVector(plane_points)
    pcd_plane.paint_uniform_color([1, 0, 0])

    pcd_origin = o3d.geometry.PointCloud()
    # pcd_origin.points = o3d.utility.Vector3dVector(np.zeros((1, 3)))
    pcd_origin.paint_uniform_color([0, 0, 1])

    print(cloud.shape, 'total points')
    print(edge_points.shape, 'edge points')
    print(plane_points.shape, 'plane points')
    o3d.visualization.draw_geometries([downpcd, pcd_edge, pcd_plane, pcd_origin])
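# Toy invocation sketch for visualize_edge_and_planes (placeholder data, not from the
# original project): random XYZ points stand in for a real LiDAR scan and its extracted
# edge/plane features, and `extractor` is a hypothetical instance of the class that
# defines this method.
cloud = np.random.uniform(-10.0, 10.0, size=(5000, 4))   # x, y, z, intensity
edge_points = cloud[:200, :3]
plane_points = cloud[200:1200, :3]
extractor.visualize_edge_and_planes(edge_points, plane_points, cloud)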