def open3dVis(self, depth, normal=None, color=None):
    """Visualize 3d map points from eigen depth

    Args:
        depth: depth map
        normal: normal map
        color: rgb image
    """
    points = get3dpoints(depth, color)
    points = np.asarray(points)
    pcd = PointCloud()
    pcd.points = Vector3dVector(points)
    if color is not None:
        color = np.reshape(color, [-1, 3])
        pcd.colors = Vector3dVector(color / 255.)
    if normal is not None:
        normal = np.reshape(normal, [-1, 3])
    else:
        _, normal, _ = self.compute_local_planes(points[:, 0], points[:, 1],
                                                 points[:, 2])
        normal = np.reshape(normal, [-1, 3])
    pcd.normals = Vector3dVector(normal)
    draw_geometries([pcd])
def isomap_lines_colors(bundle, colours):
    coloured_lines = []
    for i, points in enumerate(bundle):
        # Connect consecutive points of each polyline.
        lines = [[j, j + 1] for j in range(len(points) - 1)]
        data_line = LineSet()
        data_line.points = Vector3dVector(points)
        data_line.lines = Vector2iVector(lines)
        data_line.colors = Vector3dVector(colours[i])
        coloured_lines.append(data_line)
    return coloured_lines
def render_car_cv2(self, pose, car_name, image):
    """Render a car instance given its pose and car_name."""
    # The car model consists of vertices and face meshes.
    car = self.car_models[car_name]
    # pose is a rotation and translation vector with shape (6,).
    pose = np.array(pose)

    # Project the 3D points onto the 2D image plane.
    # Euler angles: successively rotate about Z-Y-X.
    rmat = uts.euler_angles_to_rotation_matrix(pose[:3])
    rvect, _ = cv2.Rodrigues(rmat)
    imgpts, jac = cv2.projectPoints(np.float32(car['vertices']),
                                    rvect,
                                    pose[3:],
                                    self.intrinsic,
                                    distCoeffs=None)

    mask = np.zeros(image.shape)
    for face in car['faces'] - 1:  # faces are 1-indexed
        pts = np.array([[imgpts[idx, 0, 0] * mask.shape[1],
                         imgpts[idx, 0, 1] * mask.shape[0]]
                        for idx in face], np.int32)
        pts = pts.reshape((-1, 1, 2))
        cv2.polylines(mask, [pts], True, (0, 255, 0))

    # Optional Open3D debug visualization, disabled by default.
    if False:
        from open3d import TriangleMesh, Vector3dVector, Vector3iVector, draw_geometries
        mesh = TriangleMesh()
        mesh.vertices = Vector3dVector(car['vertices'])
        mesh.triangles = Vector3iVector(car['faces'] - 1)  # 1-indexed faces
        mesh.paint_uniform_color([1, 0.706, 0])

        car_v2 = np.copy(car['vertices'])
        car_v2[:, 2] += car_v2[:, 2].max() * 3
        mesh2 = TriangleMesh()
        mesh2.vertices = Vector3dVector(car_v2)
        mesh2.triangles = Vector3iVector(car['faces'] - 1)
        mesh2.paint_uniform_color([0, 0.706, 0])
        draw_geometries([mesh, mesh2])

        print("A mesh with no normals and no colors does not look good.")
        print("Painting the mesh")
        mesh.paint_uniform_color([1, 0.706, 0])
        draw_geometries([mesh])

        print("Computing normals and rendering.")
        mesh.compute_vertex_normals()
        print(np.asarray(mesh.triangle_normals))
        draw_geometries([mesh])

    return mask
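# A minimal sketch (illustrative) of the rotation handling in render_car_cv2:
# cv2.projectPoints expects a Rodrigues rotation vector, so the 3x3 matrix
# built from the Euler angles is converted first. The round trip below uses
# only documented OpenCV behavior.
import cv2
import numpy as np

rmat = np.eye(3)
rvec, _ = cv2.Rodrigues(rmat)        # (3, 1) rotation vector; zeros for identity
rmat_back, _ = cv2.Rodrigues(rvec)   # converting back recovers the matrix
assert np.allclose(rmat, rmat_back)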
def pc2color_pcd(pc, color):
    from open3d import PointCloud, Vector3dVector
    color = np.array(color).reshape(-1, 3)
    assert pc.shape[0] == 3
    if color.shape[0] == 1:
        # A single colour is repeated for every point.
        color = np.repeat(color, pc.shape[1], axis=0)
    color = color.copy().astype(np.float32)
    color /= 255.0
    pcd = PointCloud()
    pcd.points = Vector3dVector(pc.T)
    # Flip BGR to RGB.
    pcd.colors = Vector3dVector(color[:, ::-1])
    return pcd
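# Usage sketch for pc2color_pcd (illustrative, with synthetic data; assumes the
# legacy open3d API, version <= 0.7, where PointCloud is a top-level name):
import numpy as np

pc = np.random.rand(3, 100)           # (3, N) point array
pcd = pc2color_pcd(pc, [0, 0, 255])   # one BGR colour, repeated for every point
# pcd.colors now holds RGB (1, 0, 0), i.e. red, for all 100 points.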
def main(radius, name):
    # Keep points whose [-2] column exceeds the radius while the [-1] column
    # does not.
    cloud = epc.compress((epc[:, -2] > radius) & (radius >= epc[:, -1]), 0)
    pcd = PointCloud()
    pcd.points = Vector3dVector(cloud[:, :3])
    pcd.colors = Vector3dVector(cloud[:, 3:6])
    write_point_cloud(name, pcd)
    print(
        pcd,
        cloud[:, -1].max(),
        cloud[:, -3].sum(),
        cloud[:, -4].max(),
        cloud[:, -5].sum(),
    )
    return None
def merge_visualise(self, pc1, pc2):
    """Concatenate all 3d points in pc1 and pc2

    Args:
        pc1: point cloud 1
        pc2: point cloud 2
    """
    pcd = PointCloud()
    points = np.concatenate((pc1['points'], pc2['points']), axis=0)
    colors = np.concatenate((pc1['colors'], pc2['colors']), axis=0)
    pcd.points = Vector3dVector(np.asarray(points))
    pcd.colors = Vector3dVector(np.asarray(colors) / 255.)
    draw_geometries([pcd])
def estimateNormals(self, points):
    pcd = PointCloud()
    pcd.points = Vector3dVector(points)
    estimate_normals(pcd,
                     search_param=KDTreeSearchParamHybrid(radius=0.1,
                                                          max_nn=20))
    return np.asarray(pcd.normals)
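# Usage sketch for estimateNormals (illustrative; `obj` is a hypothetical
# instance of the surrounding class, and estimate_normals /
# KDTreeSearchParamHybrid are top-level names in legacy open3d <= 0.7):
import numpy as np

points = np.random.rand(1000, 3)
normals = obj.estimateNormals(points)
assert normals.shape == (1000, 3)   # one normal per input point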
def lines_colors(bundle, colours, idx):
    coloured_lines = []
    for i in range(len(bundle)):
        lines = []
        colours_list = []
        for j in range(len(idx[i]) - 1):
            # All segments between consecutive breakpoints share colour j.
            for k in range(idx[i][j], idx[i][j + 1] - 1):
                lines.append([k, k + 1])
                colours_list.append(colours[j])
        data_line = LineSet()
        data_line.points = Vector3dVector(bundle[i])
        data_line.lines = Vector2iVector(lines)
        data_line.colors = Vector3dVector(colours_list)
        coloured_lines.append(data_line)
    return coloured_lines
def clusters_colors(bundle, colours, labels):
    clustered_bundle = []
    counter = 0
    for points in bundle:
        lines = []
        colours_list = []
        for i, point in enumerate(points):
            if i < (len(points) - 1):
                lines.append([i, i + 1])
                colours_list.append(colours[labels[counter]])
                counter += 1
        data_line = LineSet()
        data_line.points = Vector3dVector(points)
        data_line.lines = Vector2iVector(lines)
        data_line.colors = Vector3dVector(colours_list)
        clustered_bundle.append(data_line)
    return clustered_bundle
def pc2pcd(pc):
    """
    Args:
        pc: shape (3, N)
    """
    assert pc.shape[0] == 3
    pcd = PointCloud()
    pcd.points = Vector3dVector(pc.T)
    return pcd
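# Usage sketch for pc2pcd (illustrative): open3d stores points as (N, 3),
# hence the transpose inside the helper.
import numpy as np

pc = np.random.rand(3, 50)   # (3, N) array
pcd = pc2pcd(pc)
assert np.asarray(pcd.points).shape == (50, 3)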
def draw_bundles(bundles_list, colour_list=None, rotate=False):
    # Random per-bundle colours are generated lazily when none are given
    # (avoids a mutable default argument).
    bundles = []
    for idx, bundle in enumerate(bundles_list):
        if not colour_list:
            colour_list = [[random(), random(), random()]
                           for _ in range(len(bundles_list))]
        assert len(bundles_list) == len(colour_list)
        colour = colour_list[idx]
        for points in bundle:
            lines = [[i, i + 1] for i in range(len(points) - 1)]
            data_line = LineSet()
            data_line.points = Vector3dVector(points)
            data_line.lines = Vector2iVector(lines)
            data_line.colors = Vector3dVector(
                [colour for _ in range(len(lines))])
            bundles.append(data_line)
    if rotate:
        draw_geometries_with_animation_callback(bundles, __rotate_view)
    else:
        draw_geometries(bundles)
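# Usage sketch for draw_bundles (illustrative): two bundles of random 3D
# polylines; per-bundle colours are generated automatically when no
# colour_list is passed.
import numpy as np

bundle_a = [np.random.rand(10, 3) for _ in range(5)]
bundle_b = [np.random.rand(10, 3) for _ in range(5)]
draw_bundles([bundle_a, bundle_b])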
def show_pointcloud(points):
    """Show a point cloud using open3d.

    Input:
        points: a list of points in the point cloud with shape (m, 3)
    """
    # Make sure the shape is correct.
    assert np.asanyarray(points).shape[1] == 3
    pcd = PointCloud()
    pcd.points = Vector3dVector(np.asanyarray(points))
    draw_geometries([pcd])
def fromGridToPLY(grid):
    # Indices of all voxels with occupancy above 0.5 become point coordinates.
    x, y, z = np.where(grid > 0.5)
    pcl = np.c_[x, y, z]
    return Vector3dVector(pcl)
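# Usage sketch for fromGridToPLY (illustrative): the indices of voxels whose
# occupancy exceeds 0.5 become the point coordinates.
import numpy as np

grid = np.zeros((4, 4, 4))
grid[1, 2, 3] = 1.0
points = fromGridToPLY(grid)
assert np.asarray(points).shape == (1, 3)   # the single voxel (1, 2, 3)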
def main():
    # Set up.
    loadFile()
    # Make a copy of the mesh.
    M1_copy = copy.deepcopy(M1)
    # Create an array of points from the mesh.
    M1_pointArray = convert_PC2NA(M1_copy)
    M1_pointCloud = Vector3dVector(M1_pointArray)
    V = M1_pointCloud[0]
    print(M1_pointArray)
    print(np.linalg.norm(M1_pointArray[0]))
    print(normalise_array(M1_pointArray))
def open3dVis(self, data):
    """Visualize through open3d

    Args:
        data: dict containing input, depth, normal
    """
    pcd = PointCloud()
    depth = data['depth'][0, 0]
    xyz, _ = self.get_point_cloud(depth, data['image'][0])
    if 'normal' in data.keys():
        normals = np.transpose(data['normal'][0], (1, 2, 0))
        normals = normals.reshape((-1, 3))
    else:
        _, normals, _ = self.__compute_local_planes(xyz[:, 0], xyz[:, 1],
                                                    xyz[:, 2])
        normals = normals.reshape((-1, 3))
    color = np.transpose(data['image'][0], [1, 2, 0]).reshape((-1, 3))
    pcd.points = Vector3dVector(xyz)
    pcd.colors = Vector3dVector(color / 255.)
    pcd.normals = Vector3dVector(normals)
    draw_geometries([pcd])
def construct_pointcloud(points):
    """Construct a point cloud object in open3d.

    Input:
        points: a list of points in the point cloud with shape (m, 3),
            where m is the number of points
    Output:
        pc: a PointCloud object from open3d holding the given points
    """
    pc = PointCloud()
    pc.points = Vector3dVector(np.asanyarray(points))
    return pc
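# Usage sketch (illustrative), pairing construct_pointcloud with the
# show_pointcloud helper above:
import numpy as np

pts = np.random.rand(500, 3)
pc = construct_pointcloud(pts)
# draw_geometries([pc])   # uncomment to open the interactive viewer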
def pcd_array(cloud):
    pcd = PointCloud()
    pcd.points = Vector3dVector(cloud[:, :3])
    pcd.colors = Vector3dVector(cloud[:, 3:])
    return pcd
from open3d import read_point_cloud, write_point_cloud, Vector3dVector
import joblib
from numpy import load, zeros
from scipy.special import erf

scaler = joblib.load('scaler.joblib')
svc = joblib.load('svc.joblib')
proj = joblib.load('proj.joblib')

pts = read_point_cloud('test.ply')
pcamn = load('pca_all.npy')
color = zeros((len(pcamn), 3))
pcamn = scaler.transform(pcamn)

# Red channel: signed distance to the SVC decision boundary.
color[:, 0] = svc.decision_function(pcamn)
# Remove the component along the SVC normal, then use a 2D projection of the
# remainder for the green and blue channels.
pcamn -= svc.coef_ * svc.coef_.dot(pcamn.T).T / (svc.coef_**2).sum()
color[:, 1:] = proj.transform(pcamn)

# Squash all channels into [0, 1] via the standard normal CDF.
pts.colors = Vector3dVector((1.0 + erf(color / 2.0**0.5)) / 2.0)
write_point_cloud('vis.ply', pts)
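# Sanity check (illustrative) for the colour mapping above:
# (1 + erf(x / sqrt(2))) / 2 is the standard normal CDF, so arbitrary decision
# values are squashed into the [0, 1] range open3d expects for colours.
import numpy as np
from scipy.special import erf
from scipy.stats import norm

x = np.linspace(-3.0, 3.0, 7)
assert np.allclose((1.0 + erf(x / 2.0**0.5)) / 2.0, norm.cdf(x))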
def vis_skeleton_pcd(rec_idx, f_id, fusion_window=20):
    info = pickle.load(open(rec_idx + '/info_frames.pickle', 'rb'))
    info_npz = np.load(rec_idx + '/info_frames.npz')
    pcd = o3d.geometry.PointCloud()
    global_pcd = o3d.geometry.PointCloud()

    # Use nearby RGBD frames to create the environment point cloud.
    for i in range(f_id - fusion_window // 2, f_id + fusion_window // 2, 10):
        fname = rec_idx + '/' + '{:05d}'.format(i) + '.png'
        if os.path.exists(fname):
            infot = info[i]
            cam_near_clip = infot['cam_near_clip']
            cam_far_clip = infot['cam_far_clip']
            depth = read_depthmap(fname, cam_near_clip, cam_far_clip)
            # Delete points that are more than 20 meters away.
            depth[depth > 20.0] = 0

            # Obtain the human mask and dilate it slightly.
            p = info_npz['joints_2d'][i, 0]
            fname = rec_idx + '/' + '{:05d}'.format(i) + '_id.png'
            id_map = cv2.imread(fname, cv2.IMREAD_ANYDEPTH)
            human_id = id_map[np.clip(int(p[1]), 0, 1079),
                              np.clip(int(p[0]), 0, 1919)]
            mask = id_map == human_id
            kernel = np.ones((3, 3), np.uint8)
            mask_dilation = cv2.dilate(mask.astype(np.uint8), kernel,
                                       iterations=1)
            depth = depth * (1 - mask_dilation[..., None])
            depth = o3d.geometry.Image(depth.astype(np.float32))

            fname = rec_idx + '/' + '{:05d}'.format(i) + '.jpg'
            color_raw = o3d.io.read_image(fname)
            focal_length = info_npz['intrinsics'][f_id, 0, 0]
            rgbd_image = o3d.geometry.create_rgbd_image_from_color_and_depth(
                color_raw,
                depth,
                depth_scale=1.0,
                depth_trunc=15.0,
                convert_rgb_to_intensity=False,
            )
            pcd = o3d.geometry.create_point_cloud_from_rgbd_image(
                rgbd_image,
                o3d.camera.PinholeCameraIntrinsic(
                    1920, 1080, focal_length, focal_length, 960.0, 540.0
                ),
            )

            # Transform the camera-frame points into world coordinates.
            depth_pts = np.asarray(pcd.points)
            depth_pts_aug = np.hstack(
                [depth_pts, np.ones([depth_pts.shape[0], 1])]
            )
            cam_extr_ref = np.linalg.inv(info_npz['world2cam_trans'][i])
            depth_pts = depth_pts_aug.dot(cam_extr_ref)[:, :3]
            pcd.points = Vector3dVector(depth_pts)

            global_pcd.points.extend(pcd.points)
            global_pcd.colors.extend(pcd.colors)

    # Read the GT pose in world coordinates; visualize nearby frames as well.
    joints = info_npz['joints_3d_world'][(f_id - 30):(f_id + 30):10]
    tl, jn, _ = joints.shape
    joints = joints.reshape(-1, 3)

    # Create the skeletons in open3d.
    nskeletons = tl
    lines, colors = create_skeleton_viz_data(nskeletons, jn)
    line_set = LineSet()
    line_set.points = Vector3dVector(joints)
    line_set.lines = Vector2iVector(lines)
    line_set.colors = Vector3dVector(colors)
    vis_list = [global_pcd, line_set]

    for j in range(joints.shape[0]):
        # Skip the spine joints.
        if j % jn == 11 or j % jn == 12 or j % jn == 13:
            continue
        transformation = np.identity(4)
        transformation[:3, 3] = joints[j]
        # Use a bigger sphere for the head joint.
        r = 0.07 if j % jn == 0 else 0.03
        sphere = o3d.geometry.create_mesh_sphere(radius=r)
        sphere.paint_uniform_color([0.0, float(j // jn) / nskeletons, 1.0])
        vis_list.append(sphere.transform(transformation))

    draw_geometries(vis_list)
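# A minimal sketch (illustrative) of the camera-to-world transform used in
# vis_skeleton_pcd, assuming the row-vector convention implied by the
# `depth_pts_aug.dot(cam_extr_ref)` call above (translation in the last row):
import numpy as np

world2cam = np.eye(4)
world2cam[3, :3] = [0.0, 0.0, 2.0]          # world -> camera adds 2 m along z
cam2world = np.linalg.inv(world2cam)

pts_cam = np.random.rand(10, 3)
pts_aug = np.hstack([pts_cam, np.ones((10, 1))])
pts_world = pts_aug.dot(cam2world)[:, :3]   # undo the offset
assert np.allclose(pts_world[:, 2], pts_cam[:, 2] - 2.0)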
def open_3d_vis(args, output_dir):
    """
    http://www.open3d.org/docs/tutorial/Basic/visualization.html

    -- Mouse view control --
      Left button + drag        : Rotate.
      Ctrl + left button + drag : Translate.
      Wheel                     : Zoom in/out.

    -- Keyboard view control --
      [/]          : Increase/decrease field of view.
      R            : Reset view point.
      Ctrl/Cmd + C : Copy current view status into the clipboard.
                     (A nice view has been saved as utilites/view.json.)
      Ctrl/Cmd + V : Paste view status from clipboard.

    -- General control --
      Q, Esc       : Exit window.
      H            : Print help message.
      P, PrtScn    : Take a screen capture.
      D            : Take a depth capture.
      O            : Take a capture of current rendering settings.
    """
    json_dir = os.path.join(
        output_dir, 'json_' + args.list_flag + '_trans') + '_iou' + str(1.0)
    args.test_dir = json_dir
    args.gt_dir = args.dataset_dir + 'car_poses'
    args.res_file = os.path.join(output_dir,
                                 'json_' + args.list_flag + '_res.txt')
    args.simType = None

    car_models = load_car_models(os.path.join(args.dataset_dir, 'car_models'))
    det_3d_metric = Detect3DEval(args)
    evalImgs = det_3d_metric.evaluate()
    image_id = evalImgs['image_id']
    print(image_id)

    # We only draw the loosest matching constraint here.
    gtMatches = evalImgs['gtMatches'][0]
    dtScores = evalImgs['dtScores']
    mesh_car_all = []

    # We also track the extent of the road surface.
    xmin, xmax, ymin, ymax, zmin, zmax = np.inf, -np.inf, np.inf, -np.inf, np.inf, -np.inf
    for car in det_3d_metric._gts[image_id]:
        car_model = car_models[car['car_id']]
        R = euler_angles_to_rotation_matrix(car['pose'][:3])
        vertices = np.matmul(R, car_model['vertices'].T) + np.asarray(
            car['pose'][3:])[:, None]
        xmin, xmax, ymin, ymax, zmin, zmax = update_road_surface(
            xmin, xmax, ymin, ymax, zmin, zmax, vertices)
        mesh_car = TriangleMesh()
        mesh_car.vertices = Vector3dVector(vertices.T)
        mesh_car.triangles = Vector3iVector(car_model['faces'] - 1)
        mesh_car.compute_vertex_normals()
        # Render the GT car in blue.
        mesh_car.paint_uniform_color([0, 0, 1])
        mesh_car_all.append(mesh_car)

    for i, car in enumerate(det_3d_metric._dts[image_id]):
        if dtScores[i] > args.dtScores:
            car_model = car_models[car['car_id']]
            R = euler_angles_to_rotation_matrix(car['pose'][:3])
            vertices = np.matmul(R, car_model['vertices'].T) + np.asarray(
                car['pose'][3:])[:, None]
            mesh_car = TriangleMesh()
            mesh_car.vertices = Vector3dVector(vertices.T)
            mesh_car.triangles = Vector3iVector(car_model['faces'] - 1)
            mesh_car.compute_vertex_normals()
            if car['id'] in gtMatches:
                # A true positive is rendered in green.
                mesh_car.paint_uniform_color([0, 1, 0])
            else:
                # A false positive is rendered in red.
                mesh_car.paint_uniform_color([1, 0, 0])
            mesh_car_all.append(mesh_car)

    # Draw the road surface, sampled every 0.1 meter.
    xyz = get_road_surface_xyz(xmin, xmax, ymin, ymax, zmin, zmax)
    # Pass xyz to Open3D.PointCloud and visualize.
    pcd_road = PointCloud()
    pcd_road.points = Vector3dVector(xyz)
    pcd_road.paint_uniform_color([0, 0, 1])
    mesh_car_all.append(pcd_road)

    # Draw the coordinate frame.
    mesh_frame = create_mesh_coordinate_frame(size=3, origin=[0, 0, 0])
    mesh_car_all.append(mesh_frame)

    draw_geometries(mesh_car_all)
    det_3d_metric.accumulate()
    det_3d_metric.summarize()
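# A minimal sketch (illustrative) of the model-to-world placement used in
# open_3d_vis: (N, 3) vertices are rotated with a column-vector convention and
# then translated.
import numpy as np

R = np.eye(3)                 # identity rotation for the example
t = np.array([1.0, 0.0, 0.0])
vertices = np.random.rand(8, 3)
placed = (np.matmul(R, vertices.T) + t[:, None]).T
assert np.allclose(placed, vertices + t)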
points = np.loadtxt(r'../Data/pole2.txt', skiprows=1, delimiter=';')[:, :3]

# MyRANSAC.Model = CylinderModel((0, 0), (0.05, 0.3))  # theta/phi, min/max cylinder radius
# PlaneModel()
from Runner import Search
from geometry import *

Result = Search(1, points, CylinderModel(0.05, (0.02, 0.05), (0, 0)))
Result = Search(1, points, PlaneModel(0.05))
resultPoints = Result[1][0]
# resultPoints = np.vstack((Result[1][0], Result[1][1]))

# For visualization of the inliers found by the search:
pcd = PointCloud()
pcd.points = Vector3dVector(Result[2])

pcd2 = PointCloud()
pcd2.points = Vector3dVector(resultPoints)
pcd2.paint_uniform_color([0.1, 0.1, 0.1])
estimate_normals(pcd2,
                 search_param=KDTreeSearchParamHybrid(radius=0.1, max_nn=20))
draw_geometries([pcd2])

# Visualize the full input cloud as well:
pcd = PointCloud()
pcd.points = Vector3dVector(points)
draw_geometries([pcd])
def export_scene_pointcloud(nusc: NuScenes,
                            out_path: str,
                            scene_token: str,
                            scene_name,
                            trafo_in_origin_BOOL: bool,
                            write_ascii_BOOL: bool,
                            channel: str = 'LIDAR_TOP',
                            min_dist: float = 3.0,
                            max_dist: float = 30.0,
                            verbose: bool = True) -> None:
    """
    Export the fused point cloud of a scene, one PCD file per sweep. The point
    clouds can be viewed in your favorite 3D rendering tool, e.g. Meshlab or Maya.
    :param nusc: NuScenes instance.
    :param out_path: Output path prefix to write the point clouds to.
    :param scene_token: Unique identifier of scene to render.
    :param scene_name: Human-readable scene name, used in the ego-pose log.
    :param trafo_in_origin_BOOL: Whether to transform each sweep into the global frame.
    :param write_ascii_BOOL: Whether to write the PCD files in ASCII instead of binary.
    :param channel: Channel to render.
    :param min_dist: Minimum distance to ego vehicle below which points are dropped.
    :param max_dist: Maximum distance to ego vehicle above which points are dropped.
    :param verbose: Whether to print messages to stdout.
    """
    # Check inputs.
    valid_channels = ['LIDAR_TOP', 'RADAR_FRONT', 'RADAR_FRONT_RIGHT',
                      'RADAR_FRONT_LEFT', 'RADAR_BACK_LEFT', 'RADAR_BACK_RIGHT']
    camera_channels = ['CAM_FRONT_LEFT', 'CAM_FRONT', 'CAM_FRONT_RIGHT',
                       'CAM_BACK_LEFT', 'CAM_BACK', 'CAM_BACK_RIGHT']
    assert channel in valid_channels, 'Input channel {} not valid.'.format(channel)

    # Get records from DB.
    scene_rec = nusc.get('scene', scene_token)
    start_sample_rec = nusc.get('sample', scene_rec['first_sample_token'])
    sd_rec = nusc.get('sample_data', start_sample_rec['data'][channel])

    # Make a list of frames.
    cur_sd_rec = sd_rec
    sd_tokens = []
    while cur_sd_rec['next'] != '':
        cur_sd_rec = nusc.get('sample_data', cur_sd_rec['next'])
        sd_tokens.append(cur_sd_rec['token'])

    ego_pose_info = []
    zeitstample = 0
    for sd_token in tqdm(sd_tokens):
        zeitstample = zeitstample + 1
        if verbose:
            print('Processing {}'.format(sd_rec['filename']))
        sc_rec = nusc.get('sample_data', sd_token)
        sample_rec = nusc.get('sample', sc_rec['sample_token'])
        lidar_rec = nusc.get('sample_data', sd_token)
        pc = LidarPointCloud.from_file(
            osp.join(nusc.dataroot, lidar_rec['filename']))

        # Points live in their own reference frame, so they need to be
        # transformed. First step: transform the point cloud to the ego
        # vehicle frame for the timestamp of the sweep.
        cs_record = nusc.get('calibrated_sensor',
                             lidar_rec['calibrated_sensor_token'])
        pc.rotate(Quaternion(cs_record['rotation']).rotation_matrix)
        pc.translate(np.array(cs_record['translation']))

        # Optionally filter by distance to remove the ego vehicle.
        dists_origin = np.sqrt(np.sum(pc.points[:3, :]**2, axis=0))
        keep = np.logical_and(min_dist <= dists_origin, dists_origin <= max_dist)
        pc.points = pc.points[:, keep]
        if verbose:
            print('Distance filter: Keeping %d of %d points...'
                  % (keep.sum(), len(keep)))

        # Second step: transform to the global frame.
        poserecord = nusc.get('ego_pose', lidar_rec['ego_pose_token'])
        if trafo_in_origin_BOOL:
            pc.rotate(Quaternion(poserecord['rotation']).rotation_matrix)
            pc.translate(np.array(poserecord['translation']))

        ego_pose_info.append("Ego-Pose-Info for timestamp %i sd_token(%s) :\n"
                             % (zeitstample, sd_token))
        ego_pose_info.append("  Rotation : %s \n"
                             % np.array2string(np.array(poserecord['rotation'])))
        ego_pose_info.append("  Translation : %s \n"
                             % np.array2string(np.array(poserecord['translation'])))

        # Columns: x, y, z, intensity.
        xyzi = np.asarray(pc.points).T
        xyz = xyzi[:, :3]

        # Velodyne intensity lies in [0, 255]; normalize it and replicate it
        # across all three channels for a gray-scale coloring.
        intensity_from_velodyne = xyzi[:, 3] / 255
        intensity = np.tile(intensity_from_velodyne[:, None], (1, 3))

        PointCloud_open3d = PointCloud()
        PointCloud_open3d.colors = Vector3dVector(intensity)
        PointCloud_open3d.points = Vector3dVector(xyz)
        write_point_cloud("%s-%i-%s.pcd" % (out_path, zeitstample, sd_token),
                          PointCloud_open3d,
                          write_ascii=write_ascii_BOOL)

    # Write the ego-pose log.
    f = open(args.out_dir + "/ego_pose_info.txt", 'w+')
    f.write("ego_pose_info for scene %s \n (with token %s) \n"
            % (scene_name, scene_token))
    f.write(' '.join(ego_pose_info))
    f.close()
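# Usage sketch for export_scene_pointcloud (illustrative; the dataroot below
# is a hypothetical path, and the nuScenes mini split is assumed installed):
from nuscenes.nuscenes import NuScenes

nusc = NuScenes(version='v1.0-mini', dataroot='/data/nuscenes', verbose=False)
scene = nusc.scene[0]
export_scene_pointcloud(nusc, 'out/scene0', scene['token'], scene['name'],
                        trafo_in_origin_BOOL=True, write_ascii_BOOL=False)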
from numpy import load, zeros
from open3d import read_point_cloud, write_point_cloud, Vector3dVector
import joblib

scaler = joblib.load('scaler.joblib')
svc = joblib.load('svc.joblib')

pca = load('pca_all.npy')
norms = zeros((len(pca), 3))
# Store the per-point SVC class prediction in the first normal channel.
norms[:, 0] = svc.predict(scaler.transform(pca))

cloud = read_point_cloud('test.ply')
cloud.normals = Vector3dVector(norms)
write_point_cloud('tree.ply', cloud)
def convert_NA2PC(mesh_as_NA):
    output = PointCloud()
    output.points = Vector3dVector(mesh_as_NA)
    return output
from numpy import load
from open3d import PointCloud, Vector3dVector, write_point_cloud
from sys import argv

if len(argv) > 3:
    name_epc = argv[1]
    radius = float(argv[2])
    name_output = argv[3]
else:
    name_epc = input('input npy: ')
    radius = float(input('radius: '))
    name_output = input('output: ')

epc = load(name_epc)
cloud = epc[:, :-2].compress((epc[:, -2] > radius) & (radius >= epc[:, -1]), 0)
pcd = PointCloud()
pcd.points = Vector3dVector(cloud[:, :3])
pcd.colors = Vector3dVector(cloud[:, 3:])
write_point_cloud(name_output, pcd)
print(pcd)
from open3d import read_point_cloud, write_point_cloud, Vector3dVector
from sklearn.neighbors import KNeighborsRegressor
from numpy import array, concatenate

cloud = read_point_cloud('tree.ply')
calibrate = read_point_cloud('photo_test.ply')

# Distance-weighted 4-NN regressor mapping positions to colours.
neigh0 = KNeighborsRegressor(4, weights='distance', n_jobs=-1)
neigh0.fit(calibrate.points, calibrate.colors)

arr_points = array(cloud.points)
arr_colors = array(cloud.colors)
# Points flagged in the first normal channel that are also strongly blue.
arr_filter = (array(cloud.normals)[:, 0] * (arr_colors[:, 2] > 0.5)).astype(bool)

points_other = arr_points.compress(~arr_filter, 0)
colors_other = arr_colors.compress(~arr_filter, 0)
neigh1 = KNeighborsRegressor(1, n_jobs=-1)
neigh1.fit(points_other, colors_other)

points_abn = arr_points.compress(arr_filter, 0)
colors_abn = (neigh0.predict(points_abn) + neigh1.predict(points_abn)) / 2

cloud.points = Vector3dVector(concatenate((points_other, points_abn)))
cloud.colors = Vector3dVector(concatenate((colors_other, colors_abn)))
cloud.normals = Vector3dVector()
write_point_cloud('corr.ply', cloud)