import numpy as np
from open3d.geometry import PointCloud
from open3d.utility import Vector3dVector
from open3d.visualization import draw_geometries

def open3dplot(points, keypoints_idx):  # visualize a point cloud and its keypoints with Open3D
    colors = np.zeros(shape=(len(points), 3))  # every point needs a colour vector
    for i in keypoints_idx:
        colors[i] = np.array([1.0, 0.0, 0.0])  # keypoints drawn in red (Open3D expects RGB in [0, 1])
    pointCloud = PointCloud()
    pointCloud.points = Vector3dVector(points)
    pointCloud.colors = Vector3dVector(colors)
    draw_geometries([pointCloud])
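# Hedged usage sketch (not part of the original example): a random cloud with
# hypothetical keypoint indices chosen at random.
if __name__ == '__main__':
    pts = np.random.rand(2000, 3)
    key_idx = np.random.choice(len(pts), size=20, replace=False)
    open3dplot(pts, key_idx)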
def draw_pc(pc_xyzrgb):
    """
    Display a point cloud.
    :param pc_xyzrgb: [n, 3 or 6] array of XYZ or XYZRGB values
    :return:
    """
    pc = PointCloud()  # create a point cloud instance
    pc.points = Vector3dVector(pc_xyzrgb[:, 0:3])  # x, y, z coordinates
    if pc_xyzrgb.shape[1] == 3:  # only coordinates were provided: display without colour
        draw_geometries([pc])
        return 0
    if np.max(pc_xyzrgb[:, 3:6]) > 20:  # colour values were provided: display in colour
        pc.colors = Vector3dVector(pc_xyzrgb[:, 3:6] / 255.)  # RGB in [0, 255], normalise to [0, 1]
        draw_geometries([pc])
        return 0
    else:
        pc.colors = Vector3dVector(pc_xyzrgb[:, 3:6])  # RGB already in [0, 1]
        draw_geometries([pc])
        return 0
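# Hedged usage sketch (assumed, not part of the original example): one grey cloud
# and one coloured cloud, exercising both branches of draw_pc.
if __name__ == '__main__':
    xyz = np.random.rand(1000, 3)
    draw_pc(xyz)                               # geometry only, shown without colour
    rgb = np.random.randint(0, 256, size=(1000, 3))
    draw_pc(np.hstack([xyz, rgb]))             # RGB in [0, 255], normalised internally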
Example #3
def visualize(pointcloud):
    from open3d.open3d.geometry import PointCloud
    from open3d.open3d.utility import Vector3dVector
    from open3d.open3d.visualization import draw_geometries

    point_cloud = PointCloud()
    point_cloud.points = Vector3dVector(pointcloud[:, 0:3].reshape(-1, 3))
    draw_geometries([point_cloud])
    def callback(self, lidar):

        # convert pointcloud2 to array
        lidar = pc2.read_points(lidar)
        points = np.array(list(lidar))

        # get a PointCloud
        point_cloud = PointCloud()
        point_cloud.points = Vector3dVector(points[:, 0:3].reshape(-1, 3))

        # downsample
        point_cloud = point_cloud.voxel_down_sample(voxel_size=0.6)
        points = point_cloud.points
        points = np.asarray(points)

        # build a kdtree
        pcd_tree = o3d.geometry.KDTreeFlann(point_cloud)

        eigenfeatures = []
        K = 19

        save_filename = '/home/s/Dataset/syz.txt'

        for i in range(points.shape[0]):
            [k, index,
             _] = pcd_tree.search_knn_vector_3d(point_cloud.points[i], K + 1)

            eigenvalues, eigenvectors = self.PCA(points[index[:], :])

            tmp = self.get_6features(eigenvalues=eigenvalues)

            eigenfeatures.append(tmp)

        # print("eigenfeatures size:{}".format(len(eigenfeatures)))

        # convert list[] to array
        eigenfeatures = np.array(eigenfeatures)
        print('eigenfeatures shape:{}'.format(eigenfeatures.shape))

        np.savetxt(save_filename, eigenfeatures)
        print(save_filename + " save!")

        # exit the process once the features have been saved
        try:
            os._exit(0)
        except:
            print('Program is dead')
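    # Hedged sketch (assumed, not from the original class): PCA and eigenvalue-feature
    # helpers with the signatures the callback above expects; the exact feature set of
    # the source is unknown, so six standard covariance features are used here.
    def PCA(self, data):
        # eigen-decompose the covariance of a k-NN neighbourhood, largest eigenvalue first
        centered = data - data.mean(axis=0)
        eigenvalues, eigenvectors = np.linalg.eigh(np.cov(centered, rowvar=False))
        order = np.argsort(eigenvalues)[::-1]
        return eigenvalues[order], eigenvectors[:, order]

    def get_6features(self, eigenvalues):
        # linearity, planarity, sphericity, omnivariance, anisotropy, eigenentropy
        e1, e2, e3 = np.clip(eigenvalues, 1e-12, None)
        features = [
            (e1 - e2) / e1,                  # linearity
            (e2 - e3) / e1,                  # planarity
            e3 / e1,                         # sphericity
            (e1 * e2 * e3) ** (1.0 / 3.0),   # omnivariance
            (e1 - e3) / e1,                  # anisotropy
        ]
        p = np.array([e1, e2, e3]) / (e1 + e2 + e3)
        features.append(-np.sum(p * np.log(p)))  # eigenentropy
        return features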
def create_patch_pair(depth_path, mask_path, im_cam, gt, save_name,
                      md_pcd_pts):
    raw_depth = io.load_depth(depth_path)
    mask = io.load_im(mask_path)

    img_pcd = PointCloud.create_from_depth_image(
        depth=Image(masked_where(mask == 0.0, raw_depth).filled(0.0)),
        intrinsic=PHCamIntrinsic(*IM_SIZE, *[im_cam['cam_K'][i] for i in K]),
        depth_scale=im_cam['depth_scale'],
        depth_trunc=150000)
    img_pcd = img_pcd.voxel_down_sample(VOXEL_SIZE)  # voxel_down_sample returns a new point cloud

    if np.asarray(img_pcd.points).shape[0] in PCD_PTS_RANGE or IS_TARGET:
        cam_R, cam_t = gt['cam_R_m2c'], gt['cam_t_m2c']

        # Select reference points on image using farthest point sampling
        img_pcd_pts_fps = torch.as_tensor(np.asarray(img_pcd.points)).to(DEVICE)
        img_ref_idxs = fps(img_pcd_pts_fps, ratio=FPS_RATIO).to('cpu').numpy()

        # Calculate model reference points
        img_ref_pts = np.asarray(img_pcd.points)[img_ref_idxs]
        md_ref_pts = (img_ref_pts - cam_t.T) @ np.linalg.inv(cam_R).T

        # Recreate model point cloud
        md_ref_idxs = np.arange(md_ref_pts.shape[0])
        md_pcd_pts = np.concatenate([md_ref_pts, md_pcd_pts], axis=0)
        md_pcd = PointCloud()
        md_pcd.points = Vector3dVector(md_pcd_pts)

        # Calculate and save PPFs
        img_save_path = f'image/{save_name}'
        create_local_patches(img_pcd, img_ref_idxs, img_save_path)

        md_save_path = f'model/{save_name}'
        create_local_patches(md_pcd, md_ref_idxs, md_save_path)

        entry = [save_name, img_ref_idxs.shape[0]]
    else:
        entry = []

    return entry
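# Hedged sketch (assumptions, not from the source) of the imports and constants
# create_patch_pair relies on; fps is presumed to be torch_geometric's farthest
# point sampling, and the constant values below are illustrative only.
import numpy as np
import torch
from numpy.ma import masked_where
from open3d.geometry import PointCloud, Image
from open3d.camera import PinholeCameraIntrinsic as PHCamIntrinsic
from open3d.utility import Vector3dVector
from torch_geometric.nn import fps

IM_SIZE = (640, 480)                 # assumed image width and height
K = [0, 4, 2, 5]                     # assumed fx, fy, cx, cy indices into the flattened cam_K
VOXEL_SIZE = 2.5                     # assumed down-sampling voxel size (dataset units)
FPS_RATIO = 0.05                     # assumed farthest-point-sampling keep ratio
PCD_PTS_RANGE = range(1024, 30000)   # assumed acceptable point-cloud sizes
IS_TARGET = False
DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')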
Example #6
                plt.title('Depth image')
                plt.xlabel('X Pixel')
                plt.ylabel('Y Pixel')
                plt.tight_layout()
                plt.savefig(os.path.join(
                    depth_dir,
                    model_id.split('/')[-1] + '_%d.png' % i),
                            dpi=200)
                plt.close()

            pose = np.loadtxt(pose_path)
            points = depth2pcd(depth, intrinsics, pose)
            try:
                normalised_points = points / ((points**2).sum(axis=1).max())
                pcd = PointCloud()

            except:
                print(
                    'there is an exception in the partial normalised process: ',
                    model_id, i)

            # if the normalisation fails, the point cloud normalised in the previous
            # iteration is kept and saved for the current object instead
            # pcd.points = Vector3dVector(normalised_points)

            pcd.points = Vector3dVector(points)
            write_point_cloud(pcd_dir + '_%d.pcd' % i, pcd)

        # os.removedirs(depth_dir)
        os.removedirs(pcd_dir)
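# Hedged sketch (assumed, not from the source) of the depth2pcd helper the fragment
# above calls: back-project a depth map through pinhole intrinsics, then transform
# the camera-frame points with the 4x4 pose matrix.
def depth2pcd(depth, intrinsics, pose):
    fx, fy = intrinsics[0, 0], intrinsics[1, 1]
    cx, cy = intrinsics[0, 2], intrinsics[1, 2]
    v, u = np.nonzero(depth > 0)                       # pixel coordinates with valid depth
    z = depth[v, u]
    x = (u - cx) * z / fx
    y = (v - cy) * z / fy
    pts_cam = np.stack([x, y, z], axis=1)              # camera-frame points, shape (n, 3)
    pts_hom = np.hstack([pts_cam, np.ones((pts_cam.shape[0], 1))])
    return (pose @ pts_hom.T).T[:, :3]                 # world-frame points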
Example #7
from open3d.geometry import PointCloud
from open3d.utility import Vector3dVector
from open3d.io import write_point_cloud

def save_pcd(filename, points):
    pcd = PointCloud()
    pcd.points = Vector3dVector(points)
    write_point_cloud(filename, pcd)
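# Hedged usage sketch (assumed): write a random cloud to disk and read it back.
if __name__ == '__main__':
    import numpy as np
    from open3d.io import read_point_cloud
    save_pcd('example.pcd', np.random.rand(1000, 3))
    print(read_point_cloud('example.pcd'))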