def convert_pc2ply(anno_path, out_file_name):
    """
    Convert original dataset files to ply file (each line is XYZRGBL)
    We aggregated all the points from each instance in the room.
    """
    save_path = os.path.join(original_pc_folder, out_file_name)
    data_list = []

    for f in glob.glob(os.path.join(anno_path, '*.txt')):
        class_name = os.path.basename(f).split('_')[0]
        if class_name not in label_names:  # note: some rooms contain a misspelled 'staris' class
            class_name = 'clutter'
        pc = pandas.read_csv(f, header=None, delim_whitespace=True).values
        labels = np.ones((pc.shape[0], 1)) * name_to_idx[class_name]
        data_list.append(np.concatenate([pc, labels], 1))  # N*7

    pc_label = np.concatenate(data_list, 0)
    xyz_min = np.amin(pc_label, axis=0)[0:3]
    pc_label[:, 0:3] -= xyz_min

    xyz = pc_label[:, :3].astype(np.float32)
    colors = pc_label[:, 3:6].astype(np.uint8)
    labels = pc_label[:, 6].astype(np.uint8)
    write_ply(save_path, (xyz, colors, labels),
              ['x', 'y', 'z', 'red', 'green', 'blue', 'class'])

    # save the subsampled cloud and its KDTree file
    sub_xyz, sub_colors, sub_labels = DP.grid_sub_sampling(
        xyz, colors, labels, sub_grid_size)
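    # rescale RGB from [0, 255] to [0, 1]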
    sub_colors = sub_colors / 255.0
    sub_ply_file = os.path.join(sub_pc_folder,
                                save_path.split('/')[-1][:-4] + '.ply')
    write_ply(sub_ply_file, [sub_xyz, sub_colors, sub_labels],
              ['x', 'y', 'z', 'red', 'green', 'blue', 'class'])

    search_tree = KDTree(sub_xyz)
    kd_tree_file = os.path.join(
        sub_pc_folder,
        str(save_path.split('/')[-1][:-4]) + '_KDTree.pkl')
    with open(kd_tree_file, 'wb') as f:
        pickle.dump(search_tree, f)

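    # for each full-resolution point, find the index of its nearest subsampled point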
    proj_idx = np.squeeze(search_tree.query(xyz, return_distance=False))
    proj_idx = proj_idx.astype(np.int32)
    proj_save = os.path.join(sub_pc_folder,
                             str(save_path.split('/')[-1][:-4]) + '_proj.pkl')
    with open(proj_save, 'wb') as f:
        pickle.dump([proj_idx, labels], f)
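
# Usage sketch for the saved projection file: dense predictions are recovered
# by indexing subsampled predictions with proj_idx. `sub_preds` is an
# illustrative stand-in for per-subsampled-point network outputs.
def project_predictions(proj_pkl_path, sub_preds):
    with open(proj_pkl_path, 'rb') as f:
        proj_idx, full_labels = pickle.load(f)
    # each full-resolution point inherits the prediction of its nearest subsampled point
    return sub_preds[proj_idx], full_labels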
            with open(
                    os.path.join(raw_path, scene,
                                 scene + '_vh_clean.aggregation.json'),
                    'r') as f:
                aggregation = json.load(f)

            # Loop over objects to assign each point its class
            for segGroup in aggregation['segGroups']:
                c_name = segGroup['label']  # the class name of this segment group
                if c_name in names1:
                    nyuID = annot_to_nyuID[c_name]  # map the class name to its NYU label ID
                    if nyuID in label_values:
                        for segment in segGroup['segments']:
                            raw_labels[segIndices == segment] = nyuID

            write_ply(os.path.join(mesh_path, scene + '_mesh.ply'),
                      [raw_xyz, raw_colors, raw_labels],
                      ['x', 'y', 'z', 'red', 'green', 'blue', 'class'],
                      triangular_faces=faces)
        else:
            write_ply(os.path.join(mesh_path,
                                   scene + '_mesh.ply'), [raw_xyz, raw_colors],
                      ['x', 'y', 'z', 'red', 'green', 'blue'],
                      triangular_faces=faces)

        # Create a finer point cloud by rasterizing the mesh; save it as the original data
        xyz_min = np.amin(raw_xyz, axis=0)
        finer_xyz = raw_xyz - xyz_min
        finer_xyz, associated_vert_inds = DP.rasterize_mesh(
            finer_xyz, faces, 0.003)

        # Subsampling points
        if original_path == original_train_path:
    if exists(ply_file_full):
        print('{:s} already exists\n'.format(cloud_name))
        continue
    print('Preparation of {:s}'.format(cloud_name))
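    # columns of the txt file are x y z intensity r g b, hence colors at 4:7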
    data = np.loadtxt(txt_file)
    points = data[:, :3].astype(np.float32)
    colors = data[:, 4:7].astype(np.uint8)

    if exists(label_file):
        labels = np.loadtxt(label_file, dtype=np.int32)
        sub_points, sub_colors, sub_labels = DP.grid_sub_sampling(points,
                                                                  features=colors,
                                                                  labels=labels,
                                                                  grid_size=0.01)
        write_ply(ply_file_full,
                  (sub_points, sub_colors, sub_labels),
                  ['x', 'y', 'z', 'red', 'green', 'blue', 'class'])
    else:
        write_ply(ply_file_full,
                  (points, colors),
                  ['x', 'y', 'z', 'red', 'green', 'blue'])
# ply files for the training and test sets
train_files = np.sort([join(train_path, f) for f in listdir(train_path) if f[-4:] == '.ply'])
test_files = np.sort([join(test_path, f) for f in listdir(test_path) if f[-4:] == '.ply'])

tree_path = join(data_path, 'input_{:.3f}'.format(subsampling_parameter))
if not exists(tree_path):
    makedirs(tree_path)

files = np.hstack((train_files, test_files))
print('\nPreparing KDTree for all scenes, subsampled at {:.3f}'.format(subsampling_parameter))
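# The per-file loop is cut off here; a minimal sketch of the usual pattern
# (mirroring the first example above; assumes DP.grid_sub_sampling returns
# only the subsampled points when called without features or labels):
for file_path in files:
    cloud_name = file_path.split('/')[-1][:-4]
    data = read_ply(file_path)
    points = np.vstack((data['x'], data['y'], data['z'])).T
    sub_points = DP.grid_sub_sampling(points, grid_size=subsampling_parameter)
    with open(join(tree_path, cloud_name + '_KDTree.pkl'), 'wb') as f:
        pickle.dump(KDTree(sub_points, leaf_size=50), f)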
Example #4
    data_list = []
    for i, room_folder in enumerate(room_folders):
        print('Cloud %s - Room %d/%d' % (cloud_name, i + 1, len(room_folders)))
        for f in glob.glob(os.path.join(room_folder, 'Annotations', '*.txt')):
            class_name = os.path.basename(f).split('_')[0]
            if class_name not in label_names:
                class_name = 'clutter'
            pc = pandas.read_csv(f, header=None, delim_whitespace=True).values
            labels = np.ones((pc.shape[0], 1)) * name_to_idx[class_name]
            data_list.append(np.concatenate([pc, labels], 1))  # N*7
    pc_label = np.concatenate(data_list, 0)
    xyz_min = np.amin(pc_label, axis=0)[0:3]
    pc_label[:, 0:3] -= xyz_min
    xyz = pc_label[:, :3].astype(np.float32)
    colors = pc_label[:, 3:6].astype(np.uint8)
    labels = pc_label[:, 6].astype(np.uint8)
    write_ply(cloud_file, (xyz, colors, labels),
              ['x', 'y', 'z', 'red', 'green', 'blue', 'class'])

    sub_xyz, sub_colors, sub_labels = DP.grid_sub_sampling(
        xyz, colors, labels, sub_grid_size)
    sub_colors = sub_colors / 255.0
    sub_ply_file = os.path.join(sub_pc_folder, cloud_name + '.ply')
    write_ply(sub_ply_file, [sub_xyz, sub_colors, sub_labels],
              ['x', 'y', 'z', 'red', 'green', 'blue', 'class'])

    search_tree = KDTree(sub_xyz, leaf_size=50)
    kd_tree_file = os.path.join(sub_pc_folder, cloud_name + '_KDTree.pkl')
    with open(kd_tree_file, 'wb') as f:
        pickle.dump(search_tree, f)

    proj_idx = np.squeeze(search_tree.query(xyz, return_distance=False))
    proj_idx = proj_idx.astype(np.int32)
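    # This example stops before persisting the projection; the first example
    # above pickles it together with the labels:
    proj_file = os.path.join(sub_pc_folder, cloud_name + '_proj.pkl')
    with open(proj_file, 'wb') as f:
        pickle.dump([proj_idx, labels], f)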
            segIndices = np.array(segmentations['segIndices'])

            with open(join(path, scene, scene + '_vh_clean.aggregation.json'),
                      'r') as f:
                aggregation = json.load(f)

            for segGroup in aggregation['segGroups']:
                c_name = segGroup['label']
                if c_name in names1:
                    nyuID = annot_to_nyuID[c_name]
                    if nyuID in label_values:
                        for segment in segGroup['segments']:
                            vertices_labels[segIndices == segment] = nyuID
            write_ply(join(mesh_path, scene + '_mesh.ply'),
                      [vertices, vertices_colors, vertices_labels],
                      ['x', 'y', 'z', 'red', 'green', 'blue', 'class'],
                      triangular_faces=faces)
        else:
            write_ply(join(mesh_path,
                           scene + '_mesh.ply'), [vertices, vertices_colors],
                      ['x', 'y', 'z', 'red', 'green', 'blue'],
                      triangular_faces=faces)

        # Densify the point cloud by rasterizing the triangular faces; the
        # interpolation yields new points plus, for each, the index of the
        # closest original vertex
        points, associated_vert_inds = DP.rasterize_mesh(
            vertices, faces, 0.003)

        # Grid-subsample the point cloud
        sub_points, sub_vert_inds = DP.grid_sub_sampling(
            points, labels=associated_vert_inds, grid_size=0.01)
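        # sub_vert_inds indexes back into the original vertex array, so vertex
        # attributes transfer to the subsampled cloud; a sketch following the
        # same pattern as the examples above:
        sub_colors = vertices_colors[sub_vert_inds.ravel(), :]
        sub_labels = vertices_labels[sub_vert_inds.ravel()]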
def load_kernels(radius, num_kpts, dimension, fixed, lloyd=False):
    """
    :param radius: radius; scale factor applied to the generated kernel points
    :param num_kpts: number of kernel points
    :param dimension: dimension of the point cloud space (2D or 3D)
    :param fixed: fix position of certain kernel points ('none', 'center' or 'verticals').
    :param lloyd: which algorithm generates the kernel points (Lloyd's algorithm or gradient-based optimization)
    :return: the generated kernel points
    """

    # directory where the kernel points are saved
    kernel_dir = 'kernels'
    if not exists(kernel_dir):
        makedirs(kernel_dir)  # create the folder that stores the kernel point files

    if num_kpts > 30:
        lloyd = True

    # file that stores the kernel points
    kernel_file = join(
        kernel_dir, 'k_{:03d}_{:s}_{:d}D.ply'.format(num_kpts, fixed,
                                                     dimension))

    # check whether the kernel points file already exists
    if not exists(kernel_file):
        if lloyd:
            kernel_points = spherical_Lloyd(1.0,
                                            num_kpts,
                                            dimension=dimension,
                                            fixed=fixed,
                                            verbose=0)
        else:
            kernel_points, grad_norms = kernel_point_optimization_debug(
                1.0,
                num_kpoints=num_kpts,
                num_kernels=100,
                dimension=dimension,
                fixed=fixed,
                verbose=0)
            # pick the kernel with the lowest final gradient norm
            best_k = np.argmin(grad_norms[-1, :])
            # keep the best kernel points
            kernel_points = kernel_points[best_k, :, :]
        write_ply(kernel_file, kernel_points, ['x', 'y', 'z'])
    else:
        data = read_ply(kernel_file)  # read the kernel point coordinates (num_kpts, dimension)
        kernel_points = np.vstack((data['x'], data['y'], data['z'])).T

    # Apply a random rotation to the kernel (identity if no branch below matches)
    rotation_matrix = np.eye(dimension, dtype=np.float32)
    theta = np.random.rand() * 2 * np.pi
    c, s = np.cos(theta), np.sin(theta)
    if dimension == 2:
        if fixed != 'vertical':  # rotate freely in the plane
            rotation_matrix = np.array([[c, -s], [s, c]], dtype=np.float32)
    elif dimension == 3:
        if fixed == 'verticals':  # the fixed points lie on the z-axis, so rotate only around z
            rotation_matrix = np.array([[c, -s, 0], [s, c, 0], [0, 0, 1]],
                                       dtype=np.float32)
        else:  # rotate freely about the origin
            phi = (np.random.rand() - 0.5) * np.pi
            u = np.array([
                np.cos(theta) * np.cos(phi),
                np.sin(theta) * np.cos(phi),
                np.sin(phi)
            ])
            alpha = np.random.rand() * 2 * np.pi
            rotation_matrix = create_3D_rotations(np.reshape(u, (1, -1)),
                                                  np.reshape(alpha,
                                                             (1, -1)))[0]
            rotation_matrix = rotation_matrix.astype(np.float32)
    kernel_points = kernel_points + np.random.normal(
        scale=0.01, size=kernel_points.shape)  # add small random noise
    kernel_points = radius * kernel_points  # scale the kernel points by radius
    kernel_points = np.matmul(kernel_points, rotation_matrix)
    return kernel_points.astype(np.float32)
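
if __name__ == '__main__':
    # Usage sketch (values are illustrative): 15 kernel points in 3D with the
    # center point fixed, scaled to a 2.5-unit radius.
    kp = load_kernels(2.5, num_kpts=15, dimension=3, fixed='center')
    print(kp.shape)  # -> (15, 3)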
Example #7
                # read the point cloud of each object
                with open(object_file, 'r') as f:
                    object_data = np.array([[float(x) for x in line.split()]
                                            for line in f])
                points.append(object_data[:, 0:3].astype(np.float32))
                colors.append(object_data[:, 3:6].astype(np.uint8))
                object_classes = np.full((object_data.shape[0], 1),
                                         object_class,
                                         dtype=np.int32)
                classes.append(object_classes)
    points = np.vstack(points)
    colors = np.vstack(colors)
    classes = np.vstack(classes)

    write_ply(cloud_file, (points, colors, classes),
              ['x', 'y', 'z', 'red', 'green', 'blue', 'class'])

# Subsample the point clouds

tree_path = join(data_path, 'input_{:.3f}'.format(subsampling_parameter))
if not exists(tree_path):
    makedirs(tree_path)

train_files = [join(ply_path, f + '.ply') for f in cloud_names]

for i, file_path in enumerate(train_files):
    print('Processing train data {}/{}'.format(i + 1, len(train_files)))
    cloud_name = file_path.split('/')[-1][:-4]
    KDTree_file = join(tree_path, '{:s}_KDTree.pkl'.format(cloud_name))
    sub_ply_file = join(tree_path, '{:s}.ply'.format(cloud_name))
    proj_file = join(tree_path, '{:s}_proj.pkl'.format(cloud_name))
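    # The loop body is truncated here; mirroring the earlier examples it would
    # load the cloud, grid-subsample it, and pickle the KDTree, e.g.:
    #   data = read_ply(file_path)
    #   points = np.vstack((data['x'], data['y'], data['z'])).T
    #   sub_points = DP.grid_sub_sampling(points, grid_size=subsampling_parameter)
    #   with open(KDTree_file, 'wb') as f:
    #       pickle.dump(KDTree(sub_points, leaf_size=50), f)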
        labels = np.loadtxt(
            join(data_path, data_folder, synset, 'points_label',
                 file_name + '.seg')).astype(np.int32)

        # Center the point cloud and normalize it to the unit sphere
        pmin = np.min(points, axis=0)
        pmax = np.max(points, axis=0)
        points -= (pmin + pmax) / 2
        scale = np.max(np.linalg.norm(points, axis=1))
        points *= 1.0 / scale

        # Reorder the coordinates to x, y, z (swap the y and z axes)
        points = points[:, [0, 2, 1]]

        # Save as a ply file
        write_ply(ply_name, (points, labels), ['x', 'y', 'z', 'label'])

        # Increment the point cloud count for this class
        class_nums[class_name] += 1

        print('Preparing {:s} ply : {:.1f}%'.format(split,
                                                    100 * i / len(files)))

input_points = {'train': [], 'valid': [], 'test': []}
input_labels = {'train': [], 'valid': [], 'test': []}
input_point_labels = {'train': [], 'valid': [], 'test': []}

print('Prepare train points')
if subsampling_parameter > 0:
    filename = join(
        data_path,