Example #1
def eval_shapes(arg, sess, ops, up_ratio, eval_xyz):
    # loop files
    shapes = glob(eval_xyz + "/*.xyz")
    for i,item in enumerate(shapes):
        obj_name = item.split('/')[-1] # with .xyz
        data = np.loadtxt(item)
        input_sparse_xyz = data[:, 0:3]
        input_sparse_normal = data[:, 3:6]

        # normalize point cloud
        normalize_sparse_xyz, centroid, furthest_distance = normalize_point_cloud(input_sparse_xyz)
        
        # get patchwise prediction
        input_sparse_xyz_list, gen_dense_xyz_list, gen_dense_normal_list, gen_sparse_normal_list = eval_patches(normalize_sparse_xyz, sess, arg, ops)

        # un-normalize
        gen_ddense_xyz = np.concatenate(gen_dense_xyz_list, axis=0)
        gen_ddense_xyz = (gen_ddense_xyz*furthest_distance) + centroid
        gen_ddense_normal = np.concatenate(gen_dense_normal_list, axis=0)

        # formulate to fps point
        fps_idx2 = sess.run(ops['fps_idx2'], feed_dict={ops['shape_ddense_xyz_pl']:gen_ddense_xyz[np.newaxis,...]})
        fps_idx2 = fps_idx2[0]
        gen_ddense = np.concatenate([gen_ddense_xyz, gen_ddense_normal], axis=-1)
        gen_dense = gen_ddense[fps_idx2,0:6]

        # save pc
        path = os.path.join(arg.eval_path, obj_name)
        np.savetxt(path, np.squeeze(gen_dense))
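
All of these examples un-normalize predictions with the centroid and furthest_distance returned by normalize_point_cloud, which is not shown on this page. A minimal NumPy sketch of the usual unit-sphere convention (an assumption, not necessarily this repo's exact helper):

import numpy as np

def normalize_point_cloud(xyz):
    # center on the centroid, then scale so the furthest point lies on
    # the unit sphere; returning both stats lets callers invert the
    # transform via x = normed * furthest_distance + centroid
    centroid = np.mean(xyz, axis=0, keepdims=True)         # (1, 3)
    centered = xyz - centroid
    furthest_distance = np.amax(
        np.linalg.norm(centered, axis=-1), keepdims=True)  # (1,)
    return centered / furthest_distance, centroid, furthest_distance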
Example #2
def patch_prediction_eval(pc_point, sess, ops, args, ratio):
    is_training = False

    # normalize the point clouds
    patch_point, centroid, furthest_distance = pc_util.normalize_point_cloud(
        pc_point)
    patch_point = np.expand_dims(patch_point, axis=0)

    batch_patch_point = np.tile(patch_point, [args.batch_size, 1, 1])
    batch_pred2, batch_pred1 = sess.run(
        [ops['pred2'], ops['pred1']],
        feed_dict={
            ops['pointclouds_pl']: batch_patch_point,
            ops['is_training_pl']: is_training,
            ops['pointclouds_radius']: np.ones([args.batch_size], dtype='f'),
            ops['up_ratio_pl']: ratio
        })

    pred2 = np.expand_dims(batch_pred2[0], axis=0)
    pred2_pc = pred2[:, :, 0:3]
    pred2_pc = np.squeeze(centroid + pred2_pc * furthest_distance, axis=0)

    pred1 = np.expand_dims(batch_pred1[0], axis=0)
    pred1_pc = pred1[:, :, 0:3]
    pred1_pc = np.squeeze(centroid + pred1_pc * furthest_distance, axis=0)

    return pred2_pc, pred1_pc
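
A hedged call-site sketch (sess, ops and args are assumed to be set up as in the surrounding evaluation script; the 4x ratio is illustrative):

# only element 0 of the tiled batch is kept, so the np.tile above exists
# purely to satisfy the fixed batch dimension of the placeholders
dense_xyz, coarse_xyz = patch_prediction_eval(patch_xyz, sess, ops, args, ratio=4)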
Example #3
    def visualize(self, input_folder=None, save_path=None):
        _, restore_model_path = model_utils.pre_load_checkpoint(MODEL_DIR)
        logger.info(restore_model_path, bold=True)

        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        config.allow_soft_placement = True
        with tf.Session(config=config) as sess:
            tf_util.optimistic_restore(sess, restore_model_path)
            samples = glob(input_folder, recursive=True)
            samples.sort()
            if len(samples) > 100:
                samples = samples[:100]
            for point_path in samples:
                start = time.time()
                data = pc_util.load(point_path, count=NUM_SHAPE_POINT)
                num_shape_point = data.shape[0]
                data = data[:, 0:3]
                is_2D = np.all(data[:, 2] == 0)
                data, centroid, furthest_distance = pc_util.normalize_point_cloud(
                    data)
                if FLAGS.drop_out < 1:
                    idx = farthest_point_sample(
                        int(num_shape_point * FLAGS.drop_out),
                        data[np.newaxis, ...]).eval()[0]
                    data = data[idx, 0:3]
                if JITTER:
                    data = pc_util.jitter_perturbation_point_cloud(
                        data[np.newaxis, ...],
                        sigma=FLAGS.jitter_sigma,
                        clip=FLAGS.jitter_max,
                        is_2D=is_2D)
                    data = data[0, ...]
                ## get the edge information
                logger.info(os.path.basename(point_path))
                # seed patch centers: keep points whose |z| is below 20%
                # of the max z, then pick 5 well-spread seeds via FPS
                mid = data[(np.abs(data[:, 2]) < np.amax(data[:, 2]) * 0.2), :]
                idx = farthest_point_sample(5, mid[np.newaxis, ...]).eval()[0]
                # idx = np.random.choice(data.shape[0], 5, replace=False)
                patches = pc_util.extract_knn_patch(mid[idx, :], data,
                                                    NUM_POINT)
                end = time.time()
                print("total time: ", end - start)
                path = os.path.join(save_path,
                                    point_path.split('/')[-1][:-4] + '.ply')
                total_levels = int(np.log2(UP_RATIO) / np.log2(STEP_RATIO))
                for p in range(patches.shape[0]):
                    patch = patches[p]
                    for i in range(1, total_levels + 1):
                        patch_result = self.patch_prediction(
                            patch, sess, STEP_RATIO**i)
                        pc_util.save_ply(
                            (patch_result * furthest_distance) + centroid,
                            path[:-4] + "_p_%d_%d.ply" % (p, i))
                    pc_util.save_ply((patch * furthest_distance) + centroid,
                                     path[:-4] + "_p_%d_%d.ply" % (p, 0))
                pc_util.save_ply((data * furthest_distance) + centroid,
                                 path[:-4] + "_input.ply")
Example #4
    def patch_prediction(self, patch_point, sess, ratio):
        # normalize the point clouds
        patch_point, centroid, furthest_distance = pc_util.normalize_point_cloud(
            patch_point)
        patch_point = np.expand_dims(patch_point, axis=0)
        pred = sess.run(self.pred,
                        feed_dict={
                            self.pointclouds_input: patch_point,
                            self.pointclouds_radius: np.ones(BATCH_SIZE, dtype=np.float32),
                            self.model_up_ratio: ratio
                        })

        pred = np.squeeze(centroid + pred * furthest_distance, axis=0)
        return pred
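
The last line is the exact inverse of the normalization at the top of the function; a quick round-trip sanity sketch (assuming pc_util is importable):

import numpy as np
pts = np.random.rand(128, 3).astype(np.float32)
normed, centroid, furthest_distance = pc_util.normalize_point_cloud(pts)
assert np.allclose(centroid + normed * furthest_distance, pts, atol=1e-5)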
Example #5
def eval_whole_model(sess, ops, args, up_ratio, path_input, path_output):
    # get necessary parameters
    num_point = args.num_point
    num_shape_point = args.num_shape_point
    patch_num_ratio = 3

    if not os.path.exists(path_output):
        os.makedirs(path_output)

    # obtain xyz file from path_input
    pcs_input = glob(path_input + "/*.xyz")
    num_pcs = len(pcs_input)
    print('total obj %d' % num_pcs)

    for i, path_input_xyz in enumerate(pcs_input):
        pc_input = np.loadtxt(path_input_xyz)
        name_obj = path_input_xyz.split('/')[-1]  # with .xyz
        pc_input = pc_input[:, 0:3]
        pc_input_normed, centroid, scale = pc_util.normalize_point_cloud(
            pc_input)

        # obtain patch prediction
        input_list, pred2_list, pred1_list = pc_prediction_eval(
            pc_input_normed,
            sess,
            ops,
            args,
            patch_num_ratio=patch_num_ratio,
            ratio=up_ratio)

        # formulate patch prediction to full model by fps
        pred2_normed = np.concatenate(pred2_list, axis=0)
        idx = farthest_point_sample(num_shape_point * up_ratio,
                                    pred2_normed[np.newaxis, ...]).eval()[0]
        pred2_normed = pred2_normed[idx, 0:3]
        pred2 = (pred2_normed * scale) + centroid

        # save xyz
        save_path = os.path.join(path_output, 'input_' + name_obj)
        np.savetxt(save_path, pc_input)
        save_path = os.path.join(path_output, 'pred_' + name_obj)
        np.savetxt(save_path, pred2)
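
Note that farthest_point_sample(...).eval() needs a default session; tf.Session used as a context manager installs one. A hedged call-site sketch (ops and args assumed to be built elsewhere):

with tf.Session() as sess:
    # the with-block also makes sess the default session, which the
    # .eval() call inside eval_whole_model relies on
    eval_whole_model(sess, ops, args, up_ratio=4,
                     path_input='./data/input', path_output='./data/output')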
Example #6
def eval_per_patch(input_sparse_xyz, sess, arg, ops):
    is_training = False

    # normalize patch
    normalize_sparse_xyz, centroid, furthest_distance = normalize_point_cloud(input_sparse_xyz)
    normalize_sparse_xyz = np.expand_dims(normalize_sparse_xyz,axis=0)
    batch_normalize_sparse_xyz = np.tile(normalize_sparse_xyz, [arg.batch_size, 1, 1])

    # feed_dict and return result
    gen_dense_xyz, gen_dense_normal, gen_sparse_normal = sess.run([ops['gen_dense_xyz'], ops['gen_dense_normal'], ops['gen_sparse_normal']],
        feed_dict={ops['input_sparse_xyz_pl']: batch_normalize_sparse_xyz, 
            ops['training_pl']: is_training,
            ops['input_r_pl']: np.ones([arg.batch_size], dtype='f')
    })

    # keep only the first element of the tiled batch (all copies are identical)
    gen_dense_xyz = np.expand_dims(gen_dense_xyz[0], axis=0)
    # undo the patch normalization to return to the input's coordinate frame
    gen_dense_xyz = np.squeeze(centroid + gen_dense_xyz * furthest_distance, axis=0)
    gen_dense_normal = gen_dense_normal[0]
    gen_sparse_normal = gen_sparse_normal[0]
    return gen_dense_xyz, gen_dense_normal, gen_sparse_normal
Example #7
FLAGS = parser.parse_args()
PRED_DIR = os.path.abspath(FLAGS.pred)
GT_DIR = os.path.abspath(FLAGS.gt)
print(PRED_DIR)
# NAME = FLAGS.name

print(GT_DIR)
gt_paths = glob(os.path.join(GT_DIR, '*.xyz'))

gt_names = [os.path.basename(p)[:-4] for p in gt_paths]
print(len(gt_paths))

gt = load(gt_paths[0])[:, :3]
pred_placeholder = tf.placeholder(tf.float32, [1, 8192, 3])
gt_placeholder = tf.placeholder(tf.float32, [1, 8192, 3])
pred_tensor, centroid, furthest_distance = normalize_point_cloud(
    pred_placeholder)
gt_tensor, centroid, furthest_distance = normalize_point_cloud(gt_placeholder)  # note: overwrites the pred-side stats

cd_forward, _, cd_backward, _ = tf_nndistance.nn_distance(
    pred_tensor, gt_tensor)
cd_forward = cd_forward[0, :]
cd_backward = cd_backward[0, :]

percentages = np.array([0.008, 0.012])


def cal_nearest_distance(queries, pc, k=2):
    """Distance from each query to its nearest neighbor in pc (k=2 skips the self-match)."""
    knn_search = NearestNeighbors(n_neighbors=k, algorithm='auto')
    knn_search.fit(pc)
    # completion inferred from the signature; the original example is truncated here
    dis, _ = knn_search.kneighbors(queries)
    return dis[:, 1]
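
A hedged sketch of how the two thresholds are typically consumed: report the fraction of nearest-neighbor distances under each threshold (an F-score-style precision/recall readout; this mirrors common practice, not necessarily this script's exact continuation):

def threshold_fractions(cd_forward_val, cd_backward_val, thresholds):
    # precision-like: predicted points near GT; recall-like: GT points covered
    return [(np.mean(cd_forward_val < t), np.mean(cd_backward_val < t))
            for t in thresholds]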
Example #8
    def test_hierarical_prediction(self, input_folder=None, save_path=None):
        _, restore_model_path = model_utils.pre_load_checkpoint(MODEL_DIR)
        logger.info(restore_model_path, bold=True)

        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        config.allow_soft_placement = True
        with tf.Session(config=config) as sess:
            tf_util.optimistic_restore(sess, restore_model_path)
            total_time = 0
            samples = glob(input_folder, recursive=True)
            samples.sort()
            # if len(samples)>100:
            #     samples = samples[:100]
            for point_path in samples:
                start = time.time()
                data = pc_util.load(point_path, count=NUM_SHAPE_POINT)
                num_shape_point = data.shape[0]
                data = data[:, 0:3]
                is_2D = np.all(data[:, 2] == 0)
                data, centroid, furthest_distance = pc_util.normalize_point_cloud(
                    data)
                if FLAGS.drop_out < 1:
                    idx = farthest_point_sample(
                        int(num_shape_point * FLAGS.drop_out),
                        data[np.newaxis, ...]).eval()[0]
                    data = data[idx, 0:3]
                if JITTER:
                    data = pc_util.jitter_perturbation_point_cloud(
                        data[np.newaxis, ...],
                        sigma=FLAGS.jitter_sigma,
                        clip=FLAGS.jitter_max,
                        is_2D=is_2D)
                    data = data[0, ...]
                ## get the edge information
                logger.info(os.path.basename(point_path))
                input_list, pred_list = self.pc_prediction(
                    data, sess, patch_num_ratio=PATCH_NUM_RATIO)
                end = time.time()
                total_time += end - start  # accumulate, since the average is printed below
                print("total time: ", end - start)
                pred_pc = np.concatenate(pred_list, axis=0)
                pred_pc = (pred_pc * furthest_distance) + centroid
                data = (data * furthest_distance) + centroid
                folder = os.path.basename(os.path.dirname(point_path))
                path = os.path.join(save_path, folder,
                                    point_path.split('/')[-1][:-4] + '.ply')
                # pc_util.save_ply(pred_pc, path[:-4]+'_overlapped.ply')
                pc_util.save_ply(data, path[:-4] + '_input.ply')
                idx = farthest_point_sample(
                    int(num_shape_point * FLAGS.drop_out) * UP_RATIO,
                    pred_pc[np.newaxis, ...]).eval()[0]
                pred_pc = pred_pc[idx, 0:3]
                # pred_pc, _, _ = pc_util.normalize_point_cloud(pred_pc)
                # pred_pc = (pred_pc * furthest_distance) + centroid
                pc_util.save_ply(pred_pc, path[:-4] + '.ply')

                # if len(input_list) > 1:
                #     counter = 0
                #     for in_p, pred_p in zip(input_list, pred_list):
                #         pc_util.save_ply(in_p*furthest_distance+centroid, path[:-4]+"_input_patch_%d.ply" % counter)
                #         pc_util.save_ply(pred_p*furthest_distance+centroid, path[:-4]+"_pred_patch_%d.ply" % counter)
                #         counter += 1

            print(total_time / len(samples))
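
Point-count bookkeeping for the final farthest point sampling, with hypothetical numbers:

# num_shape_point = 5000, FLAGS.drop_out = 0.5, UP_RATIO = 4:
# the input keeps int(5000 * 0.5) = 2500 points, and the closing FPS keeps
# 2500 * 4 = 10000 points out of the overlapping patch predictions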
Example #9
            # the example begins mid-function: the enclosing loop and the
            # branch guarding this block are not shown, and the seed_idx
            # assignment is inferred from its use two lines below
            seed_idx = np.random.choice(
                np.arange(pc.shape[0]),
                size=[num_patch_per_shape],
                replace=False)
            seed_points = pc[seed_idx, ...]
        input_patches_value = sess.run(
            input_patches,
            feed_dict={
                input_placeholder: pc[np.newaxis, ...],
                num_in_point_placeholder: num_point * ratio,
                seed_points_placeholder: seed_points[np.newaxis, ...]
            })
        if furthest_distance is None or centroid is None:
            # first pass: normalize the patches and remember the stats
            input_patches_value, centroid, furthest_distance = normalize_point_cloud(
                input_patches_value)
        else:
            # later passes reuse the stats so every dset shares one frame
            input_patches_value = (input_patches_value - centroid) / furthest_distance
        example[dset] = input_patches_value
    # each example [N, P, 3] to N examples
    for i in range(num_patch_per_shape):
        # [save_ply(example[k][i], "./{}_{}.ply".format(i, k)) for k in example]
        features = {
            k: _floats_feature(v[i].flatten().tolist())
            for k, v in example.items()
        }
        tfexample = tf.train.Example(
            features=tf.train.Features(feature=features))
        writer.write(tfexample.SerializeToString())
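
The writer relies on a _floats_feature helper that this example does not show; the standard TFRecord recipe is:

def _floats_feature(value):
    # wrap a flat list of floats as a tf.train.Feature
    return tf.train.Feature(float_list=tf.train.FloatList(value=value))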