Example No. 1
    def visualize(self, input_folder=None, save_path=None):
        _, restore_model_path = model_utils.pre_load_checkpoint(MODEL_DIR)
        logger.info(restore_model_path, bold=True)

        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        config.allow_soft_placement = True
        with tf.Session(config=config) as sess:
            tf_util.optimistic_restore(sess, restore_model_path)
            samples = glob(input_folder, recursive=True)
            samples.sort()
            if len(samples) > 100:
                samples = samples[:100]
            for point_path in samples:
                start = time.time()
                data = pc_util.load(point_path, count=NUM_SHAPE_POINT)
                num_shape_point = data.shape[0]
                data = data[:, 0:3]
                is_2D = np.all(data[:, 2] == 0)
                data, centroid, furthest_distance = pc_util.normalize_point_cloud(
                    data)
                # optionally subsample the input with farthest point sampling
                if FLAGS.drop_out < 1:
                    idx = farthest_point_sample(
                        int(num_shape_point * FLAGS.drop_out),
                        data[np.newaxis, ...]).eval()[0]
                    data = data[idx, 0:3]
                if JITTER:
                    data = pc_util.jitter_perturbation_point_cloud(
                        data[np.newaxis, ...],
                        sigma=FLAGS.jitter_sigma,
                        clip=FLAGS.jitter_max,
                        is_2D=is_2D)
                    data = data[0, ...]
                # pick seed points in the band around z = 0 and extract kNN patches
                logger.info(os.path.basename(point_path))
                mid = data[(np.abs(data[:, 2]) < np.amax(data[:, 2]) * 0.2), :]
                idx = farthest_point_sample(5, mid[np.newaxis, ...]).eval()[0]
                # idx = np.random.choice(data.shape[0], 5, replace=False)
                patches = pc_util.extract_knn_patch(mid[idx, :], data,
                                                    NUM_POINT)
                end = time.time()
                print("total time: ", end - start)
                path = os.path.join(save_path,
                                    point_path.split('/')[-1][:-4] + '.ply')
                # number of progressive upsampling levels (each step upsamples by STEP_RATIO)
                total_levels = int(np.log2(UP_RATIO) / np.log2(STEP_RATIO))
                for p in range(patches.shape[0]):
                    patch = patches[p]
                    for i in range(1, total_levels + 1):
                        patch_result = self.patch_prediction(
                            patch, sess, STEP_RATIO**i)
                        pc_util.save_ply(
                            (patch_result * furthest_distance) + centroid,
                            path[:-4] + "_p_%d_%d.ply" % (p, i))
                    pc_util.save_ply((patch * furthest_distance) + centroid,
                                     path[:-4] + "_p_%d_%d.ply" % (p, 0))
                pc_util.save_ply((data * furthest_distance) + centroid,
                                 path[:-4] + "_input.ply")
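Throughout these examples, predictions are mapped back to world coordinates with `(pc * furthest_distance) + centroid`. A minimal sketch of the contract assumed for `pc_util.normalize_point_cloud` (hypothetical, not the library's actual source):

import numpy as np

def normalize_point_cloud_sketch(pc):
    # assumed behavior: center on the centroid, then scale so the
    # furthest point lies on the unit sphere
    centroid = np.mean(pc, axis=0, keepdims=True)
    pc = pc - centroid
    furthest_distance = np.amax(np.sqrt(np.sum(pc ** 2, axis=-1)), keepdims=True)
    return pc / furthest_distance, centroid, furthest_distance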
Example No. 2
# Relies on load(), cal_nearest_distance(), and percentages defined elsewhere
# in the evaluation script (see Example No. 4).
def analyze_uniform(idx_file, radius_file, map_points_file):
    start_time = time()
    points = load(map_points_file)[:, 4:]
    radius = np.loadtxt(radius_file)
    print('radius:', radius)
    with open(idx_file) as f:
        lines = f.readlines()

    sample_number = 1000
    rad_number = radius.shape[0]

    uniform_measure = np.zeros([rad_number, 1])

    densities = np.zeros([rad_number, sample_number])

    expect_number = percentages * points.shape[0]
    expect_number = np.reshape(expect_number, [rad_number, 1])

    for j in range(rad_number):
        uniform_dis = []

        for i in range(sample_number):

            # each line has the form "<density>:<neighbor indices>"
            density, idx = lines[i * rad_number + j].split(':')
            densities[j, i] = int(density)
            coverage = np.square(densities[j, i] -
                                 expect_number[j]) / expect_number[j]

            num_points = re.findall(r"(\d+)", idx)

            idx = list(map(int, num_points))
            if len(idx) < 5:
                continue

            idx = np.array(idx).astype(np.int32)
            map_point = points[idx]

            shortest_dis = cal_nearest_distance(map_point, map_point, 2)
            disk_area = math.pi * (radius[j]**2) / map_point.shape[0]
            expect_d = math.sqrt(2 * disk_area / 1.732)  # expected spacing under ideal hexagonal packing (1.732 ≈ sqrt(3))

            dis = np.square(shortest_dis - expect_d) / expect_d
            dis_mean = np.mean(dis)
            uniform_dis.append(coverage * dis_mean)

        uniform_dis = np.array(uniform_dis).astype(np.float32)
        uniform_measure[j, 0] = np.mean(uniform_dis)

    print('time cost for uniform:', time() - start_time)
    return uniform_measure
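A hypothetical call, assuming the index, radius, and mapped-point files produced earlier in the evaluation pipeline (the file names below are illustrative only):

import numpy as np

# illustrative file names; the real pipeline writes these per shape
measure = analyze_uniform("shape_idx.txt", "shape_radius.txt",
                          "shape_point2mesh_distance.txt")
for r, m in zip(np.loadtxt("shape_radius.txt"), measure[:, 0]):
    print("radius %.4f -> uniformity %.6f" % (r, m))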
Example No. 3
    def eval_per_epoch(self, epoch, input_folder):
        step = self.step.eval()
        max_ratio = self.get_next_ratio(step)
        ratio_idx = int(np.log2(max_ratio) / np.log2(STEP_RATIO)) - 1
        start = time.time()
        samples = glob(input_folder, recursive=True)
        samples.sort()
        if len(samples) > 50:
            samples = samples[:50]
        for i in range(ratio_idx):
            ratio = STEP_RATIO**(i + 1)
            save_path = os.path.join(
                MODEL_DIR, "eval", "epoch_%d" % epoch,
                'knn_p%d_s%d_x%d' % (NUM_POINT, NUM_SHAPE_POINT, ratio))
            for point_path in samples:
                data = pc_util.load(point_path, count=NUM_SHAPE_POINT)
                data = data[:, 0:3]
                # log the shape name, then run patch-based prediction
                logger.info(os.path.basename(point_path))
                input_list, pred_list = self.pc_prediction(
                    data,
                    self.sess,
                    ratio=ratio,
                    patch_num_ratio=PATCH_NUM_RATIO)

                input_pc = np.concatenate(input_list, axis=0)
                pred_pc = np.concatenate(pred_list, axis=0)

                path = os.path.join(save_path,
                                    point_path.split('/')[-1][:-4] + '.ply')
                pc_util.save_ply(pred_pc, path)
                pc_util.save_ply(input_pc, path[:-4] + '_input.ply')
                # if len(input_list) > 1:
                #     counter = 0
                #     for in_p, pred_p in zip(input_list, pred_list):
                #         pc_util.save_ply(in_p, os.path.join(save_path, point_path.split('/')[-1][:-4]+"_input_patch_%d.ply" % counter))
                #         pc_util.save_ply(pred_p, os.path.join(save_path, point_path.split('/')[-1][:-4]+"_pred_patch_%d.ply" % counter))
                #         counter += 1
        end = time.time()
        logger.info("Evaluation time: %.2f" % (end - start))
Example No. 4
parser = argparse.ArgumentParser()
parser.add_argument("--pred", type=str, required=True, help=".xyz")
parser.add_argument("--gt", type=str, required=True, help=".xyz")
FLAGS = parser.parse_args()
PRED_DIR = os.path.abspath(FLAGS.pred)
GT_DIR = os.path.abspath(FLAGS.gt)
print(PRED_DIR)
# NAME = FLAGS.name

print(GT_DIR)
gt_paths = glob(os.path.join(GT_DIR, '*.xyz'))

gt_names = [os.path.basename(p)[:-4] for p in gt_paths]
print(len(gt_paths))

gt = load(gt_paths[0])[:, :3]
pred_placeholder = tf.placeholder(tf.float32, [1, 8192, 3])
gt_placeholder = tf.placeholder(tf.float32, [1, 8192, 3])
pred_tensor, centroid, furthest_distance = normalize_point_cloud(
    pred_placeholder)
gt_tensor, centroid, furthest_distance = normalize_point_cloud(gt_placeholder)

cd_forward, _, cd_backward, _ = tf_nndistance.nn_distance(
    pred_tensor, gt_tensor)
cd_forward = cd_forward[0, :]
cd_backward = cd_backward[0, :]

# fractions of the full point count used as expected densities in analyze_uniform
percentages = np.array([0.008, 0.012])


def cal_nearest_distance(queries, pc, k=2):
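The excerpt cuts off at this signature. Judging from the call site in Example No. 2, cal_nearest_distance(map_point, map_point, 2), where k=2 lets the self-match be discarded, a plausible body (a sketch under that assumption, not the repository's implementation) could use scikit-learn:

from sklearn.neighbors import NearestNeighbors

def cal_nearest_distance(queries, pc, k=2):
    # assumed behavior: distance from each query to its nearest
    # non-self neighbor in pc
    knn = NearestNeighbors(n_neighbors=k).fit(pc)
    dis, _ = knn.kneighbors(queries)
    return dis[:, k - 1]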
Example No. 5
        # print("total inputs ", len(gt_pred_pairs))
        tag = re.search(r"/(\w+)/result", os.path.dirname(gt_pred_pairs[0][1]))
        if tag:
            tag = tag.groups()[0]
        else:
            tag = D  # fallback tag from the enclosing scope (not shown in this excerpt)

        print("{:60s}".format(tag), end=' ')
        global_p2f = []
        global_density = []
        with open(os.path.join(os.path.dirname(gt_pred_pairs[0][1]), "evaluation.csv"), "w") as f:
            writer = csv.DictWriter(f, fieldnames=fieldnames, restval="-", extrasaction="ignore")
            writer.writeheader()
            for gt_path, pred_path in gt_pred_pairs:
                row = {}
                gt = load(gt_path)[:, :3]
                gt = gt[np.newaxis, ...]
                pred = pc_util.load(pred_path)
                pred = pred[:, :3]
                row["name"] = os.path.basename(pred_path)
                pred = pred[np.newaxis, ...]
                cd_forward_value, cd_backward_value = sess.run(
                    [cd_forward, cd_backward],
                    feed_dict={pred_placeholder: pred, gt_placeholder: gt})
                save_ply_property(np.squeeze(pred), cd_forward_value,
                                  pred_path[:-4] + "_cdF.ply",
                                  property_max=0.003, cmap_name="jet")
                save_ply_property(np.squeeze(gt), cd_backward_value,
                                  pred_path[:-4] + "_cdB.ply",
                                  property_max=0.003, cmap_name="jet")
                # cd_backward_value = cd_forward_value = 0.0
                # Chamfer-style mean distance and a Hausdorff-style max distance
                md_value = np.mean(cd_forward_value) + np.mean(cd_backward_value)
                hd_value = np.max(np.amax(cd_forward_value, axis=0) +
                                  np.amax(cd_backward_value, axis=0))
                cd_backward_value = np.mean(cd_backward_value)
                cd_forward_value = np.mean(cd_forward_value)
                # row["CD_forward"] = np.mean(cd_forward_value)
                # row["CD_backwar"] = np.mean(cd_backward_value)
Example No. 6
        loss = weight * loss
        losses.append(loss)

    return losses, None


if __name__ == '__main__':
    import utils.pc_util as pc_util
    from glob import glob
    gt_files = glob("../data/test_data/sketchfab/poisson_10000/*.xyz")
    pc_files = glob("../data/test_data/sketchfab/poisson_2500/*.xyz")
    is_2D = False
    gt = []
    pc = []
    for b in range(4):
        gt.append(pc_util.load(gt_files[b])[np.newaxis, :, :3])
        pc.append(pc_util.load(pc_files[b])[np.newaxis, :, :3])

    gt = np.concatenate(gt, axis=0)
    pc = np.concatenate(pc, axis=0)

    # fetcher = Fetcher(input_data, label, radius, batch_size=10,
    #     step_ratio=4, up_ratio=16, num_in_point=1024)
    gt = tf.constant(gt, dtype=tf.float32)
    pred = tf.constant(pc, dtype=tf.float32)
    # covariance matrix
    # for each predicted point, indices of its 5 nearest neighbors in gt
    _, idx = knn_point(5, gt, pred)
    # grouped neighbor coordinates: [B, P, k, 3]
    grouped = tf.gather_nd(gt, idx)
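The snippet ends after grouping each predicted point's 5 nearest ground-truth neighbors. The per-neighborhood covariance that the "# covariance matrix" comment anticipates could be formed like this (a sketch, assuming grouped has shape [B, P, k, 3]):

# center each neighborhood, then form the 3x3 scatter matrix per point
mean = tf.reduce_mean(grouped, axis=2, keepdims=True)        # [B, P, 1, 3]
centered = grouped - mean                                    # [B, P, k, 3]
cov = tf.matmul(centered, centered, transpose_a=True) / 5.0  # [B, P, 3, 3]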
Example No. 7
    def test_hierarical_prediction(self, input_folder=None, save_path=None):
        _, restore_model_path = model_utils.pre_load_checkpoint(MODEL_DIR)
        logger.info(restore_model_path, bold=True)

        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        config.allow_soft_placement = True
        with tf.Session(config=config) as sess:
            tf_util.optimistic_restore(sess, restore_model_path)
            total_time = 0
            samples = glob(input_folder, recursive=True)
            samples.sort()
            # if len(samples)>100:
            #     samples = samples[:100]
            for point_path in samples:
                start = time.time()
                data = pc_util.load(point_path, count=NUM_SHAPE_POINT)
                num_shape_point = data.shape[0]
                data = data[:, 0:3]
                is_2D = np.all(data[:, 2] == 0)
                data, centroid, furthest_distance = pc_util.normalize_point_cloud(
                    data)
                if FLAGS.drop_out < 1:
                    idx = farthest_point_sample(
                        int(num_shape_point * FLAGS.drop_out),
                        data[np.newaxis, ...]).eval()[0]
                    data = data[idx, 0:3]
                if JITTER:
                    data = pc_util.jitter_perturbation_point_cloud(
                        data[np.newaxis, ...],
                        sigma=FLAGS.jitter_sigma,
                        clip=FLAGS.jitter_max,
                        is_2D=is_2D)
                    data = data[0, ...]
                # log the shape name, then run patch-based prediction
                logger.info(os.path.basename(point_path))
                input_list, pred_list = self.pc_prediction(
                    data, sess, patch_num_ratio=PATCH_NUM_RATIO)
                end = time.time()
                total_time += end - start
                print("total time: ", end - start)
                pred_pc = np.concatenate(pred_list, axis=0)
                pred_pc = (pred_pc * furthest_distance) + centroid
                data = (data * furthest_distance) + centroid
                folder = os.path.basename(os.path.dirname(point_path))
                path = os.path.join(save_path, folder,
                                    point_path.split('/')[-1][:-4] + '.ply')
                # pc_util.save_ply(pred_pc, path[:-4]+'_overlapped.ply')
                pc_util.save_ply(data, path[:-4] + '_input.ply')
                # resample the merged overlapping patches down to the target count
                idx = farthest_point_sample(
                    int(num_shape_point * FLAGS.drop_out) * UP_RATIO,
                    pred_pc[np.newaxis, ...]).eval()[0]
                pred_pc = pred_pc[idx, 0:3]
                # pred_pc, _, _ = pc_util.normalize_point_cloud(pred_pc)
                # pred_pc = (pred_pc * furthest_distance) + centroid
                pc_util.save_ply(pred_pc, path[:-4] + '.ply')

                # if len(input_list) > 1:
                #     counter = 0
                #     for in_p, pred_p in zip(input_list, pred_list):
                #         pc_util.save_ply(in_p*furthest_distance+centroid, path[:-4]+"_input_patch_%d.ply" % counter)
                #         pc_util.save_ply(pred_p*furthest_distance+centroid, path[:-4]+"_pred_patch_%d.ply" % counter)
                #         counter += 1

            print("average time per shape: ", total_time / len(samples))
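farthest_point_sample used above is the batched CUDA op from PointNet++; for reference, a single-cloud numpy sketch of the same greedy selection (assumed semantics: return indices of k mutually far-apart points):

import numpy as np

def farthest_point_sample_np(k, points):
    # greedy farthest-point sampling over one [N, 3] cloud;
    # the first seed is point 0, as in common implementations
    idx = np.zeros(k, dtype=np.int32)
    dist = np.full(points.shape[0], np.inf)
    for s in range(1, k):
        dist = np.minimum(dist,
                          np.sum((points - points[idx[s - 1]]) ** 2, axis=1))
        idx[s] = np.argmax(dist)
    return idx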
Example No. 8
for i, train_relpath in enumerate(train_relpath_shards):
    print("shard {}".format(i))
    print(train_relpath)
    with tf.python_io.TFRecordWriter(
            os.path.join(
                out_dir, "{}_p{}_shard{}.tfrecord".format(
                    "_".join(datasets), num_point, i))) as writer:
        for p in train_relpath:
            seed_points = None
            centroid = furthest_distance = None
            example = {}
            for i, zipped in enumerate(
                    zip(ratios, npoints, dirs, datasets)):
                ratio, npc, dirname, dset = zipped
                point_path = dirname + p
                pc = load(point_path)[:, :3]
                if seed_points is None:
                    # draw the seed points once per shape so that patches
                    # align across the different resolutions
                    seed_idx = np.random.choice(
                        np.arange(pc.shape[0]),
                        size=[num_patch_per_shape],
                        replace=False)
                    seed_points = pc[seed_idx, ...]
                input_patches_value = sess.run(
                    input_patches,
                    feed_dict={
                        input_placeholder: pc[np.newaxis, ...],
                        num_in_point_placeholder: num_point * ratio,
                        seed_points_placeholder: seed_points[np.newaxis, ...]