Example #1
    def pc_prediction(self, gm, sess, patch_num_ratio=3, edge_threshold=0.05):
        ## get patch seeds from farthest point sampling
        points = tf.convert_to_tensor(np.expand_dims(gm.data, axis=0), dtype=tf.float32)
        start = time.time()
        seed1_num = int(gm.data.shape[0] / (NUM_POINT / 2) * patch_num_ratio)

        ## FPS sampling
        seed = farthest_point_sample(seed1_num * 2, points).eval()[0]
        seed_list = seed[:seed1_num]
        print "farthest point sampling cost", time.time() - start
        # the degenerate uniform(1.0, 1.0) fixes every patch ratio to 1.0
        ratios = np.random.uniform(1.0, 1.0, size=[seed1_num])

        input_list = []
        up_point_list = []
        up_edge_list = []
        up_edgedist_list = []
        fail = 0
        for seed, ratio in tqdm(zip(seed_list, ratios)):
            try:
                patch_size = int(NUM_POINT * ratio)
                idx = np.asarray(gm.bfs_knn(seed, patch_size))
                # idx = np.asarray(gm.geodesic_knn(seed,patch_size))
                if len(idx)<NUM_POINT:
                    fail = fail + 1
                    continue
                idx1 = np.random.permutation(idx.shape[0])[:NUM_POINT]
                idx1.sort()
                idx = idx[idx1]
                point = gm.data[idx]
            except:
                fail = fail + 1
                continue
            up_point, up_edgepoint, up_edgedist = self.patch_prediction(point, sess, ratio, edge_threshold)

            input_list.append(point)
            up_point_list.append(up_point)
            up_edge_list.append(up_edgepoint)
            up_edgedist_list.append(up_edgedist)
        print "total %d fails" % fail

        input = np.concatenate(input_list, axis=0)
        pred = np.concatenate(up_point_list, axis=0)

        pred_edge = np.concatenate(up_edge_list, axis=0)
        print "total %d edgepoint" % pred_edge.shape[0]
        pred_edgedist = np.concatenate(up_edgedist_list, axis=0)
        rgba = data_provider.convert_dist2rgba(pred_edgedist, scale=10)
        pred_edge = np.hstack((pred_edge, rgba, pred_edgedist.reshape(-1, 1)))

        return input, pred, pred_edge
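
A note on the patch extraction above: each patch is a BFS-kNN neighborhood around a seed that is randomly thinned to exactly NUM_POINT points, with the surviving indices sorted so the breadth-first ordering is kept. A minimal NumPy sketch of that step, taking bfs_knn's output and NUM_POINT as given (the helper name extract_patch is illustrative, not part of the repository):

import numpy as np

def extract_patch(data, neighbor_idx, num_point):
    # data: (N, 3) point cloud; neighbor_idx: indices from a BFS-kNN query around a seed
    neighbor_idx = np.asarray(neighbor_idx)
    if len(neighbor_idx) < num_point:
        return None  # patch too small; the caller counts it as a failure
    # random subset of exactly num_point indices, sorted to preserve the BFS ordering
    keep = np.random.permutation(len(neighbor_idx))[:num_point]
    keep.sort()
    return data[neighbor_idx[keep]]
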
Example #2
    def test(self, show=False, use_normal=False):
        data_folder = '../../PointSR_data/CAD/mesh_MC16k'
        phase = data_folder.split('/')[-2] + data_folder.split('/')[-1]
        save_path = os.path.join(MODEL_DIR, 'result/' + phase)
        self.saver = tf.train.Saver()
        _, restore_model_path = model_utils.pre_load_checkpoint(MODEL_DIR)
        print restore_model_path

        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        config.allow_soft_placement = True
        with tf.Session(config=config) as sess:
            self.saver.restore(sess, restore_model_path)
            samples = glob(data_folder + "/*.xyz")
            samples.sort()
            total_time = 0

            #input, dist, edge, data_radius, name = data_provider.load_patch_data(NUM_POINT, True, 30)
            #edge = np.reshape(edge,[-1,NUM_EDGE,6])

            for i, item in tqdm(enumerate(samples)):
                input = np.loadtxt(item)
                edge = np.loadtxt(
                    item.replace('mesh_MC16k',
                                 'mesh_edge').replace('.xyz', '_edge.xyz'))
                # drop degenerate edges whose two endpoints coincide
                degenerate = np.all(edge[:, 0:3] == edge[:, 3:6], axis=-1)
                edge = edge[~degenerate]
                l = len(edge)
                # tile the indices floor(1300 / l) times and top up with a random
                # remainder so that exactly 1300 edges are kept
                idx = range(l) * (1300 / l) + list(
                    np.random.permutation(l)[:1300 % l])
                edge = edge[idx]

                # # coord = input[:, 0:3]
                # # centroid = np.mean(coord, axis=0, keepdims=True)
                # # coord = coord - centroid
                # # furthest_distance = np.amax(np.sqrt(np.sum(abs(coord) ** 2, axis=-1)))
                # # coord = coord / furthest_distance
                # # input[:, 0:3] = coord
                input = np.expand_dims(input, axis=0)
                # input = data_provider.jitter_perturbation_point_cloud(input, sigma=0.01, clip=0.02)

                start_time = time.time()
                # note: creating a fresh placeholder per sample grows the TF graph each iteration
                edge_pl = tf.placeholder(tf.float32, [1, edge.shape[0], 6])
                dist_gt_pl = tf.sqrt(
                    tf.reduce_min(model_utils.distance_point2edge(
                        self.pred, edge_pl),
                                  axis=-1))

                pred, pred_dist, dist_gt = sess.run(
                    [self.pred, self.pred_dist, dist_gt_pl],
                    feed_dict={
                        self.pointclouds_input: input[:, :, 0:3],
                        self.pointclouds_radius: np.ones(BATCH_SIZE),
                        edge_pl: np.expand_dims(edge, axis=0)
                    })
                total_time += time.time() - start_time
                norm_pl = np.zeros_like(pred)
                ##--------------visualize predicted point cloud----------------------
                if show:
                    f, axis = plt.subplots(3)
                    # input was expanded to (1, N, C) above, so drop the batch axis here
                    axis[0].imshow(
                        pc_util.point_cloud_three_views(input[0, :, 0:3],
                                                        diameter=5))
                    axis[1].imshow(
                        pc_util.point_cloud_three_views(pred[0, :, :],
                                                        diameter=5))
                    # note: `gt` is never defined in this function; it would have to
                    # come from the commented-out load_patch_data call above
                    axis[2].imshow(
                        pc_util.point_cloud_three_views(gt[:, 0:3],
                                                        diameter=5))
                    plt.show()

                path = os.path.join(save_path,
                                    item.split('/')[-1][:-4] + ".ply")
                # rgba =data_provider.convert_dist2rgba(pred_dist2,scale=10)
                # data_provider.save_ply(path, np.hstack((pred[0, ...],rgba,pred_dist2.reshape(NUM_ADDPOINT,1))))

                path = os.path.join(save_path,
                                    item.split('/')[-1][:-4] + "_gt.ply")
                rgba = data_provider.convert_dist2rgba(dist_gt[0], scale=5)
                data_provider.save_ply(
                    path,
                    np.hstack(
                        (pred[0, ...], rgba, dist_gt.reshape(NUM_ADDPOINT,
                                                             1))))

                path = path.replace(phase, phase + "_input")
                path = path.replace('xyz', 'ply')
                rgba = data_provider.convert_dist2rgba(pred_dist[0], scale=5)
                data_provider.save_ply(
                    path,
                    np.hstack((input[0], rgba, pred_dist.reshape(NUM_POINT,
                                                                 1))))
            print total_time / len(samples)
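
The ground-truth coloring above leans on model_utils.distance_point2edge; judging from the tf.reduce_min / tf.sqrt wrapping around it, it returns squared distances from every predicted point to every edge segment, with each edge stored as its two 3-D endpoints. A plain NumPy sketch of that point-to-segment distance, under that assumption (the function name is illustrative):

import numpy as np

def point_to_segment_dist(points, edges):
    # points: (N, 3) query points; edges: (M, 6) rows of [x0 y0 z0 x1 y1 z1]
    a, b = edges[:, 0:3], edges[:, 3:6]
    ab = b - a                                              # (M, 3) segment directions
    ap = points[:, None, :] - a[None, :, :]                 # (N, M, 3)
    denom = np.maximum((ab * ab).sum(-1), 1e-12)            # squared segment lengths
    t = np.clip((ap * ab[None, :, :]).sum(-1) / denom, 0.0, 1.0)
    closest = a[None, :, :] + t[..., None] * ab[None, :, :]
    d2 = ((points[:, None, :] - closest) ** 2).sum(-1)      # (N, M) squared distances
    return np.sqrt(d2.min(axis=-1))                         # distance to nearest segment
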
Example #3
    def pc_prediction(self,
                      gm,
                      sess,
                      patch_num_ratio=3,
                      edge_threshold=0.05,
                      edge=None):
        ## get patch seeds from farthest point sampling
        points = tf.convert_to_tensor(np.expand_dims(gm.data, axis=0),
                                      dtype=tf.float32)
        start = time.time()
        seed1_num = int(gm.data.shape[0] / (NUM_POINT / 8) * patch_num_ratio)

        ## FPS sampling
        seed = farthest_point_sample(seed1_num * 2, points).eval()[0]
        seed_list = seed[:seed1_num]
        print "farthest distance sampling cost", time.time() - start

        if edge is None:
            ratios = np.random.uniform(1.0, 1.0, size=[seed1_num])
        else:
            edge_tree = spatial.cKDTree(edge)
            seed_data = gm.data[np.asarray(seed_list)]
            seed_tree = spatial.cKDTree(seed_data)
            # for each seed, the indices of annotated edge points within r = 0.02
            indices = seed_tree.query_ball_tree(edge_tree, r=0.02)
            ratios = []
            cnt = 0
            for item in indices:
                if len(item) >= 3:
                    #ratios.append(np.random.uniform(1.0,2.0))
                    ratios.append(1.0)
                    cnt = cnt + 1
                else:
                    # ratios.append(np.random.uniform(1.0,3.0))
                    ratios.append(3.0)
            print "total %d edge patch" % (cnt)
        ###### bookkeeping used only by the commented-out per-point debugging dump below
        mm1 = {}
        mm2 = {}
        mm3 = {}
        # for i in xrange(gm.data.shape[0]):
        for i in xrange(100):
            mm1[i] = []
            mm2[i] = []
            mm3[i] = []
        ######
        input_list = []
        up_point_list = []
        up_edge_list = []
        up_edgedist_list = []
        fail = 0
        for seed, ratio in tqdm(zip(seed_list, ratios)):
            try:
                patch_size = int(NUM_POINT * ratio)
                idx = np.asarray(gm.bfs_knn(seed, patch_size))
                if len(idx) < NUM_POINT:
                    fail = fail + 1
                    continue
                idx1 = np.random.permutation(idx.shape[0])[:NUM_POINT]
                idx1.sort()
                idx = idx[idx1]
                point = gm.data[idx]
            except:
                fail = fail + 1
                continue
            up_point, up_edgepoint, up_edgedist = self.patch_prediction(
                point, sess, ratio, edge_threshold)

            # ## handle with the points of same point
            # for cnt, item in enumerate(idx[:128]):
            #     if item <10000:
            #         mm1[item].append(up_point[cnt])
            #         mm2[item].append(up_point[cnt+128])
            #         mm3[item].append(up_point[cnt+128*2])
            #         # mm[item].append(up_point[cnt+128*3])
            # ########
            input_list.append(point)
            up_point_list.append(up_point)
            up_edge_list.append(up_edgepoint)
            up_edgedist_list.append(up_edgedist)
        print "total %d fails" % fail

        # ##
        # colors = np.random.randint(0,255,(10000,3))
        # color_point = []
        # for item in mm1.keys():
        #     aa = np.asarray(mm1[item])
        #     if len(aa)==0:
        #         continue
        #     aa = np.concatenate([aa,np.tile(colors[item],(len(aa),1))],axis=-1)
        #     color_point.extend(aa)
        # color_point = np.asarray(color_point)
        # data_provider.save_xyz('/home/lqyu/server/proj49/PointSR2/'+point_path.split('/')[-1][:-4] +'1.txt',color_point)
        #
        # color_point = []
        # for item in mm2.keys():
        #     aa = np.asarray(mm2[item])
        #     if len(aa) == 0:
        #         continue
        #     aa = np.concatenate([aa, np.tile(colors[item], (len(aa), 1))], axis=-1)
        #     color_point.extend(aa)
        # color_point = np.asarray(color_point)
        # data_provider.save_xyz('/home/lqyu/server/proj49/PointSR2/'+point_path.split('/')[-1][:-4] +'2.txt', color_point)
        #
        # color_point = []
        # for item in mm3.keys():
        #     aa = np.asarray(mm3[item])
        #     if len(aa) == 0:
        #         continue
        #     aa = np.concatenate([aa, np.tile(colors[item], (len(aa), 1))], axis=-1)
        #     color_point.extend(aa)
        # color_point = np.asarray(color_point)
        # data_provider.save_xyz('/home/lqyu/server/proj49/PointSR2/'+point_path.split('/')[-1][:-4] +'3.txt', color_point)
        # ##

        input = np.concatenate(input_list, axis=0)
        pred = np.concatenate(up_point_list, axis=0)

        pred_edge = np.concatenate(up_edge_list, axis=0)
        print "total %d edgepoint" % pred_edge.shape[0]
        pred_edgedist = np.concatenate(up_edgedist_list, axis=0)
        rgba = data_provider.convert_dist2rgba(pred_edgedist, scale=10)
        pred_edge = np.hstack((pred_edge, rgba, pred_edgedist.reshape(-1, 1)))

        return input, pred, pred_edge
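
The edge-aware branch above sizes each patch by counting how many annotated edge points fall within r = 0.02 of each seed, using scipy's cKDTree.query_ball_tree for all seeds at once: seeds close to an edge keep a tight ratio of 1.0, the rest get 3.0 so their patches reach further. A stripped-down sketch of that heuristic (only the scipy calls are taken from the example; the wrapper name is illustrative):

import numpy as np
from scipy import spatial

def edge_aware_ratios(seed_points, edge_points, radius=0.02, min_hits=3):
    # seed_points: (S, 3) patch seeds; edge_points: (E, 3) annotated edge samples
    edge_tree = spatial.cKDTree(edge_points)
    seed_tree = spatial.cKDTree(seed_points)
    # per seed, the indices of edge points within `radius`
    neighbors = seed_tree.query_ball_tree(edge_tree, r=radius)
    # small patches near edges, larger patches in smooth regions
    return np.asarray([1.0 if len(n) >= min_hits else 3.0 for n in neighbors])
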
Example #4
    def test_hierarical_prediction(self):
        data_folder = '../../PointSR_data/CAD_imperfect/simu_noise'
        phase = data_folder.split('/')[-2] + "_" + data_folder.split('/')[-1]
        save_path = os.path.join(MODEL_DIR, 'result/' + phase)
        self.saver = tf.train.Saver()
        _, restore_model_path = model_utils.pre_load_checkpoint(MODEL_DIR)
        print restore_model_path

        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        config.allow_soft_placement = True
        with tf.Session(config=config) as sess:
            self.saver.restore(sess, restore_model_path)
            total_time = 0
            samples = glob(data_folder + "/*.xyz")
            samples.sort()
            for point_path in samples:
                print point_path
                edge_path = point_path.replace('simu_noise', 'mesh_edge')
                edge_path = edge_path.replace('_noise.xyz', '_edge.xyz')
                gm = GKNN(point_path,
                          edge_path,
                          patch_size=NUM_POINT,
                          patch_num=30,
                          add_noise=False)
                l = gm.edge.shape[0]
                # tile the edge indices and top up with a random remainder so the
                # ground-truth edge set has exactly NUM_EDGE entries
                idxx = range(l) * (NUM_EDGE / l) + list(
                    np.random.permutation(l)[:NUM_EDGE % l])
                edge = gm.edge[idxx]

                ## get patch seeds from farthest point sampling
                points = tf.convert_to_tensor(np.expand_dims(gm.data, axis=0),
                                              dtype=tf.float32)
                start = time.time()
                seed = farthest_point_sample(gm.data.shape[0] / 2,
                                             points).eval()[0]
                print "aaaaa", time.time() - start

                seed1_num = int(gm.data.shape[0] / NUM_POINT * 30)
                seed_list1 = seed[:seed1_num]
                seed_left = seed[seed1_num:]

                # seed2_num = int(gm.data.shape[0] / NUM_POINT * 1)
                # seed_list2 = gm.get_seed_fromdensity(seed2_num)
                # seed_list = np.concatenate([seed_list1, seed_list2])
                seed_list = np.unique(seed_list1)

                inputs = []
                up_point_list = []
                up_edge_list = []
                up_edgedist_list = []
                input_edge_list = []
                input_edgedist_list = []
                fail = 0
                for seed in tqdm(seed_list):
                    try:
                        patch_size = NUM_POINT * np.random.randint(1, 5)
                        point = gm.bfs_knn(seed, patch_size)
                        idx = np.random.permutation(patch_size)[:NUM_POINT]
                        idx.sort()
                        point = point[idx]
                    except:
                        fail = fail + 1
                        continue

                    # build (batch, point) index pairs: batch 0 paired with 0..NUM_POINT-1,
                    # which is the gather layout patch_prediction presumably expects
                    idx1 = np.reshape(np.arange(NUM_POINT), [1, NUM_POINT])
                    idx0 = np.zeros((1, NUM_POINT))
                    idx = np.stack((idx0, idx1), axis=-1)

                    up_point, up_edgepoint, up_edgedist, input_edge, input_edgedist = self.patch_prediction(
                        point, edge, idx, sess)
                    inputs.append(point)
                    up_point_list.append(up_point)
                    up_edge_list.append(up_edgepoint)
                    up_edgedist_list.append(up_edgedist)
                    input_edge_list.append(input_edge)
                    input_edgedist_list.append(input_edgedist)
                print "total %d fails" % fail

                input = np.concatenate(inputs, axis=0)
                path = os.path.join(
                    save_path,
                    point_path.split('/')[-1][:-4] + "_input.xyz")
                data_provider.save_xyz(path, gm.data)

                input_edge = np.concatenate(input_edge_list, axis=0)
                input_edgedist = np.concatenate(input_edgedist_list, axis=0)
                rgba = data_provider.convert_dist2rgba(input_edgedist,
                                                       scale=10)
                path = os.path.join(
                    save_path,
                    point_path.split('/')[-1][:-4] + "_inputedge.ply")
                data_provider.save_ply(
                    path,
                    np.hstack((input_edge, rgba, input_edgedist.reshape(-1,
                                                                        1))))

                pred = np.concatenate(up_point_list, axis=0)
                path = os.path.join(
                    save_path,
                    point_path.split('/')[-1][:-4] + "_output.xyz")
                data_provider.save_xyz(path, pred)

                pred_edge = np.concatenate(up_edge_list, axis=0)

                t1 = time.time()
                print "total %d edgepoint" % pred_edge.shape[0]
                edge_dist = np.zeros(pred_edge.shape[0])
                for sid in range(0, pred_edge.shape[0], 20000):
                    eid = np.minimum(pred_edge.shape[0], sid + 20000)
                    tf_point = tf.placeholder(tf.float32, [1, eid - sid, 3])
                    tf_edge = tf.placeholder(tf.float32,
                                             [1, gm.edge.shape[0], 6])
                    pred_edge_dist_tf = model_utils.distance_point2edge(
                        tf_point, tf_edge)
                    pred_edge_dist_tf = tf.sqrt(
                        tf.reduce_min(pred_edge_dist_tf, axis=-1))
                    edge_dist[sid:eid] = sess.run(pred_edge_dist_tf,
                                                  feed_dict={
                                                      tf_point:
                                                      np.expand_dims(
                                                          pred_edge[sid:eid],
                                                          axis=0),
                                                      tf_edge:
                                                      np.expand_dims(gm.edge,
                                                                     axis=0)
                                                  })
                t3 = time.time()
                print "tf time %f" % (t3 - t1)
                rgba = data_provider.convert_dist2rgba(edge_dist, scale=10)
                path = os.path.join(
                    save_path,
                    point_path.split('/')[-1][:-4] + "_outputedgeerror.ply")
                data_provider.save_ply(
                    path, np.hstack((pred_edge, rgba, edge_dist.reshape(-1,
                                                                        1))))

                pred_edgedist = np.concatenate(up_edgedist_list, axis=0)
                rgba = data_provider.convert_dist2rgba(pred_edgedist, scale=10)
                path = os.path.join(
                    save_path,
                    point_path.split('/')[-1][:-4] + "_outputedge.ply")
                data_provider.save_ply(
                    path,
                    np.hstack((pred_edge, rgba, pred_edgedist.reshape(-1, 1))))

            # note: total_time is never accumulated in this function, so this prints 0
            print total_time / len(samples)
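
Evaluating the predicted edge points in blocks of 20000, as the loop above does, keeps the points-by-edges distance matrix from exhausting memory, although building fresh placeholders on every iteration also grows the TF graph. The same chunking idea in plain NumPy, reusing the hypothetical point_to_segment_dist sketch given after Example #2:

import numpy as np

def chunked_edge_distance(points, edges, chunk=20000):
    # points: (N, 3) predicted edge points; edges: (M, 6) ground-truth segments
    # point_to_segment_dist is the sketch shown after Example #2
    out = np.zeros(points.shape[0])
    for sid in range(0, points.shape[0], chunk):
        eid = min(points.shape[0], sid + chunk)
        # only a (chunk, M) block of pairwise distances is alive at any time
        out[sid:eid] = point_to_segment_dist(points[sid:eid], edges)
    return out
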