Example #1
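Restores the latest checkpoint found by model_utils.pre_load_checkpoint(MODEL_DIR) and, for every point cloud matched by input_folder, builds a GKNN patch sampler, runs patch-based prediction, and writes the upsampled points (_output.xyz) and the predicted edge points (_outputedge.ply) to save_path.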
    def test_hierarical_prediction(self, input_folder=None, save_path=None):
        self.saver = tf.train.Saver()
        _, restore_model_path = model_utils.pre_load_checkpoint(MODEL_DIR)
        print(restore_model_path)

        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        config.allow_soft_placement = True
        with tf.Session(config=config) as sess:
            self.saver.restore(sess, restore_model_path)
            total_time = 0
            samples = glob(input_folder)
            samples.sort()
            for point_path in samples:
                edge_path = None
                print(point_path, edge_path)
                start = time.time()
                gm = GKNN(point_path, edge_path, patch_size=NUM_POINT,
                          patch_num=30, add_noise=False, normalization=True)

                ##get the edge information
                _,pred,pred_edge = self.pc_prediction(gm,sess,patch_num_ratio=3, edge_threshold=0.05)
                end = time.time()
                total_time += end - start
                print("total time: ", end - start)

                #path = os.path.join(save_path, point_path.split('/')[-1][:-4] + "_input.xyz")
                #data_provider.save_xyz(path, gm.data)

                path = os.path.join(save_path, point_path.split('/')[-1][:-4] + "_output.xyz")
                data_provider.save_xyz(path, pred)

                path = os.path.join(save_path, point_path.split('/')[-1][:-4] + "_outputedge.ply")
                data_provider.save_ply(path, pred_edge)

            print(total_time / len(samples))
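Every example below obtains its checkpoint through model_utils.pre_load_checkpoint, which evidently returns a (restore_epoch, checkpoint_path) pair and yields epoch 0 when no checkpoint exists. The helper itself is not shown on this page; the following is only a minimal sketch of such a function, assuming TF1's tf.train.get_checkpoint_state and a "model-<global_step>" checkpoint naming scheme:

import re

import tensorflow as tf


def pre_load_checkpoint(checkpoint_dir):
    # Hypothetical sketch: return (step, path) of the latest checkpoint in
    # checkpoint_dir, or (0, None) when the directory holds no checkpoint.
    ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
    if ckpt and ckpt.model_checkpoint_path:
        # Assumes checkpoints are saved as "<dir>/model-<global_step>".
        match = re.search(r"-(\d+)$", ckpt.model_checkpoint_path)
        step = int(match.group(1)) if match else 0
        return step, ckpt.model_checkpoint_path
    return 0, None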
Example #2
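Restores a checkpoint with tf_util.optimistic_restore and visualizes patch-level upsampling: each input cloud is normalized, optionally down-sampled (FLAGS.drop_out) and jittered, five seeds are chosen by farthest point sampling near the mid-section, and the kNN patch around each seed is upsampled at successive ratios (STEP_RATIO ** i) and written out as PLY files.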
    def visualize(self, input_folder=None, save_path=None):
        _, restore_model_path = model_utils.pre_load_checkpoint(MODEL_DIR)
        logger.info(restore_model_path, bold=True)

        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        config.allow_soft_placement = True
        with tf.Session(config=config) as sess:
            tf_util.optimistic_restore(sess, restore_model_path)
            samples = glob(input_folder, recursive=True)
            samples.sort()
            if len(samples) > 100:
                samples = samples[:100]
            for point_path in samples:
                start = time.time()
                data = pc_util.load(point_path, count=NUM_SHAPE_POINT)
                num_shape_point = data.shape[0]
                data = data[:, 0:3]
                is_2D = np.all(data[:, 2] == 0)
                data, centroid, furthest_distance = pc_util.normalize_point_cloud(
                    data)
                if FLAGS.drop_out < 1:
                    idx = farthest_point_sample(
                        int(num_shape_point * FLAGS.drop_out),
                        data[np.newaxis, ...]).eval()[0]
                    data = data[idx, 0:3]
                if JITTER:
                    data = pc_util.jitter_perturbation_point_cloud(
                        data[np.newaxis, ...],
                        sigma=FLAGS.jitter_sigma,
                        clip=FLAGS.jitter_max,
                        is_2D=is_2D)
                    data = data[0, ...]
                ## get the edge information
                logger.info(os.path.basename(point_path))
                mid = data[(np.abs(data[:, 2]) < np.amax(data[:, 2]) * 0.2), :]
                idx = farthest_point_sample(5, mid[np.newaxis, ...]).eval()[0]
                # idx = np.random.choice(data.shape[0], 5, replace=False)
                patches = pc_util.extract_knn_patch(mid[idx, :], data,
                                                    NUM_POINT)
                end = time.time()
                print("total time: ", end - start)
                path = os.path.join(save_path,
                                    point_path.split('/')[-1][:-4] + '.ply')
                total_levels = int(np.log2(UP_RATIO) / np.log2(STEP_RATIO))
                for p in range(patches.shape[0]):
                    patch = patches[p]
                    for i in range(1, total_levels + 1):
                        patch_result = self.patch_prediction(
                            patch, sess, STEP_RATIO**i)
                        pc_util.save_ply(
                            (patch_result * furthest_distance) + centroid,
                            path[:-4] + "_p_%d_%d.ply" % (p, i))
                    pc_util.save_ply((patch * furthest_distance) + centroid,
                                     path[:-4] + "_p_%d_%d.ply" % (p, 0))
                pc_util.save_ply((data * furthest_distance) + centroid,
                                 path[:-4] + "_input.ply")
Example #3
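Builds a single-example inference graph for PartialNet, restores the checkpoint located next to self.model_path via model_utils.pre_load_checkpoint, and stores the placeholders and the pred_angle output in self.ops for later queries.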
    def init_network(self):
        self.model_path = os.path.join(ROOT_DIR,
                                       'log/PartialNetNew4/model.ckpt')

        with tf.Graph().as_default():
            with tf.device('/gpu:' + str(0)):
                pointclouds_pl = tf.placeholder(tf.float32, shape=(1, None, 3))
                pointclouds_pl_big = tf.placeholder(tf.float32,
                                                    shape=(1, None, 3))
                pointclouds_gt_small = tf.placeholder(tf.float32,
                                                      shape=(1, None, 3))
                is_training_pl = tf.placeholder(tf.bool, shape=())
                pred_angle = PartialNet.get_model(pointclouds_pl,
                                                  is_training_pl,
                                                  bn_decay=0.5)

                # with tf.variable_scope(tf.get_variable_scope()):
                #     with tf.device('/gpu:0'), tf.name_scope('gpu_0') as scope:
                #         # Evenly split input data to each GPU
                #         input_pc_batch = tf.slice(pointclouds_pl,
                #                                   [0, 0, 0], [1, -1, -1])
                #
                #         pred_angle = PartialNet.get_model_new(input_pc_batch, is_training_pl,bn_decay=0.5)

                # Get training operator
            saver = tf.train.Saver()
            # Create a session
            config = tf.ConfigProto()
            # config.gpu_options.visible_device_list = '1'
            config.gpu_options.allow_growth = True
            config.allow_soft_placement = True
            config.log_device_placement = False
            # config.gpu_options.per_process_gpu_memory_fraction = 0.5
            self.sess = tf.Session(config=config)

            restore_epoch, checkpoint_path = model_utils.pre_load_checkpoint(
                os.path.dirname(self.model_path))
            # Restore variables from disk.
            model_name = checkpoint_path.split('/')[-1]
            checkpoint_path = os.path.join(os.path.dirname(self.model_path),
                                           model_name)
            print('model_name:', model_name, checkpoint_path)
            saver.restore(self.sess, checkpoint_path)
            print('Model restored.')

            self.ops = {
                'pointclouds_pl': pointclouds_pl,
                'pointclouds_pl_big': pointclouds_pl_big,
                'pointclouds_gt_small': pointclouds_gt_small,
                'is_training_pl': is_training_pl,
                'pred_angle': pred_angle
            }
Example #4
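Builds the generator once using the point count of the first sample in FLAGS.dataset, restores the latest checkpoint from MODEL_DIR, and upsamples every .xyz file in the folder, saving each prediction together with its input under MODEL_DIR/result/<phase>.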
def prediction_whole_model(use_normal=False):
    data_folder = FLAGS.dataset
    phase = data_folder.split('/')[-2] + data_folder.split('/')[-1]
    save_path = os.path.join(MODEL_DIR, 'result/' + phase)
    if not os.path.exists(save_path):
        os.makedirs(save_path)

    samples = glob(data_folder + "/*.xyz")
    samples.sort(reverse=True)
    input = np.loadtxt(samples[0])
    if use_normal:
        pointclouds_ipt = tf.placeholder(tf.float32,
                                         shape=(1, input.shape[0], 6))
    else:
        pointclouds_ipt = tf.placeholder(tf.float32,
                                         shape=(1, input.shape[0], 3))
    pred, _ = MODEL_GEN.get_gen_model(pointclouds_ipt,
                                      is_training=False,
                                      scope='generator',
                                      reuse=False,
                                      use_normal=use_normal,
                                      use_bn=False,
                                      use_ibn=False,
                                      bn_decay=0.95,
                                      up_ratio=UP_RATIO)
    saver = tf.train.Saver()
    _, restore_model_path = model_utils.pre_load_checkpoint(MODEL_DIR)
    print(restore_model_path)

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.allow_soft_placement = True
    with tf.Session(config=config) as sess:
        saver.restore(sess, restore_model_path)
        for i, item in enumerate(samples):
            input = np.loadtxt(item)
            input = np.expand_dims(input, axis=0)
            if not use_normal:
                input = input[:, :, 0:3]
            print(item, input.shape)

            pred_pl = sess.run(pred, feed_dict={pointclouds_ipt: input})
            path = os.path.join(save_path, item.split('/')[-1])
            if use_normal:
                norm_pl = np.zeros_like(pred_pl)
                data_provider.save_pl(
                    path, np.hstack((pred_pl[0, ...], norm_pl[0, ...])))
            else:
                data_provider.save_pl(path, pred_pl[0, ...])
            path = path[:-4] + '_input.xyz'
            data_provider.save_pl(path, input[0])
Example #5
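Training loop for the patch-based generator: it initializes or restores from the latest checkpoint in MODEL_DIR, optionally loads generator weights from assign_model_path, starts the data Fetcher thread, and checkpoints the model every 20 epochs.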
    def train(self, assign_model_path=None):
        # Create a session
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        config.allow_soft_placement = True
        # config.log_device_placement = False
        with tf.Session(config=config) as self.sess:
            self.train_writer = tf.summary.FileWriter(
                os.path.join(MODEL_DIR, 'train'), self.sess.graph)
            init = tf.global_variables_initializer()
            self.sess.run(init)

            # restore the model
            saver = tf.train.Saver(max_to_keep=6)
            restore_epoch, checkpoint_path = model_utils.pre_load_checkpoint(
                MODEL_DIR)
            global LOG_FOUT
            if restore_epoch == 0:
                LOG_FOUT = open(os.path.join(MODEL_DIR, 'log_train.txt'), 'w')
                LOG_FOUT.write(str(socket.gethostname()) + '\n')
                LOG_FOUT.write(str(FLAGS) + '\n')
            else:
                LOG_FOUT = open(os.path.join(MODEL_DIR, 'log_train.txt'), 'a')
                saver.restore(self.sess, checkpoint_path)

            ###assign the generator with another model file
            if assign_model_path is not None:
                print "Load pre-train model from %s" % (assign_model_path)
                assign_saver = tf.train.Saver(var_list=[
                    var for var in tf.trainable_variables()
                    if var.name.startswith("generator")
                ])
                assign_saver.restore(self.sess, assign_model_path)

            ##read data
            self.fetchworker = data_provider.Fetcher(BATCH_SIZE, NUM_POINT,
                                                     USE_DATA_NORM)
            self.fetchworker.start()
            for epoch in tqdm(range(restore_epoch, MAX_EPOCH + 1), ncols=55):
                log_string('**** EPOCH %03d ****\t' % (epoch))
                self.train_one_epoch()
                if epoch % 20 == 0:
                    saver.save(self.sess,
                               os.path.join(MODEL_DIR, "model"),
                               global_step=epoch)
            self.fetchworker.shutdown()
Example #6
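Evaluates the network on CAD test shapes: for each .xyz file it loads the matching edge segments, resamples them to a fixed count, runs the model, measures the point-to-edge distance of the prediction, and saves PLY files colored by that distance.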
    def test(self, show=False, use_normal=False):
        data_folder = '../../PointSR_data/CAD/mesh_MC16k'
        phase = data_folder.split('/')[-2] + data_folder.split('/')[-1]
        save_path = os.path.join(MODEL_DIR, 'result/' + phase)
        self.saver = tf.train.Saver()
        _, restore_model_path = model_utils.pre_load_checkpoint(MODEL_DIR)
        print(restore_model_path)

        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        config.allow_soft_placement = True
        with tf.Session(config=config) as sess:
            self.saver.restore(sess, restore_model_path)
            samples = glob(data_folder + "/*.xyz")
            samples.sort()
            total_time = 0

            #input, dist, edge, data_radius, name = data_provider.load_patch_data(NUM_POINT, True, 30)
            #edge = np.reshape(edge,[-1,NUM_EDGE,6])

            for i, item in tqdm(enumerate(samples)):
                input = np.loadtxt(item)
                edge = np.loadtxt(
                    item.replace('mesh_MC16k',
                                 'mesh_edge').replace('.xyz', '_edge.xyz'))
                idx = np.all(edge[:, 0:3] == edge[:, 3:6], axis=-1)
                edge = edge[~idx]
                l = len(edge)
                idx = list(range(l)) * (1300 // l) + list(
                    np.random.permutation(l)[:1300 % l])
                edge = edge[idx]

                # # coord = input[:, 0:3]
                # # centroid = np.mean(coord, axis=0, keepdims=True)
                # # coord = coord - centroid
                # # furthest_distance = np.amax(np.sqrt(np.sum(abs(coord) ** 2, axis=-1)))
                # # coord = coord / furthest_distance
                # # input[:, 0:3] = coord
                input = np.expand_dims(input, axis=0)
                # input = data_provider.jitter_perturbation_point_cloud(input, sigma=0.01, clip=0.02)

                start_time = time.time()
                edge_pl = tf.placeholder(tf.float32, [1, edge.shape[0], 6])
                dist_gt_pl = tf.sqrt(
                    tf.reduce_min(model_utils.distance_point2edge(
                        self.pred, edge_pl),
                                  axis=-1))

                pred, pred_dist, dist_gt = sess.run(
                    [self.pred, self.pred_dist, dist_gt_pl],
                    feed_dict={
                        self.pointclouds_input: input[:, :, 0:3],
                        self.pointclouds_radius: np.ones(BATCH_SIZE),
                        edge_pl: np.expand_dims(edge, axis=0)
                    })
                total_time += time.time() - start_time
                norm_pl = np.zeros_like(pred)
                ##--------------visualize predicted point cloud----------------------
                if show:
                    f, axis = plt.subplots(3)
                    axis[0].imshow(
                        pc_util.point_cloud_three_views(input[0, :, 0:3],
                                                        diameter=5))
                    axis[1].imshow(
                        pc_util.point_cloud_three_views(pred[0, :, :],
                                                        diameter=5))
                    axis[2].imshow(
                        pc_util.point_cloud_three_views(gt[:, 0:3],
                                                        diameter=5))
                    plt.show()

                path = os.path.join(save_path,
                                    item.split('/')[-1][:-4] + ".ply")
                # rgba =data_provider.convert_dist2rgba(pred_dist2,scale=10)
                # data_provider.save_ply(path, np.hstack((pred[0, ...],rgba,pred_dist2.reshape(NUM_ADDPOINT,1))))

                path = os.path.join(save_path,
                                    item.split('/')[-1][:-4] + "_gt.ply")
                rgba = data_provider.convert_dist2rgba(dist_gt[0], scale=5)
                data_provider.save_ply(
                    path,
                    np.hstack(
                        (pred[0, ...], rgba, dist_gt.reshape(NUM_ADDPOINT,
                                                             1))))

                path = path.replace(phase, phase + "_input")
                path = path.replace('xyz', 'ply')
                rgba = data_provider.convert_dist2rgba(pred_dist[0], scale=5)
                data_provider.save_ply(
                    path,
                    np.hstack((input[0], rgba, pred_dist.reshape(NUM_POINT,
                                                                 1))))
            print(total_time / len(samples))
Example #7
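Full training function for the generator: it builds the EMD loss plus an optional repulsion loss, an Adam training op restricted to generator variables, scalar and image summaries, and then restores or initializes the model before running the epoch loop with checkpoints every 20 epochs.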
def train(assign_model_path=None):
    is_training = True
    bn_decay = 0.95
    step = tf.Variable(0, trainable=False)
    learning_rate = BASE_LEARNING_RATE
    tf.summary.scalar('bn_decay', bn_decay)
    tf.summary.scalar('learning_rate', learning_rate)

    # get placeholder
    pointclouds_pl, pointclouds_gt, pointclouds_gt_normal, pointclouds_radius = MODEL_GEN.placeholder_inputs(
        BATCH_SIZE, NUM_POINT, UP_RATIO)

    #create the generator model
    pred, _ = MODEL_GEN.get_gen_model(pointclouds_pl,
                                      is_training,
                                      scope='generator',
                                      bradius=pointclouds_radius,
                                      reuse=None,
                                      use_normal=False,
                                      use_bn=False,
                                      use_ibn=False,
                                      bn_decay=bn_decay,
                                      up_ratio=UP_RATIO)

    #get emd loss
    gen_loss_emd, matchl_out = model_utils.get_emd_loss(
        pred, pointclouds_gt, pointclouds_radius)

    #get repulsion loss
    if USE_REPULSION_LOSS:
        gen_repulsion_loss = model_utils.get_repulsion_loss4(pred)
        tf.summary.scalar('loss/gen_repulsion_loss', gen_repulsion_loss)
    else:
        gen_repulsion_loss = 0.0

    #get total loss function
    pre_gen_loss = 100 * gen_loss_emd + gen_repulsion_loss + tf.losses.get_regularization_loss(
    )

    # create pre-generator ops
    gen_update_ops = [
        op for op in tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        if op.name.startswith("generator")
    ]
    gen_tvars = [
        var for var in tf.trainable_variables()
        if var.name.startswith("generator")
    ]

    with tf.control_dependencies(gen_update_ops):
        pre_gen_train = tf.train.AdamOptimizer(
            learning_rate,
            beta1=0.9).minimize(pre_gen_loss,
                                var_list=gen_tvars,
                                colocate_gradients_with_ops=True,
                                global_step=step)
    # merge summary and add pointclouds summary
    tf.summary.scalar('loss/gen_emd', gen_loss_emd)
    tf.summary.scalar('loss/regularation', tf.losses.get_regularization_loss())
    tf.summary.scalar('loss/pre_gen_total', pre_gen_loss)
    pretrain_merged = tf.summary.merge_all()

    pointclouds_image_input = tf.placeholder(tf.float32,
                                             shape=[None, 500, 1500, 1])
    pointclouds_input_summary = tf.summary.image('pointcloud_input',
                                                 pointclouds_image_input,
                                                 max_outputs=1)
    pointclouds_image_pred = tf.placeholder(tf.float32,
                                            shape=[None, 500, 1500, 1])
    pointclouds_pred_summary = tf.summary.image('pointcloud_pred',
                                                pointclouds_image_pred,
                                                max_outputs=1)
    pointclouds_image_gt = tf.placeholder(tf.float32,
                                          shape=[None, 500, 1500, 1])
    pointclouds_gt_summary = tf.summary.image('pointcloud_gt',
                                              pointclouds_image_gt,
                                              max_outputs=1)
    image_merged = tf.summary.merge([
        pointclouds_input_summary, pointclouds_pred_summary,
        pointclouds_gt_summary
    ])

    # Create a session
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.allow_soft_placement = True
    config.log_device_placement = False
    with tf.Session(config=config) as sess:
        train_writer = tf.summary.FileWriter(os.path.join(MODEL_DIR, 'train'),
                                             sess.graph)
        init = tf.global_variables_initializer()
        sess.run(init)
        ops = {
            'pointclouds_pl': pointclouds_pl,
            'pointclouds_gt': pointclouds_gt,
            'pointclouds_gt_normal': pointclouds_gt_normal,
            'pointclouds_radius': pointclouds_radius,
            'pointclouds_image_input': pointclouds_image_input,
            'pointclouds_image_pred': pointclouds_image_pred,
            'pointclouds_image_gt': pointclouds_image_gt,
            'pretrain_merged': pretrain_merged,
            'image_merged': image_merged,
            'gen_loss_emd': gen_loss_emd,
            'pre_gen_train': pre_gen_train,
            'pred': pred,
            'step': step,
        }
        #restore the model
        saver = tf.train.Saver(max_to_keep=6)
        restore_epoch, checkpoint_path = model_utils.pre_load_checkpoint(
            MODEL_DIR)
        global LOG_FOUT
        if restore_epoch == 0:
            LOG_FOUT = open(os.path.join(MODEL_DIR, 'log_train.txt'), 'w')
            LOG_FOUT.write(str(socket.gethostname()) + '\n')
            LOG_FOUT.write(str(FLAGS) + '\n')
        else:
            LOG_FOUT = open(os.path.join(MODEL_DIR, 'log_train.txt'), 'a')
            saver.restore(sess, checkpoint_path)

        ###assign the generator with another model file
        if assign_model_path is not None:
            print "Load pre-train model from %s" % (assign_model_path)
            assign_saver = tf.train.Saver(var_list=[
                var for var in tf.trainable_variables()
                if var.name.startswith("generator")
            ])
            assign_saver.restore(sess, assign_model_path)

        ##read data
        input_data, gt_data, data_radius, _ = data_provider.load_patch_data(
            skip_rate=1,
            num_point=NUM_POINT,
            norm=USE_DATA_NORM,
            use_randominput=USE_RANDOM_INPUT)

        fetchworker = data_provider.Fetcher(input_data, gt_data, data_radius,
                                            BATCH_SIZE, NUM_POINT,
                                            USE_RANDOM_INPUT, USE_DATA_NORM)
        fetchworker.start()
        for epoch in tqdm(range(restore_epoch, MAX_EPOCH + 1), ncols=55):
            log_string('**** EPOCH %03d ****\t' % (epoch))
            train_one_epoch(sess, ops, fetchworker, train_writer)
            if epoch % 20 == 0:
                saver.save(sess,
                           os.path.join(MODEL_DIR, "model"),
                           global_step=epoch)
        fetchworker.shutdown()
Example #8
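Variant of the hierarchical prediction routine aimed at real scans: it restores the latest checkpoint, runs GKNN_realscan patch prediction on each matched scan (skipping files whose name contains 'no_noise'), and saves the input, the upsampled output, and the predicted edge points.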
    def test_hierarical_prediction(self):
        data_folder = '../../PointSR_data/virtualscan/chair_test1/*_noise_half.xyz'
        # data_folder = '../../PointSR_data/rawscan/aaa.xyz'
        # data_folder = '/home/lqyu/chair/tmp.xyz'
        phase = data_folder.split('/')[-3] + "_" + data_folder.split('/')[-2]
        save_path = os.path.join(
            MODEL_DIR,
            'result/' + 'halfnoise_' + phase + '_512_0.05_dynamic_96')

        data_folder = '../../PointSR_data/realscan/ToyTurtle_clean_simply_simply.xyz'
        save_path = os.path.join(
            '../../PointSR_data/tmp/realscan_simply_simply_residual_2048_0.05')

        self.saver = tf.train.Saver()
        _, restore_model_path = model_utils.pre_load_checkpoint(MODEL_DIR)
        print(restore_model_path)

        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        config.allow_soft_placement = True
        with tf.Session(config=config) as sess:
            self.saver.restore(sess, restore_model_path)
            total_time = 0
            samples = glob(data_folder)
            samples.sort()
            for point_path in samples:
                if 'no_noise' in point_path:
                    continue
                edge_path = point_path.replace('new_simu_noise',
                                               'mesh_edge').replace(
                                                   '_noise_double.xyz',
                                                   '_edge.xyz')
                edge_path = None
                print(point_path, edge_path)
                gm = GKNN_realscan(point_path,
                                   edge_path,
                                   patch_size=NUM_POINT,
                                   patch_num=30,
                                   add_noise=False,
                                   normalization=False)

                ##get the edge information
                _, pred, pred_edge = self.pc_prediction(gm,
                                                        sess,
                                                        patch_num_ratio=3,
                                                        edge_threshold=0.05)

                ## re-prediction with edge information
                # input, pred,pred_edge = self.pc_prediction(gm,sess,patch_num_ratio=3, edge_threshold=0.05,edge=pred_edge[:,0:3])

                path = os.path.join(
                    save_path,
                    point_path.split('/')[-1][:-4] + "_input.xyz")
                data_provider.save_xyz(path, gm.data)

                path = os.path.join(
                    save_path,
                    point_path.split('/')[-1][:-4] + "_output.xyz")
                data_provider.save_xyz(path, pred)

                path = os.path.join(
                    save_path,
                    point_path.split('/')[-1][:-4] + "_outputedge.ply")
                data_provider.save_ply(path, pred_edge)

            print(total_time / len(samples))
Example #9
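Evaluation routine for a per-point saliency model: it restores the latest checkpoint from LOG_DIR, runs the test set batch by batch, reduces the per-point feature vector to a single value with a max over channels, and writes one text file per model containing the point coordinates and the detected saliency probability.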
def evaluate():
    with tf.device('/gpu:' + str(GPU_INDEX)):
        test_data_placeholder = tf.placeholder(tf.float32,
                                               shape=(BATCH_SIZE, NUM_POINT,
                                                      data_dim))
        is_training_placeholder = tf.placeholder(tf.bool)

        per_point_val, _, spatial_weights = MODEL.get_model(
            test_data_placeholder, is_training_placeholder)

        saver = tf.train.Saver()

    # Create a session
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.allow_soft_placement = True
    config.log_device_placement = True
    sess = tf.Session(config=config)

    # Restore variables from disk.
    _, restore_model_path = model_utils.pre_load_checkpoint(LOG_DIR)
    # restore_model_path = LOG_DIR + '/model.ckpt-200'
    saver.restore(sess, restore_model_path)
    print("Model restored: %s" % restore_model_path)

    ops = {
        'pointclouds_pl': test_data_placeholder,
        'is_training_pl': is_training_placeholder,
        'per_point_val': per_point_val
    }
    # 'spatial_weights': spatial_weights}

    is_training = False

    num_batches = test_data_size // BATCH_SIZE
    test_data_size_inuse = int(num_batches * BATCH_SIZE)
    per_point_vals = np.zeros(shape=(test_data_size_inuse, NUM_POINT, 1))
    per_point_features = np.zeros(shape=(test_data_size_inuse, NUM_POINT, 128))

    for batch_idx in tqdm(range(num_batches)):
        start_idx = batch_idx * BATCH_SIZE
        end_idx = (batch_idx + 1) * BATCH_SIZE
        current_test_data = test_data[start_idx:end_idx, :, :]

        feed_dict = {
            ops['pointclouds_pl']: current_test_data,
            ops['is_training_pl']: is_training,
        }

        per_point_val = sess.run([ops['per_point_val']], feed_dict=feed_dict)

        per_point_features[start_idx:end_idx, :, :] = per_point_val[0]
        per_point_vals[start_idx:end_idx, :, 0] = np.max(per_point_val[0],
                                                         axis=-1)

        # per_point_vals[start_idx:end_idx, :, 0] = np.mean(per_point_val[0], axis=-1)

        # per_point_vals[start_idx:end_idx, :, 0] = np.linalg.norm(per_point_val[0], ord=2, axis=-1)

    ## save detected point saliency probability
    output_path = LOG_DIR + '/' + 'saliency_prob/'
    if not os.path.exists(output_path):
        os.makedirs(output_path)
    for i in range(test_data_size):
        detected_saliency_prob = np.concatenate(
            [test_data[i, :, :], per_point_vals[i, :, :]], axis=-1)
        model_name = name[i] + '.txt'
        model_path = os.path.join(output_path, model_name)
        np.savetxt(model_path, detected_saliency_prob, fmt='%.3f')
Example #10
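Same whole-model prediction pattern as Example #4, but with a hard-coded test folder, per-shape timing, and an optional three-view matplotlib visualization of the input, the prediction, and the ground truth.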
def prediction_whole_model(data_folder=None, show=False, use_normal=False):
    data_folder = '../data/test_data/our_collected_data/MC_5k'
    phase = data_folder.split('/')[-2] + data_folder.split('/')[-1]
    save_path = os.path.join(MODEL_DIR, 'result/' + phase)

    if not os.path.exists(save_path):
        os.makedirs(save_path)
    samples = glob(data_folder + "/*.xyz")
    samples.sort(reverse=True)
    input = np.loadtxt(samples[0])

    if use_normal:
        pointclouds_ipt = tf.placeholder(tf.float32,
                                         shape=(1, input.shape[0], 6))
    else:
        pointclouds_ipt = tf.placeholder(tf.float32,
                                         shape=(1, input.shape[0], 3))
    pred, _ = MODEL_GEN.get_gen_model(pointclouds_ipt,
                                      is_training=False,
                                      scope='generator',
                                      bradius=1.0,
                                      reuse=None,
                                      use_normal=use_normal,
                                      use_bn=False,
                                      use_ibn=False,
                                      bn_decay=0.95,
                                      up_ratio=UP_RATIO)
    saver = tf.train.Saver()
    _, restore_model_path = model_utils.pre_load_checkpoint(MODEL_DIR)
    print(restore_model_path)

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.allow_soft_placement = True
    with tf.Session(config=config) as sess:
        saver.restore(sess, restore_model_path)
        samples = glob(data_folder + "/*.xyz")
        samples.sort()
        total_time = 0
        for i, item in enumerate(samples):
            input = np.loadtxt(item)
            gt = input

            # input = data_provider.jitter_perturbation_point_cloud(np.expand_dims(input,axis=0),sigma=0.003,clip=0.006)
            input = np.expand_dims(input, axis=0)

            if not use_normal:
                input = input[:, :, 0:3]
                gt = gt[:, 0:3]
            print(item, input.shape)

            start_time = time.time()
            pred_pl = sess.run(pred, feed_dict={pointclouds_ipt: input})
            total_time += time.time() - start_time
            norm_pl = np.zeros_like(pred_pl)

            ##--------------visualize predicted point cloud----------------------
            path = os.path.join(save_path, item.split('/')[-1])
            if show:
                f, axis = plt.subplots(3)
                axis[0].imshow(
                    pc_util.point_cloud_three_views(input[0, :, 0:3],
                                                    diameter=5))
                axis[1].imshow(
                    pc_util.point_cloud_three_views(pred_pl[0, :, :],
                                                    diameter=5))
                axis[2].imshow(
                    pc_util.point_cloud_three_views(gt[:, 0:3], diameter=5))
                plt.show()
            data_provider.save_pl(
                path, np.hstack((pred_pl[0, ...], norm_pl[0, ...])))
            path = path[:-4] + '_input.xyz'
            data_provider.save_pl(path, input[0])
        print(total_time / 20)
Example #11
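Training function with an optional least-squares GAN: when FLAGS.gan is set it builds a discriminator, real/fake losses, weight clipping, and a separate Adam optimizer for the discriminator, combining the adversarial term with the Chamfer-distance loss; otherwise it trains the generator on the CD loss alone.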
def train(assign_model_path=None, bn_decay=0.95):
    step = tf.Variable(0, trainable=False)
    learning_rate = BASE_LEARNING_RATE
    # get placeholder
    pointclouds_pl, pointclouds_gt, pointclouds_gt_normal, pointclouds_radius = MODEL_GEN.placeholder_inputs(
        BATCH_SIZE, NUM_POINT, UP_RATIO)
    # create discriminator
    if FLAGS.gan:
        d_real = MODEL_GEN.get_discriminator(pointclouds_gt,
                                             True,
                                             'd_1',
                                             reuse=False,
                                             use_bn=False,
                                             use_ibn=False,
                                             use_normal=False,
                                             bn_decay=bn_decay)
    # create the generator model
    pred, _ = MODEL_GEN.get_gen_model(pointclouds_pl,
                                      True,
                                      scope='generator',
                                      reuse=False,
                                      use_normal=False,
                                      use_bn=False,
                                      use_ibn=False,
                                      bn_decay=bn_decay,
                                      up_ratio=UP_RATIO)
    if FLAGS.gan:
        d_fake = MODEL_GEN.get_discriminator(pred,
                                             True,
                                             'd_1',
                                             reuse=True,
                                             use_bn=False,
                                             use_ibn=False,
                                             use_normal=False,
                                             bn_decay=bn_decay)
    # get cd loss
    gen_loss_cd, _ = model_utils.get_cd_loss(pred, pointclouds_gt,
                                             pointclouds_radius, 1.0)
    # get gan loss
    if FLAGS.gan:
        d_loss_real = tf.reduce_mean((d_real - 1)**2)
        d_loss_fake = tf.reduce_mean(d_fake**2)

        d_loss = 0.5 * (d_loss_real + d_loss_fake)
        # get loss for generator
        g_loss = tf.reduce_mean((d_fake - 1)**2)
    # get total loss function
    pre_gen_loss = gen_loss_cd
    if FLAGS.gan:
        pre_gen_loss = g_loss + FLAGS.lambd * pre_gen_loss
    """ Training """
    # divide trainable variables into a group for D and a group for G
    t_vars = tf.trainable_variables()
    if FLAGS.gan:
        d_vars = [var for var in t_vars if 'd_' in var.name]
    g_vars = [var for var in t_vars if 'generator' in var.name]
    # optimizers
    with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
        if FLAGS.gan:
            d_optim = tf.train.AdamOptimizer(
                learning_rate,
                beta1=0.5).minimize(d_loss,
                                    var_list=d_vars,
                                    colocate_gradients_with_ops=True)
        if assign_model_path:
            learning_rate = learning_rate / 10
        pre_gen_train = tf.train.AdamOptimizer(
            learning_rate,
            beta1=0.9).minimize(pre_gen_loss,
                                var_list=g_vars,
                                colocate_gradients_with_ops=True,
                                global_step=step)
    # weight clipping
    if FLAGS.gan:
        clip_D = [p.assign(tf.clip_by_value(p, -0.01, 0.01)) for p in d_vars]

    # merge summary and add pointclouds summary
    tf.summary.scalar('bn_decay', bn_decay)
    tf.summary.scalar('learning_rate', learning_rate)
    tf.summary.scalar('loss/gen_cd', gen_loss_cd)
    tf.summary.scalar('loss/regularation', tf.losses.get_regularization_loss())
    tf.summary.scalar('loss/pre_gen_total', pre_gen_loss)
    if FLAGS.gan:
        tf.summary.scalar('loss/d_loss_real', d_loss_real)
        tf.summary.scalar('loss/d_loss_fake', d_loss_fake)
        tf.summary.scalar('loss/d_loss', d_loss)
        tf.summary.scalar('loss/g_loss', g_loss)
    pretrain_merged = tf.summary.merge_all()

    pointclouds_image_input = tf.placeholder(tf.float32,
                                             shape=[None, 500, 1500, 1])
    pointclouds_input_summary = tf.summary.image('pointcloud_input',
                                                 pointclouds_image_input,
                                                 max_outputs=1)
    pointclouds_image_pred = tf.placeholder(tf.float32,
                                            shape=[None, 500, 1500, 1])
    pointclouds_pred_summary = tf.summary.image('pointcloud_pred',
                                                pointclouds_image_pred,
                                                max_outputs=1)
    pointclouds_image_gt = tf.placeholder(tf.float32,
                                          shape=[None, 500, 1500, 1])
    pointclouds_gt_summary = tf.summary.image('pointcloud_gt',
                                              pointclouds_image_gt,
                                              max_outputs=1)
    image_merged = tf.summary.merge([
        pointclouds_input_summary, pointclouds_pred_summary,
        pointclouds_gt_summary
    ])

    # Create a session
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.allow_soft_placement = True
    with tf.Session(config=config) as sess:
        train_writer = tf.summary.FileWriter(os.path.join(MODEL_DIR, 'train'),
                                             sess.graph)
        init = tf.global_variables_initializer()
        sess.run(init)
        ops = {
            'pointclouds_pl': pointclouds_pl,
            'pointclouds_gt': pointclouds_gt,
            'pointclouds_gt_normal': pointclouds_gt_normal,
            'pointclouds_radius': pointclouds_radius,
            'pointclouds_image_input': pointclouds_image_input,
            'pointclouds_image_pred': pointclouds_image_pred,
            'pointclouds_image_gt': pointclouds_image_gt,
            'pretrain_merged': pretrain_merged,
            'image_merged': image_merged,
            'gen_loss_cd': gen_loss_cd,
            'pre_gen_train': pre_gen_train,
            'd_optim': d_optim if FLAGS.gan else None,
            'pred': pred,
            'step': step,
            'clip': clip_D if FLAGS.gan else None,
        }
        # restore the model
        saver = tf.train.Saver(max_to_keep=6)
        restore_epoch, checkpoint_path = model_utils.pre_load_checkpoint(
            MODEL_DIR)
        if restore_epoch == 0:
            LOG_FOUT = open(os.path.join(MODEL_DIR, 'log_train.txt'), 'w')
            LOG_FOUT.write(str(socket.gethostname()) + '\n')
            LOG_FOUT.write(str(FLAGS) + '\n')
        else:
            LOG_FOUT = open(os.path.join(MODEL_DIR, 'log_train.txt'), 'a')
            saver.restore(sess, checkpoint_path)

        ###assign the generator with another model file
        if assign_model_path is not None:
            print("Load pre-train model from %s" % assign_model_path)
            assign_saver = tf.train.Saver(var_list=[
                var for var in tf.trainable_variables()
                if var.name.startswith("generator")
            ])
            assign_saver.restore(sess, assign_model_path)

        ##read data
        input_data, gt_data, data_radius, _ = data_provider.load_patch_data(
            FLAGS.dataset,
            skip_rate=1,
            norm=USE_DATA_NORM,
            use_randominput=USE_RANDOM_INPUT)

        fetchworker = data_provider.Fetcher(input_data, gt_data, data_radius,
                                            BATCH_SIZE, NUM_POINT,
                                            USE_RANDOM_INPUT, USE_DATA_NORM)
        fetchworker.start()
        for epoch in tqdm(range(restore_epoch, MAX_EPOCH + 1), ncols=55):
            log_string(LOG_FOUT, '**** EPOCH %03d ****\t' % epoch)
            train_one_epoch(sess, ops, fetchworker, train_writer, LOG_FOUT,
                            FLAGS.gan)
            if epoch % 20 == 0:
                saver.save(sess,
                           os.path.join(MODEL_DIR, "model"),
                           global_step=epoch)
        fetchworker.shutdown()
        LOG_FOUT.close()
Example #12
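Single-GPU training loop for an angle-prediction network: it builds the PartialNet loss, a momentum or Adam optimizer, train/test summary writers, restores from the latest checkpoint in LOG_DIR if one exists, and evaluates and saves the model every 10 epochs.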
def train():
    with tf.Graph().as_default():
        with tf.device('/gpu:' + str(GPU_INDEX)):
            pointclouds_pl, pointclouds_angle, pointclouds_gt = MODEL.placeholder_inputs(
                BATCH_SIZE, NUM_POINT)

            is_training_pl = tf.placeholder(tf.bool, shape=())

            # Note the global_step=batch parameter to minimize.
            # That tells the optimizer to helpfully increment the 'batch' parameter
            # for you every time it trains.
            batch = tf.get_variable('batch', [],
                                    initializer=tf.constant_initializer(0),
                                    trainable=False)
            bn_decay = get_bn_decay(batch)
            tf.summary.scalar('bn_decay', bn_decay)

            # Get model and loss
            pred_angle = MODEL.get_model(pointclouds_pl,
                                         is_training_pl,
                                         bn_decay=bn_decay)

            # loss_util.get_transform_loss2(pred_angle, pointclouds_angle)
            cd_dists, knn_dists = loss_util.get_partialNet_loss(
                pred_angle, pointclouds_angle, pointclouds_pl, pointclouds_gt)

            losses = tf.get_collection('losses')
            total_loss = tf.add_n(losses, name='total_loss')
            tf.summary.scalar('total_loss', total_loss)
            for l in losses + [total_loss]:
                tf.summary.scalar(l.op.name, l)

            print("--- Get training operator")
            # Get training operator
            learning_rate = get_learning_rate(batch)
            tf.summary.scalar('learning_rate', learning_rate)
            if OPTIMIZER == 'momentum':
                optimizer = tf.train.MomentumOptimizer(learning_rate,
                                                       momentum=MOMENTUM)
            elif OPTIMIZER == 'adam':
                optimizer = tf.train.AdamOptimizer(learning_rate)

            update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
            with tf.control_dependencies(update_ops):
                # train_op = optimizer.minimize(loss)
                train_op = optimizer.minimize(total_loss, global_step=batch)
            # train_op = optimizer.minimize(total_loss, global_step=batch)

            # Add ops to save and restore all the variables.
            saver = tf.train.Saver(max_to_keep=1)
        # Create a session
        config = tf.ConfigProto()
        # config.gpu_options.visible_device_list = '1'
        config.gpu_options.allow_growth = True
        config.allow_soft_placement = True
        config.log_device_placement = False
        # config.gpu_options.per_process_gpu_memory_fraction = 0.5
        sess = tf.Session(config=config)

        # Add summary writers
        merged = tf.summary.merge_all()
        train_writer = tf.summary.FileWriter(os.path.join(LOG_DIR, 'train'),
                                             sess.graph)
        test_writer = tf.summary.FileWriter(os.path.join(LOG_DIR, 'test'),
                                            sess.graph)

        # Init variables
        init = tf.global_variables_initializer()
        sess.run(init)

        ops = {
            'pointclouds_pl': pointclouds_pl,
            'pointclouds_gt': pointclouds_gt,
            'pointclouds_angle': pointclouds_angle,
            'is_training_pl': is_training_pl,
            'pred_angle': pred_angle,
            'loss': total_loss,
            'train_op': train_op,
            'merged': merged,
            'step': batch,
            'cd_dists': cd_dists,
            'knn_dists': knn_dists
        }
        restore_epoch = 0

        restore_epoch, checkpoint_path = model_utils.pre_load_checkpoint(
            LOG_DIR)
        global LOG_FOUT
        if restore_epoch == 0:
            LOG_FOUT = open(os.path.join(LOG_DIR, 'log_train.txt'), 'w')
            LOG_FOUT.write(str(socket.gethostname()) + '\n')
            LOG_FOUT.write(str(FLAGS) + '\n')
        else:
            LOG_FOUT = open(os.path.join(LOG_DIR, 'log_train.txt'), 'a')
            saver.restore(sess, checkpoint_path)

        # restore_epoch = 0
        for epoch in range(restore_epoch, MAX_EPOCH):
            log_string('**** EPOCH %03d ****' % (epoch))
            sys.stdout.flush()

            train_one_epoch(sess, ops, train_writer)
            # eval_one_epoch(sess, ops, test_writer)

            # Save the variables to disk.
            if epoch % 10 == 0:
                eval_one_epoch(sess, ops, test_writer)
                save_path = saver.save(sess,
                                       os.path.join(LOG_DIR, "model.ckpt"))
                log_string("Model saved in file: %s" % save_path)
Example #13
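Training method that builds its data Fetcher either from an HDF5 patch file (TRAIN_H5) or a TFRecord (TRAIN_RECORD), optimistically restores either assign_model_path or the latest checkpoint, recovers the starting epoch from the global step, and runs the progressive-ratio training loop, saving every 10 epochs and once more at the end.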
    def train(self, assign_model_path=None):
        # Create a session
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        config.allow_soft_placement = True
        if TRAIN_H5 is not None:
            if NUM_SHAPE_POINT:

                input_data, gt_data, data_radius, _ = data_provider.load_patch_data(
                    h5_filename=TRAIN_H5,
                    up_ratio=UP_RATIO,
                    step_ratio=STEP_RATIO,
                    num_point=NUM_SHAPE_POINT,  # roughly 1/4 of the complete model
                    norm=USE_DATA_NORM,
                    use_randominput=USE_RANDOM_INPUT)
            else:

                input_data, gt_data, data_radius, _ = data_provider.load_patch_data(
                    h5_filename=TRAIN_H5,
                    up_ratio=UP_RATIO,
                    step_ratio=STEP_RATIO,
                    num_point=NUM_SHAPE_POINT,
                    patch_size=NUM_POINT,
                    norm=USE_DATA_NORM,
                    use_randominput=USE_RANDOM_INPUT)

            self.fetchworker = data_provider.Fetcher(
                input_data,
                gt_data,
                data_radius,
                BATCH_SIZE,
                num_in_point=NUM_POINT,
                step_ratio=STEP_RATIO,
                up_ratio=UP_RATIO,
                jitter=JITTER,
                jitter_sigma=FLAGS.jitter_sigma,
                jitter_max=FLAGS.jitter_max,
                drop_out=FLAGS.drop_out)
        elif TRAIN_RECORD is not None:
            self.fetchworker = data_provider.Fetcher(
                TRAIN_RECORD,
                batch_size=BATCH_SIZE,
                step_ratio=STEP_RATIO,
                up_ratio=UP_RATIO,
                num_in_point=NUM_POINT,
                num_shape_point=NUM_SHAPE_POINT,
                jitter=JITTER,
                drop_out=FLAGS.drop_out,
                jitter_max=FLAGS.jitter_max,
                jitter_sigma=FLAGS.jitter_sigma)
        else:
            raise ValueError("either TRAIN_H5 or TRAIN_RECORD must be provided")

        with tf.Session(
                config=config) as self.sess, self.train_writer.as_default():
            tf.global_variables_initializer().run()
            tf.contrib.summary.initialize(graph=tf.get_default_graph())

            ### assign the generator with another model file
            # restore the model
            self.saver = tf.train.Saver(max_to_keep=None)
            if assign_model_path is not None:
                logger.info("Load pre-train model from %s" %
                            (assign_model_path),
                            bold=True)
                # assign_saver = tf.train.Saver(
                #     var_list=[var for var in tf.trainable_variables() if var.name.startswith("generator")])
                # assign_saver.restore(self.sess, assign_model_path)
                # self.saver.restore(self.sess, assign_model_path)
                tf_util.optimistic_restore(self.sess, assign_model_path)
                self.restore_epoch = RESTORE_EPOCH or 0
                logger.info(("Resume training from %d epoch model from %s" %
                             (self.restore_epoch, assign_model_path)),
                            bold=True)
                if RESTORE_EPOCH is not None:
                    tf.assign(self.step, RESTORE_EPOCH *
                              self.fetchworker.num_batches).eval()
            else:
                self.restore_epoch, checkpoint_path = model_utils.pre_load_checkpoint(
                    MODEL_DIR)
                if checkpoint_path is not None:
                    # self.saver.restore(self.sess, checkpoint_path)
                    tf_util.optimistic_restore(self.sess, checkpoint_path)
                    try:
                        self.restore_epoch = RESTORE_EPOCH or int(
                            self.step.eval() / self.fetchworker.num_batches)
                    except Exception:
                        self.restore_epoch = RESTORE_EPOCH or 0
                    logger.info(
                        ("Resume training from %d epoch model from %s" %
                         (self.restore_epoch, checkpoint_path)),
                        bold=True)

            global LOG_FOUT
            if self.restore_epoch == 0:
                LOG_FOUT = open(os.path.join(MODEL_DIR, 'log_train.txt'), 'w')
                LOG_FOUT.write(str(socket.gethostname()) + '\n')
                LOG_FOUT.write(str(FLAGS) + '\n')
            else:
                LOG_FOUT = open(os.path.join(MODEL_DIR, 'log_train.txt'), 'a')

            self.total_steps = self.fetchworker.num_batches * MAX_EPOCH
            self.stage_steps = STAGE_STEPS
            self.last_max_ratio = self.get_next_ratio(self.step.eval())
            self.last_is_combined = self.blend.eval(
                feed_dict={self.model_up_ratio: self.last_max_ratio}) >= 1.0
            self.fetchworker.initialize(self.sess, self.last_max_ratio,
                                        self.last_is_combined)

            for epoch in tqdm(list(range(self.restore_epoch, MAX_EPOCH + 1))):
                log_string('**** EPOCH %03d ****\t' % (epoch))
                self.train_one_epoch(epoch)
                if epoch % 10 == 0:
                    self.saver.save(self.sess,
                                    os.path.join(MODEL_DIR, "model"))
                    # self.eval_per_epoch(epoch, TEST_DATA)
            self.saver.save(self.sess, os.path.join(MODEL_DIR, "final"))
Example #14
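Hierarchical prediction with edge supervision: it restores the latest checkpoint, samples patch seeds with farthest point sampling, predicts upsampled points and edge points patch by patch, then evaluates point-to-edge distances in chunks of 20000 points and writes distance-colored PLY outputs alongside the .xyz results.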
    def test_hierarical_prediction(self):
        data_folder = '../../PointSR_data/CAD_imperfect/simu_noise'
        phase = data_folder.split('/')[-2] + "_" + data_folder.split('/')[-1]
        save_path = os.path.join(MODEL_DIR, 'result/' + phase)
        self.saver = tf.train.Saver()
        _, restore_model_path = model_utils.pre_load_checkpoint(MODEL_DIR)
        print(restore_model_path)

        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        config.allow_soft_placement = True
        with tf.Session(config=config) as sess:
            self.saver.restore(sess, restore_model_path)
            total_time = 0
            samples = glob(data_folder + "/*.xyz")
            samples.sort()
            for point_path in samples:
                print(point_path)
                edge_path = point_path.replace('simu_noise', 'mesh_edge')
                edge_path = edge_path.replace('_noise.xyz', '_edge.xyz')
                gm = GKNN(point_path,
                          edge_path,
                          patch_size=NUM_POINT,
                          patch_num=30,
                          add_noise=False)
                l = gm.edge.shape[0]
                idxx = list(range(l)) * (NUM_EDGE // l) + list(
                    np.random.permutation(l)[:NUM_EDGE % l])
                edge = gm.edge[idxx]

                ## get patch seed from farthestsampling
                points = tf.convert_to_tensor(np.expand_dims(gm.data, axis=0),
                                              dtype=tf.float32)
                start = time.time()
                seed = farthest_point_sample(gm.data.shape[0] // 2,
                                             points).eval()[0]
                print("farthest point sampling time:", time.time() - start)

                seed1_num = int(gm.data.shape[0] / NUM_POINT * 30)
                seed_list1 = seed[:seed1_num]
                seed_left = seed[seed1_num:]

                # seed2_num = int(gm.data.shape[0] / NUM_POINT * 1)
                # seed_list2 = gm.get_seed_fromdensity(seed2_num)
                # seed_list = np.concatenate([seed_list1, seed_list2])
                seed_list = np.unique(seed_list1)

                inputs = []
                up_point_list = []
                up_edge_list = []
                up_edgedist_list = []
                input_edge_list = []
                input_edgedist_list = []
                fail = 0
                for seed in tqdm(seed_list):
                    try:
                        patch_size = NUM_POINT * np.random.randint(1, 5)
                        point = gm.bfs_knn(seed, patch_size)
                        idx = np.random.permutation(patch_size)[:NUM_POINT]
                        idx.sort()
                        point = point[idx]
                    except Exception:
                        fail += 1
                        continue

                    #get the idx
                    idx1 = np.reshape(np.arange(NUM_POINT), [1, NUM_POINT])
                    idx0 = np.zeros((1, NUM_POINT))
                    idx = np.stack((idx0, idx1), axis=-1)

                    up_point, up_edgepoint, up_edgedist, input_edge, input_edgedist = self.patch_prediction(
                        point, edge, idx, sess)
                    inputs.append(point)
                    up_point_list.append(up_point)
                    up_edge_list.append(up_edgepoint)
                    up_edgedist_list.append(up_edgedist)
                    input_edge_list.append(input_edge)
                    input_edgedist_list.append(input_edgedist)
                print "total %d fails" % fail

                input = np.concatenate(inputs, axis=0)
                path = os.path.join(
                    save_path,
                    point_path.split('/')[-1][:-4] + "_input.xyz")
                data_provider.save_xyz(path, gm.data)

                input_edge = np.concatenate(input_edge_list, axis=0)
                input_edgedist = np.concatenate(input_edgedist_list, axis=0)
                rgba = data_provider.convert_dist2rgba(input_edgedist,
                                                       scale=10)
                path = os.path.join(
                    save_path,
                    point_path.split('/')[-1][:-4] + "_inputedge.ply")
                data_provider.save_ply(
                    path,
                    np.hstack((input_edge, rgba, input_edgedist.reshape(-1,
                                                                        1))))

                pred = np.concatenate(up_point_list, axis=0)
                path = os.path.join(
                    save_path,
                    point_path.split('/')[-1][:-4] + "_output.xyz")
                data_provider.save_xyz(path, pred)

                pred_edge = np.concatenate(up_edge_list, axis=0)

                t1 = time.time()
                print "total %d edgepoint" % pred_edge.shape[0]
                edge_dist = np.zeros(pred_edge.shape[0])
                for sid in range(0, pred_edge.shape[0], 20000):
                    eid = np.minimum(pred_edge.shape[0], sid + 20000)
                    tf_point = tf.placeholder(tf.float32, [1, eid - sid, 3])
                    tf_edge = tf.placeholder(tf.float32,
                                             [1, gm.edge.shape[0], 6])
                    pred_edge_dist_tf = model_utils.distance_point2edge(
                        tf_point, tf_edge)
                    pred_edge_dist_tf = tf.sqrt(
                        tf.reduce_min(pred_edge_dist_tf, axis=-1))
                    edge_dist[sid:eid] = sess.run(pred_edge_dist_tf,
                                                  feed_dict={
                                                      tf_point:
                                                      np.expand_dims(
                                                          pred_edge[sid:eid],
                                                          axis=0),
                                                      tf_edge:
                                                      np.expand_dims(gm.edge,
                                                                     axis=0)
                                                  })
                t3 = time.time()
                print "tf time %f" % (t3 - t1)
                rgba = data_provider.convert_dist2rgba(edge_dist, scale=10)
                path = os.path.join(
                    save_path,
                    point_path.split('/')[-1][:-4] + "_outputedgeerror.ply")
                data_provider.save_ply(
                    path, np.hstack((pred_edge, rgba, edge_dist.reshape(-1, 1))))

                pred_edgedist = np.concatenate(up_edgedist_list, axis=0)
                rgba = data_provider.convert_dist2rgba(pred_edgedist, scale=10)
                path = os.path.join(
                    save_path,
                    point_path.split('/')[-1][:-4] + "_outputedge.ply")
                data_provider.save_ply(
                    path,
                    np.hstack((pred_edge, rgba, pred_edgedist.reshape(-1, 1))))

            print total_time / len(samples)
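For reference, the chunked loop above calls model_utils.distance_point2edge with a [1, E, 6] edge tensor; assuming each edge stores the two endpoints of a 3D segment (an assumption, since the helper itself is not shown), a minimal NumPy sketch of the point-to-segment squared distance it is expected to return is:

import numpy as np

def point2segment_sqdist(points, edges):
    # points: (N, 3); edges: (E, 6) = the two endpoints of each edge segment.
    # Returns (N, E) squared point-to-segment distances, matching what the
    # example reduces with a min over edges followed by a sqrt.
    a, b = edges[:, 0:3], edges[:, 3:6]
    ab = b - a                                            # (E, 3)
    ap = points[:, None, :] - a[None, :, :]               # (N, E, 3)
    denom = np.maximum(np.sum(ab * ab, axis=-1), 1e-12)   # (E,)
    t = np.clip(np.sum(ap * ab[None, :, :], axis=-1) / denom[None, :], 0.0, 1.0)
    closest = a[None, :, :] + t[..., None] * ab[None, :, :]
    return np.sum((points[:, None, :] - closest) ** 2, axis=-1)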
Example #15
0
def train():
    with tf.Graph().as_default():
        with tf.device('/cpu:0'):
            pointclouds_pl, pointclouds_angle, pointclouds_gt, pointclouds_gt_big = MODEL.placeholder_inputs(
                BATCH_SIZE, NUM_POINT, CD_Ratio, K=4)
            pointclouds_gt_small = tf.placeholder(tf.float32, shape=(BATCH_SIZE, 1024, 3))
            is_training_pl = tf.placeholder(tf.bool, shape=())

            # Note the global_step=batch parameter to minimize.
            # That tells the optimizer to helpfully increment the 'batch' parameter
            # for you every time it trains.
            batch = tf.get_variable('batch', [],
                                    initializer=tf.constant_initializer(0), trainable=False)
            bn_decay = get_bn_decay(batch)
            tf.summary.scalar('bn_decay', bn_decay)

            print("--- Get training operator")
            # Get training operator
            learning_rate = get_learning_rate(batch)
            tf.summary.scalar('learning_rate', learning_rate)
            if OPTIMIZER == 'momentum':
                optimizer = tf.train.MomentumOptimizer(learning_rate, momentum=MOMENTUM)
            elif OPTIMIZER == 'adam':
                optimizer = tf.train.AdamOptimizer(learning_rate)
            #MODEL.get_model(pointclouds_pl, is_training_pl, bn_decay=bn_decay)

            tower_grads = []
            pred_gpu = []
            cd_dists_gpu = []
            knn_dists_gpu = []
            total_loss_gpu = []

            for i in range(NUM_GPUS):
                with tf.variable_scope(tf.get_variable_scope(), reuse=bool(i != 0)):
                    with tf.device('/gpu:%d' % (i)), tf.name_scope('gpu_%d' % (i)) as scope:
                        # Evenly split input data to each GPU
                        input_pc_batch = tf.slice(pointclouds_pl,
                                            [i * DEVICE_BATCH_SIZE, 0, 0], [DEVICE_BATCH_SIZE, -1, -1])
                        input_angle_batch = tf.slice(pointclouds_angle,
                                               [i * DEVICE_BATCH_SIZE, 0], [DEVICE_BATCH_SIZE, -1])
                        input_gt_batch = tf.slice(pointclouds_gt,
                                            [i * DEVICE_BATCH_SIZE, 0, 0], [DEVICE_BATCH_SIZE, -1, -1])
                        input_gt_big_batch = tf.slice(pointclouds_gt_big,
                                                  [i * DEVICE_BATCH_SIZE, 0, 0], [DEVICE_BATCH_SIZE, -1, -1])
                        input_gt_small_batch = tf.slice(pointclouds_gt_small,
                                                      [i * DEVICE_BATCH_SIZE, 0, 0], [DEVICE_BATCH_SIZE, -1, -1])
                        pred_result = MODEL.get_model_new(input_pc_batch, is_training=is_training_pl, bn_decay=bn_decay)
                        cd_dists_g, knn_dists_g = loss_util.get_partialNet_loss(
                            pred_result, input_angle_batch, input_pc_batch,
                            input_gt_batch, input_gt_big_batch, input_gt_small_batch)
                        losses = tf.get_collection('losses', scope)
                        total_loss = tf.add_n(losses, name='total_loss')
                        for l in losses + [total_loss]:
                            tf.summary.scalar(l.op.name, l)

                        update_op = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
                        update_ops = tf.group(*update_op)
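                        # The regularization-loss ops created under this dependency
                        # force the UPDATE_OPS (batch-norm moving averages) to run
                        # whenever total_loss is evaluated for the gradients.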
                        with tf.control_dependencies([update_ops]):
                            pc_l2_loss = tf.losses.get_regularization_loss()
                            pc_l2_loss = 0.00001 * pc_l2_loss
                        total_loss = total_loss + pc_l2_loss
                        grads = optimizer.compute_gradients(total_loss)
                        tower_grads.append(grads)

                        pred_gpu.append(pred_result)
                        total_loss_gpu.append(total_loss)
                        cd_dists_gpu.append(cd_dists_g)
                        knn_dists_gpu.append(knn_dists_g)


            # Merge pred and losses from multiple GPUs
            pred_angle = tf.concat(pred_gpu, 0)
            cd_dists = tf.concat(cd_dists_gpu, 0)
            knn_dists = tf.concat(knn_dists_gpu, 0)
            total_loss = tf.reduce_mean(total_loss_gpu)
            # Get training operator
            grads = average_gradients(tower_grads)
            apply_gradient_op = optimizer.apply_gradients(grads, global_step=batch)

            variable_averages = tf.train.ExponentialMovingAverage(bn_decay, batch)
            variables_averages_op = variable_averages.apply(tf.trainable_variables())
            train_op = tf.group(apply_gradient_op, variables_averages_op)

        # Add ops to save and restore all the variables.
        saver = tf.train.Saver(max_to_keep=2)
        # Create a session
        config = tf.ConfigProto()
        # config.gpu_options.visible_device_list = '1'
        config.gpu_options.allow_growth = True
        config.allow_soft_placement = True
        config.log_device_placement = False
        # config.gpu_options.per_process_gpu_memory_fraction = 0.5
        sess = tf.Session(config=config)

        # Add summary writers
        merged = tf.summary.merge_all()
        train_writer = tf.summary.FileWriter(os.path.join(LOG_DIR, 'train'), sess.graph)
        test_writer = tf.summary.FileWriter(os.path.join(LOG_DIR, 'test'), sess.graph)

        # Init variables
        init = tf.global_variables_initializer()
        sess.run(init)

        ops = {'pointclouds_pl'   : pointclouds_pl,
               'pointclouds_gt'   : pointclouds_gt,
               'pointclouds_gt_big': pointclouds_gt_big,
               'pointclouds_gt_small': pointclouds_gt_small,
               'pointclouds_angle': pointclouds_angle,
               'is_training_pl'   : is_training_pl,
               'pred_angle'       : pred_angle,
               'loss'             : total_loss,
               'train_op'         : train_op,
               'merged'           : merged,
               'step'             : batch,
               'cd_dists'         : cd_dists,
               'knn_dists'        : knn_dists}

        restore_epoch, checkpoint_path = model_utils.pre_load_checkpoint(LOG_DIR)
        global LOG_FOUT
        if restore_epoch == 0:
            LOG_FOUT = open(os.path.join(LOG_DIR, 'log_train.txt'), 'w')
            LOG_FOUT.write(str(socket.gethostname()) + '\n')
            LOG_FOUT.write(str(FLAGS) + '\n')
        else:
            LOG_FOUT = open(os.path.join(LOG_DIR, 'log_train.txt'), 'a')
            saver.restore(sess, checkpoint_path)

        # restore_epoch = 0
        for epoch in tqdm(range(restore_epoch, MAX_EPOCH)):
            log_string('**** EPOCH %03d ****' % (epoch))
            sys.stdout.flush()

            train_one_epoch(sess, ops, train_writer)
            # eval_one_epoch(sess, ops, test_writer)

            # Save the variables to disk.
            if epoch % 10 == 0:
                #eval_one_epoch(sess, ops, test_writer)
                save_path = saver.save(sess, os.path.join(LOG_DIR, "model.ckpt"),
                                       global_step=epoch)
                log_string("Model saved in file: %s" % save_path)
            if epoch % 50 == 0:
                eval_one_epoch(sess, ops, test_writer)
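Both multi-GPU training examples (this one and the next) call an average_gradients helper that is not included in the listing; a minimal sketch of the standard TensorFlow 1.x tower-averaging pattern it is assumed to follow (names and behavior assumed, not taken from the original repositories) is:

import tensorflow as tf

def average_gradients(tower_grads):
    # tower_grads: one list of (gradient, variable) pairs per GPU, as returned
    # by optimizer.compute_gradients on each tower.
    average_grads = []
    for grad_and_vars in zip(*tower_grads):  # group the same variable across towers
        grads = [tf.expand_dims(g, 0) for g, _ in grad_and_vars if g is not None]
        grad = tf.reduce_mean(tf.concat(grads, axis=0), axis=0)
        # Variables are shared between towers, so the first tower's variable is used.
        average_grads.append((grad, grad_and_vars[0][1]))
    return average_grads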
Example #16
0
def train(palette):

    with tf.Graph().as_default():
        with tf.device('/cpu:0'):
            ###################################################
            ### placeholders  # triplet input
            data_train_placeholder = tf.placeholder(tf.float32,
                                                    shape=(BATCH_SIZE,
                                                           NUM_POINT,
                                                           data_dim))
            data_positive_train_placeholder = tf.placeholder(tf.float32,
                                                             shape=(BATCH_SIZE,
                                                                    NUM_POINT,
                                                                    data_dim))
            data_negative_train_placeholder = tf.placeholder(tf.float32,
                                                             shape=(BATCH_SIZE,
                                                                    NUM_POINT,
                                                                    data_dim))

            is_training_placeholder = tf.placeholder(tf.bool, shape=())

            cluster_mean_placeholder = tf.placeholder(tf.float32,
                                                      shape=(cluster_num, 128))
            indices_placeholder = tf.placeholder(tf.int64, shape=(BATCH_SIZE))

            batch = tf.get_variable('batch', [],
                                    initializer=tf.constant_initializer(0),
                                    trainable=False)
            bn_decay = get_bn_decay(batch)
            tf.summary.scalar('bn_decay', bn_decay)

            ###################################################
            ### get training operator
            learning_rate = get_learning_rate(batch)
            tf.summary.scalar('learning_rate', learning_rate)
            if OPTIMIZER == 'momentum':
                optimizer = tf.train.MomentumOptimizer(learning_rate,
                                                       momentum=MOMENTUM)
            elif OPTIMIZER == 'adam':
                optimizer = tf.train.AdamOptimizer(learning_rate)

            ###################################################
            tower_grads = []
            feature_anchor_gpu = []
            net_anchor_gpu = []
            net_positive_gpu = []
            net_negative_gpu = []
            cluster_loss_gpu = []
            contrastive_loss_gpu = []
            total_loss_gpu = []
            l2_loss_gpu = []
            all_loss_gpu = []
            ### get model and loss
            for i in range(NUM_GPUS):
                with tf.variable_scope(tf.get_variable_scope(),
                                       reuse=bool(i != 0)):
                    with tf.device('/gpu:%d' % (i)), tf.name_scope(
                            'gpu_%d' % (i)) as scope:
                        pc_patch = tf.slice(data_train_placeholder,
                                            [i * DEVICE_BATCH_SIZE, 0, 0],
                                            [DEVICE_BATCH_SIZE, -1, -1])
                        pc_patch_positive = tf.slice(
                            data_positive_train_placeholder,
                            [i * DEVICE_BATCH_SIZE, 0, 0],
                            [DEVICE_BATCH_SIZE, -1, -1])
                        pc_patch_negative = tf.slice(
                            data_negative_train_placeholder,
                            [i * DEVICE_BATCH_SIZE, 0, 0],
                            [DEVICE_BATCH_SIZE, -1, -1])

                        pc_features, pc_net, _ = MODEL.get_model(
                            pc_patch, is_training_placeholder, bn_decay)
                        tf.get_variable_scope().reuse_variables()
                        pc_features_positive, pc_net_positive, _ = MODEL.get_model(
                            pc_patch_positive, is_training_placeholder,
                            bn_decay)
                        pc_features_negative, pc_net_negative, _ = MODEL.get_model(
                            pc_patch_negative, is_training_placeholder,
                            bn_decay)

                        pc_indices = tf.slice(indices_placeholder,
                                              [i * DEVICE_BATCH_SIZE],
                                              [DEVICE_BATCH_SIZE])
                        pc_cluster_loss, _, _, pc_contrastive_loss, pc_total_loss = MODEL.get_loss(
                            pc_net, pc_net_positive, pc_net_negative,
                            cluster_mean_placeholder, pc_indices)
                        update_op = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
                        update_ops = tf.group(*update_op)
                        with tf.control_dependencies([update_ops]):
                            pc_l2_loss = tf.losses.get_regularization_loss()
                            pc_l2_loss = 0.00001 * pc_l2_loss

                        pc_all_loss = pc_total_loss + pc_l2_loss
                        grads = optimizer.compute_gradients(pc_all_loss)
                        tower_grads.append(grads)

                        cluster_loss_gpu.append(pc_cluster_loss)
                        contrastive_loss_gpu.append(pc_contrastive_loss)
                        total_loss_gpu.append(pc_total_loss)
                        l2_loss_gpu.append(pc_l2_loss)
                        all_loss_gpu.append(pc_all_loss)

                        feature_anchor_gpu.append(pc_features)

                        net_anchor_gpu.append(pc_net)
                        net_positive_gpu.append(pc_net_positive)
                        net_negative_gpu.append(pc_net_negative)

            cluster_loss = tf.reduce_mean(cluster_loss_gpu)
            contrastive_loss = tf.reduce_mean(contrastive_loss_gpu)
            total_loss = tf.reduce_mean(total_loss_gpu)
            l2_loss = tf.reduce_mean(l2_loss_gpu)
            all_loss = tf.reduce_mean(all_loss_gpu)

            tf.summary.scalar('cluster_loss', cluster_loss)
            tf.summary.scalar('contrastive_loss', contrastive_loss)
            tf.summary.scalar('l2_loss', l2_loss)
            tf.summary.scalar('total_loss', all_loss)

            feature_anchor = tf.concat(feature_anchor_gpu, 0)
            net_anchor = tf.concat(net_anchor_gpu, 0)
            net_positive = tf.concat(net_positive_gpu, 0)
            net_negative = tf.concat(net_negative_gpu, 0)

            grads = average_gradients(tower_grads)
            apply_gradient_op = optimizer.apply_gradients(grads,
                                                          global_step=batch)

            variable_averages = tf.train.ExponentialMovingAverage(
                bn_decay, batch)
            variables_averages_op = variable_averages.apply(
                tf.trainable_variables())
            train_op = tf.group(apply_gradient_op, variables_averages_op)

        saver = tf.train.Saver(max_to_keep=11)

        # Create a session
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        config.allow_soft_placement = True
        config.log_device_placement = False
        sess = tf.Session(config=config)

        # Add summary writers
        merged = tf.summary.merge_all()
        train_writer = tf.summary.FileWriter(os.path.join(LOG_DIR, 'train'),
                                             sess.graph)

        # Init variables
        init = tf.global_variables_initializer()
        sess.run(init, {is_training_placeholder: True})

        restore_epoch, checkpoint_path = model_utils.pre_load_checkpoint(
            LOG_DIR)
        global LOG_FOUT
        if restore_epoch == 0:
            LOG_FOUT = open(os.path.join(LOG_DIR, 'log_train.txt'), 'w')
            LOG_FOUT.write(str(socket.gethostname()) + '\n')
            LOG_FOUT.write(str(FLAGS) + '\n')
        else:
            LOG_FOUT = open(os.path.join(LOG_DIR, 'log_train.txt'), 'a')
            saver.restore(sess, checkpoint_path)

        ops = {
            'pointclouds_pl': data_train_placeholder,
            'pointclouds_pl_positive': data_positive_train_placeholder,
            'pointclouds_pl_negative': data_negative_train_placeholder,
            'is_training_pl': is_training_placeholder,
            'buffer': cluster_mean_placeholder,
            'indics': indices_placeholder,
            'feature_anchor': feature_anchor,
            'net_anchor': net_anchor,
            'net_positive': net_positive,
            'net_negative': net_negative,
            'cluster_loss': cluster_loss,
            'contrastive_loss': contrastive_loss,
            'total_loss': total_loss,
            'l2_loss': l2_loss,
            'all_loss': all_loss,
            'train_op': train_op,
            'merged': merged,
            'step': batch,
        }

        feature_buffer = np.random.rand(file_size, 128)
        feature_buffer = trafo.unit_vector(feature_buffer, axis=1)  # normalize
        for epoch in tqdm(range(restore_epoch, MAX_EPOCH)):
            # for epoch in tqdm(range(MAX_EPOCH)):
            print('******* EPOCH %03d *******' % (epoch))
            sys.stdout.flush()

            ## clustering
            # cluster the memory buffer
            cluster_pred = SpectralClustering(
                n_clusters=cluster_num, gamma=1).fit_predict(feature_buffer)
            # cluster_pred = KMeans(n_clusters=cluster_num).fit_predict(feature_buffer)
            # cluster_pred = AgglomerativeClustering(n_clusters=cluster_num).fit_predict(feature_buffer)
            cluster_mean = np.zeros(shape=(cluster_num, 128))
            for cluster_idx in range(cluster_num):
                indices = np.where(cluster_pred == cluster_idx)[0]
                cluster_avg = np.mean(feature_buffer[indices, :], axis=0)
                cluster_mean[cluster_idx, :] = cluster_avg
            cluster_mean = trafo.unit_vector(cluster_mean, axis=1)

            train_one_epoch(sess, cluster_mean, cluster_pred, data, ops,
                            train_writer)

            ## get the feature buffer after each epoch
            point_indices = range(pts_num)  # avoid shadowing the built-in list()
            seed = random.sample(point_indices, NUM_POINT)
            data_2048 = data[:, seed, :]
            num_batches = file_size // BATCH_SIZE
            feature_buffer = np.zeros(shape=(file_size, 128))
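            # The extra final iteration re-runs the last BATCH_SIZE shapes, so the
            # tail of the buffer is filled even when file_size is not divisible by
            # BATCH_SIZE.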
            for batch_idx in range(num_batches + 1):
                if batch_idx != num_batches:
                    start_idx = batch_idx * BATCH_SIZE
                    end_idx = (batch_idx + 1) * BATCH_SIZE
                else:
                    start_idx = file_size - BATCH_SIZE
                    end_idx = file_size
                data_input = data_2048[start_idx:end_idx, :, :]
                feed_dict = {
                    ops['pointclouds_pl']: data_input,
                    ops['is_training_pl']: False,
                }
                features = sess.run([ops['net_anchor']], feed_dict=feed_dict)
                feature_buffer[start_idx:end_idx, :] = features[0]

            if epoch % 50 == 0:
                # start_time = time.time()
                # tsne_visualization_color(feature_buffer, cluster_pred, palette, epoch)
                # end_time = time.time()
                # print 'tsne running time is %f' % (end_time-start_time)

                # start_time = time.time()
                # pca_visualization_color(feature_buffer, cluster_pred, palette, epoch)
                # end_time = time.time()
                # print 'pca running time is %f' % (end_time-start_time)

                # if epoch % 10 == 0:
                save_path = saver.save(sess,
                                       os.path.join(LOG_DIR, "model.ckpt"),
                                       global_step=epoch)
                print("Model saved in file: %s" % save_path)
Example #17
0
    def test_hierarical_prediction(self, input_folder=None, save_path=None):
        _, restore_model_path = model_utils.pre_load_checkpoint(MODEL_DIR)
        logger.info(restore_model_path, bold=True)

        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        config.allow_soft_placement = True
        with tf.Session(config=config) as sess:
            tf_util.optimistic_restore(sess, restore_model_path)
            total_time = 0
            samples = glob(input_folder, recursive=True)
            samples.sort()
            # if len(samples)>100:
            #     samples = samples[:100]
            for point_path in samples:
                start = time.time()
                data = pc_util.load(point_path, count=NUM_SHAPE_POINT)
                num_shape_point = data.shape[0]
                data = data[:, 0:3]
                is_2D = np.all(data[:, 2] == 0)
                data, centroid, furthest_distance = pc_util.normalize_point_cloud(
                    data)
                if FLAGS.drop_out < 1:
                    idx = farthest_point_sample(
                        int(num_shape_point * FLAGS.drop_out),
                        data[np.newaxis, ...]).eval()[0]
                    data = data[idx, 0:3]
                if JITTER:
                    data = pc_util.jitter_perturbation_point_cloud(
                        data[np.newaxis, ...],
                        sigma=FLAGS.jitter_sigma,
                        clip=FLAGS.jitter_max,
                        is_2D=is_2D)
                    data = data[0, ...]
                ## get the edge information
                logger.info(os.path.basename(point_path))
                input_list, pred_list = self.pc_prediction(
                    data, sess, patch_num_ratio=PATCH_NUM_RATIO)
                end = time.time()
                print("total time: ", end - start)
                pred_pc = np.concatenate(pred_list, axis=0)
                pred_pc = (pred_pc * furthest_distance) + centroid
                data = (data * furthest_distance) + centroid
                folder = os.path.basename(os.path.dirname(point_path))
                path = os.path.join(save_path, folder,
                                    point_path.split('/')[-1][:-4] + '.ply')
                # pc_util.save_ply(pred_pc, path[:-4]+'_overlapped.ply')
                pc_util.save_ply(data, path[:-4] + '_input.ply')
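                # The concatenated patch predictions overlap, so farthest point
                # sampling reduces them back to the target number of output points.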
                idx = farthest_point_sample(
                    int(num_shape_point * FLAGS.drop_out) * UP_RATIO,
                    pred_pc[np.newaxis, ...]).eval()[0]
                pred_pc = pred_pc[idx, 0:3]
                # pred_pc, _, _ = pc_util.normalize_point_cloud(pred_pc)
                # pred_pc = (pred_pc * furthest_distance) + centroid
                pc_util.save_ply(pred_pc, path[:-4] + '.ply')

                # if len(input_list) > 1:
                #     counter = 0
                #     for in_p, pred_p in zip(input_list, pred_list):
                #         pc_util.save_ply(in_p*furthest_distance+centroid, path[:-4]+"_input_patch_%d.ply" % counter)
                #         pc_util.save_ply(pred_p*furthest_distance+centroid, path[:-4]+"_pred_patch_%d.ply" % counter)
                #         counter += 1

            print(total_time / len(samples))
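pc_util.normalize_point_cloud is not included in the listing, but the way the example undoes it (pred_pc * furthest_distance + centroid) suggests a centroid-and-radius normalization; a minimal NumPy sketch under that assumption is:

import numpy as np

def normalize_point_cloud(points):
    # Center the cloud at its centroid and scale by the furthest point so it fits
    # in the unit sphere; return the parameters needed to undo the transform
    # (pred * furthest_distance + centroid, as in the example above).
    centroid = np.mean(points, axis=0, keepdims=True)
    centered = points - centroid
    furthest_distance = np.max(np.linalg.norm(centered, axis=1))
    return centered / furthest_distance, centroid, furthest_distance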