# Fold the train/val splits of every remaining object class into the
# already-initialised pc_data_train / pc_data_val collections.
for i in range(1, len(class_name)):
    syn_id = snc_category_to_synth_id()[class_name[i]]
    class_dir = osp.join(top_in_dir, syn_id)
    splits = load_and_split_all_point_clouds_under_folder(
        class_dir, n_threads=8, file_ending='.ply', verbose=True)
    pc_data_train.merge(splits[0])
    pc_data_val.merge(splits[1])

# Multi-class runs get a deterministic shuffle so experiments are repeatable.
if flags.object_class == 'multi':
    for dataset in (pc_data_train, pc_data_val):
        dataset.shuffle_data(seed=55)

# Locate the pre-trained autoencoder and reload its saved configuration.
ae_dir = osp.join(top_out_dir, flags.ae_folder)
conf = Conf.load(osp.join(ae_dir, 'configuration'))

# Point the configuration at this AE checkpoint.
conf.ae_dir = ae_dir
conf.ae_name = 'autoencoder'
conf.restore_ae = flags.restore_ae
# NOTE(review): epoch is hard-coded here, while the sibling snippet below
# uses flags.ae_restore_epoch — confirm which is intended.
conf.ae_restore_epoch = 500
conf.fixed_ae = flags.fixed_ae
if conf.fixed_ae:
    # With a frozen AE, pin the batch-norm decay factors to 1.0 so the
    # moving_mean / moving_variance statistics are not updated during training.
    conf.encoder_args['b_norm_decay'] = 1.
    conf.decoder_args['b_norm_decay'] = 1.
    conf.decoder_args['b_norm_decay_finish'] = 1.
# Test split for the first object class; train/val parts are unused here.
syn_id = snc_category_to_synth_id()[class_name[0]]
class_dir = osp.join(top_in_dir, syn_id)
splits = load_and_split_all_point_clouds_under_folder(
    class_dir, n_threads=8, file_ending='.ply', verbose=True)
pc_data_test = splits[2]

# Extend pc_data_test with the test split of each additional class.
for i in range(1, len(class_name)):
    syn_id = snc_category_to_synth_id()[class_name[i]]
    class_dir = osp.join(top_in_dir, syn_id)
    splits = load_and_split_all_point_clouds_under_folder(
        class_dir, n_threads=8, file_ending='.ply', verbose=True)
    pc_data_test.merge(splits[2])

# Load configuration
# Locate the training output folder and reload the configuration that was
# saved there during training.
train_dir = osp.join(top_out_dir, flags.train_folder)
restore_epoch = 500  # checkpoint epoch to restore (hard-coded)
conf = Conf.load(osp.join(train_dir, 'configuration'))

# Ask the encoder to also expose its layer before the symmetry (pooling) op.
conf.encoder_args['return_layer_before_symmetry'] = True

# Reload a saved model
reset_tf_graph()
ae = SNetPointNetAutoEncoder(conf.experiment_name, conf)
ae.restore_model(train_dir, epoch=restore_epoch, verbose=True)

# presumably the number of points the sampler emits — confirm against Conf
n_sample_points = conf.n_samp[0]

# create evaluation dir
eval_dir = create_dir(osp.join(train_dir, "eval"))

# sample point clouds
# NOTE(review): this span is truncated — the ae.get_samples(...) call on the
# next line is never closed, so the span is not valid Python. Its argument
# list and closing parenthesis were lost when the file was assembled;
# recover them from the original example before running.
_, sampled_pc, sample_idx, _ = ae.get_samples(
class_dir = osp.join(top_in_dir, syn_id)
_, _, pc_data_test = load_and_split_all_point_clouds_under_folder(
    class_dir, n_threads=8, file_ending=".ply", verbose=True
)

# Merge the test split of every remaining class into pc_data_test.
for i in range(1, len(class_name)):
    syn_id = snc_category_to_synth_id()[class_name[i]]
    class_dir = osp.join(top_in_dir, syn_id)
    splits = load_and_split_all_point_clouds_under_folder(
        class_dir, n_threads=8, file_ending=".ply", verbose=True
    )
    pc_data_test.merge(splits[2])

# load train configuration
train_dir = create_dir(osp.join(top_out_dir, flags.train_folder))
conf = Conf.load(train_dir + "/configuration")
# Override sampling options with the command-line flags.
conf.use_fps = flags.use_fps
conf.n_sample_points = flags.n_sample_points

# build AE Model
reset_tf_graph()
ae = PointNetAutoEncoder(conf.experiment_name, conf)

# reload a saved model
ae.restore_model(train_dir, epoch=flags.restore_epoch, verbose=True)

n_sample_points = flags.n_sample_points

# create evaluation dir
eval_dir = create_dir(osp.join(train_dir, "eval"))
# NOTE(review): `syn_id` here still holds the value left by the previous
# loop; the line mapping class_name[0] to a synth id (present in the sibling
# snippets above) appears to be missing from this span — confirm against the
# original example.
class_dir = osp.join(top_in_dir, syn_id)
_, _, pc_data_test = load_and_split_all_point_clouds_under_folder(
    class_dir, n_threads=8, file_ending=".ply", verbose=True
)

# Accumulate the remaining classes' test splits onto pc_data_test.
for i in range(1, len(class_name)):
    syn_id = snc_category_to_synth_id()[class_name[i]]
    class_dir = osp.join(top_in_dir, syn_id)
    splits = load_and_split_all_point_clouds_under_folder(
        class_dir, n_threads=8, file_ending=".ply", verbose=True
    )
    pc_data_test.merge(splits[2])

# load configuration
train_dir = osp.join(top_out_dir, flags.train_folder)
conf = Conf.load(osp.join(train_dir, "configuration"))

# update configuration
# presumably allows variable batch sizes at evaluation time — confirm
conf.use_batch_size_for_place_holder = False
conf.encoder_args["return_layer_before_symmetry"] = False
conf.hard_projection = flags.hard_projection
conf.pc_size = [flags.n_sample_points]
conf.n_samp = [flags.n_sample_points, 3]  # 3 presumably xyz coords — confirm

# reload a saved model
reset_tf_graph()
ae = SampleNetProgressivePointNetAE(conf.experiment_name, conf)
ae.restore_model(train_dir, epoch=flags.restore_epoch, verbose=True)

n_input_points = conf.n_input[0]
n_sample_points = conf.n_samp[0]
# NOTE(review): the next two lines ("Esempio n. 5" / "0") are artifacts of
# the web page this file was scraped from, and the parenthesised unpacking
# that follows has lost its opening lines (the loop header and the first
# target, presumably pc_data_train_curr, judging by the merge below). This
# span is not valid Python as-is; restore the missing loop header from the
# matching snippet at the top of the file.
Esempio n. 5
0
        pc_data_val_curr,
        _,
    ) = load_and_split_all_point_clouds_under_folder(class_dir,
                                                     n_threads=8,
                                                     file_ending=".ply",
                                                     verbose=True)
    pc_data_train.merge(pc_data_train_curr)
    pc_data_val.merge(pc_data_val_curr)

# Deterministically shuffle multi-class train/val data for reproducibility.
if flags.object_class == "multi":
    for dataset in (pc_data_train, pc_data_val):
        dataset.shuffle_data(seed=55)

# load autoencoder configuration
ae_dir = osp.join(top_out_dir, flags.ae_folder)
conf = Conf.load(osp.join(ae_dir, "configuration"))

# update autoencoder configuration: point it at this checkpoint and carry
# over the restore/freeze options from the command-line flags.
conf.ae_dir = ae_dir
conf.ae_name = "autoencoder"
conf.restore_ae = flags.restore_ae
conf.ae_restore_epoch = flags.ae_restore_epoch
conf.fixed_ae = flags.fixed_ae
if conf.fixed_ae:
    # A frozen AE must not update its batch-norm running statistics, so the
    # decay factors are pinned to 1.0 (moving_mean / moving_variance stay put).
    conf.encoder_args["b_norm_decay"] = 1.0
    conf.decoder_args["b_norm_decay"] = 1.0
    conf.decoder_args["b_norm_decay_finish"] = 1.0