syn_id = snc_category_to_synth_id()[class_name[0]]
class_dir = osp.join(top_in_dir, syn_id)
_, _, pc_data_test = load_and_split_all_point_clouds_under_folder(
    class_dir, n_threads=8, file_ending='.ply', verbose=True)

for i in range(1, len(class_name)):
    syn_id = snc_category_to_synth_id()[class_name[i]]
    class_dir = osp.join(top_in_dir, syn_id)
    _, _, pc_data_test_curr = load_and_split_all_point_clouds_under_folder(
        class_dir, n_threads=8, file_ending='.ply', verbose=True)
    pc_data_test.merge(pc_data_test_curr)
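
The excerpts on this page omit their imports. A sketch of what they rely on, assuming the latent_3d_points package layout (module paths are assumptions; the SNetPointNetAutoEncoder and SampleNetProgressivePointNetAE classes come from the corresponding sampling repository):

import os.path as osp

from latent_3d_points.src.ae_templates import (
    mlp_architecture_ala_iclr_18, default_train_params)
from latent_3d_points.src.autoencoder import Configuration as Conf
from latent_3d_points.src.in_out import (
    snc_category_to_synth_id, create_dir,
    load_and_split_all_point_clouds_under_folder)
from latent_3d_points.src.point_net_ae import PointNetAutoEncoder
from latent_3d_points.src.tf_utils import reset_tf_graph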

# Load configuration
train_dir = osp.join(top_out_dir, flags.train_folder)
restore_epoch = 500
conf = Conf.load(osp.join(train_dir, 'configuration'))

conf.encoder_args['return_layer_before_symmetry'] = True

# Reload a saved model
reset_tf_graph()
ae = SNetPointNetAutoEncoder(conf.experiment_name, conf)
ae.restore_model(train_dir, epoch=restore_epoch, verbose=True)

n_sample_points = conf.n_samp[0]

# create evaluation dir
eval_dir = create_dir(osp.join(train_dir, "eval"))

# sample point clouds from the test set
# (the call below was truncated in the original; the argument is assumed)
_, sampled_pc, sample_idx, _ = ae.get_samples(pc_data_test.point_clouds)

syn_id = snc_category_to_synth_id()[class_name[0]]
class_dir = osp.join(top_in_dir, syn_id)
pc_data_train, pc_data_val, _ = load_and_split_all_point_clouds_under_folder(
    class_dir, n_threads=8, file_ending='.ply', verbose=True)

for i in range(1, len(class_name)):
    syn_id = snc_category_to_synth_id()[class_name[i]]
    class_dir = osp.join(top_in_dir, syn_id)
    pc_data_train_curr, pc_data_val_curr, _ = load_and_split_all_point_clouds_under_folder(
        class_dir, n_threads=8, file_ending='.ply', verbose=True)
    pc_data_train.merge(pc_data_train_curr)
    pc_data_val.merge(pc_data_val_curr)

if flags.object_class == 'multi':
    pc_data_train.shuffle_data(seed=55)
    pc_data_val.shuffle_data(seed=55)

ae_dir = osp.join(top_out_dir, flags.ae_folder)

# Load autoencoder configuration
conf = Conf.load(osp.join(ae_dir, 'configuration'))

# Update autoencoder configuration
conf.ae_dir = ae_dir
conf.ae_name = 'autoencoder'
conf.restore_ae = flags.restore_ae
conf.ae_restore_epoch = 500
conf.fixed_ae = flags.fixed_ae
if conf.fixed_ae:
    # prevent updates of the batch-normalization
    # moving_mean and moving_variance parameters
    conf.encoder_args['b_norm_decay'] = 1.
    conf.decoder_args['b_norm_decay'] = 1.
    conf.decoder_args['b_norm_decay_finish'] = 1.

syn_id = snc_category_to_synth_id()[class_name[0]]
class_dir = osp.join(top_in_dir, syn_id)
_, _, pc_data_test = load_and_split_all_point_clouds_under_folder(
    class_dir, n_threads=8, file_ending=".ply", verbose=True
)

for i in range(1, len(class_name)):
    syn_id = snc_category_to_synth_id()[class_name[i]]
    class_dir = osp.join(top_in_dir, syn_id)
    _, _, pc_data_test_curr = load_and_split_all_point_clouds_under_folder(
        class_dir, n_threads=8, file_ending=".ply", verbose=True
    )
    pc_data_test.merge(pc_data_test_curr)

# load configuration
train_dir = osp.join(top_out_dir, flags.train_folder)
conf = Conf.load(osp.join(train_dir, "configuration"))

# update configuration
conf.use_batch_size_for_place_holder = False
conf.encoder_args["return_layer_before_symmetry"] = False
conf.hard_projection = flags.hard_projection
conf.pc_size = [flags.n_sample_points]
conf.n_samp = [flags.n_sample_points, 3]

# reload a saved model
reset_tf_graph()
ae = SampleNetProgressivePointNetAE(conf.experiment_name, conf)
ae.restore_model(train_dir, epoch=flags.restore_epoch, verbose=True)

n_input_points = conf.n_input[0]
n_sample_points = conf.n_samp[0]

syn_id = snc_category_to_synth_id()[class_name[0]]
class_dir = osp.join(top_in_dir, syn_id)
_, _, pc_data_test = load_and_split_all_point_clouds_under_folder(
    class_dir, n_threads=8, file_ending=".ply", verbose=True
)

for i in range(1, len(class_name)):
    syn_id = snc_category_to_synth_id()[class_name[i]]
    class_dir = osp.join(top_in_dir, syn_id)
    _, _, pc_data_test_curr = load_and_split_all_point_clouds_under_folder(
        class_dir, n_threads=8, file_ending=".ply", verbose=True
    )
    pc_data_test.merge(pc_data_test_curr)

# load train configuration
train_dir = create_dir(osp.join(top_out_dir, flags.train_folder))
conf = Conf.load(osp.join(train_dir, "configuration"))
conf.use_fps = flags.use_fps
conf.n_sample_points = flags.n_sample_points

# build AE Model
reset_tf_graph()
ae = PointNetAutoEncoder(conf.experiment_name, conf)

# reload a saved model
ae.restore_model(train_dir, epoch=flags.restore_epoch, verbose=True)

n_sample_points = flags.n_sample_points

# create evaluation dir
eval_dir = create_dir(osp.join(train_dir, "eval"))
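
Setting conf.use_fps switches the sampler to farthest point sampling (FPS). As a reference for what that baseline computes, and not the repository's implementation, here is a minimal NumPy sketch:

import numpy as np

def farthest_point_sample(points, n_sample):
    """Greedy farthest point sampling over an (N, 3) array.

    Returns indices of n_sample points that are mutually far apart.
    Reference sketch only; the repo ships its own implementation.
    """
    n = points.shape[0]
    idx = np.zeros(n_sample, dtype=np.int64)
    dist = np.full(n, np.inf)
    idx[0] = 0  # start from an arbitrary point
    for i in range(1, n_sample):
        # distance of every point to the most recently selected point
        d = np.sum((points - points[idx[i - 1]]) ** 2, axis=1)
        dist = np.minimum(dist, d)  # distance to the selected set
        idx[i] = np.argmax(dist)    # pick the farthest remaining point
    return idx

Given a cloud pc of shape (N, 3), pc[farthest_point_sample(pc, flags.n_sample_points)] yields the sampled subset.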
Example 5

syn_id = snc_category_to_synth_id()[class_name[0]]
class_dir = osp.join(top_in_dir, syn_id)
pc_data_train, pc_data_val, _ = load_and_split_all_point_clouds_under_folder(
    class_dir, n_threads=8, file_ending=".ply", verbose=True)

for i in range(1, len(class_name)):
    syn_id = snc_category_to_synth_id()[class_name[i]]
    class_dir = osp.join(top_in_dir, syn_id)
    (
        pc_data_train_curr,
        pc_data_val_curr,
        _,
    ) = load_and_split_all_point_clouds_under_folder(class_dir,
                                                     n_threads=8,
                                                     file_ending=".ply",
                                                     verbose=True)
    pc_data_train.merge(pc_data_train_curr)
    pc_data_val.merge(pc_data_val_curr)

if flags.object_class == "multi":
    pc_data_train.shuffle_data(seed=55)
    pc_data_val.shuffle_data(seed=55)

# load autoencoder configuration
ae_dir = osp.join(top_out_dir, flags.ae_folder)
conf = Conf.load(osp.join(ae_dir, "configuration"))

# update autoencoder configuration
conf.ae_dir = ae_dir
conf.ae_name = "autoencoder"
conf.restore_ae = flags.restore_ae
conf.ae_restore_epoch = flags.ae_restore_epoch
conf.fixed_ae = flags.fixed_ae
if conf.fixed_ae:
    # prevent updates of the batch-normalization
    # moving_mean and moving_variance parameters
    conf.encoder_args["b_norm_decay"] = 1.0
    conf.decoder_args["b_norm_decay"] = 1.0
    conf.decoder_args["b_norm_decay_finish"] = 1.0
Example 6
# load default training parameters
train_params = default_train_params()

encoder, decoder, enc_args, dec_args = mlp_architecture_ala_iclr_18(
    n_pc_points, bneck_size)

train_dir = create_dir(osp.join(top_out_dir, flags.train_folder))

conf = Conf(
    n_input=[n_pc_points, 3],
    loss=ae_loss,
    training_epochs=train_params["training_epochs"],
    batch_size=train_params["batch_size"],
    denoising=train_params["denoising"],
    learning_rate=train_params["learning_rate"],
    train_dir=train_dir,
    loss_display_step=train_params["loss_display_step"],
    saver_step=train_params["saver_step"],
    z_rotate=train_params["z_rotate"],
    encoder=encoder,
    decoder=decoder,
    encoder_args=enc_args,
    decoder_args=dec_args,
)
conf.experiment_name = experiment_name
conf.held_out_step = 5  # how often to evaluate/print out loss on
# held_out data (if they are provided in ae.train()).
conf.class_name = class_name
conf.use_fps = flags.use_fps
conf.n_sample_points = flags.n_sample_points
conf.n_samp_out = [2048, 3]
conf.training_epochs = flags.training_epochs
Example 7
# Load default training parameters
train_params = default_train_params()

encoder, decoder, enc_args, dec_args = mlp_architecture_ala_iclr_18(
    n_pc_points, bneck_size)

train_dir = create_dir(osp.join(top_out_dir, flags.train_folder))

conf = Conf(n_input=[n_pc_points, 3],
            loss=ae_loss,
            training_epochs=train_params['training_epochs'],
            batch_size=train_params['batch_size'],
            denoising=train_params['denoising'],
            learning_rate=train_params['learning_rate'],
            train_dir=train_dir,
            loss_display_step=train_params['loss_display_step'],
            saver_step=train_params['saver_step'],
            z_rotate=train_params['z_rotate'],
            encoder=encoder,
            decoder=decoder,
            encoder_args=enc_args,
            decoder_args=dec_args)
conf.experiment_name = experiment_name
conf.held_out_step = 5  # How often to evaluate/print out loss on
# held_out data (if they are provided in ae.train()).
conf.class_name = class_name
conf.use_fps = flags.use_fps
conf.n_sample_points = flags.n_sample_points
conf.n_samp_out = [2048, 3]
conf.save(osp.join(train_dir, 'configuration'))
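
After the configuration is saved, training typically proceeds by building the model and calling ae.train. A minimal sketch, assuming the latent_3d_points-style ae.train(train_data, conf, log_file=...) interface and the pc_data_train prepared in the earlier examples (the log-file handling is illustrative):

reset_tf_graph()
ae = PointNetAutoEncoder(conf.experiment_name, conf)

buf_size = 1  # flush the log after every line
with open(osp.join(conf.train_dir, 'train_stats.txt'), 'a', buf_size) as fout:
    train_stats = ae.train(pc_data_train, conf, log_file=fout)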