###############################################################################################

###############################################################################################
normalize_shape = args.normalize_shape
ae_loss = 'chamfer'  # Which distance to use for the matchings.
batch_size = 100  # Pick a size that fits in GPU memory.
normalize = True  # Matched distances are divided by the number of points of the point-clouds.
top_in_dir = args.dataset_dir
class_name = args.class_name
syn_id = snc_category_to_synth_id()[class_name]
class_dir = osp.join(top_in_dir, syn_id, 'val')
###############################################################################################

all_pc_data = load_all_point_clouds_under_folder(class_dir,
                                                 n_threads=8,
                                                 file_ending='.npy',
                                                 verbose=True,
                                                 normalize=normalize_shape)

all_ids = np.arange(all_pc_data.num_examples)
print("Lane of all data ids:%d" % len(all_ids))

# Sample 2 x 2048 disjoint point indices (out of the 15000 points per cloud):
# one subset forms the reference clouds, the other the sample clouds.
pidxs = np.random.choice(range(15000), 2048 * 2, replace=False)
tr_idxs = pidxs[:2048]
te_idxs = pidxs[2048:]
ref_pcs = all_pc_data.point_clouds[:, tr_idxs, :]
sample_pcs = all_pc_data.point_clouds[:, te_idxs, :]

print("Dump the output so that we can use other codes to evaluate it :(")
np.save(args.ref_outfname % args.class_name, ref_pcs)
np.save(args.smp_outfname % args.class_name, sample_pcs)
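
# Sanity check (a minimal sketch; the downstream evaluator itself is outside
# this script): the dumped arrays round-trip through np.load unchanged.
assert np.load(args.ref_outfname % args.class_name).shape == ref_pcs.shape
assert np.load(args.smp_outfname % args.class_name).shape == sample_pcs.shape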
experiment_name = 'test'
n_pc_points = 2048  # Number of points per model.
bneck_size = 128  # Bottleneck-AE size
ae_loss = 'chamfer'  # Loss to optimize: 'emd' or 'chamfer'
# class_name = raw_input('Give me the class name (e.g. "chair"): ').lower()
class_name = 'chair'

# Load Point-Clouds


syn_id = snc_category_to_synth_id()[class_name]
class_dir = osp.join(top_in_dir, syn_id)
all_pc_data = load_all_point_clouds_under_folder(class_dir,
                                                 n_threads=8,
                                                 file_ending='.ply',
                                                 verbose=True)

# Load default training parameters (some of which are listed below). For more
# details, print the configuration object.
#
#     'batch_size': 50
#     'denoising': False     (# by default AE is not denoising)
#     'learning_rate': 0.0005
#     'z_rotate': False      (# randomly rotate models of each batch)
#     'loss_display_step': 1 (# display loss at end of these many epochs)
#     'saver_step': 10       (# over how many epochs to save neural-network)
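# Sketch of inspecting those defaults (assumes default_train_params from
# latent_3d_points.src.ae_templates, as used in the later examples):
train_params = default_train_params()
print(train_params)  # prints every default listed above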
syn_id = snc_category_to_synth_id()[class_name]
class_dir = osp.join(top_in_dir, syn_id, 'train')
print(syn_id)
print(class_dir)

if args.split_file is not None:
    file_names = np.load(args.split_file).item()[syn_id]['train']
    file_names = [
        os.path.join(args.dataset_dir, syn_id, f + ".npy") for f in file_names
    ]
else:
    file_names = None
all_pc_data = load_all_point_clouds_under_folder(
    class_dir,
    n_threads=8,
    file_ending='.npy',
    max_num_points=2048,
    verbose=True,
    normalize=args.normalize_shape,
    file_names=file_names)
print('Shape of DATA =', all_pc_data.point_clouds.shape)

#######################
# Load pre-trained AE #
#######################
reset_tf_graph()
ae_conf = Conf.load(ae_configuration)
ae_conf.encoder_args['verbose'] = False
ae_conf.decoder_args['verbose'] = False
ae = PointNetAutoEncoder(ae_conf.experiment_name, ae_conf)
ae.restore_model(ae_conf.train_dir, ae_epoch, verbose=True)
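
# Quick usage sketch for the restored AE. `next_batch`, `transform`, and
# `reconstruct` follow the latent_3d_points AutoEncoder notebooks; treat the
# exact return values as assumptions, not part of this snippet.
feed_pc, _, _ = all_pc_data.next_batch(10)
latent_codes = ae.transform(feed_pc)          # bottleneck embeddings
reconstructions = ae.reconstruct(feed_pc)[0]  # reconstruct() also returns the loss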
Example #4
        -p path/<filename>.npy
    """
    # construct the argument parser and parse the arguments
    #ap = argparse.ArgumentParser()
    #ap.add_argument("-l", "--list", type=str,
    #                help="list of point cloud files")
    #args = vars(ap.parse_args())

    top_out_dir = '../data/'  # Use to save Neural-Net check-points etc.

    experiment_name = 'raw_ear_gan_with_w_gan_loss'
    n_pc_points = 2048  # Number of points per model.

    all_pc_data = load_all_point_clouds_under_folder(
        '/home/dmri/Documents/github/latent_3d_points/data/ear_data/unordered/',
        n_threads=8,
        file_ending='.ply',
        verbose=True)

    use_wgan = True  # Wasserstein with gradient penalty, or not?
    n_epochs = 6000  # Epochs to train.

    plot_train_curve = True
    save_gan_model = True
    saver_step = np.array(
        [10, 20, 30, 40, 50, 100, 1000, 2500, 4000, 6000, 10000, 20000])

    # If True, every 'saver_step' epochs we produce & save synthetic point clouds.
    save_synthetic_samples = True
    # How many synthetic samples to produce at each save step.
    n_syn_samples = all_pc_data.num_examples
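
    # Hedged sketch of the loop these flags drive (the `gan` object,
    # `noise_params`, `train_dir`, and the `_single_epoch_train` / `generate`
    # method names follow the latent_3d_points GAN notebooks and are
    # assumptions, not part of this snippet):
    # for _ in range(n_epochs):
    #     loss, duration = gan._single_epoch_train(all_pc_data, batch_size, noise_params)
    #     epoch = int(gan.sess.run(gan.increment_epoch))
    #     if save_gan_model and epoch in saver_step:
    #         gan.saver.save(gan.sess, osp.join(train_dir, 'models.ckpt'), global_step=gan.epoch)
    #     if save_synthetic_samples and epoch in saver_step:
    #         syn_data = gan.generate(n_syn_samples, noise_params)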
Example #5
n_pc_points = int(sys.argv[2])  # Number of points per model (e.g. 600).
bneck_size = 128                # Bottleneck-AE size
ae_loss = sys.argv[3]           # Loss to optimize: 'emd' or 'chamfer'
class_name = sys.argv[4]        # e.g. 'airplane'
z_rotate = sys.argv[5]          # 'True' or 'False'
fixed_points = sys.argv[6]      # 'True' or 'False'

syn_id = snc_category_to_synth_id()[class_name]
class_dir = osp.join(top_in_dir, syn_id)

# point cloud instance
train_pc, val_pc, test_pc = load_all_point_clouds_under_folder(
    class_dir,
    n_threads=2,
    file_ending='.ply',
    verbose=True,
    fixed_points=fixed_points == 'True',
    num_points=n_pc_points)
train_dir = create_dir(osp.join(top_out_dir, 'train_' + experiment_name))
val_dir = create_dir(osp.join(top_out_dir, 'val_' + experiment_name))
test_dir = create_dir(osp.join(top_out_dir, 'test_' + experiment_name))

pickle_data(osp.join(train_dir, 'train_pc.pkl'), train_pc)
pickle_data(osp.join(val_dir, 'val_pc.pkl'), val_pc)
pickle_data(osp.join(test_dir, 'test_pc.pkl'), test_pc)
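
# Round-trip sketch: assuming the companion unpickle_data helper from
# latent_3d_points' in_out module, a split can be reloaded later with e.g.
# train_pc = next(unpickle_data(osp.join(train_dir, 'train_pc.pkl')))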

# Default training parameters (a dict) and the ICLR-18 MLP architecture.
train_params = default_train_params()
encoder, decoder, enc_args, dec_args = mlp_architecture_ala_iclr_18(
    n_pc_points, bneck_size)
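
# Hedged sketch of the Conf this feeds into (argument names follow the
# latent_3d_points single-class-AE notebook and are assumptions here):
conf = Conf(n_input=[n_pc_points, 3],
            loss=ae_loss,
            training_epochs=train_params['training_epochs'],
            batch_size=train_params['batch_size'],
            denoising=train_params['denoising'],
            learning_rate=train_params['learning_rate'],
            train_dir=train_dir,
            loss_display_step=train_params['loss_display_step'],
            saver_step=train_params['saver_step'],
            z_rotate=(z_rotate == 'True'),
            encoder=encoder,
            decoder=decoder,
            encoder_args=enc_args,
            decoder_args=dec_args)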
Example #6
#####################
print("Load data (train set)")
tr_shape_lst = []
te_shape_lst = []
tr_lbl = []
te_lbl = []
class_lst = []
for i, f in enumerate(os.listdir(top_in_dir)):
    # Train
    tr_class_dir = os.path.join(top_in_dir, f, 'train')
    if not os.path.isdir(tr_class_dir):
        continue
    class_lst.append(f)

    all_tr_pc_data = load_all_point_clouds_under_folder(
        tr_class_dir, n_threads=8, file_ending='.npy', max_num_points=n_pc_points,
        verbose=True, normalize=args.normalize_shape, rotation_axis=None
    )
    tr_pc, _, _ = all_tr_pc_data.full_epoch_data()
    N = tr_pc.shape[0]
    tr_shape_lst.append(tr_pc)
    for _ in range(N):
        tr_lbl.append(i)

    # Test
    te_class_dir = os.path.join(top_in_dir, f, 'test')
    all_te_pc_data = load_all_point_clouds_under_folder(
        te_class_dir, n_threads=8, file_ending='.npy', max_num_points=n_pc_points,
        verbose=True, normalize=args.normalize_shape, rotation_axis=None
    )
    te_pc, _, _ = all_te_pc_data.full_epoch_data()
    M = te_pc.shape[0]
Example #7
from latent_3d_points.src.general_utils import plot_3d_point_cloud

TRAIN = True
load_pre_trained_ae = False
restore_epoch = 0

if __name__ == '__main__':
    top_out_dir = '../data/'  # Use to save Neural-Net check-points etc.
    n_pc_points = 1024  # Number of points per model.
    bneck_size = 16  # Bottleneck-AE size
    ae_loss = 'emd'  # Loss to optimize: 'emd' or 'chamfer'
    experiment_name = 'kin_laying_{}_{}_{}'.format(ae_loss, n_pc_points,
                                                   bneck_size)
    train_pc_data = load_all_point_clouds_under_folder(
        '/home/dmri/datasets/in_use/train_1024/',
        n_threads=8,
        file_ending='.ply',
        verbose=True)
    val_pc_data = load_all_point_clouds_under_folder(
        '/home/dmri/datasets/in_use/val_1024/',
        n_threads=8,
        file_ending='.ply',
        verbose=True)
    print("batch size should be < {} and {}".format(train_pc_data.num_examples,
                                                    val_pc_data.num_examples))

    if TRAIN:
        train_params = default_train_params(single_class=False)
        encoder, decoder, enc_args, dec_args = mlp_architecture_ala_iclr_18_small(
            n_pc_points, bneck_size)
        train_dir = create_dir(osp.join(top_out_dir, experiment_name))
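
        # Hedged sketch of the kickoff that typically follows (a Conf like the
        # one assembled in the earlier example, then train; ae.train's exact
        # signature is an assumption from the latent_3d_points notebooks):
        # ae = PointNetAutoEncoder(conf.experiment_name, conf)
        # train_stats = ae.train(train_pc_data, conf)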