##################
# Train the GAN. #
##################
for _ in range(n_epochs):
    loss, duration = gan._single_epoch_train(latent_data, batch_size,
                                             noise_params)
    epoch = int(gan.sess.run(gan.increment_epoch))
    print(epoch, loss)

    if save_gan_model and epoch in saver_step:
        checkpoint_path = osp.join(train_dir, MODEL_SAVER_ID)
        gan.saver.save(gan.sess, checkpoint_path, global_step=gan.epoch)

    if save_synthetic_samples and epoch in saver_step:
        syn_latent_data = gan.generate(n_syn_samples, noise_params)
        syn_data = ae.decode(syn_latent_data)
        np.savez(osp.join(synthetic_data_out_dir, 'epoch_' + str(epoch)),
                 syn_data)
        # for k in range(3):  # plot three (synthetic) random examples.
        #     plot_3d_point_cloud(syn_data[k][:, 0], syn_data[k][:, 1], syn_data[k][:, 2],
        #                        in_u_sphere=True)

    train_stats.append((epoch, ) + loss)

# if plot_train_curve:
# x = range(len(train_stats))
# d_loss = [t[1] for t in train_stats]
# g_loss = [t[2] for t in train_stats]
# plt.plot(x, d_loss, '--')
# plt.plot(x, g_loss)
# plt.title('Latent GAN training. (%s)' %(class_name))
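# A runnable version of the training-curve plot sketched above (an illustrative
# addition, assuming matplotlib is available and class_name names the trained
# shape class):
import matplotlib.pyplot as plt

epochs = [t[0] for t in train_stats]  # train_stats holds (epoch, d_loss, g_loss) tuples
d_loss = [t[1] for t in train_stats]
g_loss = [t[2] for t in train_stats]
plt.plot(epochs, d_loss, '--', label='discriminator loss')
plt.plot(epochs, g_loss, label='generator loss')
plt.title('Latent GAN training (%s)' % class_name)
plt.xlabel('epoch')
plt.ylabel('loss')
plt.legend()
plt.show()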
Example #2
num_pts_to_mask = [600]

l2_vecs = []

for j in num_pts_to_mask:
    lv_array = np.zeros([array_row_size, bneck_size])
    for i in range(num_iters):
        feed_pc, feed_model_names, _ = all_pc_data.next_batch(batch_size)
        # latent_codes = ae.transform(feed_pc)  # might also switch to encoder_with_convs_and_symmetry in ae_template, though not necessary
        latent_codes, x_masked, x = ae.transform_with_mask(feed_pc,
                                                           num_pts_removed=j,
                                                           mask_type=2)
        lv_array[i * batch_size:(i + 1) * batch_size, :] = latent_codes

    l2_vecs.append(lv_array[0])
    reconstructions = ae.decode(lv_array)
    pref = './recon_from_ac/'
    for k in range(5):
        write_ply(
            pref + "airplane_test_aerecon_" + str(j) + "_" + str(k) + "_.ply",
            reconstructions[k, :, :])
        write_ply(pref + "airplane_test_" + str(j) + "_gt_" + str(k) + "_.ply",
                  x[k, :, :])
        write_ply(
            pref + "airplane_test_" + str(j) + "_gtmasked_" + str(k) + "_.ply",
            x_masked[k, :, :])

np.savetxt(latent_vec_file, lv_array)  # save the masked latent vectors
# for i in range(len(l2_vecs)):
#     dist = np.linalg.norm(l2_vecs[i] - l2_vecs[0])
#     print("l2 dist between " + str(i) + " and 0: " + str(dist))
    reconstructions = ae.reconstruct_with_mask(feed_pc)
    # shape2 = reconstructions[0][2,:,:]
    print "loss : " + str(reconstructions[1])

    # write_ply(pref+"airplane0_acrecon_upsampling.ply",reconstructions[0][0,:,:])
    # write_ply(pref+"airplane1_acrecon_upsampling.ply",reconstructions[0][1,:,:])
    # write_ply(pref+"airplane2_acrecon_upsampling.ply",reconstructions[0][2,:,:])
    # write_ply(pref+"airplane3_acrecon.ply",reconstructions[0][3,:,:])
    # write_ply(pref+"airplane4_acrecon.ply",reconstructions[0][4,:,:])
    # # pdb.set_trace()
    # print "reconstructed, shape:" + str(reconstructions.shape)
    # latent_codes = ae.transform(feed_pc)

else:
    print "reconstructing from lvs"
    pref = './recon_from_ac/'

    # lv_array  = np.loadtxt('/home/shubham/latent_3d_points/notebooks/cleaned_vector_0.01.txt')
    # lv_array  = np.loadtxt('/home/shubham/latent_3d_points/notebooks/cleaned_vector_test_0.01.txt')
    # lv_array  = np.loadtxt('/home/shubham/latent_3d_points/notebooks/test_lvs.txt') ##directly use input vecs
    # lv_array  = np.loadtxt('/home/shubham/latent_3d_points/data/single_class_ae/clean/lv_with_mask_5.txt') ##noisy vecs
    lv_array = np.loadtxt('cleaned_aefull_wgan_chd_0.001.txt')  # cleaned WGAN latent vectors
    lv_batch = lv_array

    reconstructions = ae.decode(lv_batch)
    for i in range(5):
        write_ply(pref + "airplane_test_wgan_chd0.001_" + str(i) + "_.ply",
                  reconstructions[i, :, :])

# Use any plotting mechanism such as matplotlib to visualize the results.
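# For instance, a minimal matplotlib sketch (an illustrative addition, not part
# of the original script) that scatters one decoded cloud; `reconstructions`
# returned by ae.decode(...) has shape (num_clouds, num_points, 3):
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D  # noqa: F401 -- registers the '3d' projection

pc = reconstructions[0]
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(pc[:, 0], pc[:, 1], pc[:, 2], s=2)
ax.set_title('decoded point cloud 0')
plt.show()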
Example #4
    #syn_id = snc_category_to_synth_id()[class_name]
    #class_dir = osp.join(top_in_dir , syn_id)
    class_dir = top_in_dir
    all_pc_data = load_all_point_clouds_under_folder(class_dir, n_threads=8, file_ending='.ply', verbose=True)



    reset_tf_graph()
    ae_conf = Conf.load(ae_configuration)
    ae_conf.encoder_args['verbose'] = False
    ae_conf.decoder_args['verbose'] = False
    ae = PointNetAutoEncoder(ae_conf.experiment_name, ae_conf)
    ae.restore_model(ae_conf.train_dir, restore_epoch, verbose=True)

    latent_codes = ae.get_latent_codes(all_pc_data.point_clouds)


    reconstructions = ae.decode(latent_codes)


    i = 1
    plot_3d_point_cloud(all_pc_data.point_clouds[i][:, 0],
                        all_pc_data.point_clouds[i][:, 1],
                        all_pc_data.point_clouds[i][:, 2], in_u_sphere=True)


    plot_3d_point_cloud(reconstructions[i][:, 0],
                        reconstructions[i][:, 1],
                        reconstructions[i][:, 2], in_u_sphere=True)
Example #5
    reset_tf_graph()
    ae_configuration = model_dir + '/configuration'
    ae_conf = Conf.load(ae_configuration)
    ae_conf.encoder_args['verbose'] = False
    ae_conf.decoder_args['verbose'] = False
    ae = PointNetAutoEncoder(ae_conf.experiment_name, ae_conf)

    ae.restore_model(model_dir, restore_epoch, verbose=True)

    #latent_code = ae.transform(test_pcs[:1])
    latent_codes = ae.get_latent_codes(test_pcs)

    for pc_idx in range(len(latent_codes) - 1):
        a = latent_codes[pc_idx]
        b = latent_codes[pc_idx + 1]  #aug_latent_codes[0]
        diff = a - b
        steps = np.linspace(0.0, 1.0, num=9)
        interpolations = []
        for step in steps[:-1]:
            interpolations.append(a - step * diff)

        reconstructions = ae.decode(interpolations)

        for inter_id, rec in enumerate(reconstructions):
            #plot_3d_point_cloud(rec[:, 0], rec[:, 1], rec[:, 2], in_u_sphere=True)
            points2file(
                rec, "output/{}-{}_{}-{}.ply".format(pc_idx, pc_idx + 1,
                                                     inter_id,
                                                     len(reconstructions) - 1))
Example #6
    Command:
        -p path/<filename>.npy
    """
    # construct the argument parser and parse the arguments
    '''
    ap = argparse.ArgumentParser()
    ap.add_argument("-e", "--enc", type=str,
                    help="numpy saved array")
    args = vars(ap.parse_args())

    enc = np.load(args["enc"])
    '''
    enc = np.load("output/{}_latent.npy".format(DATASET))
    names = np.load("output/{}_names.npy".format(DATASET))

    reset_tf_graph()
    ae_configuration = MODEL_DIR + '/configuration'
    ae_conf = Conf.load(ae_configuration)
    ae_conf.encoder_args['verbose'] = False
    ae_conf.decoder_args['verbose'] = False
    ae = PointNetAutoEncoder(ae_conf.experiment_name, ae_conf)

    ae.restore_model(MODEL_DIR, RESTORE_EPOCH, verbose=True)

    reconstructions = ae.decode(enc)

    for idx in range(0, 3):
        # for idx in range(len(reconstructions)):
        print(names[idx])
        points2file(reconstructions[idx], "output/rec_{}".format(names[idx]))
Example #7
            batch_size=train_params['batch_size'],
            denoising=train_params['denoising'],
            learning_rate=train_params['learning_rate'],
            loss_display_step=train_params['loss_display_step'],
            saver_step=train_params['saver_step'],
            z_rotate=train_params['z_rotate'],
            train_dir=train_dir,
            encoder=encoder,
            decoder=decoder,
            encoder_args=enc_args,
            decoder_args=dec_args
           )
conf.experiment_name = experiment_name
conf.held_out_step = 5    

reset_tf_graph()
ae = PointNetAutoEncoder(conf.experiment_name, conf)
ae.restore_model(model_dir, 500)

model_path = '../data/lgan_plane/G_network_299.pth'
g = torch.load(model_path)
batch_size = 50

for i in range(100):
    noise = Variable(torch.cuda.FloatTensor(batch_size, 128))
    generate_noise(noise)
    fake_x = g(noise).data.cpu().numpy()
    fake_pc = ae.decode(fake_x)
    print(fake_pc.shape)
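
# generate_noise is not defined in this excerpt; a plausible minimal stand-in
# (an assumption: it fills the pre-allocated tensor in place with standard
# normal samples) would be:
def generate_noise(noise, mean=0.0, std=1.0):
    # In-place fill of the Variable's underlying tensor with Gaussian noise.
    noise.data.normal_(mean, std)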