def GAN_cleaner(latent_vec=None,
                masked_cloud=None,
                ae=None,
                gt=None,
                num_epochs=20000):

    # latent_vec = np.loadtxt('/home/shubham/latent_3d_points/data/single_class_ae/clean/lv_with_mask_5.txt')
    # latent_vec = np.loadtxt('/home/shubham/latent_3d_points/notebooks/test_lvs.txt')
    # if(latent_vec is None):
    # 	latent_vec = np.loadtxt('/home/shubham/latent_3d_points/notebooks/gt_noisy_airplane_full.txt')

    if latent_vec is None:
        raise ValueError("latent_vec must be provided")
    if masked_cloud is None:
        raise ValueError("masked_cloud must be provided")

    bneck_size = latent_vec.shape[1]
    latent_vec = latent_vec[:10]  # keep only the first 10 latent vectors
    batch_size = latent_vec.shape[0]
    # latent_vec_class = latent_dataset(latent_vec)
    latentgan = LatentGAN(name='latentgan',
                          learning_rate=0.0001,
                          n_output=[bneck_size],
                          noise_dim=128,
                          discriminator=discriminator,
                          generator=generator,
                          beta=0.9,
                          batch_size=batch_size,
                          masked_cloud_size=masked_cloud.shape[1],
                          ae=ae)

    #(d_loss, g_loss), time = latentgan._single_epoch_train(latent_vec,masked_cloud,epoch = num_epochs,
    latentgan._single_epoch_train(
        latent_vec,
        masked_cloud,
        gt,
        epoch=num_epochs,
        save_path='/home/swami/deeprl/latent_3d_points/data/gan_model/wgan_ae_train',
        restore_epoch='1599')
    # import pdb
    # pdb.set_trace()
    feed_dict = None
    decodes, noise = latentgan.sess.run(
        [latentgan.gen_reconstr, latentgan.noise], feed_dict=feed_dict)

    from latent_3d_points.src.IO import write_ply
    pref = './recon_from_ac/'
    # pdb.set_trace()
    for i in range(5):
        write_ply(pref + "airplane_wgan_test" + str(i) + "_.ply",
                  decodes[i, :, :])
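
# A minimal, hypothetical usage sketch for GAN_cleaner, following the commented-out
# np.loadtxt lines at the top of the function. The file paths and the `ae` / `gt`
# objects below are illustrative assumptions, not part of the original script.
# latent_vecs = np.loadtxt('path/to/latent_vectors.txt')    # shape (N, bneck_size)
# masked_clouds = np.load('path/to/masked_clouds.npy')      # shape (N, num_points, 3)
# gt_clouds = np.load('path/to/gt_clouds.npy')              # shape (N, num_points, 3)
# GAN_cleaner(latent_vec=latent_vecs, masked_cloud=masked_clouds,
#             ae=ae, gt=gt_clouds, num_epochs=20000)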
# ---- Example no. 2 ----
for j in num_pts_to_mask:
    lv_array = np.zeros([array_row_size, bneck_size])
    for i in range(num_iters):
        feed_pc, feed_model_names, _ = all_pc_data.next_batch(batch_size)
        # latent_codes = ae.transform(feed_pc)  # could also switch to encoder_with_convs_and_symmetry in ae_template, though not necessary
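        # transform_with_mask is expected to return the bottleneck codes for the
        # masked inputs, the clouds with j points removed (x_masked), and the
        # original unmasked clouds (x); inferred from how the values are used below.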
        latent_codes, x_masked, x = ae.transform_with_mask(feed_pc,
                                                           num_pts_removed=j,
                                                           mask_type=2)
        lv_array[i * batch_size:(i + 1) * batch_size, :] = latent_codes

    l2_vecs.append(lv_array[0])
    reconstructions = ae.decode(lv_array)
    pref = './recon_from_ac/'
    for k in range(5):
        write_ply(
            pref + "airplane_test_aerecon_" + str(j) + "_" + str(k) + "_.ply",
            reconstructions[k, :, :])
        write_ply(pref + "airplane_test_" + str(j) + "_gt_" + str(k) + "_.ply",
                  x[k, :, :])
        write_ply(
            pref + "airplane_test_" + str(j) + "_gtmasked_" + str(k) + "_.ply",
            x_masked[k, :, :])

np.savetxt(latent_vec_file, lv_array)  # save the masked latent vectors (comment out to skip)
# for i in range(len(l2_vecs)):
#     dist = np.linalg.norm(l2_vecs[i] - l2_vecs[0])
#     print("l2 dist betwen " + str(i) + " and 0 :" + str(dist))
#     dist = np.linalg.norm(l2_vecs[i] - l2_vecs[0],ord=1)
#     print("l1: "+ str(dist))

clean_with_gan_and_reconstruct = True
num_iters = int(math.ceil(num_input / float(batch_size)))
array_row_size = int(num_iters * batch_size)
print "lv num rows:" + str(array_row_size)
lv_array = np.zeros([array_row_size, bneck_size])
for i in range(num_iters):
    feed_pc, feed_model_names, _ = all_pc_data.next_batch(batch_size)
    # latent_codes = ae.transform(feed_pc)  # could also switch to encoder_with_convs_and_symmetry in ae_template, though not necessary
    latent_codes, mask, noise = ae.transform(feed_pc)
    lv_array[i * batch_size:(i + 1) * batch_size, :] = latent_codes

np.savetxt(latent_vec_file, lv_array)
print("Latent codes:")
print(str(latent_codes))
print(mask)
pdb.set_trace()

# pdb.set_trace()

reconstructions = ae.reconstruct(feed_pc)
# shape2 = reconstructions[0][2,:,:]
# print "loss : " + str(reconstructions[1])
write_ply("airplane_ae.ply", reconstructions[0][1, :, :])
write_ply("airplane_ae2.ply", reconstructions[0][2, :, :])
write_ply("airplane_ae3.ply", reconstructions[0][3, :, :])
# write_ply("airplane4.ply",reconstructions[0][4,:,:])
# pdb.set_trace()
# print "reconstructed, shape:" + str(reconstructions.shape)

# Use any plotting mechanism such as matplotlib to visualize the results.
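
# A minimal matplotlib sketch for the comment above (kept commented out):
# scatter-plot one of the reconstructed clouds. `reconstructions[0]` is assumed to
# be an array of shape (batch_size, num_points, 3), as in the write_ply calls above.
# import matplotlib.pyplot as plt
# from mpl_toolkits.mplot3d import Axes3D  # noqa: F401, registers the 3d projection
# pc = reconstructions[0][1, :, :]
# fig = plt.figure()
# ax = fig.add_subplot(111, projection='3d')
# ax.scatter(pc[:, 0], pc[:, 1], pc[:, 2], s=2)
# ax.set_title('AE reconstruction')
# plt.show()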
# ---- Example no. 4 ----
    def _single_epoch_train(self,
                            batch,
                            masked_cloud,
                            epoch,
                            save_path='../data/gan_model/',
                            restore_epoch='99',
                            lc_weight=0.01):
        '''
        see: http://blog.aylien.com/introduction-generative-adversarial-networks-code-tensorflow/
             http://wiseodd.github.io/techblog/2016/09/17/gan-tensorflow/
        '''
        self.saver.restore(self.sess, save_path + '-' + restore_epoch)

        epoch_loss_l2 = 0.
        epoch_loss_g = 0.
        start_time = time.time()
        # final_lc_weight = 0.01

        is_training(True, session=self.sess)
        # lc_wt_mat = [0.0001, 0.0005,0.001,0.005,0.01,0.05]
        lc_wt_mat = [0.001]  #,0.1,0.5]
        g_losses = []
        l2_losses = []
        chd_losses = []
        norm_losses = []

        # for i in xrange(epoch):
        #     feed_dict = {self.gt_data: batch, self.lc_wt: 5}
        #     loss_l2, _,noise_params = self.sess.run([self.loss_l2, self.opt_l2,self.noise_params[0]], feed_dict=feed_dict)
        #     print("l2_loss:" + str(loss_l2))

        for l in lc_wt_mat:
            # self.sess.run(tf.assign(self.noise_params[0],noise_params))
            self.sess.run(tf.variables_initializer(self.noise_params))

            try:
                # Loop over all batches
                is_training(True, session=self.sess)
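                # Anneal the latent-consistency weight linearly from l down to
                # l / 10 over the run; lc_wt[i] is fed to the graph each iteration.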
                lc_wt = np.linspace(l, l / 10.0, epoch)
                for i in xrange(epoch):  # one iteration per entry in the lc_wt schedule
                    #pdb.set_trace()
                    feed_dict = {
                        self.gt_data: batch,
                        self.lc_wt: lc_wt[i],
                        self.masked_cloud: masked_cloud
                    }
                    loss_g, loss_l2, _, loss_norm = self.sess.run(
                        [
                            self.loss_g, self.loss_l2, self.opt_g,
                            self.loss_norm
                        ],
                        feed_dict=feed_dict)
                    if i % 1000 == 0:
                        print("l2 loss:" + str(loss_l2) + " g_loss:" +
                              str(loss_g) + " loss norm:" + str(loss_norm)
                              )  # + " loss chamfer:" + str(loss_chd) )
                reconstructions = self.sess.run(self.gen_reconstr,
                                                feed_dict=feed_dict)
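                # Build a "mixed" target cloud: keep the observed points from
                # masked_cloud and fill the remainder from the generator's
                # reconstruction. For every observed point, its nearest neighbour
                # in the reconstruction is deleted so the total point count is
                # preserved before the two sets are concatenated.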
                from sklearn.neighbors import NearestNeighbors as NN
                x_masked_recon = np.zeros(reconstructions.shape)
                pref = './recon_from_ac/'
                for k in range(10):  #was originally 5 ?
                    recons = reconstructions[k, :, :]
                    for pt in masked_cloud[k, :, :]:
                        nbrs = NN(n_neighbors=1,
                                  algorithm='kd_tree').fit(recons)
                        distances, indx = nbrs.kneighbors(np.expand_dims(
                            pt, 0))
                        recons = np.delete(recons, indx, 0)
                    #pdb.set_trace()
                    x_masked_recon[k, :, :] = np.concatenate(
                        [masked_cloud[k, :, :], recons], axis=0)
                for k in range(5):
                    write_ply(
                        pref + "airplane_test_" + str(0) + "_mixedmasked_" +
                        str(k) + "_.ply", x_masked_recon[k, :, :])

                mixed_masked = x_masked_recon
                for i in xrange(epoch):
                    #pdb.set_trace()
                    feed_dict = {
                        self.gt_data: batch,
                        self.lc_wt: lc_wt[i],
                        self.masked_cloud: mixed_masked
                    }
                    loss_g, loss_l2, _, loss_chd, loss_norm = self.sess.run(
                        [
                            self.loss_g, self.loss_l2, self.opt_cd,
                            self.loss_chd, self.loss_norm
                        ],
                        feed_dict=feed_dict)
                    if i % 1000 == 0:
                        print("l2 loss:" + str(loss_l2) + " g_loss:" +
                              str(loss_g) + "loss chamfer:" + str(loss_chd) +
                              " loss norm:" + str(loss_norm))

                    if (i % 10000 == 0 and i > 0):
                        ##every 10k epochs, update target##
                        print("Updating target point cloud...")
                        feed_dict = {
                            self.gt_data: batch,
                            self.lc_wt: lc_wt[i],
                            self.masked_cloud: mixed_masked
                        }
                        reconstructions = self.sess.run(self.gen_reconstr,
                                                        feed_dict=feed_dict)
                        for k in range(10):
                            recons = reconstructions[k, :, :]
                            for pt in masked_cloud[k, :, :]:
                                nbrs = NN(n_neighbors=1,
                                          algorithm='kd_tree').fit(recons)
                                distances, indx = nbrs.kneighbors(
                                    np.expand_dims(pt, 0))
                                recons = np.delete(recons, indx, 0)

                            mixed_masked[k, :, :] = np.concatenate(
                                [masked_cloud[k, :, :], recons], axis=0)

                    # Compute average loss
                    epoch_loss_l2 += loss_l2
                    epoch_loss_g += loss_g

                cleaned_vector = self.sess.run(self.generator_out)
                is_training(False, session=self.sess)

            except Exception:
                raise
            g_losses.append(loss_g)
            l2_losses.append(loss_l2)
            chd_losses.append(loss_chd)
            norm_losses.append(loss_norm)

            out_path = 'cleaned_aefull_wgan_chd_' + str(l) + '.txt'
            np.savetxt(out_path, cleaned_vector)
            print("cleaned vecs saved to " + out_path)

        print("final losses:")

        for i, l in enumerate(lc_wt_mat):
            print(l, "l2", l2_losses[i], "g_loss", g_losses[i], "chd_loss",
                  chd_losses[i], "norm_losses", norm_losses[i])
        if True:
            print "reconstructing from lvs"
            pref = './recon_from_ac/'
            reconstructions = self.sess.run(self.gen_reconstr)
            for i in range(10):  #save all 10 outputs
                write_ply(
                    pref + "airplane_test_wgan_chd" + str(l) + "_" + str(i) +
                    "_.ply", reconstructions[i, :, :])
        epoch_loss_l2 /= epoch
        epoch_loss_g /= epoch
        duration = time.time() - start_time
        return (epoch_loss_l2, epoch_loss_g), duration
    reconstructions = ae.reconstruct_with_mask(feed_pc)
    # shape2 = reconstructions[0][2,:,:]
    print "loss : " + str(reconstructions[1])

    # write_ply(pref+"airplane0_acrecon_upsampling.ply",reconstructions[0][0,:,:])
    # write_ply(pref+"airplane1_acrecon_upsampling.ply",reconstructions[0][1,:,:])
    # write_ply(pref+"airplane2_acrecon_upsampling.ply",reconstructions[0][2,:,:])
    # write_ply(pref+"airplane3_acrecon.ply",reconstructions[0][3,:,:])
    # write_ply(pref+"airplane4_acrecon.ply",reconstructions[0][4,:,:])
    # # pdb.set_trace()
    # print "reconstructed, shape:" + str(reconstructions.shape)
    # latent_codes = ae.transform(feed_pc)

else:
    print "reconstructing from lvs"
    pref = './recon_from_ac/'

    # lv_array  = np.loadtxt('/home/shubham/latent_3d_points/notebooks/cleaned_vector_0.01.txt')
    # lv_array  = np.loadtxt('/home/shubham/latent_3d_points/notebooks/cleaned_vector_test_0.01.txt')
    # lv_array  = np.loadtxt('/home/shubham/latent_3d_points/notebooks/test_lvs.txt') ##directly use input vecs
    # lv_array  = np.loadtxt('/home/shubham/latent_3d_points/data/single_class_ae/clean/lv_with_mask_5.txt') ##noisy vecs
    lv_array = np.loadtxt('cleaned_aefull_wgan_chd_0.001.txt')  # cleaned vectors saved by _single_epoch_train
    lv_batch = lv_array

    reconstructions = ae.decode(lv_batch)
    for i in range(5):
        write_ply(pref + "airplane_test_wgan_chd0.001_" + str(i) + "_.ply",
                  reconstructions[i, :, :])

# Use any plotting mechanism such as matplotlib to visualize the results.