# In[11]:


# Start from a clean TF graph and build the point-cloud autoencoder
# from the loaded configuration.
reset_tf_graph()
ae = PointNetAutoEncoder(conf.experiment_name, conf)


# Train the AE (save output to train_stats.txt)

# In[1]:


buf_size = 1  # Line-buffered: flush each training-stats line as it is written.
# Use 'with' so the log file is closed even if ae.train() raises
# (the original open()/close() pair leaked the handle on error).
with open(osp.join(conf.train_dir, 'train_stats.txt'), 'a', buf_size) as fout:
    train_stats = ae.train(all_pc_data, conf, log_file=fout, mask_type=0)


# Get a batch of reconstructions and their latent-codes.

# In[13]:


feed_pc, feed_model_names, _ = all_pc_data.next_batch(10)
reconstructions = ae.reconstruct(feed_pc)
latent_codes = ae.transform(feed_pc)


# Use any plotting mechanism such as matplotlib to visualize the results.
# Example #2 — the fragment below was extracted from a separate script.
    # NOTE(review): this is the interior of a function whose definition lies
    # outside this chunk; `args`, DATASET, MODEL_DIR, RESTORE_EPOCH, Conf,
    # reset_tf_graph and PointNetAutoEncoder are defined elsewhere.
    enc = np.load(args["enc"])
    # NOTE(review): stray triple-quote below — looks like a truncated
    # commented-out block lost in extraction; confirm against the original.
    '''
    # Load the point clouds and their matching model names for this dataset.
    pcs = np.load("output/{}_pcs.npy".format(DATASET))
    names = np.load("output/{}_names.npy".format(DATASET))

    # Rebuild the autoencoder from its saved configuration, silencing the
    # per-layer verbose output of encoder and decoder.
    reset_tf_graph()
    ae_configuration = MODEL_DIR + '/configuration'
    ae_conf = Conf.load(ae_configuration)
    ae_conf.encoder_args['verbose'] = False
    ae_conf.decoder_args['verbose'] = False
    ae = PointNetAutoEncoder(ae_conf.experiment_name, ae_conf)

    # Restore trained weights for the requested epoch.
    ae.restore_model(MODEL_DIR, RESTORE_EPOCH, verbose=True)

    # Reconstruct each cloud one at a time, collecting the per-cloud loss
    # (each cloud is its own ground truth here).
    recs = []
    rec_losses = []
    for pc in pcs:
        pc = np.expand_dims(pc, axis=0)  # add a leading batch axis
        rec, l = ae.reconstruct(pc, GT=pc, compute_loss=True)
        recs.append(rec)
        rec_losses.append(l)

    # Persist losses and reconstructions for offline inspection.
    np.save("output/{}_rec_loss".format(DATASET), np.array(rec_losses))
    np.save("output/{}_recs".format(DATASET), np.array(recs))

    #for id in range(0,3):
    #for id in range(0,len(reconstructions)):
    #print(names[id])
    #points2file(reconstructions[id],"output/rec_{}".format(names[id]))
# In[11]:


# Start from a clean TF graph and build the point-cloud autoencoder
# from the loaded configuration.
reset_tf_graph()
ae = PointNetAutoEncoder(conf.experiment_name, conf)


# Train the AE (save output to train_stats.txt)

# In[1]:


buf_size = 1  # Line-buffered: flush each training-stats line as it is written.
# Use 'with' so the log file is closed even if ae.train() raises
# (the original open()/close() pair leaked the handle on error).
with open(osp.join(conf.train_dir, 'train_stats.txt'), 'a', buf_size) as fout:
    train_stats = ae.train(all_pc_data, conf, log_file=fout)


# Get a batch of reconstructions and their latent-codes.

# In[13]:


# Partial clouds are fed to the AE; the full clouds serve as ground truth.
feed_pc, feed_model_names, feed_part_pc = all_pc_data.part_next_batch(30)
reconstructions = ae.reconstruct(feed_part_pc, GT=feed_pc)
latent_codes = ae.transform(feed_part_pc)


# Use any plotting mechanism such as matplotlib to visualize the results.
        # NOTE(review): truncated call — the opening of this call (and the
        # enclosing function) lies outside this chunk; the line below holds
        # only its trailing keyword arguments.
        class_dir, n_threads=8, file_ending='.npy', max_num_points=2048, verbose=True, normalize=args.normalize_shape)


    # Pull the whole dataset in one pass and split each cloud into an input
    # slice (first n_pc_points points) and a reference slice (last
    # n_pc_points points).
    feed_pc, _, _ = all_pc_data.full_epoch_data()
    feed_pc_tr_all = feed_pc[:, :n_pc_points]
    feed_pc_te_all = feed_pc[:, -n_pc_points:]
    print(feed_pc_tr_all.shape)
    print(feed_pc_te_all.shape)

    print("Gather samples")
    # Reconstruct each input cloud individually; keep the matching reference
    # slice alongside it for later evaluation.
    all_sample = []
    all_ref = []
    for i in range(feed_pc_tr_all.shape[0]):
        feed_pc_tr = feed_pc_tr_all[i:i+1]  # i:i+1 keeps a singleton batch axis
        feed_pc_te = feed_pc_te_all[i:i+1]
        reconstructions = ae.reconstruct(feed_pc_tr)[0]
        all_sample.append(reconstructions)
        all_ref.append(feed_pc_te)
    all_sample = np.concatenate(all_sample)
    all_ref = np.concatenate(all_ref)
    print(all_sample.shape, all_ref.shape)

    # Persist reconstructions ("samples") and references for offline metrics.
    sample_save_path = os.path.join(conf.train_dir, '%s_%s_all_sample.npy'%(class_name, syn_id))
    np.save(sample_save_path, all_sample)
    print("Samples save path:%s"%sample_save_path)

    reference_save_path = os.path.join(conf.train_dir, '%s_%s_all_reference.npy'%(class_name, syn_id))
    np.save(reference_save_path, all_ref)
    print("Reference save path:%s"%reference_save_path)

    # from latent_3d_points.src.evaluation_metrics_fast import MMD_COV_EMD_CD