# held_out data (if they are provided in ae.train() ).
# Persist the training configuration so this run can be reloaded later.
conf.save(osp.join(train_dir, 'configuration'))

# If you ran the above lines, you can reload a saved model like this:

# In[9]:

load_pre_trained_ae = True
restore_epoch = 400
if load_pre_trained_ae:
    # Build the load path with osp.join (not string '+') so it matches the
    # save path above and stays portable across path separators.
    conf = Conf.load(osp.join(train_dir, 'configuration'))
    reset_tf_graph()  # clear any previously built graph before rebuilding
    ae = PointNetAutoEncoder(conf.experiment_name, conf)
    ae.restore_model(conf.train_dir, epoch=restore_epoch)

# Build AE Model.

# In[10]:

reset_tf_graph()
ae = PointNetAutoEncoder(conf.experiment_name, conf)

# Train the AE (save output to train_stats.txt)

# In[ ]:

buf_size = 1  # Make 'training_stats' file to flush each output line regarding training.
# Context manager guarantees the log file is closed even if ae.train() raises.
with open(osp.join(conf.train_dir, 'train_stats.txt'), 'a', buf_size) as fout:
    train_stats = ae.train(all_pc_data, conf, log_file=fout)
# held_out data (if they are provided in ae.train() ).
# Persist the training configuration so this run can be reloaded later.
conf.save(osp.join(train_dir, 'configuration'))

# If you ran the above lines, you can reload a saved model like this:

# In[9]:

load_pre_trained_ae = False
restore_epoch = 500
if load_pre_trained_ae:
    # Build the load path with osp.join (not string '+') so it matches the
    # save path above and stays portable across path separators.
    conf = Conf.load(osp.join(train_dir, 'configuration'))
    reset_tf_graph()  # clear any previously built graph before rebuilding
    ae = PointNetAutoEncoder(conf.experiment_name, conf)
    ae.restore_model(conf.train_dir, epoch=restore_epoch)

# Build AE Model.

# In[10]:

reset_tf_graph()
ae = PointNetAutoEncoder(conf.experiment_name, conf)

# Train the AE (save output to train_stats.txt)

# In[ ]:

buf_size = 1  # Make 'training_stats' file to flush each output line regarding training.
# Context manager guarantees the log file is closed even if ae.train() raises.
with open(osp.join(conf.train_dir, 'train_stats.txt'), 'a', buf_size) as fout:
    train_stats = ae.train(train_data, conf, held_out_data=val_data, log_file=fout)
# ===== Example 3 =====
            decoder = decoder,
            encoder_args = enc_args,
            decoder_args = dec_args,
            experiment_name = experiment_name,
            val_step = 5,
            test_step = 200
           )
            # How often to evaluate/print out loss on held_out data (if any). # epochs
# Persist the configuration next to the checkpoints so the run is reproducible.
conf.save(osp.join(train_dir, 'configuration'))

reset_tf_graph()
ae = PointNetAutoEncoder(conf.experiment_name, conf)

buf_size = 1  # flush each line
# Context manager guarantees the log file is closed even if ae.train() raises.
with open(osp.join(conf.train_dir, 'train_stats.txt'), 'a', buf_size) as fout:
    train_stats = ae.train(train_pc, conf, log_file=fout, val_data=val_pc, test_data=test_pc)


def _export_latents(dataset, out_dir, split_name):
    """Encode one data split with the trained AE and save it as hidden.npy.

    Returns the latent codes so callers can keep using them in memory.
    """
    print('On %s hidden transform' % split_name)
    points, _, _ = dataset.full_epoch_data()
    points = apply_augmentations(points, conf)  # same preprocessing as training
    latents = ae.transform(points)
    np.save(osp.join(out_dir, 'hidden.npy'), latents)
    return latents


train_hidden = _export_latents(train_pc, train_dir, 'train')
val_hidden = _export_latents(val_pc, val_dir, 'val')

# In[11]:


reset_tf_graph()
ae = PointNetAutoEncoder(conf.experiment_name, conf)


# Train the AE (save output to train_stats.txt)

# In[1]:


buf_size = 1  # Make 'training_stats' file to flush each output line regarding training.
# Context manager guarantees the log file is closed even if ae.train() raises.
with open(osp.join(conf.train_dir, 'train_stats.txt'), 'a', buf_size) as fout:
    train_stats = ae.train(all_pc_data, conf, log_file=fout, mask_type=0)


# Get a batch of reconstructions and their latent-codes.

# In[13]:


feed_pc, feed_model_names, _ = all_pc_data.next_batch(10)
reconstructions = ae.reconstruct(feed_pc)
latent_codes = ae.transform(feed_pc)


# Use any plotting mechanism such as matplotlib to visualize the results.
# ===== Example 5 =====
train_params['training_epochs'] = 100
encoder, decoder, enc_args, dec_args = washington_xyz_rgb(
    n_pc_points, bneck_size)

pcd_dataset = PointCloudDataSet(X, copy=False)

# Collect every training hyper-parameter into one configuration object so the
# whole run can be saved and reproduced.
conf = Conf(n_input=[n_pc_points, 3],
            loss=ae_loss,
            training_epochs=train_params['training_epochs'],
            batch_size=train_params['batch_size'],
            denoising=train_params['denoising'],
            learning_rate=train_params['learning_rate'],
            train_dir=train_dir,
            loss_display_step=train_params['loss_display_step'],
            saver_step=train_params['saver_step'],
            z_rotate=train_params['z_rotate'],
            encoder=encoder,
            decoder=decoder,
            encoder_args=enc_args,
            decoder_args=dec_args)

conf.experiment_name = experiment_name
conf.save(os.path.join(train_dir, 'configuration'))

reset_tf_graph()
ae = PointNetAutoEncoder(conf.experiment_name, conf)

buf_size = 1  # flush each line
# Context manager guarantees the log file is closed even if training raises.
with open(os.path.join(conf.train_dir, 'train_stats.txt'), 'a', buf_size) as fout:
    train_stats = ae.train(pcd_dataset, conf, log_file=fout)
# ===== Example 6 =====
                    batch_size=train_params['batch_size'],
                    denoising=train_params['denoising'],
                    learning_rate=train_params['learning_rate'],
                    train_dir=train_dir,
                    loss_display_step=train_params['loss_display_step'],
                    saver_step=train_params['saver_step'],
                    z_rotate=train_params['z_rotate'],
                    encoder=encoder,
                    decoder=decoder,
                    encoder_args=enc_args,
                    decoder_args=dec_args)
        conf.experiment_name = experiment_name
        conf.held_out_step = 50  # How often to evaluate/print out loss on
        # NOTE(review): conf is saved under train_dir but then immediately
        # reloaded from osp.join(top_out_dir, experiment_name); presumably the
        # two paths are the same directory -- confirm, otherwise the freshly
        # built conf object is silently discarded here.
        conf.save(osp.join(train_dir, 'configuration'))
        conf = Conf.load(
            osp.join(top_out_dir, experiment_name) + '/configuration')

        # Start from a clean TensorFlow graph before building the autoencoder.
        reset_tf_graph()
        ae = PointNetAutoEncoder(conf.experiment_name, conf)
    # Optionally warm-start from a previously saved checkpoint.
    if load_pre_trained_ae:
        ae.restore_model(conf.train_dir, epoch=restore_epoch)

    if TRAIN:
        buf_size = 1  # Make 'training_stats' file to flush each output line regarding training.
        fout = open(osp.join(conf.train_dir, 'train_stats.txt'), 'a', buf_size)
        # Train with val_pc_data passed as the held-out set; its loss is
        # reported per conf.held_out_step set above.
        train_stats = ae.train(train_pc_data,
                               conf,
                               log_file=fout,
                               held_out_data=val_pc_data)
        fout.close()