Example #1
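Both examples appear to be excerpts from training scripts built on the latent_3d_points codebase. A minimal sketch of the imports they assume (module paths follow that package's layout; load_dataset, flags, n_pc_points, train_params, encoder/decoder and the *_args dictionaries are expected to be defined elsewhere in the surrounding script):

import os.path as osp

from latent_3d_points.src.autoencoder import Configuration as Conf
from latent_3d_points.src.point_net_ae import PointNetAutoEncoder
from latent_3d_points.src.tf_utils import reset_tf_graph
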
conf = Conf(n_input=[n_pc_points, 3],
            loss=ae_loss,
            training_epochs=train_params['training_epochs'],
            batch_size=train_params['batch_size'],
            denoising=train_params['denoising'],
            learning_rate=train_params['learning_rate'],
            train_dir=train_dir,
            loss_display_step=train_params['loss_display_step'],
            saver_step=train_params['saver_step'],
            z_rotate=train_params['z_rotate'],
            encoder=encoder,
            decoder=decoder,
            encoder_args=enc_args,
            decoder_args=dec_args)
conf.experiment_name = experiment_name
conf.held_out_step = 5  # How often to evaluate/print the loss on held-out data (if provided)
conf.object_class = object_class
conf.class_names = class_names
conf.sort_axes = flags.sort_axes
conf.encoder_args['return_layer_before_symmetry'] = True
conf.save(osp.join(train_dir, 'configuration'))

if flags.save_config_and_exit:
    exit()

# Load point clouds
pc_data_train, _, _ = load_dataset(class_names, 'train_set', top_in_dir)
pc_data_val, _, _ = load_dataset(class_names, 'val_set', top_in_dir)

# Sort point cloud axes
if flags.sort_axes:
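The excerpt ends inside the sort_axes branch. Purely as an illustration (not the original implementation), one way such a step could look, assuming sort_axes means reordering each cloud's coordinate axes by decreasing variance and that the loaded data are plain (num_clouds, num_points, 3) NumPy arrays; the helper name sort_point_cloud_axes is hypothetical:

import numpy as np

def sort_point_cloud_axes(point_clouds):
    # point_clouds: array of shape (num_clouds, num_points, 3).
    sorted_clouds = np.empty_like(point_clouds)
    for i, pc in enumerate(point_clouds):
        # Order this cloud's x/y/z axes by decreasing variance.
        order = np.argsort(pc.var(axis=0))[::-1]
        sorted_clouds[i] = pc[:, order]
    return sorted_clouds

pc_data_train = sort_point_cloud_axes(pc_data_train)
pc_data_val = sort_point_cloud_axes(pc_data_val)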
Example #2
conf = Conf(n_input=[n_pc_points, 3],
            loss=ae_loss,
            training_epochs=train_params['training_epochs'],
            batch_size=train_params['batch_size'],
            denoising=train_params['denoising'],
            learning_rate=train_params['learning_rate'],
            train_dir=train_dir,
            loss_display_step=train_params['loss_display_step'],
            saver_step=train_params['saver_step'],
            z_rotate=train_params['z_rotate'],
            encoder=encoder,
            decoder=decoder,
            encoder_args=enc_args,
            decoder_args=dec_args)
conf.experiment_name = experiment_name
conf.held_out_step = 5  # How often to evaluate/print the loss on held-out data (if provided to ae.train()).
conf.save(osp.join(train_dir, 'configuration'))

# Optionally restore a previously trained autoencoder from its saved configuration and checkpoint.
load_pre_trained_ae = False
restore_epoch = 500
if load_pre_trained_ae:
    conf = Conf.load(train_dir + '/configuration')
    reset_tf_graph()
    ae = PointNetAutoEncoder(conf.experiment_name, conf)
    ae.restore_model(conf.train_dir, epoch=restore_epoch)

# Build a fresh TensorFlow graph and instantiate the autoencoder to be trained.
reset_tf_graph()
ae = PointNetAutoEncoder(conf.experiment_name, conf)

buf_size = 1  # Line-buffer the 'training_stats' file so each line written during training is flushed immediately.
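In the reference notebook this library ships with, the script typically continues by opening the stats file and launching training. A sketch along those lines (the file name 'train_stats.txt' is an assumption, and a training set such as pc_data_train from Example #1 must already be loaded):

fout = open(osp.join(conf.train_dir, 'train_stats.txt'), 'a', buf_size)
train_stats = ae.train(pc_data_train, conf, log_file=fout)
fout.close()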