# Assumed imports, following the latent_3d_points codebase layout; top_in_dir
# and top_out_dir are expected to be defined at module level.
import os.path as osp

from latent_3d_points.src.ae_templates import mlp_architecture_ala_iclr_18, \
    default_train_params
from latent_3d_points.src.autoencoder import Configuration as Conf
from latent_3d_points.src.in_out import snc_category_to_synth_id, create_dir
from latent_3d_points.src.point_net_ae import PointNetAutoEncoder
from latent_3d_points.src.tf_utils import reset_tf_graph


def load(model_path, z_rotate, num_points, point_dimension=3):
    model_dir = osp.dirname(model_path)
    # The epoch index is the token after '-' in the checkpoint basename,
    # e.g. 'models.ckpt-500' -> 500.
    model_epoch = int(osp.basename(model_path).split('-')[1])
    # Recover the experiment name from the parent directory, e.g.
    # 'train_single_class_ae_plane_chamfer_z_rotate' ->
    # 'single_class_ae_plane_chamfer_z_rotate'.
    experiment_name = osp.basename(osp.dirname(model_path)).split('train_')[1]
    bneck_size = 128     # Bottleneck-AE size
    ae_loss = 'chamfer'  # Loss to optimize: 'emd' or 'chamfer'
    class_name = "airplane"
    syn_id = snc_category_to_synth_id()[class_name]
    # e.g. .../data/shape_net_core_uniform_samples_2048/02691156
    class_dir = osp.join(top_in_dir, syn_id)

    train_dir = create_dir(osp.join(top_out_dir, experiment_name))
    train_params = default_train_params()
    encoder, decoder, enc_args, dec_args = mlp_architecture_ala_iclr_18(
        num_points, bneck_size, point_dimension=point_dimension)

    conf = Conf(n_input=[num_points, point_dimension],
                loss=ae_loss,
                training_epochs=train_params['training_epochs'],
                batch_size=train_params['batch_size'],
                denoising=train_params['denoising'],
                learning_rate=train_params['learning_rate'],
                loss_display_step=train_params['loss_display_step'],
                saver_step=train_params['saver_step'],
                z_rotate=z_rotate == 'True',  # z_rotate arrives as a string flag
                train_dir=train_dir,
                encoder=encoder,
                decoder=decoder,
                encoder_args=enc_args,
                decoder_args=dec_args,
                experiment_name=experiment_name,
                allow_gpu_growth=True)
    reset_tf_graph()
    ae = PointNetAutoEncoder(conf.experiment_name, conf)
    ae.restore_model(model_dir, model_epoch)
    return ae, conf
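
# Minimal usage sketch for load(); the checkpoint path below is hypothetical.
# Note that z_rotate is passed as the string 'True'/'False', which the
# function compares against 'True'.
ae, conf = load('data/train_single_class_ae_plane_chamfer_z_rotate/models.ckpt-500',
                z_rotate='True', num_points=2048)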
# In[7]:

encoder, decoder, enc_args, dec_args = mlp_architecture_ala_iclr_18(
    n_pc_points, bneck_size)
train_dir = create_dir(osp.join(top_out_dir, experiment_name))

# In[8]:

conf = Conf(n_input=[n_pc_points, 3],
            loss=ae_loss,
            training_epochs=train_params['training_epochs'],
            batch_size=train_params['batch_size'],
            denoising=train_params['denoising'],
            learning_rate=train_params['learning_rate'],
            train_dir=train_dir,
            loss_display_step=train_params['loss_display_step'],
            saver_step=train_params['saver_step'],
            z_rotate=train_params['z_rotate'],
            encoder=encoder,
            decoder=decoder,
            encoder_args=enc_args,
            decoder_args=dec_args,
            n_output=[2048, 3])

conf.experiment_name = experiment_name
conf.held_out_step = 5  # how often (in epochs) to evaluate/print loss on
# held-out data (if provided to ae.train())
conf.save(osp.join(train_dir, 'configuration'))

# If you ran the above lines, you can reload a saved model like this:
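# (A sketch mirroring the reload pattern used in evaluate() further below;
# restore_epoch must match a saved checkpoint.)
load_pre_trained_ae = True
restore_epoch = 500
if load_pre_trained_ae:
    conf = Conf.load(osp.join(train_dir, 'configuration'))
    reset_tf_graph()
    ae = PointNetAutoEncoder(conf.experiment_name, conf)
    ae.restore_model(conf.train_dir, epoch=restore_epoch, verbose=True)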
Example 3

train_params = default_train_params()  # returns a dict of default training hyperparameters
encoder, decoder, enc_args, dec_args = mlp_architecture_ala_iclr_18(n_pc_points, bneck_size)


conf = Conf(n_input = [n_pc_points, 3],
            loss = ae_loss,
            training_epochs = 3000, #train_params['training_epochs'],
            batch_size = train_params['batch_size'],
            denoising = train_params['denoising'],
            learning_rate = train_params['learning_rate'],
            train_dir = train_dir,
            test_dir = test_dir,
            val_dir = val_dir,
            loss_display_step = train_params['loss_display_step'],
            saver_step = train_params['saver_step'],
            z_rotate = z_rotate == 'True', #train_params['z_rotate'],
            encoder = encoder,
            decoder = decoder,
            encoder_args = enc_args,
            decoder_args = dec_args,
            experiment_name = experiment_name,
            val_step = 5,    # how often (in epochs) to evaluate/print loss on validation data
            test_step = 200  # how often (in epochs) to evaluate on test data
           )
conf.save(osp.join(train_dir, 'configuration'))

reset_tf_graph()
ae = PointNetAutoEncoder(conf.experiment_name, conf)
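
# Training sketch: pc_data is assumed to be a PointCloudDataSet loaded earlier
# (e.g. via load_all_point_clouds_under_folder from latent_3d_points.src.in_out);
# 'train_stats.txt' is an assumed log-file name.
buf_size = 1  # flush the log after every line
fout = open(osp.join(conf.train_dir, 'train_stats.txt'), 'a', buf_size)
train_stats = ae.train(pc_data, conf, log_file=fout)
fout.close()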
Example 4
def evaluate(setup, results, models, targets_list, victims_list):

    top_out_dir = osp.join(BASE_DIR, "latent_3d_points", "data")

    # Top-dir of where point-clouds are stored.
    top_in_dir = osp.join(BASE_DIR, "latent_3d_points", "data",
                          "shape_net_core_uniform_samples_2048")

    # experiment_name = 'single_class_ae'
    experiment_name = 'new_ae'

    n_pc_points = 1024  # 2048             # Number of points per model.
    bneck_size = 128                       # Bottleneck-AE size
    ae_loss = 'chamfer'                    # Loss to optimize: 'emd' or 'chamfer'
    train_params = default_train_params()
    encoder, decoder, enc_args, dec_args = mlp_architecture_ala_iclr_18(
        n_pc_points, bneck_size)
    train_dir = create_dir(osp.join(top_out_dir, experiment_name))

    conf = Conf(n_input=[n_pc_points, 3],
                loss=ae_loss,
                training_epochs=train_params['training_epochs'],
                batch_size=train_params['batch_size'],
                denoising=train_params['denoising'],
                learning_rate=train_params['learning_rate'],
                train_dir=train_dir,
                loss_display_step=train_params['loss_display_step'],
                saver_step=train_params['saver_step'],
                z_rotate=train_params['z_rotate'],
                encoder=encoder,
                decoder=decoder,
                encoder_args=enc_args,
                decoder_args=dec_args)
    conf.experiment_name = experiment_name
    conf.held_out_step = 5  # how often (in epochs) to evaluate/print loss on
    # held-out data (if provided to ae.train())
    conf.save(osp.join(train_dir, 'configuration'))

    load_pre_trained_ae = True
    restore_epoch = 500
    if load_pre_trained_ae:
        conf = Conf.load(train_dir + '/configuration')
        reset_tf_graph()
        ae = PointNetAutoEncoder(conf.experiment_name, conf)
        ae.restore_model(conf.train_dir, epoch=restore_epoch, verbose=True)
        models["ae"] = ae

    accuracies_disc = {
        "orig_acc": "original accuracy on PointNet",
        "adv_suc": "natural adversarial success rate on PointNet",
        "adv_acc": "natural adversarial accuracy on PointNet",
        "proj_acc": "projected accuracy on PointNet",
        "rec_suc": "defended natural adversarial success rate on PointNet",
        "rec_acc": "reconstructed defense accuracy on PointNet",
        "orig_acc_pp": "original accuracy on PointNet_++",
        "orig_acc_gcn": "original accuracy on DGCN",
        "orig_acc_p": "original accuracy on PointNet_+",
        "adv_suc_pp": "natural adversarial success rate on PointNet_++",
        "adv_suc_gcn": "natural adversarial success rate on DGCN",
        "adv_suc_p": "natural adversarial success rate on PointNet_+",
        "adv_acc_pp": "natural adversarial accuracy on PointNet_++",
        "adv_acc_gcn": "natural adversarial accuracy on DGCN",
        "adv_acc_p": "natural adversarial accuracy on PointNet_+",
        "proj_acc_pp": "projected accuracy on PointNet_++",
        "proj_acc_gcn": "projected accuracy on DGCN",
        "proj_acc_p": "projected accuracy on PointNet_+",
        "rec_suc_pp": "defended natural adversarial success rate on PointNet_++",
        "rec_suc_gcn": "defended natural adversarial success rate on DGCN",
        "rec_suc_p": "defended natural adversarial success rate on PointNet_+",
        "rec_acc_pp": "reconstructed defense accuracy on PointNet_++",
        "rec_acc_gcn": "reconstructed defense accuracy on DGCN",
        "rec_acc_p": "reconstructed defense accuracy on PointNet_+",
        "b_adv_suc": "baseline adversarial success rate on PointNet",
        "b_adv_acc": "baseline adversarial accuracy on PointNet",
        "b_rec_suc": "baseline defended natural adversarial success rate on PointNet",
        "b_rec_acc": "baseline reconstructed defense accuracy on PointNet",
        "b_adv_suc_pp": "baseline adversarial success rate on PointNet_++",
        "b_adv_suc_gcn": "baseline adversarial success rate on DGCN",
        "b_adv_suc_p": "baseline adversarial success rate on PointNet_+",
        "b_adv_acc_pp": "baseline adversarial accuracy on PointNet_++",
        "b_adv_acc_gcn": "baseline adversarial accuracy on DGCN",
        "b_adv_acc_p": "baseline adversarial accuracy on PointNet_+",
        "b_rec_suc_pp": "baseline defended natural adversarial success rate on PointNet_++",
        "b_rec_suc_gcn": "baseline defended natural adversarial success rate on DGCN",
        "b_rec_suc_p": "baseline defended natural adversarial success rate on PointNet_+",
        "b_rec_acc_pp": "baseline reconstructed defense accuracy on PointNet_++",
        "b_rec_acc_gcn": "baseline reconstructed defense accuracy on DGCN",
        "b_rec_acc_p": "baseline reconstructed defense accuracy on PointNet_+",
        "orig_acc_r": "original accuracy under Random defense",
        "adv_suc_r": "natural adversarial success rate under Random defense",
        "adv_acc_r": "natural adversarial accuracy under Random defense",
        "b_adv_suc_r": "baseline adversarial success rate under Random defense",
        "b_adv_acc_r": "baseline adversarial accuracy under Random defense",
        "orig_acc_o": "original accuracy under Outlier defense",
        "adv_suc_o": "natural adversarial success rate under Outlier defense",
        "adv_acc_o": "natural adversarial accuracy under Outlier defense",
        "b_adv_suc_o": "baseline adversarial success rate under Outlier defense",
        "b_adv_acc_o": "baseline adversarial accuracy under Outlier defense",
        "orig_acc_bust": "original accuracy under Robust model",
        "adv_acc_bust": "natural adversarial accuracy under Robust model"
    }

    norms_names = ["natural_L_cham_norm_orig"]
    ev_results = ListDict(accuracies_disc.keys() + norms_names)
    setups = ListDict(setup.keys())
    save_results(setup["results_file"], ev_results + setups)
    for target in targets_list:
        setup["target"] = target
        for victim in victims_list:
            if victim == setup["target"]:
                continue
            setup["victim"] = victim
            for batch_indx in range(int(setup["batch_size"])):
                predictions, norms = evaluate_all_shapes_scale(
                    batch_indx=batch_indx, setup=setup, models=models)
                # Record one copy of the setup per evaluated shape.
                for _ in range(int(setup["batch_size"])):
                    setups.append(setup)
                ev_results.remove(ev_results - ListDict(predictions) -
                                  ListDict(norms))
                ev_results.partial_extend(
                    ListDict(predictions)).partial_extend(ListDict(norms))
                save_results(setup["results_file"], ev_results + setups)

    save_results(setup["results_file"], ev_results + setups + results)
    return ev_results
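
# Hypothetical driver for evaluate(): every setup key and the empty containers
# below are assumptions inferred only from how evaluate() reads them above.
# setup = {"results_file": osp.join(top_out_dir, "results.csv"), "batch_size": 4}
# models, results = {}, ListDict([])
# ev_results = evaluate(setup, results, models,
#                       targets_list=["chair"], victims_list=["airplane"])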
Example 5
encoder, decoder, enc_args, dec_args = mlp_architecture_ala_iclr_18(
    n_pc_points, bneck_size)
train_dir = create_dir(osp.join(top_out_dir, experiment_name))

print(enc_args)
print(dec_args)

conf = Conf(
    n_input=[n_pc_points, 3],
    loss=ae_loss,
    # training_epochs = train_params['training_epochs'],
    training_epochs=600,
    batch_size=train_params['batch_size'],
    denoising=train_params['denoising'],
    learning_rate=train_params['learning_rate'],
    train_dir=train_dir,
    loss_display_step=train_params['loss_display_step'],
    saver_step=train_params['saver_step'],
    saver_max_to_keep=20,
    z_rotate=train_params['z_rotate'],
    encoder=encoder,
    decoder=decoder,
    encoder_args=enc_args,
    decoder_args=dec_args,
    adv_ae=False)
conf.experiment_name = experiment_name
conf.held_out_step = 5  # how often (in epochs) to evaluate/print loss on
# held-out data, if provided to ae.train(); useful when training only on train data
conf.save(osp.join(train_dir, 'configuration'))

reset_tf_graph()
ae = PointNetAutoEncoder(conf.experiment_name, conf)
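
# Usage sketch after restoring trained weights: feed_pc is assumed to be a
# numpy array of shape (batch, n_pc_points, 3), and the epoch below is
# hypothetical. reconstruct() returns the reconstructed clouds (and loss);
# transform() returns the bottleneck codes.
ae.restore_model(conf.train_dir, epoch=500)
reconstructions = ae.reconstruct(feed_pc)[0]
latent_codes = ae.transform(feed_pc)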