training_epochs=train_params['training_epochs'], batch_size=train_params['batch_size'], denoising=train_params['denoising'], learning_rate=train_params['learning_rate'], train_dir=train_dir, loss_display_step=train_params['loss_display_step'], saver_step=train_params['saver_step'], z_rotate=train_params['z_rotate'], encoder=encoder, decoder=decoder, encoder_args=enc_args, decoder_args=dec_args, n_output=[2048, 3]) conf.experiment_name = experiment_name conf.held_out_step = 5 # How often to evaluate/print out loss on # held_out data (if they are provided in ae.train() ). conf.save(osp.join(train_dir, 'configuration')) # If you ran the above lines, you can reload a saved model like this: # In[9]: load_pre_trained_ae = True restore_epoch = 400 if load_pre_trained_ae: conf = Conf.load(train_dir + '/configuration') reset_tf_graph() ae = PointNetAutoEncoder(conf.experiment_name, conf) ae.restore_model(conf.train_dir, epoch=restore_epoch)
def evaluate(setup, results, models, targets_list, victims_list):
    """Evaluate attack/defense accuracy over every (target, victim) class pair.

    First builds (and restores from checkpoint) the latent_3d_points
    point-cloud autoencoder used as the reconstruction defense, storing it in
    ``models["ae"]``.  Then, for every ordered (target, victim) pair with
    target != victim, runs ``evaluate_all_shapes_scale`` batch by batch,
    accumulating the returned per-shape predictions and norms into
    ``ev_results`` and flushing everything to ``setup["results_file"]`` after
    each batch.

    Parameters
    ----------
    setup : dict
        Experiment settings; mutated in place ("target" and "victim" are
        overwritten on every iteration).  Must contain at least
        "batch_size" and "results_file".
    results : ListDict
        Previously collected results, appended only to the final save.
    models : dict
        Model registry; the restored autoencoder is inserted as models["ae"].
    targets_list, victims_list : iterable
        Class labels to iterate over as attack targets / victim shapes.

    Returns
    -------
    ListDict
        The accumulated evaluation rows (``ev_results``).
    """
    # NOTE(review): this file reached review with all line breaks collapsed;
    # the indentation below is a reconstruction of the original structure —
    # confirm loop/branch boundaries against the original source.
    top_out_dir = osp.join(BASE_DIR, "latent_3d_points", "data")
    # print(BASE_DIR)
    # Top-dir of where point-clouds are stored.
    top_in_dir = osp.join(BASE_DIR, "latent_3d_points", "data",
                          "shape_net_core_uniform_samples_2048")
    # NOTE(review): top_in_dir is assigned but never used below — presumably
    # left over from the notebook this function was copied out of.
    # experiment_name = 'single_class_ae'
    experiment_name = 'new_ae'
    n_pc_ppoints = 1024  # 2048     # Number of points per model.
    bneck_size = 128                # Bottleneck-AE size
    # Loss to optimize: 'emd' or 'chamfer'
    ae_loss = 'chamfer'
    train_params = default_train_params()
    encoder, decoder, enc_args, dec_args = mlp_architecture_ala_iclr_18(
        n_pc_ppoints, bneck_size)
    train_dir = create_dir(osp.join(top_out_dir, experiment_name))
    conf = Conf(n_input=[n_pc_ppoints, 3],
                loss=ae_loss,
                training_epochs=train_params['training_epochs'],
                batch_size=train_params['batch_size'],
                denoising=train_params['denoising'],
                learning_rate=train_params['learning_rate'],
                train_dir=train_dir,
                loss_display_step=train_params['loss_display_step'],
                saver_step=train_params['saver_step'],
                z_rotate=train_params['z_rotate'],
                encoder=encoder,
                decoder=decoder,
                encoder_args=enc_args,
                decoder_args=dec_args)
    conf.experiment_name = experiment_name
    conf.held_out_step = 5  # How often to evaluate/print out loss on
    # held_out data (if they are provided in ae.train() ).
    conf.save(osp.join(train_dir, 'configuration'))
    # Restore the pre-trained autoencoder instead of training from scratch.
    load_pre_trained_ae = True
    restore_epoch = 500
    if load_pre_trained_ae:
        conf = Conf.load(train_dir + '/configuration')
        reset_tf_graph()
        ae = PointNetAutoEncoder(conf.experiment_name, conf)
        ae.restore_model(conf.train_dir, epoch=restore_epoch, verbose=True)
    # NOTE(review): `ae` is unbound if load_pre_trained_ae is ever set False —
    # the next line would raise NameError in that case.
    models["ae"] = ae
    # Historical per-norm accumulators, kept commented for reference:
    # all_resulting_corrects = []
    # natural_L_2_norm_orig = []
    # natural_L_2_norm_adv = []
    # natural_L_2_norm_nat = []
    # natural_L_infty_norm_orig = []
    # natural_L_infty_norm_adv = []
    # natural_L_infty_norm_nat = []
    # L_2_norm_adv = []
    # L_2_norm_nat = []
    # L_infty_norm_adv = []
    # L_infty_norm_nat = []
    # L_cham_norm_adv = []
    # L_cham_norm_nat = []
    # L_emd_norm_adv = []
    # L_emd_norm_nat = []
    # natural_L_cham_norm_orig = []
    # natural_L_cham_norm_adv = []
    # natural_L_cham_norm_nat = []
    # Maps each recorded metric key to its human-readable description.
    # NOTE(review): several description strings look swapped or misspelled
    # (e.g. "adv_suc_r" says "accuracy" while "adv_acc_r" says "sucess rate",
    # and "baselin ereconstructed" / "adverserial" / "sucess" typos).  They
    # are runtime output strings, so they are left untouched here — confirm
    # and fix at the source if the reports are consumed downstream.
    accuracies_disc = {
        "orig_acc": "original accuracy on PointNet ",
        "adv_suc": "natural adverserial sucess rate on PointNet ",
        "adv_acc": "natural adverserial accuracy on PointNet ",
        "proj_acc": "projected accuracy on PointNet ",
        "rec_suc": "defended natural adverserial sucess rate on PointNet ",
        "rec_acc": "reconstructed defense accuracy on PointNet ",
        "orig_acc_pp": "original accuracy on PointNet_++ ",
        "orig_acc_gcn": "original accuracy on DGCN",
        "orig_acc_p": "original accuracy on PointNet_+ ",
        "adv_suc_pp": "natural adverserial sucess rate on PointNet_++ ",
        "adv_suc_gcn": "natural adverserial sucess rate on DGCN",
        "adv_suc_p": "natural adverserial sucess rate on PointNet_+ ",
        "adv_acc_pp": "natural adverserial accuracy on PointNet_++ ",
        "adv_acc_gcn": "natural adverserial accuracy on DGCN",
        "adv_acc_p": "natural adverserial accuracy on PointNet_+ ",
        "proj_acc_pp": "projected accuracy on PointNet_++ ",
        "proj_acc_gcn": "projected accuracy on DGCN",
        "proj_acc_p": "projected accuracy on PointNet_+ ",
        "rec_suc_pp": "defended natural adverserial sucess rate on PointNet_++ ",
        "rec_suc_gcn": "defended natural adverserial sucess rate on DGCN",
        "rec_suc_p": "defended natural adverserial sucess rate on PointNet_+ ",
        "rec_acc_pp": "reconstructed defense accuracy on PointNet_++ ",
        "rec_acc_gcn": "reconstructed defense accuracy on DGCN",
        "rec_acc_p": "reconstructed defense accuracy on PointNet_+ ",
        "b_adv_suc": "baseline adverserial sucess rate on PointNet ",
        "b_adv_acc": "baseline adverserial accuracy on PointNet ",
        "b_rec_suc": "baseline defended natural adverserial sucess rate on PointNet ",
        "b_rec_acc": "baselin ereconstructed defense accuracy on PointNet ",
        "b_adv_suc_pp": "baseline adverserial sucess rate on PointNet_++ ",
        "b_adv_suc_gcn": "baseline adverserial sucess rate on DGCN",
        "b_adv_suc_p": "baseline adverserial sucess rate on PointNet_+ ",
        "b_adv_acc_pp": "baseline adverserial accuracy on PointNet_++ ",
        "b_adv_acc_gcn": "baseline adverserial accuracy on DGCN",
        "b_adv_acc_p": "baseline adverserial accuracy on PointNet_+ ",
        "b_rec_suc_pp": "baseline defended natural adverserial sucess rate on PointNet_++ ",
        "b_rec_suc_gcn": "baseline defended natural adverserial sucess rate on DGCN",
        "b_rec_suc_p": "baseline defended natural adverserial sucess rate on PointNet_+ ",
        "b_rec_acc_pp": "baselin ereconstructed defense accuracy on PointNet_++ ",
        "b_rec_acc_gcn": "baselin ereconstructed defense accuracy on DGCN",
        "b_rec_acc_p": "baselin ereconstructed defense accuracy on PointNet_+ ",
        "orig_acc_r": "original accuracy under Random defense",
        "adv_suc_r": "natural adverserial accuracy under Random defense",
        "adv_acc_r": "natural adverserial sucess rate under Random defense",
        "b_adv_suc_r": "baseline adverserial accuracy under Random defense",
        "b_adv_acc_r": "baseline adverserial sucess rate under Random defense",
        "orig_acc_o": "original accuracy under Outlier defense",
        "adv_suc_o": "natural adverserial accuracy under Outlier defense",
        "adv_acc_o": "natural adverserial sucess rate under Outlier defense",
        "b_adv_suc_o": "baseline adverserial accuracy under Outlier defense",
        "b_adv_acc_o": "baseline adverserial sucess rate under Outlier defense",
        "orig_acc_bust": "original accuracy under Robust model",
        "adv_acc_bust": "natural adverserial accuracy under Robust model"
    }
    # accuracies_names = [
    #     "orig_acc", "adv_acc", "proj_acc", "rec_acc", "orig_acc_pp", , "orig_acc_p"
    #     "adv_acc_pp", "proj_acc_pp", "rec_acc_pp", "adv_acc_p", "proj_acc_p", "rec_acc_p""orig_acc_r",
    #     "adv_acc_r","orig_acc_o","adv_acc_o"]
    norms_names = ["natural_L_cham_norm_orig"]
    # NOTE(review): `dict.keys() + list` only works on Python 2 (keys() is a
    # list there); on Python 3 this raises TypeError.  Consistent with the
    # `xrange` usage elsewhere in this file — this module is Python 2 code.
    ev_results = ListDict(accuracies_disc.keys() + norms_names)
    # norms_results = ListDict(norms_names)
    setups = ListDict(setup.keys())
    # Write the (empty) header row up front so the file exists even if the
    # loops below crash part-way through.
    save_results(setup["results_file"], ev_results + setups)
    for target in targets_list:
        setup["target"] = target
        for victim in victims_list:
            if victim == setup["target"]:
                continue  # a class never attacks itself
            setup["victim"] = victim
            for batch_indx in range(int(setup["batch_size"])):
                predictions, norms = evaluate_all_shapes_scale(
                    batch_indx=batch_indx, setup=setup, models=models)
                # Record one setup row per shape in the batch.
                # NOTE(review): side-effect list comprehension, and every
                # appended row is the SAME mutable `setup` dict — later
                # mutations retroactively change earlier rows unless
                # ListDict.append copies its argument; verify.
                [setups.append(setup) for ii in range(setup["batch_size"])]
                # norms_results.remove(norms_results - ListDict(norms))
                # norms_results.partial_extend(ListDict(norms))
                # Drop any accumulated keys that this batch did not report,
                # then merge in the fresh predictions and norms.
                ev_results.remove(ev_results - ListDict(predictions) - ListDict(norms))
                ev_results.partial_extend(
                    ListDict(predictions)).partial_extend(ListDict(norms))
                # Checkpoint results after every batch.
                save_results(setup["results_file"], ev_results + setups)
    # Final save additionally includes the caller-provided `results`.
    save_results(setup["results_file"], ev_results + setups + results)
    return ev_results
conf = Conf(n_input=[n_pc_points, 3], loss=ae_loss, training_epochs=train_params['training_epochs'], batch_size=train_params['batch_size'], denoising=train_params['denoising'], learning_rate=train_params['learning_rate'], train_dir=train_dir, loss_display_step=train_params['loss_display_step'], saver_step=train_params['saver_step'], z_rotate=train_params['z_rotate'], encoder=encoder, decoder=decoder, encoder_args=enc_args, decoder_args=dec_args) conf.experiment_name = experiment_name conf.held_out_step = 5 # How often to evaluate/print out loss on held_out data (if any). reset_tf_graph() ae = PointNetAutoEncoder(experiment_name, conf) t18 = Tork18('/home/ceteke/Documents/datasets/tork18', 'pour') X, y = t18.get_dataset() if len(num_points) != len(train_sizes): num_points.append(int(len(X) * t)) ae.restore_model(osp.join(top_out_dir, experiment_name), 350, True) scores = 0.0 for _ in range(folds): X_train, X_test, y_train, y_test = train_test_split(X,
loss = ae_loss, training_epochs = train_params['training_epochs'], batch_size = train_params['batch_size'], denoising = train_params['denoising'], learning_rate = train_params['learning_rate'], loss_display_step = train_params['loss_display_step'], saver_step = train_params['saver_step'], z_rotate = train_params['z_rotate'], train_dir = train_dir, encoder = encoder, decoder = decoder, encoder_args = enc_args, decoder_args = dec_args ) conf.experiment_name = experiment_name conf.held_out_step = 5 reset_tf_graph() pdb.set_trace() ae = PointNetAutoEncoder(conf.experiment_name, conf) ae.restore_model(model_dir, 500) model_path = '../data/lgan_plane/G_network_299.pth' g = torch.load(model_path) batch_size=50 for i in xrange(100): noise=Variable(torch.cuda.FloatTensor(batch_size, 128)) generate_noise(noise) fake_x = g(noise).data.cpu().numpy() fake_pc = ae.decode(fake_x)