def train(self, train_data, configuration, log_file=None, held_out_data=None):
    c = configuration
    stats = []

    if c.saver_step is not None:
        create_dir(c.train_dir)

    for _ in range(c.training_epochs):
        loss, duration = self._single_epoch_train(train_data, c)
        epoch = int(self.sess.run(self.increment_epoch))
        stats.append((epoch, loss, duration))

        if epoch % c.loss_display_step == 0:
            print("Epoch:", '%04d' % (epoch), 'training time (minutes)=',
                  "{:.4f}".format(duration / 60.0), "loss=", "{:.9f}".format(loss))
            if log_file is not None:
                log_file.write('%04d\t%.9f\t%.4f\n' % (epoch, loss, duration / 60.0))

        # Save the model's checkpoint periodically.
        if c.saver_step is not None and (epoch % c.saver_step == 0 or epoch - 1 == 0):
            checkpoint_path = osp.join(c.train_dir, MODEL_SAVER_ID)
            self.saver.save(self.sess, checkpoint_path, global_step=self.epoch)

        if c.exists_and_is_not_none('summary_step') and (
                epoch % c.summary_step == 0 or epoch - 1 == 0):
            summary = self.sess.run(self.merged_summaries)
            self.train_writer.add_summary(summary, epoch)

        if held_out_data is not None and c.exists_and_is_not_none(
                'held_out_step') and (epoch % c.held_out_step == 0):
            loss, duration = self._single_epoch_train(held_out_data, c, only_fw=True)
            print("Held Out Data :", 'forward time (minutes)=',
                  "{:.4f}".format(duration / 60.0), "loss=", "{:.9f}".format(loss))
            if log_file is not None:
                log_file.write('On Held_Out: %04d\t%.9f\t%.4f\n' %
                               (epoch, loss, duration / 60.0))
    return stats
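# A minimal, self-contained sketch (not part of the original class) of how the
# tab-separated log written by train() above could be read back for plotting.
# The file name 'train_stats.txt' is a hypothetical example; held-out lines are
# skipped because they use the 'On Held_Out:' prefix seen above.
import numpy as np

def parse_train_log(log_path):
    """Return (epochs, losses, minutes) arrays from a train() log file."""
    epochs, losses, minutes = [], [], []
    with open(log_path) as f:
        for line in f:
            if line.startswith('On Held_Out:'):
                continue  # held-out evaluation lines use a different format
            epoch, loss, duration = line.strip().split('\t')
            epochs.append(int(epoch))
            losses.append(float(loss))
            minutes.append(float(duration))
    return np.array(epochs), np.array(losses), np.array(minutes)

# Example: epochs, losses, minutes = parse_train_log('train_stats.txt')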
def visualize(experiment_name, class_name, gts, recs, vgts, vrecs, n=5):
    path = os.path.join('./html', experiment_name)
    create_dir(path)
    for i, [gt, rec, vgt, vrec] in enumerate(zip(gts, recs, vgts, vrecs)):
        trace = get_trace(gt[:, 0], gt[:, 1], gt[:, 2], 'input (gt)')  # GT (input), blue
        trace1 = get_trace(rec[:, 0], rec[:, 1], rec[:, 2], 'recon')  # reconstructed by AE, red
        scene = get_scene(gt, rec)
        layout = dict(height=600, width=600, scene=scene,
                      title='_'.join([experiment_name, class_name, 'AE']))
        fig = dict(data=[trace, trace1], layout=layout)
        offline.plot(fig,
                     filename=os.path.join('./html', experiment_name,
                                           '_'.join([class_name, 'AE', str(i) + '.html'])))

        trace0 = get_trace(vgt[:, 0], vgt[:, 1], vgt[:, 2], 'input')  # input (visible surface), blue
        trace1 = get_trace(vrec[:, 0], vrec[:, 1], vrec[:, 2], 'recon')  # reconstructed by TL, red
        scene = get_scene(gt, rec)
        layout = dict(height=600, width=600, scene=scene,
                      title='_'.join([experiment_name, class_name, 'TL']))
        fig = dict(data=[trace, trace1, trace0], layout=layout)
        offline.plot(fig,
                     filename=os.path.join('./html', experiment_name,
                                           '_'.join([class_name, 'TL', str(i) + '.html'])))

        if i >= n - 1:
            break
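# A hedged sketch (not the repo's actual helpers) of what the get_trace/get_scene
# functions used above might look like with plotly: get_trace wraps a point cloud
# in a Scatter3d trace, and get_scene fixes a shared axis range so the ground truth
# and the reconstruction are drawn in a comparable frame.
import numpy as np
import plotly.graph_objs as go

def get_trace_sketch(x, y, z, name):
    # One 3D scatter trace per point cloud; small markers keep 2048 points readable.
    return go.Scatter3d(x=x, y=y, z=z, mode='markers',
                        marker=dict(size=2), name=name)

def get_scene_sketch(gt, rec):
    # Common axis range covering both clouds, so the two subplots are comparable.
    pts = np.concatenate([gt, rec], axis=0)
    lo, hi = float(pts.min()), float(pts.max())
    axis = dict(range=[lo, hi])
    return dict(xaxis=axis, yaxis=axis, zaxis=axis, aspectmode='cube')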
    'target', 'adversarial', 'source', 'before_defense', 'after_defense'
], 'wrong data_type: %s.' % flags.data_type

# define basic parameters
top_out_dir = osp.dirname(osp.dirname(
    osp.abspath(__file__)))  # Use to save Neural-Net check-points etc.
data_path = osp.join(top_out_dir, flags.ae_folder, 'eval')
files = [
    f for f in os.listdir(data_path) if osp.isfile(osp.join(data_path, f))
]
classifier_path = osp.join(top_out_dir, flags.classifier_folder)

if flags.data_type == 'target':
    classifier_data_path = osp.join(data_path, flags.attack_folder)
    output_path = create_dir(
        osp.join(classifier_data_path, flags.output_folder_name + '_orig'))
elif flags.data_type == 'adversarial':
    classifier_data_path = osp.join(data_path, flags.attack_folder)
    output_path = create_dir(
        osp.join(classifier_data_path, flags.output_folder_name))
elif flags.data_type == 'source':
    classifier_data_path = osp.join(data_path, flags.attack_folder,
                                    flags.defense_folder)
    output_path = create_dir(
        osp.join(classifier_data_path, flags.output_folder_name + '_orig'))
elif flags.data_type == 'before_defense':
    classifier_data_path = osp.join(data_path, flags.attack_folder)
    output_path = create_dir(
        osp.join(classifier_data_path, flags.defense_folder,
                 flags.output_folder_name))
elif flags.data_type == 'after_defense':
    default=0,
    help=
    '1: Save point cloud plots, 0: Do not save point cloud plots [default: 0]')
flags = parser.parse_args()

print('Evaluate attack flags:', flags)

# define basic parameters
top_out_dir = osp.dirname(osp.dirname(
    osp.abspath(__file__)))  # Use to save Neural-Net check-points etc.
data_path = osp.join(top_out_dir, flags.ae_folder, 'eval')
files = [
    f for f in os.listdir(data_path) if osp.isfile(osp.join(data_path, f))
]
output_path = create_dir(osp.join(data_path, flags.output_folder_name))

outlier_thresh = 0.05

# load attack configuration
conf = Conf.load(osp.join(output_path, 'attack_configuration'))

# load data
point_clouds, latent_vectors, reconstructions, pc_classes, slice_idx, ae_loss = \
    load_data(data_path, files,
              ['point_clouds_test_set', 'latent_vectors_test_set',
               'reconstructions_test_set', 'pc_classes', 'slice_idx_test_set',
               'ae_loss_test_set'])

assert np.all(
    ae_loss > 0
), 'Not all autoencoder loss values are larger than 0, as they should be!'
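# The body of load_data() is not shown in these excerpts; a plausible sketch,
# assuming each requested key matches exactly one .npy file name under data_path
# (an assumption for illustration, not the repo's actual implementation):
import numpy as np
import os.path as osp

def load_data_sketch(data_path, files, keys):
    out = []
    for key in keys:
        matches = [f for f in files if key in f]
        assert len(matches) == 1, 'expected one file for key %s, got %d' % (key, len(matches))
        out.append(np.load(osp.join(data_path, matches[0])))
    # Return a single array for a single key, matching the call sites above.
    return out if len(out) > 1 else out[0]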
], 'wrong loss_adv_type: %s' % flags.loss_adv_type
assert flags.num_iterations_thresh <= flags.num_iterations, \
    'num_iterations_thresh (%d) should be smaller than or equal to num_iterations (%d)' % (
        flags.num_iterations_thresh, flags.num_iterations)
assert flags.target_pc_idx_type in [
    'latent_nn', 'chamfer_nn_complete'
], 'wrong target_pc_idx_type: %s' % flags.target_pc_idx_type

# define basic parameters
top_out_dir = osp.dirname(osp.dirname(
    osp.abspath(__file__)))  # Use to save Neural-Net check-points etc.
data_path = osp.join(top_out_dir, flags.ae_folder, 'eval')
files = [
    f for f in os.listdir(data_path) if osp.isfile(osp.join(data_path, f))
]
output_path = create_dir(osp.join(data_path, flags.output_folder_name))

# load data
point_clouds, latent_vectors, pc_classes, slice_idx, ae_loss = \
    load_data(data_path, files,
              ['point_clouds_test_set', 'latent_vectors_test_set', 'pc_classes',
               'slice_idx_test_set', 'ae_loss_test_set'])

assert np.all(
    ae_loss > 0
), 'Not all autoencoder loss values are larger than 0, as they should be!'

nn_idx_dict = {
    'latent_nn': 'latent_nn_idx_test_set',
    'chamfer_nn_complete': 'chamfer_nn_idx_complete_test_set'
}
nn_idx = load_data(data_path, files, [nn_idx_dict[flags.target_pc_idx_type]])
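# A self-contained sketch of how a latent-space nearest-neighbor table such as the
# precomputed 'latent_nn_idx_test_set' file could be produced (the repo's file may be
# built differently): for every latent vector, rank all other shapes by Euclidean distance.
import numpy as np
from scipy.spatial.distance import cdist

def latent_nn_indices(latent_vectors):
    # latent_vectors: (N, bneck_size) array; returns an (N, N) index table, nearest first.
    dists = cdist(latent_vectors, latent_vectors)  # pairwise Euclidean distances
    np.fill_diagonal(dists, np.inf)                # a shape is not its own neighbor
    return np.argsort(dists, axis=1)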
flags = parser.parse_args()

print('Test autoencoder flags:', flags)

assert flags.set_type in ['train_set', 'val_set', 'test_set'], \
    'wrong set_type: %s' % flags.set_type

# define basic parameters
project_dir = osp.dirname(osp.dirname(osp.abspath(__file__)))
top_in_dir = osp.join(project_dir, 'data',
                      'shape_net_core_uniform_samples_2048'
                      )  # Top-dir of where point-clouds are stored.
top_out_dir = osp.join(project_dir)  # Use to save Neural-Net check-points etc.

# Load train configuration
train_dir = create_dir(osp.join(top_out_dir, flags.train_folder))
restore_epoch = flags.restore_epoch
conf = Conf.load(train_dir + '/configuration')
conf.encoder_args['return_layer_before_symmetry'] = True

# Load point clouds
object_class = conf.object_class
class_names = conf.class_names
pc_data, slice_idx, pc_label = load_dataset(class_names, flags.set_type, top_in_dir)
point_clouds = pc_data.point_clouds.copy()

# Sort point cloud axes
if conf.sort_axes:
    point_clouds_axes_sorted = sort_axes(point_clouds)
    point_clouds = point_clouds_axes_sorted
    type=str,
    default='log/autoencoder_victim',
    help=
    'Folder for saving data from the training phase [default: log/autoencoder_victim]'
)
flags = parser.parse_args()

print('Train autoencoder flags:', flags)

# Define basic parameters
project_dir = osp.dirname(osp.dirname(osp.abspath(__file__)))
top_in_dir = osp.join(project_dir, 'data',
                      'shape_net_core_uniform_samples_2048'
                      )  # Top-dir of where point-clouds are stored.
top_out_dir = project_dir  # Use to save Neural-Net check-points etc.
train_dir = create_dir(osp.join(top_out_dir, flags.train_folder))

experiment_name = 'autoencoder'
n_pc_points = 2048  # Number of points per model
bneck_size = 128  # Bottleneck-AE size
ae_loss = 'chamfer'  # Loss to optimize
object_class = ['13l']
class_names = [
    'table', 'car', 'chair', 'airplane', 'sofa', 'rifle', 'lamp', 'watercraft',
    'bench', 'loudspeaker', 'cabinet', 'display', 'telephone'
]

# Load default train parameters
train_params = default_train_params()
train_params['training_epochs'] = flags.training_epochs
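# The autoencoder is optimized with the Chamfer loss (ae_loss = 'chamfer' above).
# A plain-numpy sketch of the symmetric Chamfer distance between two point clouds;
# the repo trains with a TensorFlow op, so the exact reduction here is illustrative:
import numpy as np

def chamfer_distance(pc_a, pc_b):
    # pc_a: (Na, 3), pc_b: (Nb, 3)
    diff = pc_a[:, None, :] - pc_b[None, :, :]   # (Na, Nb, 3) pairwise differences
    sq_dists = np.sum(diff ** 2, axis=-1)        # squared Euclidean distances
    a_to_b = np.min(sq_dists, axis=1).mean()     # each point in A to its nearest in B
    b_to_a = np.min(sq_dists, axis=0).mean()     # each point in B to its nearest in A
    return a_to_b + b_to_a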
    'PointNet', 'AtlasNet', 'FoldingNet'
], 'wrong ae_type: %s.' % flags.transfer_ae_type

print('Run transfer flags:', flags)

# define basic parameters
top_out_dir = osp.dirname(osp.dirname(
    osp.abspath(__file__)))  # Use to save Neural-Net check-points etc.
data_path = osp.join(top_out_dir, flags.ae_folder, 'eval')
files = [
    f for f in os.listdir(data_path) if osp.isfile(osp.join(data_path, f))
]
attack_path = osp.join(data_path, flags.attack_folder)
output_path = create_dir(
    osp.join(top_out_dir, flags.transfer_ae_folder, 'eval',
             flags.output_folder_name))

# load attack configuration
conf = Conf.load(osp.join(attack_path, 'attack_configuration'))

# update autoencoder configuration
conf.experiment_name = 'autoencoder'
conf.train_dir = output_path
conf.is_denoising = True  # Required for having a separate placeholder for the ground truth point cloud (for computing the AE loss)
conf.encoder_args['return_layer_before_symmetry'] = True
assert conf.encoder_args[
    'b_norm_decay'] == 1., 'Required for avoiding the update of batch normalization moving_mean and moving_variance parameters'
assert conf.decoder_args[
    'b_norm_decay'] == 1., 'Required for avoiding the update of batch normalization moving_mean and moving_variance parameters'
assert conf.decoder_args[
    default=0,
    help=
    '1: Save point cloud plots, 0: Do not save point cloud plots [default: 0]')
flags = parser.parse_args()

print('Evaluate transfer flags:', flags)

# define basic parameters
top_out_dir = osp.dirname(osp.dirname(
    osp.abspath(__file__)))  # Use to save Neural-Net check-points etc.
data_path = osp.join(top_out_dir, flags.ae_folder, 'eval')
files = [
    f for f in os.listdir(data_path) if osp.isfile(osp.join(data_path, f))
]
attack_path = create_dir(osp.join(data_path, flags.attack_folder))
output_path = create_dir(
    osp.join(top_out_dir, flags.transfer_ae_folder, 'eval',
             flags.output_folder_name))

# load attack configuration
conf = Conf.load(osp.join(attack_path, 'attack_configuration'))

# load data
point_clouds, latent_vectors, reconstructions, pc_classes, slice_idx, ae_loss = \
    load_data(data_path, files,
              ['point_clouds_test_set', 'latent_vectors_test_set',
               'reconstructions_test_set', 'pc_classes', 'slice_idx_test_set',
               'ae_loss_test_set'])

assert np.all(
    ae_loss > 0
), 'Not all autoencoder loss values are larger than 0, as they should be!'
parser.add_argument("--pc_start_idx", type=int, default=0, help="Start index for source point clouds [default: 0]") parser.add_argument("--pc_batch_size", type=int, default=100, help="Batch size of source point clouds [default: 100]") flags = parser.parse_args() print('Prepare indices flags:', flags) # define basic parameters project_dir = osp.dirname(osp.dirname(osp.abspath(__file__))) data_path = create_dir(osp.join(project_dir, flags.ae_folder, 'eval')) files = [ f for f in os.listdir(data_path) if osp.isfile(osp.join(data_path, f)) ] # load data point_clouds, latent_vectors, pc_classes, slice_idx = \ load_data(data_path, files, ['point_clouds_test_set', 'latent_vectors_test_set', 'pc_classes', 'slice_idx_test_set']) show = False if show: n = 0 plot_3d_point_cloud(point_clouds[n]) slice_idx_file_name = [f for f in files if 'slice_idx_test_set' in f][0] file_name_parts = slice_idx_file_name.split('_')
help="Output folder name") flags = parser.parse_args() print('Get knn dists flags:', flags) # define basic parameters top_out_dir = osp.dirname(osp.dirname( osp.abspath(__file__))) # Use to save Neural-Net check-points etc. data_path = osp.join(top_out_dir, flags.ae_folder, 'eval') files = [ f for f in os.listdir(data_path) if osp.isfile(osp.join(data_path, f)) ] attack_dir = osp.join(top_out_dir, flags.ae_folder, 'eval', flags.attack_folder) output_path = create_dir(osp.join(attack_dir, flags.output_folder_name)) output_path_orig = create_dir( osp.join(attack_dir, flags.output_folder_name + '_orig')) # load data point_clouds, pc_classes, slice_idx = \ load_data(data_path, files, ['point_clouds_test_set', 'pc_classes', 'slice_idx_test_set']) num_points = point_clouds.shape[1] # load attack configuration conf = Conf.load(osp.join(attack_dir, 'attack_configuration')) nn_idx_dict = { 'latent_nn': 'latent_nn_idx_test_set', 'chamfer_nn_complete': 'chamfer_nn_idx_complete_test_set'
save_synthetic_samples = True

# How many synthetic samples to produce at each save step.
n_syn_samples = latent_data.num_examples

# Optimization parameters
init_lr = 0.0001
batch_size = 50
noise_params = {'mu': 0, 'sigma': 0.2}
noise_dim = bneck_size
beta = 0.5  # ADAM's momentum.
n_out = [bneck_size]  # Dimensionality of generated samples.

if save_synthetic_samples:
    synthetic_data_out_dir = osp.join(top_out_dir, 'OUT/synthetic_samples/',
                                      experiment_name)
    create_dir(synthetic_data_out_dir)

if save_gan_model:
    train_dir = osp.join(top_out_dir, 'OUT/latent_gan', experiment_name)
    create_dir(train_dir)

reset_tf_graph()

if use_wgan:
    lam = 10  # lambda of W-GAN-GP
    gan = W_GAN_GP(experiment_name, init_lr, lam, n_out, noise_dim, \
                   latent_code_discriminator_two_layers, latent_code_generator_two_layers,\
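# A small sketch of how generator input noise with the parameters above could be drawn.
# The GAN classes consume noise internally, so this only illustrates the mu/sigma/noise_dim
# settings and is not a call into the repo's training loop:
import numpy as np

def sample_noise(batch_size, noise_dim, mu=0.0, sigma=0.2):
    # Gaussian noise batch of shape (batch_size, noise_dim).
    return np.random.normal(loc=mu, scale=sigma, size=(batch_size, noise_dim))

# Example: z = sample_noise(batch_size, noise_dim, noise_params['mu'], noise_params['sigma'])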
# Command line arguments
parser = argparse.ArgumentParser()
parser.add_argument('--ae_folder', type=str, default='log/autoencoder_victim',
                    help='Folder for loading a trained autoencoder model [default: log/autoencoder_victim]')
parser.add_argument("--attack_pc_idx", type=str,
                    default='log/autoencoder_victim/eval/sel_idx_rand_100_test_set_13l.npy',
                    help="List of indices of point clouds for the attack")
parser.add_argument("--do_sanity_checks", type=int, default=0,
                    help="1: Do sanity checks, 0: Do not do sanity checks [default: 0]")
parser.add_argument("--output_folder_name", type=str, default='attack_res',
                    help="Output folder name")
flags = parser.parse_args()

print('Get dists flags:', flags)

# define basic parameters
top_out_dir = osp.dirname(osp.dirname(osp.abspath(__file__)))  # Use to save Neural-Net check-points etc.
data_path = osp.join(top_out_dir, flags.ae_folder, 'eval')
files = [f for f in os.listdir(data_path) if osp.isfile(osp.join(data_path, f))]
output_path = create_dir(osp.join(data_path, flags.output_folder_name))

chamfer_batch_size = 10

# load attack configuration
conf = Conf.load(osp.join(output_path, 'attack_configuration'))

# load data
point_clouds, pc_classes, slice_idx = \
    load_data(data_path, files, ['point_clouds_test_set', 'pc_classes', 'slice_idx_test_set'])

num_points = point_clouds.shape[1]

nn_idx_dict = {'latent_nn': 'latent_nn_idx_test_set',
               'chamfer_nn_complete': 'chamfer_nn_idx_complete_test_set'}
nn_idx = load_data(data_path, files, [nn_idx_dict[conf.target_pc_idx_type]])
parser.add_argument('--defense_folder', type=str, default='defense_critical_res',
                    help='Folder for loading defense data')
parser.add_argument("--output_folder_name", type=str, default='classifier_res',
                    help="Output folder name")
parser.add_argument('--save_graphs', type=int, default=0,
                    help='1: Save statistics graphs, 0: Do not save statistics graphs [default: 0]')
flags = parser.parse_args()

print('Evaluate classifier flags:', flags)

assert flags.data_type in ['target', 'adversarial', 'source', 'before_defense', 'after_defense'], \
    'wrong data_type: %s.' % flags.data_type
assert flags.classification_type in ['hit_target', 'avoid_source'], \
    'wrong classification_type: %s.' % flags.classification_type

# define basic parameters
top_out_dir = osp.dirname(osp.dirname(osp.abspath(__file__)))  # Use to save Neural-Net check-points etc.
data_path = osp.join(top_out_dir, flags.ae_folder, 'eval')
files = [f for f in os.listdir(data_path) if osp.isfile(osp.join(data_path, f))]
attack_path = create_dir(osp.join(data_path, flags.attack_folder))

if flags.data_type == 'target':
    output_path = create_dir(osp.join(attack_path, flags.output_folder_name + '_orig'))
elif flags.data_type == 'adversarial':
    output_path = create_dir(osp.join(attack_path, flags.output_folder_name))
elif flags.data_type == 'source':
    output_path = create_dir(osp.join(attack_path, flags.defense_folder, flags.output_folder_name + '_orig'))
elif flags.data_type == 'before_defense':
    adversarial_data_path = create_dir(osp.join(attack_path, flags.output_folder_name))
    output_path = create_dir(osp.join(attack_path, flags.defense_folder, flags.output_folder_name))
elif flags.data_type == 'after_defense':
    output_path = create_dir(osp.join(attack_path, flags.defense_folder, flags.output_folder_name))
else:
    assert False, 'wrong data_type: %s' % flags.data_type