def load(model_path, z_rotate, num_points, point_dimension=3):
    """Rebuild and restore a ``PointNetAutoEncoder`` from a checkpoint path.

    Args:
        model_path: Path to a checkpoint file whose basename looks like
            ``models.ckpt-<epoch>...`` and whose parent directory is named
            ``train_<experiment_name>`` — both pieces are parsed from the path.
        z_rotate: Whether batches were randomly z-rotated during training.
            Accepts a bool or the string ``'True'``; any other value means
            ``False``.
        num_points: Number of points per model the AE was trained on.
        point_dimension: Dimensionality of each point (default 3).

    Returns:
        Tuple ``(ae, conf)`` — the restored autoencoder and its configuration.
    """
    model_dir = osp.dirname(model_path)
    # Checkpoint basenames look like 'models.ckpt-<epoch>...'; grab the epoch.
    model_epoch = int(osp.basename(model_path).split('-')[1])
    # Parent dir is 'train_<experiment_name>',
    # e.g. 'train_single_class_ae_plane_chamfer_z_rotate'.
    experiment_name = osp.basename(osp.dirname(model_path)).split('train_')[1]
    bneck_size = 128  # Bottleneck-AE size
    ae_loss = 'chamfer'  # Loss to optimize: 'emd' or 'chamfer'
    class_name = "airplane"
    syn_id = snc_category_to_synth_id()[class_name]
    # e.g. .../data/shape_net_core_uniform_samples_2048/02691156
    # NOTE(review): class_dir is computed but never used below; kept because
    # removing it changes nothing callers can see, but it may document intent.
    class_dir = osp.join(top_in_dir, syn_id)
    train_dir = create_dir(osp.join(top_out_dir, experiment_name))
    train_params = default_train_params()
    encoder, decoder, enc_args, dec_args = mlp_architecture_ala_iclr_18(
        num_points, bneck_size, point_dimension=point_dimension)
    conf = Conf(n_input=[num_points, point_dimension],
                loss=ae_loss,
                training_epochs=train_params['training_epochs'],
                batch_size=train_params['batch_size'],
                denoising=train_params['denoising'],
                learning_rate=train_params['learning_rate'],
                loss_display_step=train_params['loss_display_step'],
                saver_step=train_params['saver_step'],
                # BUG FIX: the original wrote `z_rotate == 'True'`, which maps
                # the bool True to False silently.  Accept bool True as well;
                # string callers behave exactly as before.
                z_rotate=(z_rotate is True or z_rotate == 'True'),
                train_dir=train_dir,
                encoder=encoder,
                decoder=decoder,
                encoder_args=enc_args,
                decoder_args=dec_args,
                experiment_name=experiment_name,
                allow_gpu_growth=True)
    reset_tf_graph()
    ae = PointNetAutoEncoder(conf.experiment_name, conf)
    ae.restore_model(model_dir, model_epoch)
    return ae, conf
# Load default training parameters (some of which are listed beloq). For more details please print the configuration object. # # 'batch_size': 50 # # 'denoising': False (# by default AE is not denoising) # # 'learning_rate': 0.0005 # # 'z_rotate': False (# randomly rotate models of each batch) # # 'loss_display_step': 1 (# display loss at end of these many epochs) # 'saver_step': 10 (# over how many epochs to save neural-network) # In[6]: train_params = default_train_params() # In[7]: encoder, decoder, enc_args, dec_args = mlp_architecture_ala_iclr_18( n_pc_points, bneck_size) train_dir = create_dir(osp.join(top_out_dir, experiment_name)) # In[8]: conf = Conf(n_input=[n_pc_points, 3], loss=ae_loss, training_epochs=train_params['training_epochs'], batch_size=train_params['batch_size'], denoising=train_params['denoising'], learning_rate=train_params['learning_rate'],
# Sweep over bottleneck sizes and training-set fractions for a multi-class
# ShapeNet autoencoder experiment.
sizes = [32, 64, 128]                              # bottleneck widths to try
train_sizes = [0.1, 0.15, 0.2, 0.25, 0.3, 0.5, 0.8]  # training fractions
num_points = []
accs = {32: [], 64: [], 128: []}   # per-bottleneck accuracy accumulators
folds = 10
for size in sizes:
    for t in train_sizes:
        experiment_name = 'shapenet_2048_{}'.format(size)
        n_pc_points = 2048  # Number of points per model.
        bneck_size = size   # Bottleneck-AE size
        ae_loss = 'emd'     # Loss to optimize: 'emd' or 'chamfer'
        train_dir = create_dir(osp.join(top_out_dir, experiment_name))
        # single_class=False -> defaults for the multi-class setting.
        train_params = default_train_params(single_class=False)
        encoder, decoder, enc_args, dec_args = mlp_architecture_ala_iclr_18(
            n_pc_points, bneck_size)
        # NOTE(review): this Conf(...) call is TRUNCATED in the source chunk —
        # the remaining keyword arguments and the closing parenthesis are not
        # visible here.  Confirm against the full file.
        conf = Conf(n_input=[n_pc_points, 3],
                    loss=ae_loss,
                    training_epochs=train_params['training_epochs'],
                    batch_size=train_params['batch_size'],
                    denoising=train_params['denoising'],
                    learning_rate=train_params['learning_rate'],
                    train_dir=train_dir,
                    loss_display_step=train_params['loss_display_step'],
                    saver_step=train_params['saver_step'],
                    z_rotate=train_params['z_rotate'],
                    encoder=encoder,
                    decoder=decoder,
# How many synthetic samples to produce at each save step. n_syn_samples = train_data.num_examples # Optimization parameters init_lr = args.lr batch_size = 50 noise_params = {'mu': 0, 'sigma': 0.2} # noise_dim = 128 noise_dim = 1948 # incomplete shape: 2048 - 100 - 1948 beta = 0.5 # ADAM's momentum. n_out = [n_pc_points, 3] # Dimensionality of generated samples. discriminator = mlp_discriminator # generator = point_cloud_generator train_params = default_train_params() # not actually used encoder, decoder, enc_args, dec_args = mlp_architecture_ala_iclr_18( n_pc_points, bneck_size) conf = Conf( n_input=[1948, 3], loss=ae_loss, training_epochs=train_params['training_epochs'], # not actually used batch_size=train_params['batch_size'], denoising=train_params['denoising'], learning_rate=train_params['learning_rate'], train_dir=top_out_dir, loss_display_step=train_params['loss_display_step'], saver_step=train_params['saver_step'], z_rotate=train_params['z_rotate'], encoder=encoder,
def evaluate(setup, results, models, targets_list, victims_list):
    """Evaluate attacked point clouds over every (target, victim) class pair.

    Rebuilds the 'new_ae' autoencoder configuration, restores a pre-trained
    checkpoint into ``models["ae"]``, then runs
    ``evaluate_all_shapes_scale`` batch by batch, accumulating prediction
    accuracies and distance norms into ``ListDict`` tables that are flushed
    to ``setup["results_file"]`` after every batch.

    Args:
        setup: dict of experiment settings; mutated in place ("target",
            "victim") and read for "batch_size" / "results_file".
        results: previously collected ``ListDict`` results, appended to the
            final save only.
        models: dict of loaded models; this function stores the restored
            autoencoder under key "ae".
        targets_list, victims_list: class labels to cross-evaluate.

    Returns:
        The accumulated ``ListDict`` of evaluation results.
    """
    top_out_dir = osp.join(BASE_DIR, "latent_3d_points", "data")
    # print(BASE_DIR)
    # Top-dir of where point-clouds are stored.
    top_in_dir = osp.join(BASE_DIR, "latent_3d_points", "data",
                          "shape_net_core_uniform_samples_2048")
    # experiment_name = 'single_class_ae'
    experiment_name = 'new_ae'
    n_pc_ppoints = 1024  # 2048  # Number of points per model.
    bneck_size = 128  # Bottleneck-AE size
    # Loss to optimize: 'emd' or 'chamfer'
    ae_loss = 'chamfer'
    train_params = default_train_params()
    encoder, decoder, enc_args, dec_args = mlp_architecture_ala_iclr_18(
        n_pc_ppoints, bneck_size)
    train_dir = create_dir(osp.join(top_out_dir, experiment_name))
    conf = Conf(n_input=[n_pc_ppoints, 3],
                loss=ae_loss,
                training_epochs=train_params['training_epochs'],
                batch_size=train_params['batch_size'],
                denoising=train_params['denoising'],
                learning_rate=train_params['learning_rate'],
                train_dir=train_dir,
                loss_display_step=train_params['loss_display_step'],
                saver_step=train_params['saver_step'],
                z_rotate=train_params['z_rotate'],
                encoder=encoder,
                decoder=decoder,
                encoder_args=enc_args,
                decoder_args=dec_args)
    conf.experiment_name = experiment_name
    conf.held_out_step = 5  # How often to evaluate/print out loss on
    # held_out data (if they are provided in ae.train() ).
    conf.save(osp.join(train_dir, 'configuration'))
    load_pre_trained_ae = True
    restore_epoch = 500
    if load_pre_trained_ae:
        # Reload the saved configuration so the restored graph matches the
        # checkpoint exactly, then rebuild and restore the AE.
        conf = Conf.load(train_dir + '/configuration')
        reset_tf_graph()
        ae = PointNetAutoEncoder(conf.experiment_name, conf)
        ae.restore_model(conf.train_dir, epoch=restore_epoch, verbose=True)
    models["ae"] = ae
    # all_resulting_corrects = []
    # natural_L_2_norm_orig = []
    # natural_L_2_norm_adv = []
    # natural_L_2_norm_nat = []
    # natural_L_infty_norm_orig = []
    # natural_L_infty_norm_adv = []
    # natural_L_infty_norm_nat = []
    # L_2_norm_adv = []
    # L_2_norm_nat = []
    # L_infty_norm_adv = []
    # L_infty_norm_nat = []
    # L_cham_norm_adv = []
    # L_cham_norm_nat = []
    # L_emd_norm_adv = []
    # L_emd_norm_nat = []
    # natural_L_cham_norm_orig = []
    # natural_L_cham_norm_adv = []
    # natural_L_cham_norm_nat = []
    # Human-readable descriptions of every recorded accuracy column.
    # (String values are preserved byte-for-byte, typos included, because
    # downstream code may key on them.)
    accuracies_disc = {
        "orig_acc": "original accuracy on PointNet ",
        "adv_suc": "natural adverserial sucess rate on PointNet ",
        "adv_acc": "natural adverserial accuracy on PointNet ",
        "proj_acc": "projected accuracy on PointNet ",
        "rec_suc": "defended natural adverserial sucess rate on PointNet ",
        "rec_acc": "reconstructed defense accuracy on PointNet ",
        "orig_acc_pp": "original accuracy on PointNet_++ ",
        "orig_acc_gcn": "original accuracy on DGCN",
        "orig_acc_p": "original accuracy on PointNet_+ ",
        "adv_suc_pp": "natural adverserial sucess rate on PointNet_++ ",
        "adv_suc_gcn": "natural adverserial sucess rate on DGCN",
        "adv_suc_p": "natural adverserial sucess rate on PointNet_+ ",
        "adv_acc_pp": "natural adverserial accuracy on PointNet_++ ",
        "adv_acc_gcn": "natural adverserial accuracy on DGCN",
        "adv_acc_p": "natural adverserial accuracy on PointNet_+ ",
        "proj_acc_pp": "projected accuracy on PointNet_++ ",
        "proj_acc_gcn": "projected accuracy on DGCN",
        "proj_acc_p": "projected accuracy on PointNet_+ ",
        "rec_suc_pp": "defended natural adverserial sucess rate on PointNet_++ ",
        "rec_suc_gcn": "defended natural adverserial sucess rate on DGCN",
        "rec_suc_p": "defended natural adverserial sucess rate on PointNet_+ ",
        "rec_acc_pp": "reconstructed defense accuracy on PointNet_++ ",
        "rec_acc_gcn": "reconstructed defense accuracy on DGCN",
        "rec_acc_p": "reconstructed defense accuracy on PointNet_+ ",
        "b_adv_suc": "baseline adverserial sucess rate on PointNet ",
        "b_adv_acc": "baseline adverserial accuracy on PointNet ",
        "b_rec_suc": "baseline defended natural adverserial sucess rate on PointNet ",
        "b_rec_acc": "baselin ereconstructed defense accuracy on PointNet ",
        "b_adv_suc_pp": "baseline adverserial sucess rate on PointNet_++ ",
        "b_adv_suc_gcn": "baseline adverserial sucess rate on DGCN",
        "b_adv_suc_p": "baseline adverserial sucess rate on PointNet_+ ",
        "b_adv_acc_pp": "baseline adverserial accuracy on PointNet_++ ",
        "b_adv_acc_gcn": "baseline adverserial accuracy on DGCN",
        "b_adv_acc_p": "baseline adverserial accuracy on PointNet_+ ",
        "b_rec_suc_pp": "baseline defended natural adverserial sucess rate on PointNet_++ ",
        "b_rec_suc_gcn": "baseline defended natural adverserial sucess rate on DGCN",
        "b_rec_suc_p": "baseline defended natural adverserial sucess rate on PointNet_+ ",
        "b_rec_acc_pp": "baselin ereconstructed defense accuracy on PointNet_++ ",
        "b_rec_acc_gcn": "baselin ereconstructed defense accuracy on DGCN",
        "b_rec_acc_p": "baselin ereconstructed defense accuracy on PointNet_+ ",
        "orig_acc_r": "original accuracy under Random defense",
        "adv_suc_r": "natural adverserial accuracy under Random defense",
        "adv_acc_r": "natural adverserial sucess rate under Random defense",
        "b_adv_suc_r": "baseline adverserial accuracy under Random defense",
        "b_adv_acc_r": "baseline adverserial sucess rate under Random defense",
        "orig_acc_o": "original accuracy under Outlier defense",
        "adv_suc_o": "natural adverserial accuracy under Outlier defense",
        "adv_acc_o": "natural adverserial sucess rate under Outlier defense",
        "b_adv_suc_o": "baseline adverserial accuracy under Outlier defense",
        "b_adv_acc_o": "baseline adverserial sucess rate under Outlier defense",
        "orig_acc_bust": "original accuracy under Robust model",
        "adv_acc_bust": "natural adverserial accuracy under Robust model"
    }
    # accuracies_names = [
    #     "orig_acc", "adv_acc", "proj_acc", "rec_acc", "orig_acc_pp", , "orig_acc_p"
    #     "adv_acc_pp", "proj_acc_pp", "rec_acc_pp", "adv_acc_p", "proj_acc_p", "rec_acc_p""orig_acc_r",
    #     "adv_acc_r","orig_acc_o","adv_acc_o"]
    norms_names = ["natural_L_cham_norm_orig"]
    # NOTE(review): `dict.keys() + list` requires Python 2, where keys()
    # returns a list — this module appears to target Python 2 / TF 1.x.
    ev_results = ListDict(accuracies_disc.keys() + norms_names)
    # norms_results = ListDict(norms_names)
    setups = ListDict(setup.keys())
    save_results(setup["results_file"], ev_results + setups)
    for target in targets_list:
        setup["target"] = target
        for victim in victims_list:
            # Skip the degenerate case of attacking a class into itself.
            if victim == setup["target"]:
                continue
            setup["victim"] = victim
            for batch_indx in range(int(setup["batch_size"])):
                predictions, norms = evaluate_all_shapes_scale(
                    batch_indx=batch_indx, setup=setup, models=models)
                # Record one copy of the current setup per evaluated shape.
                [setups.append(setup) for ii in range(setup["batch_size"])]
                # norms_results.remove(norms_results - ListDict(norms))
                # norms_results.partial_extend(ListDict(norms))
                # Drop columns that this batch did not produce, then extend
                # with the new predictions and norms.
                ev_results.remove(ev_results - ListDict(predictions) -
                                  ListDict(norms))
                ev_results.partial_extend(
                    ListDict(predictions)).partial_extend(ListDict(norms))
                # Checkpoint results to disk after every batch.
                save_results(setup["results_file"], ev_results + setups)
    save_results(setup["results_file"], ev_results + setups + results)
    return ev_results
def attack(setup, models, targets_list, victims_list):
    """Run the AE-regularized adversarial attack over class pairs.

    Builds a TF-1.x graph around ``PointNetAutoEncoderWithClassifier``: a
    perturbation variable is optimized with Adam against a combined loss of
    (a) adversarial loss on the classifier fed the perturbed input and the
    AE reconstruction, and (b) weighted L2 / L-infinity / Chamfer / EMD
    distance penalties.  Results and adversarial point clouds are written
    to ``setup["save_file"]`` and ``DUMP_DIR`` per (target, victim, batch).

    Args:
        setup: experiment settings dict; mutated in place ("target",
            "victim") and read for bound modes, batch size and file paths.
        models: dict of classifier models; "test_path" holds the classifier
            checkpoint restored into the graph.
        targets_list, victims_list: class labels to attack between.

    Returns:
        ``ListDict`` of recorded perturbation norms.
    """
    top_out_dir = osp.join(BASE_DIR, "latent_3d_points", "data")
    # Top-dir of where point-clouds are stored.
    top_in_dir = osp.join(BASE_DIR, "latent_3d_points", "data",
                          "shape_net_core_uniform_samples_2048")
    experiment_name = 'single_class_ae'
    n_pc_points = 1024  # 2048  # Number of points per model.
    bneck_size = 128  # Bottleneck-AE size
    NB_PER_VICTIM = 25  # nb of point clouds per class
    # Loss to optimize: 'emd' or 'chamfer'
    ae_loss = 'chamfer'
    train_params = default_train_params()
    encoder, decoder, enc_args, dec_args = mlp_architecture_ala_iclr_18(
        n_pc_points, bneck_size)
    train_dir = create_dir(osp.join(top_out_dir, experiment_name))
    # Attack-specific bound settings ride along on the AE configuration.
    conf = Conf(n_input=[n_pc_points, 3],
                loss=ae_loss,
                training_epochs=train_params['training_epochs'],
                batch_size=setup["batch_size"],
                denoising=train_params['denoising'],
                learning_rate=train_params['learning_rate'],
                train_dir=train_dir,
                hard_bound_mode=setup["hard_bound_mode"],
                dyn_bound_mode=setup["dyn_bound_mode"],
                b_infty=setup["b_infty"],
                b_two=setup["b_two"],
                u_infty=setup["u_infty"],
                u_two=setup["u_two"],
                loss_display_step=train_params['loss_display_step'],
                saver_step=train_params['saver_step'],
                z_rotate=train_params['z_rotate'],
                encoder=encoder,
                decoder=decoder,
                encoder_args=enc_args,
                decoder_args=dec_args)
    conf.experiment_name = experiment_name
    conf.held_out_step = 5  # How often to evaluate/print out loss on
    # held_out data (if they are provided in ae.train() ).
    # conf.save(osp.join(train_dir, 'configuration'))
    is_training = False
    with tf.Graph().as_default():
        # with tf.device('/gpu:'+str(GPU_INDEX)):
        # print("3333333333333333333")
        load_pre_trained_ae = True
        restore_epoch = 500
        if load_pre_trained_ae:
            # conf = Conf.load(train_dir + '/configuration')
            # reset_tf_graph()
            ae = PointNetAutoEncoderWithClassifier(conf.experiment_name, conf)
            ae.restore_model(conf.train_dir, epoch=restore_epoch, verbose=True)
        # pointclouds_pl, labels_pl = MODEL.placeholder_inputs(BATCH_SIZE, NUM_POINT)
        ae.models = models
        is_training_pl = tf.placeholder(tf.bool, shape=())
        # is_projection = tf.placeholder(tf.bool, shape=())
        # pert=tf.get_variable(name='pert',shape=[BATCH_SIZE,NUM_POINT,3],initializer=tf.truncated_normal_initializer(stddev=0.01))
        target = tf.placeholder(tf.int32, shape=(None))
        victim_label = tf.placeholder(tf.int32, shape=(None))
        # The perturbation variable and input tensors live on the AE object.
        pert = ae.pert_
        pointclouds_pl = ae.x       # clean input
        pointclouds_input = ae.x_h  # perturbed input fed to the classifier
        # with tf.variable_scope(tf.get_variable_scope(), reuse=tf.AUTO_REUSE):
        # Build two classifier towers: "early" sees the perturbed input,
        # "late" (scope "QQ") sees the AE reconstruction of it.
        if setup["network"] == "PN":
            with tf.variable_scope(tf.get_variable_scope(), reuse=False):
                early_pred, end_points = ae.get_model_w_ae(
                    pointclouds_input, is_training_pl)
            with tf.variable_scope("QQ", reuse=False):
                late_pred, end_points_late = ae.get_model_w_ae(
                    ae.x_reconstr, is_training_pl)
        elif setup["network"] == "PN1":
            with tf.variable_scope(tf.get_variable_scope(), reuse=False):
                early_pred, end_points = ae.get_model_w_ae_p(
                    pointclouds_input, is_training_pl)
            with tf.variable_scope("QQ", reuse=False):
                late_pred, end_points_late = ae.get_model_w_ae_p(
                    ae.x_reconstr, is_training_pl)
        elif setup["network"] == "PN2":
            with tf.variable_scope(tf.get_variable_scope(), reuse=False):
                early_pred, end_points = ae.get_model_w_ae_pp(
                    pointclouds_input, is_training_pl)
            with tf.variable_scope("QQ", reuse=False):
                late_pred, end_points_late = ae.get_model_w_ae_pp(
                    ae.x_reconstr, is_training_pl)
        elif setup["network"] == "GCN":
            with tf.variable_scope(tf.get_variable_scope(), reuse=False):
                early_pred, end_points = ae.get_model_w_ae_gcn(
                    pointclouds_input, is_training_pl)
            with tf.variable_scope("QQ", reuse=False):
                late_pred, end_points_late = ae.get_model_w_ae_gcn(
                    ae.x_reconstr, is_training_pl)
        else:
            # NOTE(review): falls through with early_pred/late_pred unbound —
            # an unknown network name will crash a few lines below.
            print("network not known")
        # adv loss targeted /relativistic targeted / untargeted
        if setup["evaluation_mode"] == 0:
            early_adv_loss = ae.get_adv_loss(early_pred, target)
        elif setup["evaluation_mode"] == 1:
            # NOTE(review): duplicated assignment preserved from the original.
            early_adv_loss = early_adv_loss = ae.get_untargeted_adv_loss(
                early_pred, victim_label, KAPPA)
        dyn_target = tf.placeholder(tf.int32, shape=(None))
        # late_adv_loss = ae.get_adv_loss_batch(late_pred, dyn_target)
        late_adv_loss = ae.get_untargeted_adv_loss(late_pred, victim_label,
                                                   KAPPA_AE)
        # nat_norm = tf.sqrt(tf.reduce_sum(
        #     tf.square(ae.x_reconstr - ae.x_h), [1, 2]))
        # "Naturalness": Chamfer distance between reconstruction and input.
        nat_norm = 1000 * ae.chamfer_distance(ae.x_reconstr, ae.x_h)
        # perturbation l2 constraint
        pert_norm = tf.sqrt(tf.reduce_sum(tf.square(pert), [1, 2]))
        # perturbation l1 constraint
        # pert_norm = tf.reduce_sum(tf.abs(pert), [1, 2])
        # perturbation l_infty constraint (hinge above the S_INFTY budget)
        pert_bound = tf.norm(tf.nn.relu(pert - S_INFTY), ord=1, axis=(1, 2))
        pert_cham = 1000 * ae.chamfer_distance(pointclouds_input,
                                               pointclouds_pl)
        pert_emd = ae.emd_distance(pointclouds_input, pointclouds_pl)
        # Per-sample weights for each distance term, fed at attack time.
        dist_weight = tf.placeholder(shape=[BATCH_SIZE], dtype=tf.float32)
        nat_weight = tf.placeholder(shape=[BATCH_SIZE], dtype=tf.float32)
        cham_weight = tf.placeholder(shape=[BATCH_SIZE], dtype=tf.float32)
        emd_weight = tf.placeholder(shape=[BATCH_SIZE], dtype=tf.float32)
        infty_weight = tf.placeholder(shape=[BATCH_SIZE], dtype=tf.float32)
        lr_attack = tf.placeholder(dtype=tf.float32)
        attack_optimizer = tf.train.AdamOptimizer(lr_attack)
        l_2_loss = tf.reduce_mean(tf.multiply(dist_weight, pert_norm))
        l_cham_loss = tf.reduce_mean(tf.multiply(cham_weight, pert_cham))
        l_emd_loss = tf.reduce_mean(tf.multiply(emd_weight, pert_emd))
        nat_loss = tf.reduce_mean(tf.multiply(nat_weight, nat_norm))
        l_infty_loss = tf.reduce_mean(tf.multiply(infty_weight, pert_bound))
        # GAMMA blends the classifier loss on the perturbed input with the
        # loss on its AE reconstruction.
        adv_loss = (1 - GAMMA) * early_adv_loss + (GAMMA) * late_adv_loss
        distance_loss = l_2_loss + nat_loss + l_infty_loss + l_cham_loss + l_emd_loss
        total_loss = adv_loss + distance_loss
        # Only the perturbation variable is optimized.
        attack_op = attack_optimizer.minimize(total_loss, var_list=[ae.pert])
        # Partition the graph variables so the classifier checkpoint can be
        # restored into both towers: vl_1 = the un-renamed tower, vl_2 maps
        # "QQ/"-scoped names back to the checkpoint names.
        vl = tf.global_variables()
        vl = [x for x in vl if "single_class_ae" not in x.name]
        vl_1 = [x for x in vl if "QQ" not in x.name]
        # vl_2 = [x for x in vl if "PP" not in x.name]
        vl_2 = {x.name.replace("QQ/", "").replace(":0", ""): x for x in vl}
        # vl_2 = [x for x in vl if "Classifier_1/" in x.name]
        # vl = [x for x in vl if "single_class_ae" not in x.name]
        # print(20*"#", vl_1)
        # print(20*"#", vl_2)
        # saver = tf.train.Saver(
        #     {x.name.replace("PP/", "").replace("QQ/", ""): x for x in vl})
        # saver = tf.train.Saver(vl)
        saver_1 = tf.train.Saver(vl_1)
        saver_2 = tf.train.Saver(vl_2)
        # Create a session
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        config.allow_soft_placement = True
        # config.log_device_placement = True
        # sess = tf.Session(config=config)
        sess = ae.sess
        sess.run(tf.global_variables_initializer())
        # Re-restore the AE after the global init wiped its weights.
        ae.restore_model(conf.train_dir, epoch=restore_epoch, verbose=True)
        # Handles passed to attack_one_batch.
        ops = {
            "ae": ae,
            'pointclouds_pl': pointclouds_pl,
            # 'labels_pl': labels_pl,
            'is_training_pl': is_training_pl,
            'pointclouds_input': pointclouds_input,
            'dist_weight': dist_weight,
            "nat_weight": nat_weight,
            "infty_weight": infty_weight,
            "target": target,
            "victim_label": victim_label,
            "cham_weight": cham_weight,
            "emd_weight": emd_weight,
            'pert': ae.pert,
            "dyn_target": dyn_target,
            # 'pre_max':end_points['pre_max'],
            # 'post_max':end_points['post_max'],
            'early_pred': early_pred,
            "late_pred": late_pred,
            'early_adv_loss': early_adv_loss,
            'adv_loss': adv_loss,
            # "late_adv_loss": late_adv_loss,
            'pert_norm': pert_norm,
            'nat_norm': nat_norm,
            "pert_bound": pert_bound,
            "bound_ball_infty": ae.bound_ball_infty,
            "bound_ball_two": ae.bound_ball_two,
            "pert_cham": pert_cham,
            "pert_emd": pert_emd,
            'total_loss': total_loss,
            'lr_attack': lr_attack,
            "x_m": ae.x_reconstr,
            'attack_op': attack_op
        }
        # print_tensors_in_checkpoint_file(
        #     file_name=MODEL_PATH, tensor_name='beta1_power', all_tensors=True)
        # saver.restore(sess, MODEL_PATH)
        # Restore the classifier weights into both towers.
        saver_1.restore(sess, models["test_path"])
        saver_2.restore(sess, models["test_path"])
        print('model restored!')
        norms_names = [
            "L_2_norm_adv", "L_infty_norm_adv", "L_cham_norm_adv",
            "L_emd_norm_adv", "natural_L_cham_norm_adv"
        ]
        # the class index of selected 10 largest classed in ModelNet40
        results = ListDict(norms_names)
        setups = ListDict(setup.keys())
        save_results(setup["save_file"], results + setups)
        for target in targets_list:
            setup["target"] = target
            for victim in victims_list:
                # Skip attacking a class into itself.
                if victim == setup["target"]:
                    continue
                setup["victim"] = victim
                attacked_data = attacked_data_all[
                    victim]  # attacked_data shape:25*1024*3
                for j in range(NB_PER_VICTIM // BATCH_SIZE):
                    norms, img = attack_one_batch(
                        sess, ops,
                        attacked_data[j * BATCH_SIZE:(j + 1) * BATCH_SIZE],
                        setup)
                    np.save(
                        os.path.join(
                            '.', DUMP_DIR,
                            '{}_{}_{}_adv.npy'.format(victim, setup["target"],
                                                      j)), img)
                    # Record one copy of the setup per attacked shape.
                    [setups.append(setup) for ii in range(setup["batch_size"])]
                    results.extend(ListDict(norms))
                    # compiled_results.chek_error()
                    save_results(setup["save_file"], results + setups)
                    # np.save(os.path.join('.',DUMP_DIR,'{}_{}_{}_mxadv.npy' .format(victim,setup["target"],j)),img)
                    np.save(
                        os.path.join(
                            '.', DUMP_DIR,
                            '{}_{}_{}_orig.npy'.format(victim, setup["target"],
                                                       j)),
                        attacked_data[j * BATCH_SIZE:(j + 1) * BATCH_SIZE]
                    )  # dump originial example for comparison
                # joblib.dump(dist_list,os.path.join('.',DUMP_DIR,'dist_{}.z' .format(setup["target"])))  # log distance information for performation evaluation
        save_results(setup["save_file"], results + setups)
        return results