Example #1
import os.path as osp

# Imports assumed from the public latent_3d_points repository layout; the
# module-level top_in_dir / top_out_dir globals are defined as in Example #3.
from latent_3d_points.src.ae_templates import mlp_architecture_ala_iclr_18, default_train_params
from latent_3d_points.src.autoencoder import Configuration as Conf
from latent_3d_points.src.in_out import snc_category_to_synth_id, create_dir
from latent_3d_points.src.point_net_ae import PointNetAutoEncoder
from latent_3d_points.src.tf_utils import reset_tf_graph


def load(model_path, z_rotate, num_points, point_dimension=3):
	model_dir = osp.dirname(model_path)
	model_epoch = int(osp.basename(model_path).split('-')[1])
	experiment_name = osp.basename(osp.dirname(model_path)).split('train_')[1]  # e.g. 'single_class_ae_plane_chamfer_z_rotate'
	bneck_size = 128                                # Bottleneck-AE size
	ae_loss = 'chamfer'                             # Loss to optimize: 'emd' or 'chamfer'
	class_name = "airplane"
	syn_id = snc_category_to_synth_id()[class_name]
	class_dir = osp.join(top_in_dir , syn_id)    # e.g. /home/yz6/code/latent_3d_points/data/shape_net_core_uniform_samples_2048/02691156

	train_dir = create_dir(osp.join(top_out_dir, experiment_name))
	train_params = default_train_params()
	encoder, decoder, enc_args, dec_args = mlp_architecture_ala_iclr_18(num_points, bneck_size, point_dimension=point_dimension)


	conf = Conf(n_input = [num_points, point_dimension],
	            loss = ae_loss,
	            training_epochs = train_params['training_epochs'],
	            batch_size = train_params['batch_size'],
	            denoising = train_params['denoising'],
	            learning_rate = train_params['learning_rate'],
	            loss_display_step = train_params['loss_display_step'],
	            saver_step = train_params['saver_step'],
	            z_rotate = z_rotate == 'True',  # z_rotate is passed in as the string 'True'/'False'
	            train_dir = train_dir,
	            encoder = encoder,
	            decoder = decoder,
	            encoder_args = enc_args,
	            decoder_args = dec_args,
	            experiment_name = experiment_name,
	            allow_gpu_growth = True
	           )
	reset_tf_graph()
	ae = PointNetAutoEncoder(conf.experiment_name, conf)
	ae.restore_model(model_dir, model_epoch)
	return ae, conf
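
# Usage sketch (hypothetical checkpoint path): the file name encodes the epoch
# ('models.ckpt-500' -> 500) and the parent directory encodes the experiment
# name after its 'train_' prefix.
# ae, conf = load('../data/train_single_class_ae_plane_chamfer_z_rotate/models.ckpt-500',
#                 z_rotate='False', num_points=2048)
# reconstructions = ae.reconstruct(batch_of_clouds)[0]  # batch_of_clouds: (B, 2048, 3)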
Example #2

# Defaults returned by default_train_params():
#     'learning_rate': 0.0005
#     'z_rotate': False      (randomly rotate the models of each batch)
#     'loss_display_step': 1 (print the loss every this many epochs)
#     'saver_step': 10       (save a checkpoint every this many epochs)
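
# Sketch: any of the defaults listed above can be overridden on the returned
# dict before it is passed into Conf, e.g.:
#     train_params = default_train_params()
#     train_params['saver_step'] = 5  # checkpoint every 5 epochs instead of 10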

# In[6]:

train_params = default_train_params()

# In[7]:

encoder, decoder, enc_args, dec_args = mlp_architecture_ala_iclr_18(
    n_pc_points, bneck_size)
train_dir = create_dir(osp.join(top_out_dir, experiment_name))

# In[8]:

conf = Conf(n_input=[n_pc_points, 3],
            loss=ae_loss,
            training_epochs=train_params['training_epochs'],
            batch_size=train_params['batch_size'],
            denoising=train_params['denoising'],
            learning_rate=train_params['learning_rate'],
            train_dir=train_dir,
            loss_display_step=train_params['loss_display_step'],
            saver_step=train_params['saver_step'],
            z_rotate=train_params['z_rotate'],
            encoder=encoder,
            decoder=decoder,
            encoder_args=enc_args,
            decoder_args=dec_args)
Example #3


top_out_dir = '../data/'                        # Use to write Neural-Net check-points etc.
top_in_dir = sys.argv[1]        # Top-dir of where point-clouds are stored, e.g. '/home/yz6/data/SSE_CARS_DATA/'
experiment_name = sys.argv[2]
n_pc_points = int(sys.argv[3])  # Number of points per model, e.g. 600.
ae_loss = sys.argv[4]           # 'emd' or 'chamfer'
z_rotate = sys.argv[5]          # 'True' or 'False'
fixed_points = 'False'


bneck_size = 128                  # Bottleneck-AE size
# point cloud instance
train_pc, val_pc, test_pc = load_all_point_clouds_under_folder(
    top_in_dir, n_threads=2, file_ending='.obj', verbose=True,
    fixed_points=fixed_points == 'True', num_points=n_pc_points)
train_dir = create_dir(osp.join(top_out_dir, 'train_'+experiment_name))
val_dir = create_dir(osp.join(top_out_dir, 'val_'+experiment_name))
test_dir = create_dir(osp.join(top_out_dir, 'test_'+experiment_name))

pickle_data(osp.join(train_dir, 'train_pc.pkl'), train_pc)
pickle_data(osp.join(val_dir, 'val_pc.pkl'), val_pc)
pickle_data(osp.join(test_dir, 'test_pc.pkl'), test_pc)
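
# Read-back sketch: unpickle_data (the counterpart of pickle_data in
# latent_3d_points.src.in_out) is a generator over the pickled objects.
# train_pc = next(unpickle_data(osp.join(train_dir, 'train_pc.pkl')))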


# dictionary
train_params = default_train_params()
encoder, decoder, enc_args, dec_args = mlp_architecture_ala_iclr_18(n_pc_points, bneck_size)


conf = Conf(n_input = [n_pc_points, 3],
            loss = ae_loss,
            training_epochs = train_params['training_epochs'],
            batch_size = train_params['batch_size'],
            denoising = train_params['denoising'],
            learning_rate = train_params['learning_rate'],
            loss_display_step = train_params['loss_display_step'],
            saver_step = train_params['saver_step'],
            z_rotate = z_rotate == 'True',
            train_dir = train_dir,
            encoder = encoder,
            decoder = decoder,
            encoder_args = enc_args,
            decoder_args = dec_args)
Example #4
# (The snippet begins mid-call; presumably sklearn's train_test_split with
# hypothetical inputs X, y.)
X_train, X_test, y_train, y_test = train_test_split(X, y,
                                                    train_size=0.8,
                                                    random_state=521)
c1 = collections.Counter(y_train.tolist())
print([(i, float(c1[i]) / len(y_train) * 100.0) for i in c1])
c2 = collections.Counter(y_test.tolist())
print([(i, float(c2[i]) / len(y_test) * 100.0) for i in c2])
X_train = X_train[:, :, :3]
X_test = X_test[:, :, :3]
print("Train shape", X_train.shape)
print("Test shape", X_test.shape)

n_pc_points = 1024  # Number of points per model.
bneck_size = 128  # Bottleneck-AE size
ae_loss = 'emd'  # Loss to optimize: 'emd' or 'chamfer'
experiment_name = 'shapenet_1024_ae_128'
train_dir = create_dir(os.path.join('data/', experiment_name))
train_params = default_train_params(single_class=False)
train_params['training_epochs'] = 5
encoder, decoder, enc_args, dec_args = washington_xyz_rgb(
    n_pc_points, bneck_size)

pcd_dataset = PointCloudDataSet(X_train, labels=y_train, copy=False)

conf = Conf(n_input=[n_pc_points, 3],
            loss=ae_loss,
            training_epochs=train_params['training_epochs'],
            batch_size=train_params['batch_size'],
            denoising=train_params['denoising'],
            learning_rate=train_params['learning_rate'],
            train_dir=train_dir,
            loss_display_step=train_params['loss_display_step'],
            saver_step=train_params['saver_step'],
            z_rotate=train_params['z_rotate'],
            encoder=encoder,
            decoder=decoder,
            encoder_args=enc_args,
            decoder_args=dec_args)
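
# Training sketch (assuming the standard PointNetAutoEncoder / ae.train()
# interface of latent_3d_points; the Conf call above is completed following
# the pattern of the other examples):
conf.experiment_name = experiment_name
reset_tf_graph()
ae = PointNetAutoEncoder(conf.experiment_name, conf)
ae.train(pcd_dataset, conf)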
Example #5
    train_dir=top_out_dir,
    loss_display_step=train_params['loss_display_step'],
    saver_step=train_params['saver_step'],
    z_rotate=train_params['z_rotate'],
    encoder=encoder,
    decoder=decoder,
    encoder_args=enc_args,
    decoder_args=dec_args,
    n_output=[2048, 3])
conf.experiment_name = experiment_name
generator = conditional_missing_points_generator

if save_synthetic_samples:
    synthetic_data_out_dir = osp.join(top_out_dir, 'OUT/synthetic_samples/',
                                      experiment_name)
    create_dir(synthetic_data_out_dir)

if save_gan_model:
    train_dir = osp.join(top_out_dir, 'OUT/raw_gan', experiment_name)
    create_dir(train_dir)

# In[15]:

reset_tf_graph()

if use_wgan:
    lam = 10
    disc_kwargs = {'b_norm': False}
    gan = W_GAN_GP(experiment_name,
                   init_lr,
                   lam,
Example #6
def evaluate(setup, results, models, targets_list, victims_list):

    top_out_dir = osp.join(BASE_DIR, "latent_3d_points", "data")
    # print(BASE_DIR)

    # Top-dir of where point-clouds are stored.
    top_in_dir = osp.join(BASE_DIR, "latent_3d_points", "data",
                          "shape_net_core_uniform_samples_2048")

    # experiment_name = 'single_class_ae'
    experiment_name = 'new_ae'

    n_pc_points = 1024  # Number of points per model (originally 2048).
    bneck_size = 128  # Bottleneck-AE size
    ae_loss = 'chamfer'  # Loss to optimize: 'emd' or 'chamfer'
    train_params = default_train_params()
    encoder, decoder, enc_args, dec_args = mlp_architecture_ala_iclr_18(
        n_pc_points, bneck_size)
    train_dir = create_dir(osp.join(top_out_dir, experiment_name))

    conf = Conf(n_input=[n_pc_points, 3],
                loss=ae_loss,
                training_epochs=train_params['training_epochs'],
                batch_size=train_params['batch_size'],
                denoising=train_params['denoising'],
                learning_rate=train_params['learning_rate'],
                train_dir=train_dir,
                loss_display_step=train_params['loss_display_step'],
                saver_step=train_params['saver_step'],
                z_rotate=train_params['z_rotate'],
                encoder=encoder,
                decoder=decoder,
                encoder_args=enc_args,
                decoder_args=dec_args)
    conf.experiment_name = experiment_name
    conf.held_out_step = 5  # how often to evaluate/print the loss on
                            # held-out data (if provided to ae.train())
    conf.save(osp.join(train_dir, 'configuration'))

    load_pre_trained_ae = True
    restore_epoch = 500
    if load_pre_trained_ae:
        conf = Conf.load(train_dir + '/configuration')
        reset_tf_graph()
        ae = PointNetAutoEncoder(conf.experiment_name, conf)
        ae.restore_model(conf.train_dir, epoch=restore_epoch, verbose=True)
        models["ae"] = ae

    accuracies_disc = {
        "orig_acc": "original accuracy on PointNet",
        "adv_suc": "natural adversarial success rate on PointNet",
        "adv_acc": "natural adversarial accuracy on PointNet",
        "proj_acc": "projected accuracy on PointNet",
        "rec_suc": "defended natural adversarial success rate on PointNet",
        "rec_acc": "reconstructed defense accuracy on PointNet",
        "orig_acc_pp": "original accuracy on PointNet_++",
        "orig_acc_gcn": "original accuracy on DGCN",
        "orig_acc_p": "original accuracy on PointNet_+",
        "adv_suc_pp": "natural adversarial success rate on PointNet_++",
        "adv_suc_gcn": "natural adversarial success rate on DGCN",
        "adv_suc_p": "natural adversarial success rate on PointNet_+",
        "adv_acc_pp": "natural adversarial accuracy on PointNet_++",
        "adv_acc_gcn": "natural adversarial accuracy on DGCN",
        "adv_acc_p": "natural adversarial accuracy on PointNet_+",
        "proj_acc_pp": "projected accuracy on PointNet_++",
        "proj_acc_gcn": "projected accuracy on DGCN",
        "proj_acc_p": "projected accuracy on PointNet_+",
        "rec_suc_pp": "defended natural adversarial success rate on PointNet_++",
        "rec_suc_gcn": "defended natural adversarial success rate on DGCN",
        "rec_suc_p": "defended natural adversarial success rate on PointNet_+",
        "rec_acc_pp": "reconstructed defense accuracy on PointNet_++",
        "rec_acc_gcn": "reconstructed defense accuracy on DGCN",
        "rec_acc_p": "reconstructed defense accuracy on PointNet_+",
        "b_adv_suc": "baseline adversarial success rate on PointNet",
        "b_adv_acc": "baseline adversarial accuracy on PointNet",
        "b_rec_suc": "baseline defended natural adversarial success rate on PointNet",
        "b_rec_acc": "baseline reconstructed defense accuracy on PointNet",
        "b_adv_suc_pp": "baseline adversarial success rate on PointNet_++",
        "b_adv_suc_gcn": "baseline adversarial success rate on DGCN",
        "b_adv_suc_p": "baseline adversarial success rate on PointNet_+",
        "b_adv_acc_pp": "baseline adversarial accuracy on PointNet_++",
        "b_adv_acc_gcn": "baseline adversarial accuracy on DGCN",
        "b_adv_acc_p": "baseline adversarial accuracy on PointNet_+",
        "b_rec_suc_pp": "baseline defended natural adversarial success rate on PointNet_++",
        "b_rec_suc_gcn": "baseline defended natural adversarial success rate on DGCN",
        "b_rec_suc_p": "baseline defended natural adversarial success rate on PointNet_+",
        "b_rec_acc_pp": "baseline reconstructed defense accuracy on PointNet_++",
        "b_rec_acc_gcn": "baseline reconstructed defense accuracy on DGCN",
        "b_rec_acc_p": "baseline reconstructed defense accuracy on PointNet_+",
        "orig_acc_r": "original accuracy under Random defense",
        "adv_suc_r": "natural adversarial success rate under Random defense",
        "adv_acc_r": "natural adversarial accuracy under Random defense",
        "b_adv_suc_r": "baseline adversarial success rate under Random defense",
        "b_adv_acc_r": "baseline adversarial accuracy under Random defense",
        "orig_acc_o": "original accuracy under Outlier defense",
        "adv_suc_o": "natural adversarial success rate under Outlier defense",
        "adv_acc_o": "natural adversarial accuracy under Outlier defense",
        "b_adv_suc_o": "baseline adversarial success rate under Outlier defense",
        "b_adv_acc_o": "baseline adversarial accuracy under Outlier defense",
        "orig_acc_bust": "original accuracy under Robust model",
        "adv_acc_bust": "natural adversarial accuracy under Robust model"
    }
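
    # Reporting sketch: each key above maps to a human-readable description,
    # so one evaluation row (a hypothetical dict of floats, e.g. a row of
    # ev_results below) can be printed as:
    #     for key, desc in accuracies_disc.items():
    #         if key in row:
    #             print("{}: {:.2f}".format(desc, row[key]))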

    norms_names = ["natural_L_cham_norm_orig"]
    ev_results = ListDict(list(accuracies_disc.keys()) + norms_names)
    setups = ListDict(setup.keys())
    save_results(setup["results_file"], ev_results + setups)
    for target in targets_list:
        setup["target"] = target
        for victim in victims_list:
            if victim == setup["target"]:
                continue
            setup["victim"] = victim
            for batch_indx in range(int(setup["batch_size"])):
                predictions, norms = evaluate_all_shapes_scale(
                    batch_indx=batch_indx, setup=setup, models=models)
                for _ in range(int(setup["batch_size"])):
                    setups.append(setup)
                ev_results.remove(ev_results - ListDict(predictions) -
                                  ListDict(norms))
                ev_results.partial_extend(
                    ListDict(predictions)).partial_extend(ListDict(norms))
                save_results(setup["results_file"], ev_results + setups)

    save_results(setup["results_file"], ev_results + setups + results)
    return ev_results
Example #7
def attack(setup, models, targets_list, victims_list):
    top_out_dir = osp.join(BASE_DIR, "latent_3d_points", "data")

    # Top-dir of where point-clouds are stored.
    top_in_dir = osp.join(BASE_DIR, "latent_3d_points", "data",
                          "shape_net_core_uniform_samples_2048")

    experiment_name = 'single_class_ae'
    n_pc_points = 1024  # 2048                # Number of points per model.
    bneck_size = 128  # Bottleneck-AE size
    NB_PER_VICTIM = 25  # number of point clouds per class
    ae_loss = 'chamfer'  # Loss to optimize: 'emd' or 'chamfer'
    train_params = default_train_params()
    encoder, decoder, enc_args, dec_args = mlp_architecture_ala_iclr_18(
        n_pc_points, bneck_size)
    train_dir = create_dir(osp.join(top_out_dir, experiment_name))
    conf = Conf(n_input=[n_pc_points, 3],
                loss=ae_loss,
                training_epochs=train_params['training_epochs'],
                batch_size=setup["batch_size"],
                denoising=train_params['denoising'],
                learning_rate=train_params['learning_rate'],
                train_dir=train_dir,
                hard_bound_mode=setup["hard_bound_mode"],
                dyn_bound_mode=setup["dyn_bound_mode"],
                b_infty=setup["b_infty"],
                b_two=setup["b_two"],
                u_infty=setup["u_infty"],
                u_two=setup["u_two"],
                loss_display_step=train_params['loss_display_step'],
                saver_step=train_params['saver_step'],
                z_rotate=train_params['z_rotate'],
                encoder=encoder,
                decoder=decoder,
                encoder_args=enc_args,
                decoder_args=dec_args)
    conf.experiment_name = experiment_name
    conf.held_out_step = 5  # How often to evaluate/print out loss on
    # held_out data (if they are provided in ae.train() ).
    # conf.save(osp.join(train_dir, 'configuration'))
    is_training = False
    with tf.Graph().as_default():
        # with tf.device('/gpu:'+str(GPU_INDEX)):

        # print("3333333333333333333")
        load_pre_trained_ae = True
        restore_epoch = 500
        if load_pre_trained_ae:
            # conf = Conf.load(train_dir + '/configuration')
            # reset_tf_graph()
            ae = PointNetAutoEncoderWithClassifier(conf.experiment_name, conf)
            ae.restore_model(conf.train_dir, epoch=restore_epoch, verbose=True)
            # pointclouds_pl, labels_pl = MODEL.placeholder_inputs(BATCH_SIZE, NUM_POINT)
        ae.models = models
        is_training_pl = tf.placeholder(tf.bool, shape=())
        # is_projection = tf.placeholder(tf.bool, shape=())

        # pert=tf.get_variable(name='pert',shape=[BATCH_SIZE,NUM_POINT,3],initializer=tf.truncated_normal_initializer(stddev=0.01))
        target = tf.placeholder(tf.int32, shape=(None))
        victim_label = tf.placeholder(tf.int32, shape=(None))
        pert = ae.pert_
        pointclouds_pl = ae.x
        pointclouds_input = ae.x_h
        # with tf.variable_scope(tf.get_variable_scope(), reuse=tf.AUTO_REUSE):
        if setup["network"] == "PN":
            with tf.variable_scope(tf.get_variable_scope(), reuse=False):
                early_pred, end_points = ae.get_model_w_ae(
                    pointclouds_input, is_training_pl)
            with tf.variable_scope("QQ", reuse=False):
                late_pred, end_points_late = ae.get_model_w_ae(
                    ae.x_reconstr, is_training_pl)
        elif setup["network"] == "PN1":
            with tf.variable_scope(tf.get_variable_scope(), reuse=False):
                early_pred, end_points = ae.get_model_w_ae_p(
                    pointclouds_input, is_training_pl)
            with tf.variable_scope("QQ", reuse=False):
                late_pred, end_points_late = ae.get_model_w_ae_p(
                    ae.x_reconstr, is_training_pl)
        elif setup["network"] == "PN2":
            with tf.variable_scope(tf.get_variable_scope(), reuse=False):
                early_pred, end_points = ae.get_model_w_ae_pp(
                    pointclouds_input, is_training_pl)
            with tf.variable_scope("QQ", reuse=False):
                late_pred, end_points_late = ae.get_model_w_ae_pp(
                    ae.x_reconstr, is_training_pl)
        elif setup["network"] == "GCN":
            with tf.variable_scope(tf.get_variable_scope(), reuse=False):
                early_pred, end_points = ae.get_model_w_ae_gcn(
                    pointclouds_input, is_training_pl)
            with tf.variable_scope("QQ", reuse=False):
                late_pred, end_points_late = ae.get_model_w_ae_gcn(
                    ae.x_reconstr, is_training_pl)
        else:
            raise ValueError("unknown network: {}".format(setup["network"]))

        # adversarial loss: targeted / relativistic targeted / untargeted
        if setup["evaluation_mode"] == 0:
            early_adv_loss = ae.get_adv_loss(early_pred, target)
        elif setup["evaluation_mode"] == 1:
            early_adv_loss = ae.get_untargeted_adv_loss(
                early_pred, victim_label, KAPPA)

        dyn_target = tf.placeholder(tf.int32, shape=(None))
        # late_adv_loss = ae.get_adv_loss_batch(late_pred, dyn_target)
        late_adv_loss = ae.get_untargeted_adv_loss(late_pred, victim_label,
                                                   KAPPA_AE)
        # nat_norm = tf.sqrt(tf.reduce_sum(
        #     tf.square(ae.x_reconstr - ae.x_h), [1, 2]))
        nat_norm = 1000 * ae.chamfer_distance(ae.x_reconstr, ae.x_h)

        # perturbation L2 constraint
        pert_norm = tf.sqrt(tf.reduce_sum(tf.square(pert), [1, 2]))
        # perturbation L1 constraint (alternative):
        # pert_norm = tf.reduce_sum(tf.abs(pert), [1, 2])
        # soft perturbation L_infty constraint
        pert_bound = tf.norm(tf.nn.relu(pert - S_INFTY), ord=1, axis=(1, 2))
        pert_cham = 1000 * ae.chamfer_distance(pointclouds_input,
                                               pointclouds_pl)
        pert_emd = ae.emd_distance(pointclouds_input, pointclouds_pl)

        dist_weight = tf.placeholder(shape=[BATCH_SIZE], dtype=tf.float32)
        nat_weight = tf.placeholder(shape=[BATCH_SIZE], dtype=tf.float32)
        cham_weight = tf.placeholder(shape=[BATCH_SIZE], dtype=tf.float32)
        emd_weight = tf.placeholder(shape=[BATCH_SIZE], dtype=tf.float32)
        infty_weight = tf.placeholder(shape=[BATCH_SIZE], dtype=tf.float32)
        lr_attack = tf.placeholder(dtype=tf.float32)
        attack_optimizer = tf.train.AdamOptimizer(lr_attack)
        l_2_loss = tf.reduce_mean(tf.multiply(dist_weight, pert_norm))
        l_cham_loss = tf.reduce_mean(tf.multiply(cham_weight, pert_cham))
        l_emd_loss = tf.reduce_mean(tf.multiply(emd_weight, pert_emd))

        nat_loss = tf.reduce_mean(tf.multiply(nat_weight, nat_norm))

        l_infty_loss = tf.reduce_mean(tf.multiply(infty_weight, pert_bound))
        adv_loss = (1 - GAMMA) * early_adv_loss + (GAMMA) * late_adv_loss
        distance_loss = l_2_loss + nat_loss + l_infty_loss + l_cham_loss + l_emd_loss
        total_loss = adv_loss + distance_loss
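
        # Loss composition: the adversarial term blends the classifier loss on
        # the perturbed input (early) with the loss on its AE reconstruction
        # (late) via GAMMA, while the weighted distance terms (L2, Chamfer,
        # EMD, the soft L_infty bound, and the naturalness term) keep the
        # perturbation small and the shape plausible; only ae.pert is updated.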
        attack_op = attack_optimizer.minimize(total_loss, var_list=[ae.pert])

        vl = tf.global_variables()
        vl = [x for x in vl if "single_class_ae" not in x.name]
        vl_1 = [x for x in vl if "QQ" not in x.name]
        # vl_2 = [x for x in vl if "PP" not in x.name]
        vl_2 = {x.name.replace("QQ/", "").replace(":0", ""): x for x in vl}
        # vl_2 = [x for x in vl if "Classifier_1/" in x.name]

        # vl = [x for x in vl if  "single_class_ae" not in x.name]
        # print(20*"#", vl_1)
        # print(20*"#", vl_2)
        # saver = tf.train.Saver(
        #     {x.name.replace("PP/", "").replace("QQ/", ""): x for x in vl})
        # saver = tf.train.Saver(vl)
        saver_1 = tf.train.Saver(vl_1)
        saver_2 = tf.train.Saver(vl_2)

        # Create a session
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        config.allow_soft_placement = True
        #config.log_device_placement = True
        # sess = tf.Session(config=config)
        sess = ae.sess
        sess.run(tf.global_variables_initializer())
        ae.restore_model(conf.train_dir, epoch=restore_epoch, verbose=True)

        ops = {
            "ae": ae,
            'pointclouds_pl': pointclouds_pl,
            #    'labels_pl': labels_pl,
            'is_training_pl': is_training_pl,
            'pointclouds_input': pointclouds_input,
            'dist_weight': dist_weight,
            "nat_weight": nat_weight,
            "infty_weight": infty_weight,
            "target": target,
            "victim_label": victim_label,
            "cham_weight": cham_weight,
            "emd_weight": emd_weight,
            'pert': ae.pert,
            "dyn_target": dyn_target,
            #    'pre_max':end_points['pre_max'],
            #    'post_max':end_points['post_max'],
            'early_pred': early_pred,
            "late_pred": late_pred,
            'early_adv_loss': early_adv_loss,
            'adv_loss': adv_loss,
            #    "late_adv_loss": late_adv_loss,
            'pert_norm': pert_norm,
            'nat_norm': nat_norm,
            "pert_bound": pert_bound,
            "bound_ball_infty": ae.bound_ball_infty,
            "bound_ball_two": ae.bound_ball_two,
            "pert_cham": pert_cham,
            "pert_emd": pert_emd,
            'total_loss': total_loss,
            'lr_attack': lr_attack,
            "x_m": ae.x_reconstr,
            'attack_op': attack_op
        }

        # print_tensors_in_checkpoint_file(
        #     file_name=MODEL_PATH, tensor_name='beta1_power', all_tensors=True)
        # saver.restore(sess, MODEL_PATH)
        saver_1.restore(sess, models["test_path"])
        saver_2.restore(sess, models["test_path"])
        print('model restored!')

        norms_names = [
            "L_2_norm_adv", "L_infty_norm_adv", "L_cham_norm_adv",
            "L_emd_norm_adv", "natural_L_cham_norm_adv"
        ]

        # the class index of selected 10 largest classed in ModelNet40
        results = ListDict(norms_names)
        setups = ListDict(setup.keys())
        save_results(setup["save_file"], results + setups)
        for target in targets_list:
            setup["target"] = target
            for victim in victims_list:
                if victim == setup["target"]:
                    continue
                setup["victim"] = victim
                attacked_data = attacked_data_all[victim]  # shape: 25 x 1024 x 3
                for j in range(NB_PER_VICTIM // BATCH_SIZE):
                    norms, img = attack_one_batch(
                        sess, ops,
                        attacked_data[j * BATCH_SIZE:(j + 1) * BATCH_SIZE],
                        setup)
                    np.save(
                        os.path.join(
                            '.', DUMP_DIR,
                            '{}_{}_{}_adv.npy'.format(victim, setup["target"],
                                                      j)), img)
                    [setups.append(setup) for ii in range(setup["batch_size"])]
                    results.extend(ListDict(norms))
                    save_results(setup["save_file"], results + setups)
                    # np.save(os.path.join('.',DUMP_DIR,'{}_{}_{}_mxadv.npy' .format(victim,setup["target"],j)),img)
                    np.save(
                        os.path.join(
                            '.', DUMP_DIR,
                            '{}_{}_{}_orig.npy'.format(victim, setup["target"],
                                                       j)),
                        attacked_data[j * BATCH_SIZE:(j + 1) * BATCH_SIZE]
                    )  # dump the original examples for comparison
        # joblib.dump(dist_list, os.path.join('.', DUMP_DIR, 'dist_{}.z'.format(setup["target"])))  # log distances for performance evaluation
        save_results(setup["save_file"], results + setups)
        return results
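
# Invocation sketch (hypothetical setup/models dicts; targets_list and
# victims_list are class indices, as in the evaluation loop of Example #6):
# results = attack(setup, models, targets_list=[0, 2, 8], victims_list=[0, 2, 8])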
Example #8
top_out_dir = '../data/'                        # Use to write Neural-Net check-points etc.
top_in_dir = '../data/shape_net_core_uniform_samples_2048/' # Top-dir of where point-clouds are stored.


model_dir = osp.join(top_out_dir, 'single_class_ae')
experiment_name = 'single_class_ae'
n_pc_points = 2048                              # Number of points per model.
bneck_size = 128                                # Bottleneck-AE size
ae_loss = 'emd'                             # Loss to optimize: 'emd' or 'chamfer'
class_name = "airplane"
syn_id = snc_category_to_synth_id()[class_name]
class_dir = osp.join(top_in_dir , syn_id)    # e.g. /home/yz6/code/latent_3d_points/data/shape_net_core_uniform_samples_2048/02691156
all_pc_data = load_all_point_clouds_under_folder(class_dir, n_threads=8, file_ending='.ply', verbose=True)
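
# Sanity-check sketch (assumed PointCloudDataSet interface: next_batch()
# returns the point-cloud batch as its first element):
# sample = all_pc_data.next_batch(10)[0]
# print(sample.shape)  # expected: (10, n_pc_points, 3)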


train_dir = create_dir(osp.join(top_out_dir, experiment_name))
out_dir = create_dir(osp.join(top_out_dir, "generated_planes"))
train_params = default_train_params()
encoder, decoder, enc_args, dec_args = mlp_architecture_ala_iclr_18(n_pc_points, bneck_size)

conf = Conf(n_input = [n_pc_points, 3],
            loss = ae_loss,
            training_epochs = train_params['training_epochs'],
            batch_size = train_params['batch_size'],
            denoising = train_params['denoising'],
            learning_rate = train_params['learning_rate'],
            loss_display_step = train_params['loss_display_step'],
            saver_step = train_params['saver_step'],
            z_rotate = train_params['z_rotate'],
            train_dir = train_dir,
            encoder = encoder,
            decoder = decoder,
            encoder_args = enc_args,
            decoder_args = dec_args)
Example #9
# print("avg_dist_train: ", avg_dist_train)
# print('nn_dist_train')
# nearest_list, nn_dist_train = get_nn_distance(train_hidden)
# print("nn_dist_train: ", nn_dist_train)

# print("chamfer train nearest average using nearest list")
# avg_nearest_train = get_chamfer_permut(train_reconstr, nearest_list)
# print(avg_nearest_train)
# print('nn_dict')
# dict_code_to_gen, dict_gen_to_code, nn_list, nn_mean = get_nn(train_hidden, test_hidden)
# print("nn_mean: ", nn_mean)
# print("nn_list")
# print(nn_list)

import os.path as osp

import numpy as np

from latent_3d_points.src.in_out import create_dir

tmp_dir = osp.join('../data', 'tmp')
create_dir(tmp_dir)

for i in range(100):
    np.savetxt(osp.join(tmp_dir, '{0}_generated.csv'.format(i)),
               generated_reconstr[i],
               delimiter=",")
    np.savetxt(osp.join(tmp_dir, '{0}_train_nearest.csv'.format(i)),
               train_feed[train_gen2code[i][0]],
               delimiter=",")
    np.savetxt(osp.join(tmp_dir, '{0}_test_nearest.csv'.format(i)),
               test_feed[test_gen2code[i][0]],
               delimiter=",")
    np.savetxt(osp.join(tmp_dir, '{0}_train_nearest_reconstr.csv'.format(i)),
               train_reconstr[train_gen2code[i][0]],
               delimiter=",")