Example #1
def adv_train(x, y):
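    """Replace the first `ae_frac`-fraction of the batch with PGD adversarial
    examples and keep the remaining inputs clean."""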
    n_adv = int(hps.ae_frac * hps.batch_size)
    if n_adv == 0:
        return x
    else:
        adv_inputs = ae.pgd_attack(x[:n_adv], y[:n_adv], cleverhans_model, hps.p,
                                   eps_dict[hps.p][hps.dataset], hps.pgd_n_iter)
        return tf.concat([adv_inputs, x[n_adv:]], axis=0)
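
A minimal, self-contained sketch (TF1-style) of the batch-mixing logic above, with a constant shift standing in for ae.pgd_attack; the names and toy values are illustrative assumptions, not taken from the original code:

import numpy as np
import tensorflow as tf

batch_size, ae_frac = 8, 0.5                # assumed toy values
n_adv = int(ae_frac * batch_size)

x = tf.placeholder(tf.float32, [batch_size, 2])
adv_part = x[:n_adv] + 0.1                  # stand-in for ae.pgd_attack(...)
x_mixed = tf.concat([adv_part, x[n_adv:]], axis=0)

with tf.Session() as sess:
    out = sess.run(x_mixed, feed_dict={x: np.zeros((batch_size, 2), np.float32)})
    print(out[:n_adv].max(), out[n_adv:].max())  # 0.1 0.0: only the first n_adv rows are perturbed
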
Example #2
                                                    name='train_step')

    # Separate forward-pass graph for the Cleverhans wrapper (used for the PGD attack), placed on the last GPU
    logits_all_gpus = forward_pass_cleverhans(x_tf)

    # Model saver
    saver = tf.train.Saver()
    # GPU settings
    gpu_options = tf.GPUOptions(visible_device_list=str(hps.gpus)[1:-1],
                                per_process_gpu_memory_fraction=hps.gpu_memory)
    config = tf.ConfigProto(allow_soft_placement=True, gpu_options=gpu_options)

with tf.Session(graph=graph, config=config) as sess:
    with graph.as_default(), tf.device('/gpu:0'):
        pgd_ae_tensor = ae.pgd_attack(x_tf, y_in, cleverhans_model, hps.p,
                                      eps_dict[hps.p][hps.dataset],
                                      hps.pgd_n_iter)

    sess.run(tf.global_variables_initializer())  # run 'init' op
    epoch_start, epoch_end = 0, hps.n_epochs

    log.add('Session started with hyperparameters: {} \n'.format(hps_str))
    time_start = time.time()

    # for epoch in range(epoch_start, epoch_end):
    for epoch in range(1, epoch_end + 1):
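        # LR schedule: divide the base learning rate by 10 once 90% of the epochs are done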
        epoch_start_reduced_lr = 0.9
        lr_actual = hps.lr / 10 if epoch >= epoch_start_reduced_lr * hps.n_epochs else hps.lr

        frac_reg = min(epoch / 10.0, 1.0)  # goes from 0 to 1 linearly over the first 10 epochs

Example #3
gpu_options = tf.GPUOptions(visible_device_list=str(hps.gpus)[1:-1],
                            per_process_gpu_memory_fraction=hps.gpu_memory)
config = tf.ConfigProto(allow_soft_placement=True, gpu_options=gpu_options)

# ---------  Pytorch part for the Kolter-Wong model ----------
device = torch.device('cuda:' + str(hps.gpus[-1]))
torch.cuda.set_device(hps.gpus[-1])

model_torch = kolter_wong.models.select_model(hps.nn_type, hps.n_in,
                                              hps.n_out).to(device)
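# gradients w.r.t. the PyTorch model parameters are not needed in this script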
for var in model_torch.parameters():
    var.requires_grad = False
# ----------      end      ----------

with tf.Session(graph=graph, config=config) as sess:
    with graph.as_default(), tf.device('/gpu:0'):
        pgd_ae_tensor = ae.pgd_attack(x_tf, y_in, cleverhans_model, hps.p, eps,
                                      hps.pgd_n_iter)
    load_model(sess, hps)  # load the weights from hps.model_path into the current TF model

    time_start = time.time()
    # First, evaluate on the full test set
    clean_inputs_all, clean_labels_all = x_test, y_test

    logits_val = sess.run(logits, feed_dict={x_in: clean_inputs_all, is_train: False})
    correctly_classified = (np.argmax(clean_labels_all, axis=1) ==
                            np.argmax(logits_val, axis=1))
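    # (illustrative follow-up, not from the original code) clean test accuracy
    # is simply the mean of this boolean mask:
    clean_acc = np.mean(correctly_classified)
    print('clean test accuracy: {:.2%}'.format(clean_acc))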