# pass the generated_img to the encoder, and use the output to compute the loss
    generated_img_adv = tf.reverse(generated_img_adv,
                                   axis=[-1])  # switch RGB to BGR
    adv_img_bgr = generated_img_adv
    generated_img_adv = stn.encoder.preprocess(
        generated_img_adv)  # preprocess image
    enc_gen_adv, enc_gen_layers_adv = stn.encoder.encode(generated_img_adv)

    generated_img = tf.reverse(generated_img, axis=[-1])  # switch RGB to BGR
    img_bgr = generated_img
    generated_img = stn.encoder.preprocess(generated_img)  # preprocess image
    enc_gen, enc_gen_layers = stn.encoder.encode(generated_img)

    if data_set == "cifar10":
        classifier = Model("eval", raw_cifar.train_images)
        classifier._build_model(adv_img, label, reuse=False)
        adv_loss = -classifier.relaxed_y_xent
        adv_acc = classifier.accuracy
        classifier._build_model(img, label, reuse=True)
        normal_loss = -classifier.relaxed_y_xent
        norm_acc = classifier.accuracy
    elif data_set == "imagenet":
        classifier = build_imagenet_model(adv_img_bgr, label)
        losses = tf.stack([
            classifier.rev_xent, classifier.poss_loss, classifier.xent,
            classifier.xent_filter
        ])
        adv_loss = -tf.reduce_sum(losses[loss_choice])
        adv_acc = classifier.accuracy
        acc_y = classifier.acc_y
        stn = StyleTransferNet_adv(encoder_path)

        op_d = {"pgd": {}}

        def switch(name):
            """Rebind every entry of op_d[name] as a module-level global."""
            assert name in op_d
            _d = op_d[name]
            for k in _d:
                # exec a tiny snippet of the form
                #   global <k>
                #   <k> = _d["<k>"]
                # so the selected variant's tensors replace the current globals.
                code = "global %s" % k
                code += "\n" + "%s = _d[\"%s\"]" % (k, k)
                exec(code)
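        # Illustrative usage of the switch helper (hypothetical values, not from
        # the original source): op_d maps a mode name to a dict of tensors, and
        # switch() rebinds each key as a module-level global via exec.
        #   op_d["pgd"] = {"adv_loss": pgd_adv_loss, "adv_acc": pgd_adv_acc}
        #   switch("pgd")  # afterwards the globals adv_loss / adv_acc point at
        #                  # the PGD versions stored in op_d["pgd"]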

        pgd_attack = None
        # pass content and style to the stn, getting the generated_img
        if data_set == "cifar10":
            classifier = Model("eval", raw_cifar.train_images)
        else:
            pass
        first_use = True

        update_mean = True
        update_sigma = True

        report_batch = 50
        aname1 = "nat"
        os.makedirs("motiveimg1", exist_ok=True)

        for batch in range(0, motive_batch):
            step = batch
            x_batch = x_batch_s[batch * 8:(batch + 1) * 8]
            out_img = []
Example #3
# Setting up training parameters
max_num_training_steps = config['max_num_training_steps']
num_output_steps = config['num_output_steps']
num_summary_steps = config['num_summary_steps']
num_checkpoint_steps = config['num_checkpoint_steps']
step_size_schedule = config['step_size_schedule']
weight_decay = config['weight_decay']
data_path = config['data_path']
momentum = config['momentum']
batch_size = config['training_batch_size']

# Setting up the data and the model
raw_cifar = cifar10_input.CIFAR10Data(data_path)
global_step = tf.contrib.framework.get_or_create_global_step()
model = Model(mode='train', data=raw_cifar.train_images)
model._build_model_easy()

# Setting up the optimizer
boundaries = [int(sss[0]) for sss in step_size_schedule]
boundaries = boundaries[1:]
values = [sss[1] for sss in step_size_schedule]
learning_rate = tf.train.piecewise_constant(tf.cast(global_step, tf.int32),
                                            boundaries, values)
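# Worked example (hypothetical schedule, not taken from this repo's config):
# step_size_schedule = [[0, 0.1], [40000, 0.01], [60000, 0.001]] yields
# boundaries = [40000, 60000] and values = [0.1, 0.01, 0.001], so the learning
# rate is 0.1 for steps up to 40000, 0.01 up to 60000, and 0.001 afterwards.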
total_loss = model.mean_xent + weight_decay * model.weight_decay_loss
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
    train_step = tf.train.MomentumOptimizer(learning_rate, momentum).minimize(
        total_loss, global_step=global_step)

# Set up adversary
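# A minimal sketch of the adversary setup, assuming the LinfPGDAttack interface
# used in Example #4 below; the exact arguments in the original training script
# may differ.
attack = LinfPGDAttack(model, config['epsilon'], config['num_steps'],
                       config['step_size'], config['random_start'],
                       config['loss_func'])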
Example #4
config_path = "config.json"

with open(config_path) as config_file:
    config = json.load(config_file)
num_eval_examples = config['num_eval_examples']
eval_batch_size = config['eval_batch_size']
eval_on_cpu = config['eval_on_cpu']
data_path = config['data_path']
opt_method = config["method"]

# Set up the data, hyperparameters, and the model
cifar = cifar10_input.CIFAR10Data(data_path)

if eval_on_cpu:
    with tf.device("/cpu:0"):
        model = Model(mode='eval', data=cifar.train_images)
        model._build_model_easy()
        attack = LinfPGDAttack(model, config['epsilon'], config['num_steps'],
                               config['step_size'], config['random_start'],
                               config['loss_func'])
else:
    model = Model(mode='eval', data=cifar.train_images)
    model._build_model_easy()
    attack = LinfPGDAttack(model, config['epsilon'], config['num_steps'],
                           config['step_size'], config['random_start'],
                           config['loss_func'])

global_step = tf.contrib.framework.get_or_create_global_step()

model_dir = "./"
ckpt_file = "pretrained.ckpt"
# Setting up the Tensorboard and checkpoint outputs
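# A minimal sketch of what typically follows this comment (assumed, not taken
# from the original script): a Saver for restoring pretrained.ckpt and a scalar
# summary for the evaluation accuracy, assuming the Model exposes an accuracy
# tensor as the classifier does in the examples above.
saver = tf.train.Saver()
summary_writer = tf.summary.FileWriter(model_dir)
accuracy_summary = tf.summary.scalar('accuracy', model.accuracy)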