Example no. 1
import os

import numpy as np
import torch
from PIL import Image

# `Vgg16`, `image`, `variable_to_numpy`, `detect_label` and `imagenet_dict`
# are assumed to be defined elsewhere in the project.
model = Vgg16()
k = 6  # unused here; kept from the original listing

learning_rate = 1e-4
# An optimizer on the input could be used instead of the manual step below:
# optimizer = torch.optim.Adam([x_adv], lr=learning_rate)

# Start the adversarial image from the clean input and track its gradient.
x_adv = image.clone().detach().requires_grad_(True)

for i in range(1000):
    # The model is assumed to return intermediate activations along with the
    # final logits: preds[-2] is an internal layer, preds[-1] the classifier.
    preds = model(x_adv)
    internal_logits = preds[-2]
    final_logits = preds[-1]
    label = np.argmax(variable_to_numpy(final_logits))

    # Objective: the standard deviation of the internal activations.
    loss = internal_logits.std()

    # Convert the current adversarial image (assumed to be in [0, 1],
    # shape 1xCxHxW) to an HxWxC uint8 array for saving.
    x_adv_np = x_adv.detach().cpu().numpy()
    x_adv_np = np.squeeze(x_adv_np)
    x_adv_np = np.transpose(x_adv_np, (1, 2, 0))
    x_adv_np = (x_adv_np * 255).astype(np.uint8)

    if i % 50 == 0:
        Image.fromarray(x_adv_np).save('./out/car_%d.jpg' % i)
        google_label = detect_label('./out/car_%d.jpg' % i)
        print(i, variable_to_numpy(loss), imagenet_dict[label])
        print(google_label)

    # Back-propagate to the input and apply a simple gradient step.
    # (The original listing only computed the gradient; a plain
    # gradient-descent step on the input is assumed here.)
    if x_adv.grad is not None:
        x_adv.grad.zero_()
    loss.backward()
    with torch.no_grad():
        x_adv -= learning_rate * x_adv.grad
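
# The listing above relies on a couple of project helpers that are not shown.
# A minimal sketch of `variable_to_numpy`, assuming it only detaches a tensor
# from the graph and converts it to NumPy (`detect_label` is assumed to wrap an
# external image-labelling service and is not reconstructed here):
def variable_to_numpy(t):
    """Detach a tensor and return it as a NumPy array on the CPU."""
    return t.detach().cpu().numpy()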
# Compare the intermediate activations of the clean and adversarial images,
# layer by layer. `internal_logits_var` and `internal_logits_adv_var` are
# assumed to hold the per-layer activations collected from the two forward
# passes; `visualize_features_compare` writes one comparison figure per layer.
for layer_idx, (intermediate_logit_var, intermediate_logit_adv_var) in enumerate(
        tqdm(list(zip(internal_logits_var, internal_logits_adv_var)))):
    intermediate_features = intermediate_logit_var[0].detach().cpu().numpy()
    intermediate_adv_features = intermediate_logit_adv_var[0].detach().cpu().numpy()
    visualize_features_compare(
        intermediate_features,
        intermediate_adv_features,
        output_dir='results',
        file_prefix='compare_{0:02d}_'.format(layer_idx),
        data_format='channels_first',
        only_first_channel=True)
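
# Hypothetical sketch of `visualize_features_compare` (the real helper is not
# part of this listing): save a side-by-side plot of the first channel of the
# clean and adversarial feature maps.
import matplotlib.pyplot as plt

def visualize_features_compare(features, adv_features, output_dir='results',
                               file_prefix='', data_format='channels_first',
                               only_first_channel=True):
    # Only the first channel is plotted, matching only_first_channel=True above.
    if data_format == 'channels_first':
        clean_ch, adv_ch = features[0], adv_features[0]
    else:
        clean_ch, adv_ch = features[..., 0], adv_features[..., 0]
    fig, axes = plt.subplots(1, 2, figsize=(8, 4))
    axes[0].imshow(clean_ch)
    axes[0].set_title('clean')
    axes[1].imshow(adv_ch)
    axes[1].set_title('adversarial')
    fig.savefig(os.path.join(output_dir, file_prefix + 'ch0.png'))
    plt.close(fig)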

# `adv` is assumed to be the final adversarial tensor and `image_np` the clean
# image as a NumPy array in [0, 1] with shape CxHxW.
adv_np = variable_to_numpy(adv)

image_pil = Image.fromarray(
    np.transpose((adv_np * 255).astype(np.uint8), (1, 2, 0)))
image_pil.save(os.path.join('results', 'adv_at_{0:02d}.png'.format(10)))

# Perturbation size on the 0-255 pixel scale.
linf = int(np.max(np.abs(image_np - adv_np)) * 255)      # largest per-pixel change
print('linf: ', linf)
l1 = np.mean(np.abs(image_np - adv_np)) * 255            # mean absolute change
print('l1: ', l1)
l2 = np.sqrt(np.mean(((image_np - adv_np) * 255) ** 2))  # root-mean-square change
print('l2: ', l2)
print(" ")