# Evaluate the attack: run the black-box test model (DenseNet-121 with
# ImageNet weights) on each clean/adversarial image pair and compare the
# predicted labels.  Images are loaded in channels-first [0, 1] format.
test_model = torchvision.models.densenet121(
    pretrained='imagenet').cuda().eval()

success_attacks = 0
# Hoisted out of the loop: invariant, and now defined even for an empty list.
total_samples = len(images_name)
for idx, temp_image_name in enumerate(tqdm(images_name)):
    ori_img_path = os.path.join(dataset_dir_ori, temp_image_name)
    adv_img_path = os.path.join(dataset_dir_adv, temp_image_name)

    # Ground-truth label = model's prediction on the clean image.
    image_ori_np = load_image(data_format='channels_first',
                              shape=(224, 224),
                              bounds=(0, 1),
                              abs_path=True,
                              fpath=ori_img_path)
    image_ori_var = numpy_to_variable(image_ori_np)
    gt_out = test_model(image_ori_var).detach().cpu().numpy()
    gt_label = np.argmax(gt_out)

    # Prediction on the adversarial counterpart of the same image.
    image_adv_np = load_image(data_format='channels_first',
                              shape=(224, 224),
                              bounds=(0, 1),
                              abs_path=True,
                              fpath=adv_img_path)
    image_adv_var = numpy_to_variable(image_adv_np)
    pd_out = test_model(image_adv_var).detach().cpu().numpy()
    pd_label = np.argmax(pd_out)

    # BUG FIX: the counter was initialized but never updated.  An attack
    # succeeds when the adversarial image flips the model's prediction.
    if pd_label != gt_label:
        success_attacks += 1

    # L-infinity perturbation in 8-bit pixel units (inputs are in [0, 1]).
    linf = int(np.max(np.abs(image_ori_np - image_adv_np)) * 255)
    print('linf: ', linf)
# Load the ImageNet index -> class-name mapping.  The labels file stores a
# Python dict literal; ast.literal_eval parses it without the arbitrary
# code-execution risk that eval() carries for a file read from disk.
import ast

with open('labels.txt', 'r') as inf:
    imagenet_dict = ast.literal_eval(inf.read())

# Surrogate model for the dispersion attack; `internal` selects the
# intermediate layer indices (0..28) whose activations the attack uses.
model = Vgg16()
internal = list(range(29))

attack = DispersionAttack_opt(model, epsilon=16. / 255, steps=200)

image_np = load_image(data_format='channels_first', fname='example.png')

# Save a copy of the clean image: CHW float in [0, 1] -> HWC uint8 PNG.
image_pil = Image.fromarray(
    np.transpose((image_np * 255).astype(np.uint8), (1, 2, 0)))
image_pil.save(os.path.join("results", 'original.png'))

image_var = numpy_to_variable(image_np)

# Forward pass that returns both the requested internal activations and
# the final logits.
internal_logits_var, pred_nat_var = model.prediction(image_var,
                                                     internal=internal)

pred_nat = pred_nat_var.detach().cpu().numpy()
gt_label = np.argmax(pred_nat)
print(gt_label)
pred_cls = imagenet_dict[gt_label]
print(pred_cls)
'''
for layer_idx, intermediate_logit_var in enumerate(tqdm(internal_logits_var)):
    intermediate_features = intermediate_logit_var[0].detach().cpu().numpy()
    visualize_features(intermediate_features, output_dir='results', file_prefix='ori_{0:02d}_'.format(layer_idx), data_format='channels_first', only_first_channel=True)
'''
# ---- Beispiel #3 ----
# (Separator from the scraped example source; the stray "0" that followed
# it was extraction residue, not code.)
# Per-layer gradient probe: push one image through VGG-16, take the
# activation of internal layer k, and backpropagate its standard deviation
# to obtain an input-space gradient.
cuda = torch.device('cuda:0')

# x_image = Variable(torch.zeros(1, 3, 244, 244), requires_grad=True)
'''
image, label = foolbox.utils.imagenet_example(data_format='channels_first')
origin = np.transpose(image, (1, 2, 0)).astype(np.uint8)
Image.fromarray(origin).save('/home/yunhan/Documents/bh-asia/images/0000.png')
'''

# Load and preprocess: BGR (OpenCV default) -> RGB, HWC -> CHW float32.
# ROBUSTNESS FIX: the [..., ::-1] channel flip creates a negative-stride
# view, and .astype(..., order='K') can preserve that layout;
# torch.from_numpy rejects negative-stride arrays, so force a contiguous
# copy before handing the array to numpy_to_variable.
image = cv2.imread("images/origin.jpg")
image = cv2.resize(image, (224, 224))
image = np.ascontiguousarray(
    np.transpose(image[..., ::-1], (2, 0, 1)), dtype=np.float32)

# NOTE(review): pixel values remain in [0, 255] here, unlike the other
# examples in this file which use [0, 1] bounds -- confirm intended.
x_image = numpy_to_variable(image, cuda)

model = Vgg16()

k = 8  # index of the internal layer whose activation is probed

# model(x) appears to return a sequence of per-layer activations; element k
# is the probed layer.
pred = model(x_image)[k]
loss = pred.std()
#loss = torch.norm(pred, 2)
loss.backward(retain_graph=True)
grad = x_image.grad.data
print(k, pred.shape, loss)

# NOTE(review): this aliases x_image rather than copying it; if x_pert is
# later perturbed in place, x_image changes too -- confirm intended.
x_pert = x_image
for i in range(8, 8):