def demo(path):
    """Classify the image at *path* with the module-level net and print its label.

    :param path: filesystem path to an image readable by PIL.
    """
    # `with` guarantees the label file is closed (the original leaked the handle).
    with open('./data/labels.txt', 'r') as f:
        labels = f.read().split('\n')
    print(labels)
    net.eval()
    img = Image.open(path).convert('RGB')
    # Add a leading batch dimension and move to the active device.
    img = transform(img)[np.newaxis, :].to(device)
    _, out = net(img).max(1)
    print(labels[out[0]])
# NOTE(review): orphaned `else:` — the matching `if` (presumably a
# torch.cuda.is_available() check, as in generate() below — TODO confirm)
# was lost when this snippet was assembled; this fragment is not
# syntactically valid as it stands.
else:
    device = 'cpu'

net.eval()
# `net` is assumed to be defined earlier in the original file — TODO confirm.
labels = open(os.path.join('./data/synset_words.txt'), 'r').read().split('\n')


def clip_tensor(A, minv, maxv):
    """Element-wise clamp of tensor *A* into the closed range [minv, maxv].

    :param A: input tensor.
    :param minv: scalar lower bound.
    :param maxv: scalar upper bound.
    :return: a new tensor with every element clipped into [minv, maxv].
    """
    # torch.clamp is equivalent to the original torch.max/torch.min pair but
    # avoids allocating two temporary `ones` tensors, and works for tensors
    # on any device (the original `torch.ones` temporaries were CPU-only).
    return torch.clamp(A, minv, maxv)


def clip(x):
    """Clamp tensor *x* into the displayable pixel range [0, 255]."""
    # PEP 8: use a named `def` rather than assigning a lambda to a name.
    return clip_tensor(x, 0, 255)

def _load_im(path):
    # Read an image from disk and apply the shared preprocessing pipeline.
    return transform(Image.open(path).convert('RGB'))


im1 = _load_im('./data/test_im1.jpeg')
im2 = _load_im('./data/test_im2.jpg')
im3 = _load_im('./data/test_im3.jpg')
im4 = _load_im('./data/test_im4.jpg')

# Stack the four preprocessed images into a single (4, 3, 224, 224) batch.
imgs = torch.stack((im1, im2, im3, im4))

# Inverse of the training normalization: maps a normalized network input
# back to a viewable PIL image.
# NOTE(review): `mean` and `std` are assumed to be the normalization
# constants defined elsewhere in the original file — TODO confirm.
tf = transforms.Compose([
    # Undo the std division first, then the mean subtraction. The original
    # had `mean` and `std` swapped between the two Normalize steps and
    # passed lazy `map(...)` iterators, which Normalize cannot consume
    # under Python 3 — use list comprehensions instead.
    transforms.Normalize(mean=[0, 0, 0], std=[1 / s for s in std]),
    transforms.Normalize(mean=[-m for m in mean], std=[1, 1, 1]),
    transforms.Lambda(clip),
    transforms.ToPILImage(),
])  # the closing bracket was missing in the scraped snippet
def generate(path, dataset, testset, net, delta=0.2, max_iter_uni=np.inf, xi=10, p=np.inf, num_classes=10, overshoot=0.2, max_iter_df=20, batch_size=32):
    '''
    Compute a universal adversarial perturbation (UAP) for `net`.

    :param path: directory containing the dataset list files.
    :param dataset: filename of the training list ("<image path> <label>" per line).
    :param testset: filename of the test list, same format.
    :param net: classifier under attack; run in eval() mode.
    :param delta: stopping threshold — iterate until fooling_rate > 1 - delta.
    :param max_iter_uni: maximum number of passes over the training images.
    :param xi: radius of the L_p ball the perturbation is projected onto.
    :param p: norm used by project_lp (e.g. np.inf or 2).
    :param num_classes: number of classes explored by deepfool.
    :param overshoot: deepfool overshoot factor.
    :param max_iter_df: maximum deepfool iterations per image.
    :param batch_size: test-loader batch size for the fooling-rate estimate
        (new backward-compatible parameter; previously hard-coded to 32).
    :return: the universal perturbation `v` as a (224, 224, 3) ndarray.
    '''
    net.eval()
    if torch.cuda.is_available():
        device = 'cuda'
        net.cuda()
        cudnn.benchmark = True
    else:
        device = 'cpu'

    dataset = os.path.join(path, dataset)
    testset = os.path.join(path, testset)
    if not os.path.isfile(dataset):
        print("Trainingdata of UAP does not exist, please check!")
        sys.exit()
    if not os.path.isfile(testset):
        print("Testingdata of UAP does not exist, please check!")
        sys.exit()

    def _read_list(list_path):
        # Parse "<image path> <label>" lines into (path, int label) tuples.
        # Blank lines are skipped (the original crashed on them with IndexError).
        samples = []
        with open(list_path, 'r') as f:
            for line in f:
                words = line.strip().split()
                if words:
                    samples.append((words[0], int(words[1])))
        return samples

    img_trn = _read_list(dataset)
    img_tst = _read_list(testset)
    num_img_trn = len(img_trn)
    num_img_tst = len(img_tst)
    order = np.arange(num_img_trn)

    v = np.zeros([224, 224, 3])
    fooling_rate = 0.0
    epoch = 0  # renamed from `iter`, which shadowed the builtin

    # One outer iteration = one shuffled pass over the training images.
    while fooling_rate < 1 - delta and epoch < max_iter_uni:
        np.random.shuffle(order)
        print("Starting pass number ", epoch)
        for k in order:
            # Prediction on the clean image.
            cur_img = Image.open(img_trn[k][0]).convert('RGB')
            cur_img1 = transform(cur_img)[np.newaxis, :].to(device)
            r2 = int(net(cur_img1).max(1)[1])
            torch.cuda.empty_cache()

            # Prediction on the image with the current perturbation added.
            per_img = Image.fromarray(cut(cur_img) + v.astype(np.uint8))
            per_img1 = convert(per_img)[np.newaxis, :].to(device)
            r1 = int(net(per_img1).max(1)[1])
            torch.cuda.empty_cache()

            # If v does not yet fool the net on this image, push it further
            # with deepfool, then re-project onto the L_p ball of radius xi.
            if r1 == r2:
                print(">> k =", np.where(k == order)[0][0], ', pass #', epoch, end='      ')
                dr, iter_k, label, k_i, pert_image = deepfool(per_img1[0], net, num_classes=num_classes, overshoot=overshoot, max_iter=max_iter_df)

                # Only accumulate when deepfool converged before its cap.
                if iter_k < max_iter_df - 1:
                    v[:, :, 0] += dr[0, 0, :, :]
                    v[:, :, 1] += dr[0, 1, :, :]
                    v[:, :, 2] += dr[0, 2, :, :]
                    v = project_lp(v, xi, p)

        epoch = epoch + 1

        with torch.no_grad():
            # Estimate the fooling rate on the held-out test set.
            est_labels_orig = torch.tensor(np.zeros(0, dtype=np.int64))
            est_labels_pert = torch.tensor(np.zeros(0, dtype=np.int64))

            test_data_orig = MyDataset(txt=testset, transform=transform)
            test_loader_orig = DataLoader(dataset=test_data_orig, batch_size=batch_size, pin_memory=True)
            test_data_pert = MyDataset(txt=testset, pert=v, transform=transform)
            test_loader_pert = DataLoader(dataset=test_data_pert, batch_size=batch_size, pin_memory=True)

            def _predict_all(loader, acc):
                # Run the net over a loader, collecting argmax labels on CPU.
                for inputs, _ in loader:
                    outputs = net(inputs.to(device))
                    acc = torch.cat((acc, outputs.max(1)[1].cpu()))
                torch.cuda.empty_cache()
                return acc

            est_labels_orig = _predict_all(test_loader_orig, est_labels_orig)
            est_labels_pert = _predict_all(test_loader_pert, est_labels_pert)

            # Fooling rate = fraction of test images whose prediction changed.
            fooling_rate = float(torch.sum(est_labels_orig != est_labels_pert)) / float(num_img_tst)
            print("FOOLING RATE: ", fooling_rate)
            np.save('v' + str(epoch) + '_' + str(round(fooling_rate, 4)), v)

    return v
# Example #4 — scraped-snippet boundary; the following fragment came from a
# separate script.
    # NOTE(review): the enclosing `if` (presumably
    # `if not os.path.isfile(file_perturbation):` — TODO confirm) was lost
    # when this snippet was extracted; the fragment is not syntactically
    # valid on its own.
    v = generate(args.PATH, 'dataset4u-trn.txt', 'dataset4u-val.txt', net, max_iter_uni=10, delta=0.1, p=np.inf, num_classes=25, overshoot=0.1, max_iter_df=500, xi=args.xi, batch_size=args.batch_size)
    # Saving the universal perturbation
    np.save('./data/universal.npy', v)
else:
    # Reuse a previously computed perturbation instead of regenerating it.
    print('   Found a pre-computed universal perturbation at', file_perturbation)
    v = np.load(file_perturbation)


# Sanity-check the universal perturbation on a single held-out image.
testimg = "./data/test_im4.jpg"
print('>> Testing the universal perturbation on', testimg)
labels = open('./data/labels.txt', 'r').read().split('\n')

testimgToInput = Image.open(testimg).convert('RGB')
# Add v in pixel space and clip back into the valid [0, 255] byte range.
pertimgToInput = np.clip(cut(testimgToInput) + v, 0, 255)
pertimg = Image.fromarray(pertimgToInput.astype(np.uint8))


def _top1_label(pil_img):
    # Preprocess a PIL image, run the net, and map the argmax to its label.
    batch = transform(pil_img)[np.newaxis, :].to(device)
    _, top = net(batch).max(1)
    return labels[top[0]]


label_orig = _top1_label(testimgToInput)
label_pert = _top1_label(pertimg)

# Show original and perturbed image
# NOTE(review): this figure is incomplete — the second subplot (presumably
# the perturbed image) appears to have been truncated from this snippet.
plt.figure()
plt.subplot(1, 2, 1)
plt.imshow(cut(testimgToInput), interpolation=None)
import torchvision.transforms as transforms
import numpy as np
import matplotlib.pyplot as plt
import torch
import torchvision.models as models
from PIL import Image
from deepfool import deepfool
import os
from transform_file import transform

# --- DeepFool single-image demo (second scraped script) ---
net = models.resnet34(pretrained=True)
net.eval()

im_orig = Image.open('./data/test_im2.jpg')

im = transform(im_orig)
# deepfool positional args: num_classes=10, overshoot=0.2, max_iter=50.
r, loop_i, label_orig, label_pert, pert_image = deepfool(im, net, 10, 0.2, 50)

# `with` closes the label file (the original leaked the handle).
with open(os.path.join('./data/synset_words.txt'), 'r') as f:
    labels = f.read().split('\n')

# np.int was deprecated in NumPy 1.20 and removed in 1.24 — it was only
# ever an alias for the builtin int, so use int directly.
str_label_orig = labels[int(label_orig)].split(',')[0]
str_label_pert = labels[int(label_pert)].split(',')[0]

print("Original label = ", str_label_orig)
print("Perturbed label = ", str_label_pert)


def clip_tensor(A, minv, maxv):
    """Element-wise clamp of tensor *A* into the closed range [minv, maxv].

    :param A: input tensor.
    :param minv: scalar lower bound.
    :param maxv: scalar upper bound.
    :return: a new tensor with every element clipped into [minv, maxv].
    """
    # torch.clamp is equivalent to the original torch.max/torch.min pair but
    # avoids allocating two temporary `ones` tensors, and works for tensors
    # on any device (the original `torch.ones` temporaries were CPU-only).
    return torch.clamp(A, minv, maxv)