Example #1
def fgsm_attack_test(model, image, target, device, epsilon=0.01, image_size=(128, 128)):
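    # Runs the fgsm_attack helper on a single image and returns the prediction
    # difference, a matplotlib figure comparing the original image, the
    # perturbed image and the noise, the summed noise values, and the
    # perturbed image as a numpy array.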
    # image = image.unsqueeze(0)
    image = image.type(torch.FloatTensor)
    image = image.to(device)
    diff, perturbed_image, steer, adv_output, noise = fgsm_attack(
        model, image, target, device)
    plt = generate_image(
        image.detach().cpu().numpy().transpose(0, 2, 3, 1)[0, :, :, :],
        perturbed_image.detach().cpu().numpy().transpose(0, 2, 3, 1)[0, :, :, :],
        noise.detach().cpu().numpy().transpose(0, 2, 3, 1)[0, :, :, :],
        steer.detach().cpu().numpy()[0][0],
        adv_output.detach().cpu().numpy()[0][0],
        image_size)
    return diff, plt, np.sum(noise.detach().cpu().numpy()), perturbed_image.detach().cpu().numpy()
Example #2
def fgsm_attack_(model,
                 image,
                 target,
                 device,
                 epsilon=0.01,
                 image_size=(128, 128)):
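    # Iterative FGSM: repeatedly perturbs the input with the sign of the input
    # gradient so that the model's steering prediction is pushed toward
    # (original prediction - target), and returns the perturbed image.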
    # image = image.unsqueeze(0)
    image = image.type(torch.FloatTensor)
    image = image.to(device)
    steer = model(image)
    perturbed_image = image.clone()
    # steer = steer.type(torch.FloatTensor)
    # if (steer.item() > -0.1):
    #     target_steer = steer + target
    # else:
    #     target_steer = steer - target
    target_steer = steer - target
    target_steer = target_steer.to(device)
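    # Track gradients with respect to the input so the FGSM steps can use them.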
    image.requires_grad = True
    output = model(image)
    adv_output = output.clone()
    diff = 0
    # while abs(diff) < abs(target):
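    # Take a fixed number of FGSM steps toward the shifted steering target.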
    for i in range(5):
        loss = F.mse_loss(adv_output, target_steer)
        model.zero_grad()
        loss.backward(retain_graph=True)
        image_grad = image.grad.data
        perturbed_image = fgsm_attack(perturbed_image, epsilon, image_grad)
        adv_output = model(perturbed_image)
        diff = abs(adv_output.detach().cpu().numpy() -
                   output.detach().cpu().numpy())
    # diff, perturbed_image, steer, adv_output = fgsm_attack(model, image, target, device, epsilon=epsilon, image_size=image_size)
    # noise = torch.clamp(perturbed_image - image, 0, 1)
    return perturbed_image
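
A minimal usage sketch (not part of the original example), assuming the imports used by these examples (torch, etc.), a trained BaseCNN checkpoint at 'baseline.pt', and the single-step fgsm_attack helper that fgsm_attack_ calls internally:

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = BaseCNN().to(device)
model.load_state_dict(torch.load('baseline.pt', map_location=device))
model.eval()
image = torch.rand(1, 3, 128, 128)  # stand-in for a preprocessed camera frame
# Shift the predicted steering angle by roughly -0.3 with the iterative FGSM above.
adv_image = fgsm_attack_(model, image, target=0.3, device=device, epsilon=0.01)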
Example #3
def exp1_fig():
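    # Loads the baseline steering model, attacks a single test frame with five
    # methods (FGSM, optimization-based, universal optimized noise, AdvGAN,
    # and universal AdvGAN), and plots each perturbed image (top row) above
    # its 5x-amplified noise pattern (bottom row).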
    model = BaseCNN()
    model_name = 'baseline'
    model.load_state_dict(torch.load('baseline.pt'))
    device = torch.device("cuda" if (torch.cuda.is_available()) else "cpu")
    model = model.to(device)
    model.eval()
    target = 0.3
    image = imread(
        'F:\\udacity-data\\testing\\center\\1479425441182877835.jpg')[200:, :]
    image = imresize(image, (128, 128))
    image = image / 255.
    image = torch.from_numpy(image.transpose((2, 0, 1))).unsqueeze(0)
    image = image.type(torch.FloatTensor)
    image = image.to(device)
    output = model(image)
    print(output)

    advGAN_generator = Generator(3, 3, model_name).to(device)
    advGAN_uni_generator = Generator(3, 3, model_name).to(device)
    
    # FGSM attack

    _, perturbed_image_fgsm, _, adv_output_fgsm, noise_fgsm = fgsm_attack(model, image, target, device)
    print('fgsm', adv_output_fgsm)
    perturbed_image_fgsm = perturbed_image_fgsm.squeeze(0).detach().cpu().numpy().transpose(1, 2, 0)
    noise_fgsm = noise_fgsm.squeeze(0).detach().cpu().numpy().transpose(1, 2, 0)
    perturbed_image_fgsm = draw(perturbed_image_fgsm, adv_output_fgsm.item(), output.item())
    perturbed_image_fgsm = imresize(perturbed_image_fgsm, (128, 128))
    # optimization-based attack
    perturbed_image_opt, noise_opt, _, adv_output_opt = optimized_attack(
        model, target, image, device)
    perturbed_image_opt = perturbed_image_opt.squeeze(0).detach().cpu().numpy().transpose(1, 2, 0)
    print('opt', adv_output_opt)

    noise_opt = noise_opt.squeeze(0).detach().cpu().numpy().transpose(1, 2, 0)
    perturbed_image_opt = draw(perturbed_image_opt, adv_output_opt.item(), output.item())
    perturbed_image_opt = imresize(perturbed_image_opt, (128, 128))
    # universal optimized-noise attack (precomputed perturbation)
    noise_optu = np.load(model_name + '_universal_attack_noise.npy')
    noise_optu = torch.from_numpy(noise_optu).type(torch.FloatTensor).to(device)
    perturbed_image_optu = image + noise_optu
    perturbed_image_optu = torch.clamp(perturbed_image_optu, 0, 1)
    adv_output_optu = model(perturbed_image_optu)
    print('universal', adv_output_optu)    
    perturbed_image_optu = perturbed_image_optu.squeeze(0).detach().cpu().numpy().transpose(1, 2, 0)
    noise_optu = noise_optu.squeeze(0).detach().cpu().numpy().transpose(1, 2, 0)
    perturbed_image_optu = draw(perturbed_image_optu, adv_output_optu.item(), output.item())
    perturbed_image_optu = imresize(perturbed_image_optu, (128, 128))
    # AdvGAN attack (generator conditioned on the input image)
    advGAN_generator.load_state_dict(torch.load(
        './models/' + model_name + '_netG_epoch_60.pth'))
    noise_advGAN = advGAN_generator(image)
    perturbed_image_advGAN = image + torch.clamp(noise_advGAN, -0.3, 0.3)
    perturbed_image_advGAN = torch.clamp(perturbed_image_advGAN, 0, 1)
    adv_output_advGAN = model(perturbed_image_advGAN)
    print('advGAN', adv_output_advGAN)
    perturbed_image_advGAN = perturbed_image_advGAN.squeeze(0).detach().cpu().numpy().transpose(1, 2, 0)
    noise_advGAN = noise_advGAN.squeeze(0).detach().cpu().numpy().transpose(1, 2, 0)
    perturbed_image_advGAN = draw(perturbed_image_advGAN, adv_output_advGAN.item(), output.item())
    perturbed_image_advGAN = imresize(perturbed_image_advGAN, (128, 128))
    # universal AdvGAN attack (generator driven by a fixed noise seed)
    advGAN_uni_generator.load_state_dict(torch.load(
        './models/' + model_name + '_universal_netG_epoch_60.pth'))
    noise_seed = np.load(model_name + '_noise_seed.npy')
    noise_advGAN_U = advGAN_uni_generator(torch.from_numpy(
        noise_seed).type(torch.FloatTensor).to(device))
    perturbed_image_advGAN_U = image + torch.clamp(noise_advGAN_U, -0.3, 0.3)
    perturbed_image_advGAN_U = torch.clamp(perturbed_image_advGAN_U, 0, 1)
    adv_output_advGAN_U = model(perturbed_image_advGAN_U)    
    print('advGAN_uni', adv_output_advGAN_U)    
    perturbed_image_advGAN_U = perturbed_image_advGAN_U.squeeze(0).detach().cpu().numpy().transpose(1, 2, 0)
    noise_advGAN_U = noise_advGAN_U.squeeze(0).detach().cpu().numpy().transpose(1, 2, 0)
    perturbed_image_advGAN_U = draw(perturbed_image_advGAN_U, adv_output_advGAN_U.item(), output.item())
    perturbed_image_advGAN_U = imresize(perturbed_image_advGAN_U, (128, 128))

    # Top row: perturbed images; bottom row: noise amplified 5x for visibility.
    perturbed_images = [perturbed_image_fgsm, perturbed_image_opt,
                        perturbed_image_optu, perturbed_image_advGAN,
                        perturbed_image_advGAN_U]
    noises = [noise_fgsm, noise_opt, noise_optu, noise_advGAN, noise_advGAN_U]
    for i, perturbed in enumerate(perturbed_images):
        plt.subplot(2, 5, i + 1)
        plt.imshow(perturbed)
        plt.xticks([])
        plt.yticks([])
    for i, noise in enumerate(noises):
        plt.subplot(2, 5, i + 6)
        plt.imshow(np.clip(noise * 5, 0, 1))
        plt.xticks([])
        plt.yticks([])
    
    plt.tight_layout(pad=0.5, w_pad=0, h_pad=0)
    plt.show()
def fgsm_test(model, image, target, device):
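    # Measures the average per-frame time of generating an FGSM adversarial
    # example and predicting on it; `test_generator` is assumed to be a global
    # DataLoader over the test set, and 5614 is presumably its frame count.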
    # One attack up front (presumably a warm-up before the timing loop below).
    _, perturbed_image_fgsm, _, adv_output_fgsm, noise_fgsm = fgsm_attack(
        model, image, target, device)

    t0 = time.time()
    for _, sample_batched in enumerate(test_generator):
        batch_x = sample_batched['image']
        batch_x = batch_x.type(torch.FloatTensor)
        batch_x = batch_x.to(device)
        _, adv_x, _, _, _ = fgsm_attack(model, batch_x, target, device)
        output = model(adv_x)

    t1 = time.time()
    print("Total time running: %s seconds for fgsm prediction" % (str(
        (t1 - t0) / 5614)))

def attack_detection(model_name, net, test_data_loader, attack, threshold=0.05):
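    # Feature-squeezing detection: an input is flagged as adversarial when the
    # model's predictions on the raw frame and on a median-filtered copy differ
    # by more than `threshold`; flags are counted separately for clean frames
    # (count_ori) and for successful adversarial frames (count_adv out of total).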
    advGAN_generator = Generator(3,3, model_name).to(device)
    advGAN_generator.load_state_dict(torch.load('./models/' + model_name + '_netG_epoch_60.pth'))         
    advGAN_generator.eval() 

    advGAN_uni_generator = Generator(3,3, model_name).to(device)
    advGAN_uni_generator.load_state_dict(torch.load('./models/' + model_name + '_universal_netG_epoch_60.pth'))    
    advGAN_uni_generator.eval()
    advGAN_uni_noise_seed = np.load(model_name + '_noise_seed.npy')


    opt_uni_noise = np.load(model_name + '_universal_attack_noise.npy')

    count_ori = 0   # clean frames flagged as adversarial (false positives)
    count_adv = 0   # adversarial frames flagged as adversarial (detections)
    total = 0       # adversarial frames whose prediction shifted by >= 0.3

    for _, example in enumerate(test_data_loader):
        # example = test_dataset[0]
        example_image = np.transpose(example['image'].squeeze(0).numpy(), (1, 2, 0))
        # example_image = example['image'].numpy()
        # squeeze_image = reduce_bit(example_image, 4)
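        # Build a median-filtered ("squeezed") copy of the clean frame.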
        squeeze_image = median_filter_np(example_image, 2)
        example_image_tensor = torch.from_numpy(np.transpose(example_image, (-1, 0, 1))).unsqueeze(0)
        example_image_tensor = example_image_tensor.type(torch.FloatTensor)
        example_image_tensor = example_image_tensor.to(device)
        squeeze_image_tensor = torch.from_numpy(np.transpose(squeeze_image, (-1, 0, 1))).unsqueeze(0)
        squeeze_image_tensor = squeeze_image_tensor.type(torch.FloatTensor)
        squeeze_image_tensor = squeeze_image_tensor.to(device)
        if (abs(net(example_image_tensor) - net(squeeze_image_tensor)) > threshold):
            count_ori += 1

        # b = net(example_image_tensor)
        if attack == 'fgsm':
            _, perturbed_image, y_pred, y_adv, _ = fgsm_attack(net, example_image_tensor, target, device)
        elif attack == 'advGAN':
            noise = advGAN_generator(example_image_tensor)
            perturbed_image = example_image_tensor + torch.clamp(noise, -0.3, 0.3)
            perturbed_image = torch.clamp(perturbed_image, 0, 1)
            y_pred = net(example_image_tensor)
            y_adv = net(perturbed_image)
        elif attack == 'advGAN_uni':
            noise = advGAN_uni_generator(torch.from_numpy(advGAN_uni_noise_seed).type(torch.FloatTensor).to(device))
            perturbed_image = example_image_tensor + torch.clamp(noise, -0.3, 0.3)
            perturbed_image = torch.clamp(perturbed_image, 0, 1)
            y_pred = net(example_image_tensor)
            y_adv = net(perturbed_image)
        elif attack == 'opt_uni':
            noise = torch.from_numpy(opt_uni_noise).type(torch.FloatTensor).to(device)
            perturbed_image = example_image_tensor + torch.clamp(noise, -0.3, 0.3)
            perturbed_image = torch.clamp(perturbed_image, 0, 1)
            y_pred = net(example_image_tensor)
            y_adv = net(perturbed_image)
        elif attack == 'opt':
            perturbed_image, _, y_pred, y_adv = optimized_attack(net, target, example_image_tensor, device)
        # # print(net(perturbed_image))

        if abs(y_pred - y_adv) >= 0.3:
            total += 1
            perturbed_image_np = perturbed_image.squeeze().detach().cpu().numpy()
            perturbed_image_np = np.transpose(perturbed_image_np, (1, 2, 0))
            squeeze_perturbed_image = median_filter_np(perturbed_image_np, 2)
            squeeze_perturbed_image = torch.from_numpy(np.transpose(squeeze_perturbed_image, (-1, 0, 1))).unsqueeze(0)
            squeeze_perturbed_image = squeeze_perturbed_image.type(torch.FloatTensor)
            squeeze_perturbed_image = squeeze_perturbed_image.to(device)
            if (abs(net(perturbed_image) - net(squeeze_perturbed_image)) > threshold):
                count_adv += 1      
    print(attack, total, count_ori, count_adv)
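
A minimal invocation sketch (not from the original source); it assumes the globals that attack_detection relies on (device, target) are set as in the examples above, and that test_data_loader is a DataLoader yielding dicts with an 'image' tensor, whose construction is omitted here:

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
target = 0.3  # steering-angle shift used by the attacks
net = BaseCNN().to(device)
net.load_state_dict(torch.load('baseline.pt', map_location=device))
net.eval()
# test_data_loader: test-set DataLoader; its construction is omitted here.
for attack in ('fgsm', 'opt', 'opt_uni', 'advGAN', 'advGAN_uni'):
    attack_detection('baseline', net, test_data_loader, attack, threshold=0.05)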