Example #1
def main(image_path, ckpt_path, predict_status=False):
    images = tf.placeholder(tf.float32, (None, 224, 224, 3))
    preprocessed_images = vgg_preprocessing(images)
    logits, _ = vgg.vgg_19(preprocessed_images, is_training=False)
    restorer = tf.train.Saver(tf.trainable_variables())

    image = open_image(image_path)
    p, ext = os.path.splitext(image_path)
    adv_path = p + '-adv' + ext
    pert_path = p + '-pert' + ext

    with tf.Session() as session:
        restorer.restore(session, ckpt_path)
        model = TensorFlowModel(images, logits, (0, 255))
        label = np.argmax(model.predictions(image))
        print('label:', label)
        if predict_status:
            return

        # target_class = 22
        # criterion = TargetClassProbability(target_class, p=0.99)

        # attack = LBFGSAttack(model, criterion)

        # attack = FGSM(model, criterion)
        attack = FGSM(model)

        # attack = MomentumIterativeAttack(model, criterion)
        # attack = MomentumIterativeAttack(model)

        # attack = SinglePixelAttack(model)
        # attack = LocalSearchAttack(model)

        adversarial = attack(image, label=label)
        new_label = np.argmax(model.predictions(adversarial))
        print('new label:', new_label)

        # Compute the perturbation before casting to uint8; uint8 subtraction
        # would wrap around for negative differences.
        pert = np.abs(adversarial - image).astype(np.uint8)
        image = image.astype(np.uint8)
        adversarial = adversarial.astype(np.uint8)

        save_image(adversarial, adv_path)
        save_image(pert, pert_path)

        # show images
        plt.subplot(1, 3, 1)
        plt.imshow(image)
        plt.subplot(1, 3, 2)
        plt.imshow(adversarial)
        plt.subplot(1, 3, 3)
        plt.imshow(pert)
        plt.show()
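
The commented-out lines above hint at a targeted variant. A minimal sketch of wiring that criterion into the same foolbox 1.x-style API used in this example (reusing model, image and label from above; target class 22 and p=0.99 are simply the values from the comments):

# Sketch only: targeted attack with the criterion from the comments above.
from foolbox.attacks import LBFGSAttack
from foolbox.criteria import TargetClassProbability

criterion = TargetClassProbability(22, p=0.99)     # target class from the comment
targeted_attack = LBFGSAttack(model, criterion)    # model: the TensorFlowModel above
targeted_adv = targeted_attack(image, label=label)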
Example #2
def run():
    run_button.config(state='disabled')
    test_loader = load_data()
    torch_model = select_model()
    if use_cuda.get():
        log_text = 'Move model to cuda.'
        send_information(Receive_window, log_text)
        torch_model = torch_model.cuda()

    torch_model.eval()
    fmodel = PyTorchModel(torch_model, bounds=[0, 1], num_classes=10)

    method = attack_comboxlist.get()
    log_text = 'Perform {} attack... \n'.format(method)
    send_information(Receive_window, log_text)
    if method == 'FGSM':
        from foolbox.attacks import FGSM
        attack = FGSM(model=fmodel, criterion=Misclassification())
    elif method == 'iFGSM':
        from foolbox.attacks import IterativeGradientSignAttack
        attack = IterativeGradientSignAttack(model=fmodel,
                                             criterion=Misclassification())
    elif method == 'DeepFool':
        from foolbox.attacks import DeepFoolAttack
        attack = DeepFoolAttack(model=fmodel, criterion=Misclassification())

    attack_root = 'attacks/' + comboxlist.get() + '/' + method + '_ms'
    hacked_path = attack_root + '/hacked'
    hacked_data_path = attack_root + '/hacked_data'
    original_path = attack_root + '/original'
    original_data_path = attack_root + '/original_data'
    if not os.path.exists(attack_root):
        os.makedirs(attack_root)
        os.mkdir(hacked_path)
        os.mkdir(hacked_data_path)
        os.mkdir(original_path)
        os.mkdir(original_data_path)

    count = 1
    for data, label in test_loader:
        data = data[0].numpy()
        label = label.item()
        adversarial = attack(data, label)

        if adversarial is not None:
            if np.linalg.norm(adversarial - data) == 0:
                continue
            adv_label = np.argmax(fmodel.predictions(adversarial))

            if np.linalg.norm(adversarial - data) > 0:
                dataset = comboxlist.get()
                if dataset == 'MNIST':
                    image_data = adversarial[0] * 255
                    ori_data = data[0] * 255

                if dataset == 'CIFAR10':
                    image_data = adversarial.transpose(1, 2, 0) * 255
                    ori_data = data.transpose(1, 2, 0) * 255

                if save_adv.get():
                    hackedname = hacked_data_path + '/' + str(
                        count) + '-' + str(label) + '-' + str(
                            adv_label) + ".npy"
                    np.save(hackedname, image_data)
                    image = Image.fromarray(image_data.astype(np.uint8))
                    image.save(
                        "{hackedpath}/{name}-{label}-{adv_label}.png".format(
                            hackedpath=hacked_path,
                            name=count,
                            label=label,
                            adv_label=adv_label))

                if save_ori.get():
                    oriname = original_data_path + '/' + str(
                        count) + '-' + str(label) + ".npy"
                    np.save(oriname, ori_data)
                    oriimage = Image.fromarray(ori_data.astype(np.uint8))
                    oriimage.save("{originalpath}/{name}-{label}.png".format(
                        originalpath=original_path, name=count, label=label))

                count = count + 1

            if count % max(1, int(att_num.get()) // 10) == 0:
                log_text = "Attack: {}/{}".format(count, att_num.get())
                send_information(Receive_window, log_text)
            if count > int(att_num.get()):
                break

    log_text = "Done! The adversarial images and correspoinding data are stored in attacks for next use in step4!"
    send_information(Receive_window, log_text)
    run_button.config(state='normal')
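
Since the saved .npy files are reused in step 4, a quick sanity check is to reload one and confirm it still fools the model. A minimal sketch, assuming an MNIST-style 1x28x28 model and the layout saved above (adversarial[0] * 255):

# Sketch only: reload one stored adversarial example and re-check its prediction.
import numpy as np

adv = np.load(hackedname)              # saved above as adversarial[0] * 255, shape (28, 28)
adv = (adv / 255.0)[np.newaxis, ...]   # back to (1, 28, 28) in [0, 1]
print(np.argmax(fmodel.predictions(adv.astype(np.float32))))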
Example #3
# preds_adv = model.get_probs(adv_x)



model = TensorFlowModel(cnn.inputs, cnn.network, bounds=(0, 255))

from foolbox.criteria import TargetClassProbability

target_class = 9
criterion = TargetClassProbability(target_class, p=0.99)


from foolbox.attacks import FGSM


attack = FGSM(model)
image = train_images[0].reshape((28, 28, 1))
label = np.argmax(model.predictions(image))

adversarial = attack(image, label=label, epsilons=1, max_epsilon=0.03 * 255)


import matplotlib.pyplot as plt

plt.subplot(1, 3, 1)
plt.imshow(image.reshape((28, 28)), cmap='gray', vmin=0, vmax=255)
plt.gca().set_title(label)

plt.subplot(1, 3, 2)
plt.imshow(adversarial.reshape((28, 28)), cmap='gray', vmin=0, vmax=255)
plt.gca().set_title(np.argmax(model.predictions(adversarial)))
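
The excerpt stops after the second panel; a minimal completion, assuming the usual third panel shows the perturbation:

# Sketch only: third panel for the perturbation, plus the final show().
plt.subplot(1, 3, 3)
plt.imshow((adversarial - image).reshape((28, 28)), cmap='gray')
plt.gca().set_title('perturbation')
plt.show()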
Example #4
plot_samples = 100

# Define plot path.
if plot_sample:
    path = './adv_samples/'
    path += dataset
    if not os.path.exists(path):
        os.makedirs(path)

# -------------------------------------------------
# ADVERSARIAL SAMPLE GENERATION
# -------------------------------------------------

# Create Foolbox model from Keras ResNet classifier and FGSM attack type.
foolbox_model = KerasModel(model, (0, 1))
attack = FGSM(foolbox_model)

# Turn all test set samples into adversarial samples.
for i in tqdm(range(len(X_te))):

    # Try to create an adversarial sample.
    adv_sample = attack(np.reshape(X_te[i], orig_dims),
                        label=y_te[i],
                        max_epsilon=max_epsilon)

    # In rare cases, sample generation fails, in which case adv_sample is None.
    if adv_sample is not None:

        # Successful adversarial samples are written back into the original matrix.
        X_te[i] = np.reshape(adv_sample, np.prod(orig_dims))
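
After the loop, X_te holds adversarial versions of every sample the attack succeeded on. A minimal sketch of measuring the resulting accuracy drop, assuming model is the original Keras classifier and y_te holds integer class labels:

# Sketch only: accuracy of the original classifier on the (now adversarial) test set.
import numpy as np

adv_preds = np.argmax(model.predict(X_te.reshape((-1,) + tuple(orig_dims))), axis=1)
print('accuracy on adversarial test set:', np.mean(adv_preds == y_te))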
Example #5
		# print(anepval)
		totalsuccess = 0
		for abool in anepval:
			abool1 = np.array(abool)
			# print(abool1)
			if abool1 == True:
				totalsuccess += 1
		listsuccess.append(totalsuccess)

	listsuccessPGD = listsuccess
	



	# apply FGSM attack
	attack = FGSM()
	epsilons = [0.0, 0.001, 0.01, 0.03, 0.1, 0.3, 0.5, 1.0]
	t0 = time.process_time()
	_, advsFGSM, success = attack(fmodel, images, labels, epsilons=epsilons)
	t1 = time.process_time()
	attacktimeFGSM = t1 - t0
	# print("done with attack")

	# print(success)
	listsuccessFGSM = []
	for anepval in success:
		# print(anepval)
		totalsuccess = 0
		for abool in anepval:
			abool1 = np.array(abool)
			# print(abool1)
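
The per-epsilon counting loops above can be collapsed into a single vectorized reduction on the boolean success tensor returned by foolbox 3.x attacks. A minimal sketch, assuming success is the (num_epsilons, batch_size) tensor from the attack call:

# Sketch only: per-epsilon success counts without the explicit loops.
listsuccessFGSM = success.numpy().sum(axis=-1).tolist()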
Example #6
def foolbox_attack(filter=None,
                   filter_preserve='low',
                   free_parm='eps',
                   plot_num=None):
    # get model.
    model = get_model()
    model = nn.DataParallel(model).to(device)
    model = model.eval()

    preprocessing = dict(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225],
                         axis=-3)
    fmodel = PyTorchModel(model, bounds=(0, 1), preprocessing=preprocessing)

    if plot_num:
        free_parm = ''
        val_loader = get_val_loader(plot_num)
    else:
        # Load images.
        val_loader = get_val_loader(args.attack_batch_size)

    if 'eps' in free_parm:
        epsilons = [0.001, 0.003, 0.005, 0.008, 0.01, 0.1]
    else:
        epsilons = [0.01]
    if 'step' in free_parm:
        steps = [1, 5, 10, 30, 40, 50]
    else:
        steps = [args.iteration]

    for step in steps:
        # Adversarial attack.
        if args.attack_type == 'LinfPGD':
            attack = LinfPGD(steps=step)
        elif args.attack_type == 'FGSM':
            attack = FGSM()

        clean_acc = 0.0

        for i, data in enumerate(val_loader, 0):

            # Sample (attack_batch_size * attack_epochs) images in total for the adversarial attack.
            if i >= args.attack_epochs:
                break

            images, labels = data[0].to(device), data[1].to(device)
            if step == steps[0]:
                clean_acc += (get_acc(
                    fmodel, images, labels
                )) / args.attack_epochs  # accumulate for attack epochs.

            _images, _labels = ep.astensors(images, labels)
            raw_advs, clipped_advs, success = attack(fmodel,
                                                     _images,
                                                     _labels,
                                                     epsilons=epsilons)

            if plot_num:
                grad = torch.from_numpy(
                    raw_advs[0].numpy()).to(device) - images
                grad = grad.clone().detach_()
                return grad

            if filter:
                robust_accuracy = torch.empty(len(epsilons))
                for eps_id in range(len(epsilons)):
                    grad = torch.from_numpy(
                        raw_advs[eps_id].numpy()).to(device) - images
                    grad = grad.clone().detach_()
                    freq = dct.dct_2d(grad)
                    if filter_preserve == 'low':
                        mask = torch.zeros(freq.size()).to(device)
                        mask[:, :, :filter, :filter] = 1
                    elif filter_preserve == 'high':
                        mask = torch.zeros(freq.size()).to(device)
                        mask[:, :, filter:, filter:] = 1
                    masked_freq = torch.mul(freq, mask)
                    new_grad = dct.idct_2d(masked_freq)
                    x_adv = torch.clamp(images + new_grad, 0, 1).detach_()

                    robust_accuracy[eps_id] = (get_acc(fmodel, x_adv, labels))
            else:
                robust_accuracy = 1 - success.float32().mean(axis=-1)
            if i == 0:
                robust_acc = robust_accuracy / args.attack_epochs
            else:
                robust_acc += robust_accuracy / args.attack_epochs

        if step == steps[0]:
            print("sample size is : ",
                  args.attack_batch_size * args.attack_epochs)
            print(f"clean accuracy:  {clean_acc * 100:.1f} %")
            print(
                f"Model {args.model} robust accuracy for {args.attack_type} perturbations with"
            )
        for eps, acc in zip(epsilons, robust_acc):
            print(
                f"  Step {step}, Linf norm ≤ {eps:<6}: {acc.item() * 100:4.1f} %"
            )
        print('  -------------------')
Example #7
#!/usr/bin/env python3

from foolbox.attacks import FGSM
from robust_vision_benchmark import attack_server

fgsm = FGSM()

attack_server(fgsm)
Example #8
    #     out_path = './tianchi2021_adv/pgd_resnet50_at/'
    #     if not os.path.exists(out_path):
    #         os.makedirs(out_path)
    #     pgd = attacker.attack(model, inputs=images, labels=labels)
    #     for t in range(args.batch_size):
    #         pgd = np.transpose(pgd[t].detach().cpu().numpy(), (1, 2, 0))
    #         name = filenames[t]
    #         if not os.path.exists(out_path):
    #             os.makedirs(out_path)
    #         out = out_path + name
    #         scipy.misc.imsave(out, pgd)

    if args.fgsm:
        FGSM = attack_integrated(images, labels, 'FGSM')
        predictions = model(FGSM).argmax(1)
        print(FGSM.shape, FGSM.max(), FGSM.min(), predictions)
        adv_accuracy = (predictions == labels).float().mean()
        print("adv_accuracy ", adv_accuracy)
        accs_advs += adv_accuracy.item()
        if epoch < 5 or (epoch + 1) % 50 == 0:
            print(epoch, "fgsm adv: ", accs_advs / (epoch + 1))
        for t in range(args.batch_size):
            ddn2 = np.transpose(FGSM[t].detach().cpu().numpy(), (1, 2, 0))
            name = filenames[t]
            out_path = './tianchi2021_adv/fgsm_resnet152/'
            if not os.path.exists(out_path):
                os.makedirs(out_path)
            # print(out_path)
            out = out_path + name
            # ddn2 = scipy.misc.imresize(ddn2)
            scipy.misc.imsave(out, ddn2)
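
scipy.misc.imsave was deprecated in SciPy 1.0 and removed in 1.2, so the save call above fails on a current SciPy. A minimal replacement sketch with Pillow, assuming ddn2 is an HWC float array in [0, 1]:

# Sketch only: save with Pillow instead of the removed scipy.misc.imsave.
from PIL import Image
import numpy as np

Image.fromarray((np.clip(ddn2, 0.0, 1.0) * 255).astype(np.uint8)).save(out)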