Example #1
def attack_loader(args, net):

    # Projected Gradient Descent (PGD): iterative attack with each step clipped back into the eps-ball
    if args.attack == "pgd":
        return torchattacks.PGD(model=net,
                                eps=args.eps,
                                alpha=args.eps / args.steps * 2.3,
                                steps=args.steps,
                                random_start=True)

    elif args.attack == "auto":
        return torchattacks.APGD(model=net, eps=args.eps)

    elif args.attack == "fab":
        return torchattacks.FAB(model=net,
                                eps=args.eps,
                                n_classes=args.n_classes)

    elif args.attack == "cw":
        return torchattacks.CW(model=net, c=0.1, lr=0.1, steps=200)

    elif args.attack == "fgsm":
        return torchattacks.FGSM(model=net, eps=args.eps)

    elif args.attack == "bim":
        return torchattacks.BIM(model=net, eps=args.eps, alpha=1 / 255)

    elif args.attack == "deepfool":
        return torchattacks.DeepFool(model=net, steps=10)

    elif args.attack == "sparse":
        return torchattacks.SparseFool(model=net)

    elif args.attack == "gn":
        return torchattacks.GN(model=net, sigma=args.eps)
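A minimal usage sketch for the factory above (the args values, net, and the data batch are assumptions, not from the source):

import argparse
import torchattacks

# hypothetical argument values matching the fields attack_loader reads
args = argparse.Namespace(attack="pgd", eps=8 / 255, steps=10, n_classes=10)
atk = attack_loader(args, net)    # net: a trained classifier (nn.Module)
adv_images = atk(images, labels)  # torchattacks expects inputs scaled to [0, 1]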
Example #2
def get_atk(model, atk_name, eps, steps):

    if atk_name == 'fgsm':
        return torchattacks.FGSM(model, eps=eps)
    elif atk_name == 'bim':
        return torchattacks.BIM(model,
                                eps=eps,
                                steps=steps,
                                alpha=eps / (steps * .5))
    elif atk_name == 'deepfool':
        return torchattacks.DeepFool(model, steps=steps)
    elif atk_name == 'cw':
        return torchattacks.CW(model)
    elif atk_name == 'pgd':
        return torchattacks.PGD(model,
                                eps=eps,
                                steps=steps,
                                alpha=eps / (steps * .5))
    elif atk_name == 'rfgsm':
        return torchattacks.RFGSM(model, eps=eps, alpha=eps)
    elif atk_name == 'auto-attack':
        return torchattacks.AutoAttack(model, eps=eps)
    elif atk_name == 'mifgsm':
        return torchattacks.MIFGSM(model, eps=eps, steps=steps)
    elif atk_name == 'square':
        return torchattacks.Square(model, eps=eps)
    elif atk_name == 'fab':
        return torchattacks.FAB(model, eps=eps)
    elif atk_name == 'one-pixel':
        return torchattacks.OnePixel(model)
    elif atk_name == 'gn':
        return torchattacks.GN(model, sigma=eps)
    elif atk_name == 'apgd':
        return torchattacks.APGD(model, eps=eps)
    elif atk_name == 'eotpgd':
        return torchattacks.EOTPGD(model,
                                   eps=eps,
                                   steps=steps,
                                   alpha=eps / (steps * .5))
    elif atk_name == 'pgddlr':
        return torchattacks.PGDDLR(model,
                                   eps=eps,
                                   steps=steps,
                                   alpha=eps / (steps * .5))
    elif atk_name == 'ffgsm':
        return torchattacks.FFGSM(model, eps=eps, alpha=eps)
    elif atk_name == 'sparsefool':
        return torchattacks.SparseFool(model)

    else:
        raise ValueError("Attack not valid: " + atk_name)
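Note the step-size convention above: eps / (steps * .5) is simply 2 * eps / steps, i.e. each step may move twice the average distance needed to cross the eps-ball, a common BIM/PGD heuristic. A quick worked check (values assumed for illustration):

eps, steps = 8 / 255, 10
alpha = eps / (steps * .5)                   # == 2 * eps / steps == 1.6 / 255
assert abs(alpha - 2 * eps / steps) < 1e-12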
Example #3
def basic_iterative_method(model, X, Y, eps, eps_iter, test_loader=None):
    atk = torchattacks.BIM(model, eps=eps, alpha=eps_iter, steps=7)
    if test_loader is not None:
        # attack batch by batch, then concatenate the adversarial examples
        x_adv_list = []
        for x, y in test_loader:
            x_adv_list.append(atk(x, y))
        X_adv = torch.cat(x_adv_list)
    else:
        # without a loader, attack the full tensors directly
        X_adv = atk(X, Y)
    return X_adv.cpu()
Example #4
def load_attack(model, attack: str):
    import torchattacks
    if attack == 'PGD':
        return torchattacks.PGD(model, eps=2 / 255, alpha=2 / 255, steps=7)
    elif attack == 'CW':
        return torchattacks.CW(model,
                               targeted=False,
                               c=1,
                               kappa=0,
                               steps=1000,
                               lr=0.01)
    elif attack == 'BIM':
        # steps=0: older torchattacks derives the iteration count from eps
        return torchattacks.BIM(model, eps=4 / 255, alpha=1 / 255, steps=0)
    elif attack == 'FGSM':
        return torchattacks.FGSM(model, eps=1 / 255)
    else:
        raise NotImplementedError()
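A hypothetical robust-accuracy loop built on load_attack (model, loader, and device are assumptions, not from the source):

atk = load_attack(model, 'PGD')
correct, total = 0, 0
for images, labels in loader:
    images, labels = images.to(device), labels.to(device)
    adv = atk(images, labels)                  # generate adversarial examples
    preds = model(adv).argmax(dim=1)
    correct += (preds == labels).sum().item()
    total += labels.size(0)
print('robust accuracy: %.2f%%' % (100.0 * correct / total))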
Example #5
def Greedy_Decode_Eval(Net, datasets, args):
    # TestNet = Net.eval()
    epoch_size = len(datasets) // args.test_batch_size # integer division, so the leftover tail samples are not included
    # collate_fn: controls how samples are batched; a custom function can implement exactly what we need
    # shuffle: when set to True, the dataset is reshuffled every epoch
    batch_iterator = iter(DataLoader(datasets, args.test_batch_size, shuffle=True, num_workers=args.num_workers, collate_fn=collate_fn))
    # steps=0: older torchattacks derives the iteration count from eps
    attack = torchattacks.BIM(Net, eps=args.epsilon, alpha=1/255, steps=0)

    total = epoch_size * args.test_batch_size   # total number of samples evaluated
    correct = 0
    for i in range(epoch_size):
        # load test data
        images, labels, lengths = next(batch_iterator)  # pull the next batch; images holds the whole batch as a single tensor
        # print(lengths) # with batch_size=100, lengths is a list of 100 sevens: [7, 7, ..., 7]
        # print(images.shape)
        start = 0
        targets = []
        for length in lengths:  # the labels arrive flattened, so split them back out per sample here
            label = labels[start:start+length]
            targets.append(label)
            start += length
        targets = np.array([el.numpy() for el in targets], dtype=np.int32)

        # note: stock torchattacks attacks take (images, labels); the extra lengths argument implies a modified BIM
        perturbed_images = attack(images, labels, lengths)

        # run recognition again on the perturbed images
        preb_atk = Net(perturbed_images)

        # decode the predicted labels
        preb_labels_atk = np.array(get_preb_labels(preb_atk))
        for k in range(preb_labels_atk.shape[0]):
            # print(preb_labels_atk[k])
            # print(targets[k])
            if len(preb_labels_atk[k]) == len(targets[k]) and (preb_labels_atk[k] == targets[k]).all():
                correct += 1

    return correct * 1.0 / total
Example #6
CUDA_MODE = torch.cuda.is_available()

if not CUDA_MODE:
    logger.warning(
        "CUDA not available. Run on a CUDA enabled platform (NVIDIA GPU with compute capability >= 3) to get memory usage and timing stats (this code makes use of CUDA events to accurately measure memory and timing). Press [ENTER] to continue anyways."
    )
    input()

# How many images we want to test
IMG_NUM = 500
model.eval()
attacks = [
    torchattacks.PGD(model),
    torchattacks.DeepFool(model),
    torchattacks.StepLL(model),
    torchattacks.BIM(model)
]

for attack in attacks:
    time = []
    attack_l2 = []
    peak_cuda = []
    avg_cuda = []
    total_success = 0

    logger.info(f"Benchmarking {str(attack)} on {IMG_NUM} images")
    for img_id in range(IMG_NUM):

        # Load the image and reshape it to [NxCxWxH] (which is what the models expect)
        target_im = data[str(model)][img_id][None, :, :, :].to(device)
        _, TRUECLASS = torch.max(model(target_im), 1)
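The snippet breaks off here. A sketch of how the per-image timing and peak-memory stats promised by the warning above could be collected with CUDA events, continuing inside the img_id loop (the attack call and list names follow the snippet; everything else is an assumption):

        start = torch.cuda.Event(enable_timing=True)
        end = torch.cuda.Event(enable_timing=True)
        torch.cuda.reset_peak_memory_stats(device)
        start.record()
        adv_im = attack(target_im, TRUECLASS)
        end.record()
        torch.cuda.synchronize()                                   # wait for both events
        time.append(start.elapsed_time(end))                       # milliseconds
        peak_cuda.append(torch.cuda.max_memory_allocated(device))  # bytes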
Example #7
def train_model(device, dataloaders, batch_size, len_dataset, model, criterion, optimizer, scheduler, num_epochs=25):
    since = time.time()

    '''
    * state_dict: a Python dict object that maps each layer to its parameter tensors
    - layers: learnable parameters (convolutional layers, linear layers, etc.), registered buffers (batchnorm's running_mean)
    - Optimizer objects (torch.optim) have a state_dict as well
    '''
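    # For reference, the usual state_dict round-trip (a sketch, not part of the original function):
    #   torch.save(model.state_dict(), 'checkpoint.pt')
    #   model.load_state_dict(torch.load('checkpoint.pt'))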
    best_model_wts = copy.deepcopy(model.state_dict())
    best_acc = 0.0
    train_loss, train_acc, valid_loss, valid_acc = [], [], [], []


    for epoch in range(num_epochs):
        print('Epoch {}/{}'.format(epoch, num_epochs - 1))
        print('-' * 10)

        # split each epoch into a training phase and a validation phase
        for phase in ['train', 'valid']:
            if phase == 'train':
                model.train()  # training mode
            else:
                model.eval()   # evaluate mode

            running_loss, running_corrects, num_cnt = 0.0, 0, 0

            ratio_adv_ori = int((len_dataset // batch_size + 1) * 0.4)   # original-vs-adversarial split: the first 40% of training batches stay clean

            # iterate over the data batch by batch
            for i, (inputs, labels) in enumerate(dataloaders[phase]):

                # according to the split above, keep the batch original or attack it (validation alternates every other batch)
                if (phase == 'train' and (i < ratio_adv_ori)) or (phase == 'valid' and i % 2 == 0):
                    inputs = inputs.to(device)

                else:
                    # define the adversarial attacks
                    atks = [torchattacks.FGSM(model, eps=8 / 255),
                            torchattacks.BIM(model, eps=8 / 255, alpha=2 / 255, steps=7),
                            torchattacks.PGD(model, eps=8 / 255, alpha=2 / 255, steps=7),
                            ]

                    inputs = atks[i % 3](inputs, labels).to(device)

                    # image-processing based defense methods --> applied by converting each tensor to an image
                    for batch in range(inputs.shape[0]):
                        tensor2pil = transforms.ToPILImage()(inputs[batch]).convert('RGB')

                        # 1. Resizing (PIL's Image.resize returns a new image, so reassign the result)
                        # Image.resize(size, resample=3, box=None, reducing_gap=None)
                        # resample (filter): PIL.Image.NEAREST, PIL.Image.BOX, PIL.Image.BILINEAR, PIL.Image.HAMMING, PIL.Image.BICUBIC
                        tensor2pil = tensor2pil.resize((74, 74))
                        tensor2pil = tensor2pil.resize((224, 224))

                        # convert the image back to a tensor
                        tensor_img = transforms.ToTensor()(tensor2pil)
                        inputs[batch] = tensor_img


                        # 2. jpeg compression
                        tensor2numpy = inputs[batch].cpu().numpy()
                        cv_img = np.transpose(tensor2numpy, (1, 2, 0))      # [w, h, c]
                        cv_img = cv_img * 255
                        encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), 15]
                        result, encimg = cv2.imencode('.jpg', cv_img, encode_param)
                        if not result:
                            print('could not encode image!')
                            quit()

                        # decode from jpeg format
                        jpeg_img = cv2.imdecode(encimg, 1)
                        jpeg2input = np.transpose(jpeg_img, (2, 0, 1)) / 255
                        inputs[batch] = torch.Tensor(jpeg2input).to(device)


                    # # save adversarial examples
                    # save_inputs = inputs.cpu().numpy()
                    # labels = labels.cpu().numpy()
                    # from matplotlib.pyplot import imsave
                    #
                    # for j in range(batch_size):
                    #     image = save_inputs[j, :, :, :]
                    #     label = labels[j]
                    #     if label == 0:
                    #         imsave(
                    #             f"C:/Users/mmclab1/Desktop/fakecheck/dataset/adv_img_examples/"
                    #             f"fake_adversarial_image_{j}.png",
                    #             np.transpose(image, (1, 2, 0)))
                    #     else:
                    #         imsave(
                    #             f"C:/Users/mmclab1/Desktop/fakecheck/dataset/adv_img_examples/"
                    #             f"real_adversarial_image_{j}.png",
                    #             np.transpose(image, (1, 2, 0)))



                labels = labels.to(device)

                # use the optimizer (which holds the learnable weights) to zero out the gradients of every variable to be updated,
                # because backward() accumulates gradients into the buffers instead of overwriting them.
                optimizer.zero_grad()

                # forward pass
                # enable gradient tracking only during training
                with torch.set_grad_enabled(phase == 'train'):

                    outputs = model(inputs)             # h(x): the model's predictions
                    _, preds = torch.max(outputs, 1)    # dim=1: take only the max of each sample's output row
                    loss = criterion(outputs, labels)   # loss function judging how well the model h(x) predicted

                    # backward + optimize only in the training phase
                    if phase == 'train':
                        loss.backward()     # compute gradients
                        optimizer.step()    # update parameters

                # statistics
                running_loss += loss.item() * inputs.size(0)            # inputs.size(0) == batch size
                running_corrects += torch.sum(preds == labels.data)     # True == 1, False == 0: total number of correct predictions
                num_cnt += len(labels)                                  # len(labels) == batch size

            if phase == 'train':
                scheduler.step()    # Learning Rate Scheduler

            epoch_loss = float(running_loss / num_cnt)
            epoch_acc = float((running_corrects.double() / num_cnt).cpu() * 100)

            if phase == 'train':
                train_loss.append(epoch_loss)
                train_acc.append(epoch_acc)
            else:
                valid_loss.append(epoch_loss)
                valid_acc.append(epoch_acc)
            print('{} Loss: {:.2f} Acc: {:.1f}'.format(phase, epoch_loss, epoch_acc))

            # deep copy the model
            if phase == 'valid' and epoch_acc > best_acc:
                best_idx = epoch
                best_acc = epoch_acc
                best_model_wts = copy.deepcopy(model.state_dict())
                #                 best_model_wts = copy.deepcopy(model.module.state_dict())
                print('==> best model saved - %d / %.1f' % (best_idx, best_acc))

    time_elapsed = time.time() - since
    print('Training complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))
    print('Best valid Acc: %d - %.1f' % (best_idx, best_acc))

    # load best model weights
    PATH = 'pytorch_model_adv_epoch30_4_sgd_resize3_comp15.pt'
    model.load_state_dict(best_model_wts)
    # torch.save(model.state_dict(), PATH)  # save only the model's state_dict
    torch.save(model, PATH)                 # save the whole model object
    torch.save(model.state_dict(), f'C:/Users/mmclab1/.cache/torch/hub/checkpoints/{PATH}')
    print('model saved')

    # plot train/validation loss and accuracy
    plt.subplot(311)
    plt.plot(train_loss)
    plt.plot(valid_loss)
    plt.ylabel('loss')
    plt.xlabel('epoch')
    plt.legend(['train', 'validation'], loc='upper left')

    plt.subplot(313)
    plt.plot(train_acc)
    plt.plot(valid_acc)
    plt.ylabel('acc')
    plt.xlabel('epoch')
    plt.legend(['train', 'validation'], loc='upper left')

    plt.savefig('graph_adv_epoch30_4_sgd_resize3_comp15.png')
    plt.show()

    return model, best_idx, best_acc, train_loss, train_acc, valid_loss, valid_acc, inputs
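A hypothetical call sketch for train_model (the loss/optimizer/scheduler choices and surrounding variables are assumptions, not from the source):

# assumes: import torch.nn as nn; import torch.optim as optim;
# model, device, dataloaders, batch_size, len_dataset already defined
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=7, gamma=0.1)
model, best_idx, best_acc, train_loss, train_acc, valid_loss, valid_acc, last_inputs = train_model(
    device, dataloaders, batch_size, len_dataset,
    model, criterion, optimizer, scheduler, num_epochs=30)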
Example #8
def train_epoch(model, loader, optimizer):
    model.train()
    train_loss = []
    bar = tqdm(loader)
    for i, (data, target, face_name, df_method) in enumerate(bar):

        optimizer.zero_grad()

        if args.use_meta:
            data, meta = data
            data, meta, target = data.to(device), meta.to(device), target.to(
                device)
            logits = model(data, meta)
        else:

            # add adversarial attacks; each list holds the candidate eps values
            # (in 1/255 units; for CW they are c values), and the last entry is
            # used as alpha where the attack takes one
            method = {
                '0_PGD': [20, 70, 2],
                '1_APGD': [20, 70, 2],
                '2_FGSM': [2, 8],
                '3_FFGSM': [4, 7, 10],
                '4_MIFGSM': [3, 6],
                '5_RFGSM': [4, 7, 8],
                '6_BIM': [4, 10, 1],
                '7_CW': [1e-4, 2e-4]
            }

            # 1. save the original data
            #    img_o
            # 2. downscaled data
            #    img_s = scaling(img_o, scaling_factor=0.5)

            # out_attack = attack(img_s, target, ...)
            # img_gen = normalize( scaling((out_attack - img_s), 1/scaling_factor) + img_o )

            for eps in range(2):
                globals()['atk{}'.format(0)] = torchattacks.PGD(
                    model,
                    eps=method['0_PGD'][eps] / 255,
                    alpha=method['0_PGD'][-1] / 255,
                    steps=4)
                globals()['atk{}'.format(1)] = torchattacks.APGD(
                    model,
                    eps=method['1_APGD'][eps] / 255,
                    # note: alpha matches an older torchattacks APGD signature;
                    # current releases do not accept an alpha argument
                    alpha=method['1_APGD'][-1] / 255,
                    steps=4)
                globals()['atk{}'.format(2)] = torchattacks.FGSM(
                    model, eps=method['2_FGSM'][eps] / 255)
                globals()['atk{}'.format(3)] = torchattacks.FFGSM(
                    model,
                    eps=method['3_FFGSM'][eps] / 255,
                    alpha=method['3_FFGSM'][-1] / 255)
                globals()['atk{}'.format(4)] = torchattacks.MIFGSM(
                    model, eps=method['4_MIFGSM'][eps] / 255, steps=4)
                globals()['atk{}'.format(5)] = torchattacks.RFGSM(
                    model,
                    eps=method['5_RFGSM'][eps] / 255,
                    alpha=method['5_RFGSM'][-1] / 255,
                    steps=4)
                globals()['atk{}'.format(6)] = torchattacks.BIM(
                    model,
                    eps=method['6_BIM'][eps] / 255,
                    alpha=method['6_BIM'][-1] / 255)
                globals()['atk{}'.format(7)] = torchattacks.CW(
                    model, c=method['7_CW'][eps], steps=10)

                for count in range(8):
                    # globals()['data_atk{}'.format(i)]
                    globals()['data_atk{}'.format(count)] = globals()[
                        'atk{}'.format(count)](data, (target + 1) % 2)
                    globals()['data_atk{}'.format(count)], target = (
                        globals()['data_atk{}'.format(count)]
                    ).to(device), target.to(device)
                    logits = model(globals()['data_atk{}'.format(count)])
                    globals()['data_atk{}'.format(count)] = (
                        globals()['data_atk{}'.format(count)]).cpu().numpy()

                method_keys = list(method.keys())

                bat_size = args.batch_size
                for j in range(bat_size):
                    for save_cnt in range(8):
                        globals()['im{}'.format(save_cnt)] = (globals()[
                            'data_atk{}'.format(save_cnt)])[j, :, :, :]
                        # imsave(
                        #     f"./confirm_attack2img/AE-classification/{method_keys[save_cnt]}/"
                        #     f"{target[j]}_{face_name[j]}_{i * bat_size + j}_{method_keys[save_cnt]}_eps{method[method_keys[save_cnt]][eps]}_wsbs.png",
                        #     np.transpose(globals()['im{}'.format(save_cnt)], (1, 2, 0)))
                        imsave(
                            f"./confirm_attack2img/AE-classification/train/"
                            f"{target[j]}_{face_name[j]}_{df_method[j]}_{i * bat_size + j}_{method_keys[save_cnt]}_eps{method[method_keys[save_cnt]][eps]}_wsbs.png",
                            np.transpose(globals()['im{}'.format(save_cnt)],
                                         (1, 2, 0)))

        loss = criterion(logits, target)

        if not args.use_amp:
            loss.backward()
        # else:
        #     with amp.scale_loss(loss, optimizer) as scaled_loss:
        #         scaled_loss.backward()

        if args.image_size in [896, 576]:
            # if gradients grow too large, clip them to 0.5 (max_grad_norm=0.5)
            torch.nn.utils.clip_grad_norm_(model.parameters(), 0.5)

        # gradient accumulation (for when memory runs short)
        if args.accumulation_step:
            if (i + 1) % args.accumulation_step == 0:
                optimizer.step()
                # optimizer.zero_grad()
        else:
            optimizer.step()
            # optimizer.zero_grad()

        loss_np = loss.detach().cpu().numpy()
        train_loss.append(loss_np)
        smooth_loss = sum(train_loss[-100:]) / min(len(train_loss), 100)
        bar.set_description('loss: %.5f, smooth_loss: %.5f' %
                            (loss_np, smooth_loss))

    train_loss = np.mean(train_loss)
    return train_loss
Example #9
def train_epoch(model, loader, optimizer):
    model.train()
    train_loss = []
    bar = tqdm(loader)
    for i, (data, target, face_name) in enumerate(bar):

        optimizer.zero_grad()

        if args.use_meta:
            data, meta = data
            data, meta, target = data.to(device), meta.to(device), target.to(device)
            logits = model(data, meta)
        else:

            # add adversarial attacks; each list holds the candidate eps values
            # (in 1/255 units; for CW they are c values), and the last entry is
            # used as alpha where the attack takes one
            method = {
                '1_PGD': [20, 70, 2],
                '2_APGD': [20, 70, 2],
                '3_FGSM': [2, 8],
                '4_FFGSM': [4, 7, 10],
                '5_MIFGSM': [3, 6],
                '6_RFGSM': [4, 7, 8],
                '7_BIM': [4, 10, 1],
                '8_CW': [1e-4, 2e-4]}

            # TODO: build the dataset with both the original and the attacked images
            # 1. save the original data       img_o
            # 2. downscaled data              img_s = scaling(img_o, scaling_factor=0.5)
            # out_attack = attack(img_s, target, ...)
            # img_gen = normalize( scaling((out_attack - img_s), 1/scaling_factor) + img_o )


            scaling_factor = 0.5
            # the original mixed numpy/cv2 with torch tensors here, which does not run;
            # this sketch uses tensor ops on the whole batch instead
            # (assumes: import torch.nn.functional as F)
            img_origin = data.to(device)           # [N, C, H, W], values in [0, 1]
            img_small = F.interpolate(img_origin, scale_factor=scaling_factor,
                                      mode='bilinear', align_corners=False)


            for eps in range(2):
                globals()['atk{}'.format(1)] = torchattacks.PGD(model, eps=method['1_PGD'][eps] / 255, alpha=method['1_PGD'][-1] / 255, steps=4)
                globals()['atk{}'.format(2)] = torchattacks.APGD(model, eps=method['2_APGD'][eps] / 255, alpha=method['2_APGD'][-1] / 255, steps=4)  # alpha follows an older APGD signature; current releases take no alpha
                globals()['atk{}'.format(3)] = torchattacks.FGSM(model, eps=method['3_FGSM'][eps] / 255)
                globals()['atk{}'.format(4)] = torchattacks.FFGSM(model, eps=method['4_FFGSM'][eps] / 255, alpha=method['4_FFGSM'][-1] / 255)
                globals()['atk{}'.format(5)] = torchattacks.MIFGSM(model, eps=method['5_MIFGSM'][eps] / 255, steps=4)
                globals()['atk{}'.format(6)] = torchattacks.RFGSM(model, eps=method['6_RFGSM'][eps] / 255, alpha=method['6_RFGSM'][-1] / 255, steps=4)
                globals()['atk{}'.format(7)] = torchattacks.BIM(model, eps=method['7_BIM'][eps] / 255, alpha=method['7_BIM'][-1] / 255)
                globals()['atk{}'.format(8)] = torchattacks.CW(model, c= method['8_CW'][eps], steps=10)

                for count in range(1, 9):
                    # attack the downscaled batch, upscale the perturbation,
                    # and add it back onto the original-resolution images
                    out_attack = globals()['atk{}'.format(count)](img_small, (target + 1) % 2)
                    delta = F.interpolate(out_attack - img_small,
                                          scale_factor=1 / scaling_factor,
                                          mode='bilinear', align_corners=False)
                    img_gen = torch.clamp(img_origin + delta, min=0, max=1).detach()

                    globals()['data_atk{}'.format(count)] = img_gen.to(device)
                    target = target.to(device)
                    logits = model(globals()['data_atk{}'.format(count)])
                    globals()['data_atk{}'.format(count)] = (globals()['data_atk{}'.format(count)]).cpu().numpy()


                method_keys = list(method.keys())

                bat_size = args.batch_size
                for j in range(bat_size):
                    # save original image
                    # im0 = data.cpu().numpy()[j, :, :, :]
                    # imsave(
                    #     f"./confirm_attack2img/AE-real_fake/0_original/"
                    #     f"{target[j]}_{face_name[j]}_{i * bat_size + j}_0_wsbs.png",
                    #     np.transpose(im0, (1, 2, 0)))

                    # save attacked image
                    for save_cnt in range(1,9):
                        globals()['im{}'.format(save_cnt)] = (globals()['data_atk{}'.format(save_cnt)])[j, :, :, :]
                        # imsave(
                        #     f"./confirm_attack2img/AE-real_fake/{method_keys[save_cnt]}/"
                        #     f"{target[j]}_{face_name[j]}_{i * bat_size + j}_{method_keys[save_cnt]}_eps{method[method_keys[save_cnt]][eps]}_wsbs.png",
                        #     np.transpose(globals()['im{}'.format(save_cnt)], (1, 2, 0)))
                        imsave(
                            f"./confirm_attack2img/AE-real_fake/train/"
                            f"{target[j]}_{face_name[j]}_{i * bat_size + j}_{method_keys[save_cnt]}_eps{method[method_keys[save_cnt]][eps]}_wsbs.png",
                            np.transpose(globals()['im{}'.format(save_cnt)], (1, 2, 0)))


        loss = criterion(logits, target)

        if not args.use_amp:
            loss.backward()
        # else:
        #     with amp.scale_loss(loss, optimizer) as scaled_loss:
        #         scaled_loss.backward()

        if args.image_size in [896, 576]:
            # if gradients grow too large, clip them to 0.5 (max_grad_norm=0.5)
            torch.nn.utils.clip_grad_norm_(model.parameters(), 0.5)

        # gradient accumulation (for when memory runs short)
        if args.accumulation_step:
            if (i + 1) % args.accumulation_step == 0:
                optimizer.step()
                # optimizer.zero_grad()
        else:
            optimizer.step()
            # optimizer.zero_grad()

        loss_np = loss.detach().cpu().numpy()
        train_loss.append(loss_np)
        smooth_loss = sum(train_loss[-100:]) / min(len(train_loss), 100)
        bar.set_description('loss: %.5f, smooth_loss: %.5f' % (loss_np, smooth_loss))

    train_loss = np.mean(train_loss)
    return train_loss
Example #10
def train_epoch(model, loader, optimizer):
    model.train()
    train_loss = []
    bar = tqdm(loader)
    for i, (data, target, image_name) in enumerate(bar):

        optimizer.zero_grad()

        if args.use_meta:
            data, meta = data
            data, meta, target = data.to(device), meta.to(device), target.to(
                device)
            logits = model(data, meta)
        else:

            # add attacks: 5,499 images --> 7 attacks x 2 epsilons == 76,986 in total
            # (the code below actually applies the 3 attacks and 3 epsilon values listed here)
            method = {
                '0_FGSM': [2, 5, 8],
                '1_PGD': [20, 50, 80, 2],
                '2_BIM': [4, 7, 10, 1]
            }

            for eps in range(3):
                globals()['atk{}'.format(0)] = torchattacks.FGSM(
                    model, eps=method['0_FGSM'][eps] / 255)
                globals()['atk{}'.format(1)] = torchattacks.PGD(
                    model,
                    eps=method['1_PGD'][eps] / 255,
                    alpha=method['1_PGD'][-1] / 255,
                    steps=4)
                globals()['atk{}'.format(2)] = torchattacks.BIM(
                    model,
                    eps=method['2_BIM'][eps] / 255,
                    alpha=method['2_BIM'][-1] / 255)

                for count in range(3):
                    # globals()['data_atk{}'.format(i)]
                    globals()['data_atk{}'.format(count)] = globals()[
                        'atk{}'.format(count)](data, (target + 1) % 2)
                    globals()['data_atk{}'.format(count)], target = (
                        globals()['data_atk{}'.format(count)]
                    ).to(device), target.to(device)
                    logits = model(globals()['data_atk{}'.format(count)])
                    globals()['data_atk{}'.format(count)] = (
                        globals()['data_atk{}'.format(count)]).cpu().numpy()

                method_keys = list(method.keys())

                bat_size = args.batch_size
                for j in range(bat_size):
                    for save_cnt in range(3):
                        globals()['im{}'.format(save_cnt)] = (globals()[
                            'data_atk{}'.format(save_cnt)])[j, :, :, :]
                        # imsave(
                        #     f"./confirm_attack2img/AE-classification/{method_keys[save_cnt]}/"
                        #     f"{target[j]}_{id[j]}_{i * bat_size + j}_{method_keys[save_cnt]}_eps{method[method_keys[save_cnt]][eps]}.png",
                        #     np.transpose(globals()['im{}'.format(save_cnt)], (1, 2, 0)))
                        imsave(
                            f"./data/Adversarial Attack/{method_keys[save_cnt]}/"
                            f"{image_name[j].split('.')[0]}_{method_keys[save_cnt]}_eps{method[method_keys[save_cnt]][eps]}_{i * bat_size + j}.png",
                            np.transpose(globals()['im{}'.format(save_cnt)],
                                         (1, 2, 0)))

        loss = criterion(logits, target)

        if not args.use_amp:
            loss.backward()
        # else:
        #     with amp.scale_loss(loss, optimizer) as scaled_loss:
        #         scaled_loss.backward()

        if args.image_size in [896, 576]:
            # if gradients grow too large, clip them to 0.5 (max_grad_norm=0.5)
            torch.nn.utils.clip_grad_norm_(model.parameters(), 0.5)

        # gradient accumulation (for when memory runs short)
        if args.accumulation_step:
            if (i + 1) % args.accumulation_step == 0:
                optimizer.step()
                # optimizer.zero_grad()
        else:
            optimizer.step()
            # optimizer.zero_grad()

        loss_np = loss.detach().cpu().numpy()
        train_loss.append(loss_np)
        smooth_loss = sum(train_loss[-100:]) / min(len(train_loss), 100)
        bar.set_description('loss: %.5f, smooth_loss: %.5f' %
                            (loss_np, smooth_loss))

    train_loss = np.mean(train_loss)
    return train_loss
Example #11
def test_model(model, phase='test'):
    # phase = 'train', 'valid', 'test'

    model.eval()  # evaluation mode; no gradient-based parameter updates
    running_loss, running_corrects, num_cnt = 0.0, 0, 0
    '''
    with torch.no_grad():   # skip storing gradients to save memory
    A test would normally run under this context, since testing does not train the gradients.

    grad = torch.autograd.grad(cost, images, retain_graph=False, create_graph=False)[0]
    An adversarial attack, however, perturbs the data based on gradients like the line above,
    so the gradients are still needed.

    Therefore test_adv must leave out with torch.no_grad().
    '''
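    # Illustrative contrast (a sketch, not from the source):
    #   with torch.no_grad():                # fine for clean-accuracy evaluation only
    #       outputs = model(inputs)
    #   adv_images = atks[0](inputs, labels) # must run with gradients enabled, since
    #                                        # the attack differentiates the loss
    #                                        # w.r.t. the inputs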

    for i, (inputs, labels) in enumerate(dataloaders[phase]):

        # define the adversarial attacks
        atks = [
            torchattacks.FGSM(model, eps=8 / 255),
            torchattacks.BIM(model, eps=8 / 255, alpha=2 / 255, steps=7),
            torchattacks.PGD(model, eps=8 / 255, alpha=2 / 255, steps=7),
        ]

        adv_images = atks[0](inputs, labels).to(device)

        # image-processing based defense methods --> applied by converting each tensor to an image
        for batch in range(inputs.shape[0]):
            tensor2img = transforms.ToPILImage()(inputs[batch]).convert('RGB')

            # 1. Resizing (PIL's Image.resize returns a new image, so reassign the result)
            # Image.resize(size, resample=3, box=None, reducing_gap=None)
            # resample (filter): PIL.Image.NEAREST, PIL.Image.BOX, PIL.Image.BILINEAR, PIL.Image.HAMMING, PIL.Image.BICUBIC
            tensor2img = tensor2img.resize((74, 74))
            tensor2img = tensor2img.resize((224, 224))

            # convert the image back to a tensor
            tensor_img = transforms.ToTensor()(tensor2img)
            inputs[batch] = tensor_img

            # 2. jpeg compression
            tensor2numpy = inputs[batch].cpu().numpy()
            cv_img = np.transpose(tensor2numpy, (1, 2, 0))  # [w, h, c]
            cv_img = cv_img * 255
            encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), 15]
            result, encimg = cv2.imencode('.jpg', cv_img, encode_param)
            if not result:
                print('could not encode image!')
                quit()

            # decode from jpeg format
            jpeg_img = cv2.imdecode(encimg, 1)
            jpeg2input = np.transpose(jpeg_img, (2, 0, 1)) / 255
            inputs[batch] = torch.Tensor(jpeg2input).to(device)

        labels = labels.to(device)

        outputs = model(adv_images)  # forward pass
        _, preds = torch.max(outputs, 1)  # the label the model predicts with highest probability
        loss = criterion(outputs, labels)  # compute the loss

        running_loss += loss.item() * inputs.size(0)
        running_corrects += torch.sum(preds == labels.data)
        num_cnt += inputs.size(0)  # batch size

        test_loss = running_loss / num_cnt
        test_acc = running_corrects.double() / num_cnt
        print('test done : loss/acc : %.2f / %.1f' %
              (test_loss, test_acc * 100))
Example #12
def ta_bim(x, y, model, eps=4 / 255, alpha=1 / 255, steps=0):
    attack = torchattacks.BIM(model, eps=eps, alpha=alpha, steps=steps)
    advs = attack(x, y)
    return advs
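A minimal call sketch (x, y, and model are assumptions: a batch of images in [0, 1], its labels, and a classifier on the same device). Note that the default steps=0 relies on older torchattacks versions deriving the iteration count from eps:

advs = ta_bim(x, y, model, eps=4 / 255, alpha=1 / 255, steps=7)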
Example #13
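# The snippet starts mid-function. A plausible header and imports (assumptions, not from the source):
# import numpy as np
# from skimage.metrics import structural_similarity, peak_signal_noise_ratio
# def image_metrics(ori_image, adv_image):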
    ssim = structural_similarity(ori_image,
                                 adv_image,
                                 data_range=255,
                                 multichannel=False)
    psnr = peak_signal_noise_ratio(ori_image, adv_image, data_range=255)
    l2 = np.linalg.norm(ori_image - adv_image)

    return ssim, psnr, l2


################ Start the attack ##################
epsilons = [0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3]

for epsilon in epsilons:

    # steps=0: older torchattacks derives the iteration count from eps
    attack = torchattacks.BIM(model, eps=epsilon, alpha=1 / 255, steps=0)

    correct = 0
    ssim = 0
    psnr = 0
    l2 = 0
    for data, target in test_loader:
        # Send the data and label to the device
        data, target = data.to(device), target.to(device)
        # clone so the original samples are not modified
        data_t = data.clone()
        target_t = target.clone()
        adv_images = attack(data_t, target_t)

        # show_cmp(data, adv_images)  # display the images; only works when batch_size=1
        # break