Example #1
def test_all(classifier_name,
             path_to_classifier_weights,
             path_to_CATN,
             fgsm=False,
             cw=False):
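    # Evaluates a trained CIFAR classifier on the clean test set and, optionally,
    # under FGSM and Carlini-Wagner attacks, then always under the loaded
    # DCGAN/ATN attacker; returns the corresponding accuracies.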
    criterion = nn.CrossEntropyLoss()
    _, testloader = example.load_cifar()

    architectures = {
        'VGG16': VGG,
        'res18': resnet.ResNet18,
        'dense121': densenet.densenet_cifar,
        'alex': alexnet.AlexNet,
        'googlenet': googlenet.GoogLeNet,
        'lenet': LeNet
    }

    model = example.prep(architectures[classifier_name]())
    model.load_state_dict(torch.load(path_to_classifier_weights))

    if fgsm:
        attacker_fgsm = attacks.FGSM()
        timestart1 = time.time()
        test_acc, fgsm_test_adv_acc = test(model, criterion, testloader,
                                           attacker_fgsm)
        timeend1 = time.time()
    else:
        fgsm_test_adv_acc = None
    if cw:
        attacker_cw = attacks.CarliniWagner(verbose=False)
        timestart2 = time.time()
        test_acc, cw_test_adv_acc = test(model, criterion, testloader,
                                         attacker_cw)
        timeend2 = time.time()
    else:
        cw_test_adv_acc = None

    attacker_catn = attacks.DCGAN(train_adv=False)
    attacker_catn.load(path_to_CATN)

    timestart3 = time.time()
    test_acc, catn_test_adv_acc = test(model, criterion, testloader,
                                       attacker_catn)
    timeend3 = time.time()
    if fgsm:
        print("fgsm time: ", (timeend1 - timestart1))
    if cw:
        print("cw time: ", (timeend2 - timestart2))
    print("gatn time: ", (timeend3 - timestart3))
    return test_acc, fgsm_test_adv_acc, cw_test_adv_acc, catn_test_adv_acc
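The test() helper called above is not shown in this example. Below is a minimal sketch of what it is assumed to compute, namely clean test accuracy and adversarial accuracy against the given attacker. The attacker.perturb(images) interface mirrors the Faster R-CNN example further below and is an assumption rather than the repository's actual API, and criterion is kept only to match the call site.

import torch

def test(model, criterion, testloader, attacker):
    # Sketch only: returns (clean accuracy, adversarial accuracy) over the test set.
    model.eval()
    clean_correct, adv_correct, total = 0, 0, 0
    for images, labels in testloader:
        images, labels = images.cuda(), labels.cuda()
        with torch.no_grad():
            clean_correct += (model(images).argmax(1) == labels).sum().item()
        adv_images = attacker.perturb(images)  # assumed attacker interface
        with torch.no_grad():
            adv_correct += (model(adv_images).argmax(1) == labels).sum().item()
        total += labels.size(0)
    return clean_correct / total, adv_correct / total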
Example #2
if __name__ == "__main__":
    trainloader, testloader = load_cifar()
    criterion = nn.CrossEntropyLoss()
    do_train = True
    architectures = [(VGG, 'VGG16', 50), (resnet.ResNet18, 'res18', 500),
                     (densenet.densenet_cifar, 'dense121', 500),
                     (alexnet.AlexNet, 'alex', 500),
                     (googlenet.GoogLeNet, 'googlenet', 500),
                     (LeNet, 'lenet', 250)]

    for init_func, name, epochs in architectures:
        for tr_adv in [False, True]:
            print(name, tr_adv)
            model = prep(init_func())
            attacker = attacks.DCGAN(train_adv=tr_adv)

            optimizer = optim.Adam(model.parameters(), lr=1e-4)
            if do_train:
                train_acc, train_adv_acc = train(model,
                                                 optimizer,
                                                 criterion,
                                                 trainloader,
                                                 name,
                                                 attacker,
                                                 num_epochs=epochs)
                suffix = '_AT' if tr_adv else ''
                attacker.save(
                    'saved/{0}{1}_nodrop_joey_attacker_0.0010.pth'.format(
                        name, suffix))
                torch.save(
Example #3
def train(**kwargs):
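    # Builds the training and test dataloaders, constructs a FasterRCNNVGG16
    # victim and a DCGAN attacker (optionally loaded from a checkpoint), and
    # trains via VictimFasterRCNNTrainer while periodically plotting
    # ground-truth, predicted, and adversarial detections through visdom.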
    opt._parse(kwargs)
    dataset = Dataset(opt)
    # 300w_dataset = FaceLandmarksDataset()
    print('load data')
    dataloader = data_.DataLoader(dataset,
                                  batch_size=1,
                                  shuffle=True,
                                  pin_memory=True,
                                  num_workers=opt.num_workers)
    testset = TestDataset(opt)
    test_dataloader = data_.DataLoader(testset,
                                       batch_size=1,
                                       num_workers=opt.test_num_workers,
                                       shuffle=False,
                                       pin_memory=True)
    faster_rcnn = FasterRCNNVGG16()
    print('model construct completed')
    attacker = attacks.DCGAN(train_adv=False)
    if opt.load_attacker:
        attacker.load(opt.load_attacker)
        print('load attacker model from %s' % opt.load_attacker)
    trainer = VictimFasterRCNNTrainer(faster_rcnn, attacker,
                                      attack_mode=True).cuda()
    # trainer = VictimFasterRCNNTrainer(faster_rcnn).cuda()
    if opt.load_path:
        trainer.load(opt.load_path)
        print('load pretrained model from %s' % opt.load_path)

    trainer.vis.text(dataset.db.label_names, win='labels')
    # eval_result = eval(test_dataloader, faster_rcnn, test_num=2000)
    best_map = 0
    for epoch in range(opt.epoch):
        trainer.reset_meters(adv=True)
        for ii, (img, bbox_, label_, scale) in tqdm(enumerate(dataloader)):
            scale = at.scalar(scale)
            img, bbox, label = img.cuda().float(), bbox_.cuda(), label_.cuda()
            img, bbox, label = Variable(img), Variable(bbox), Variable(label)
            trainer.train_step(img, bbox, label, scale)

            if ii % opt.plot_every == 0:
                if os.path.exists(opt.debug_file):
                    ipdb.set_trace()

                # plot loss
                trainer.vis.plot_many(trainer.get_meter_data())
                trainer.vis.plot_many(trainer.get_meter_data(adv=True))

                # plot ground-truth bboxes
                ori_img_ = inverse_normalize(at.tonumpy(img[0]))
                gt_img = visdom_bbox(ori_img_, at.tonumpy(bbox_[0]),
                                     at.tonumpy(label_[0]))
                trainer.vis.img('gt_img', gt_img)

                # plot predicted bboxes
                _bboxes, _labels, _scores = trainer.faster_rcnn.predict(
                    [ori_img_], visualize=True)
                pred_img = visdom_bbox(ori_img_, at.tonumpy(_bboxes[0]),
                                       at.tonumpy(_labels[0]).reshape(-1),
                                       at.tonumpy(_scores[0]))
                trainer.vis.img('pred_img', pred_img)
                if trainer.attacker is not None:
                    adv_img = trainer.attacker.perturb(img)
                    adv_img_ = inverse_normalize(at.tonumpy(adv_img[0]))
                    _bboxes, _labels, _scores = trainer.faster_rcnn.predict(
                        [adv_img_], visualize=True)
                    adv_pred_img = visdom_bbox(
                        adv_img_, at.tonumpy(_bboxes[0]),
                        at.tonumpy(_labels[0]).reshape(-1),
                        at.tonumpy(_scores[0]))
                    trainer.vis.img('adv_img', adv_pred_img)
                # rpn confusion matrix (meter)
                trainer.vis.text(str(trainer.rpn_cm.value().tolist()),
                                 win='rpn_cm')
                # roi confusion matrix
                trainer.vis.img(
                    'roi_cm',
                    at.totensor(trainer.roi_cm.conf, False).float())

                if ii % 500 == 0:
                    best_path = trainer.save(epochs=epoch, save_rcnn=True)

        if epoch % 2 == 0:
            best_path = trainer.save(epochs=epoch)
Example #4
def img2jpg(img, img_suffix, quality):
    jpg_base = '/media/drive/ibug/300W_cropped/frcnn_adv_jpg/'
    img = img.transpose((1, 2, 0))
    img = Image.fromarray(img.astype('uint8'))
    if not os.path.exists(jpg_base):
        os.makedirs(jpg_base)
    jpg_path = jpg_base + img_suffix
    img.save(jpg_path, format='JPEG', subsampling=0, quality=quality)
    jpg_img = read_image(jpg_path)
    return jpg_img
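For reference, a hypothetical call to img2jpg might look like the sketch below: it feeds a stand-in CHW image with values in the 0-255 range, recompresses it at JPEG quality 75 under the function's hard-coded jpg_base directory, and gets back the decoded array via the repository's read_image. The stand-in input and file name are illustrative only.

import numpy as np

chw_img = np.random.randint(0, 256, size=(3, 224, 224), dtype='uint8')  # stand-in CHW image
jpg_img = img2jpg(chw_img, 'demo_0.jpg', quality=75)  # written under the hard-coded jpg_base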


if __name__ == '__main__':
    _data = FaceLandmarksDataset()
    faster_rcnn = FasterRCNNVGG16()
    attacker = attacks.DCGAN(train_adv=False)
    attacker.load(
        '/home/joey/Desktop/simple-faster-rcnn-pytorch/checkpoints/max_min_attack_6.pth'
    )
    trainer = VictimFasterRCNNTrainer(faster_rcnn, attacker).cuda()
    trainer.load(
        '/home/joey/Desktop/simple-faster-rcnn-pytorch/checkpoints/fasterrcnn_full_03172016_10'
    )
    quality_list = [100, 90, 80, 70, 60, 50, 40, 30, 20]
    threshold = [0.7]
    adv_det_list = []
    for quality in threshold:  # note: despite the name, this iterates over detection score thresholds
        detected = 0
        jpg_detected = 0
        adv_detected = 0
        trainer.faster_rcnn.score_thresh = quality