Example #1
def fast_adversarial_DDN(model_name):
    # Saves the minimum epsilon (L2 norm) needed to successfully attack each
    # image via the DDN attack, as an .npy file in the folder named after model_name.
    # No restarts.
    # Runs on a single batch only, since batch_size is expected to be 1000 (the first 1000 test images).
    print(model_name)
    print(device)
    test_batches = Batches(test_set, batch_size, shuffle=False, num_workers=2, gpu_id=torch.cuda.current_device())
    model = PreActResNet18().to(device)
    # Keep BatchNorm layers in their original precision; cast everything else
    # to float (the checkpoint may come from mixed-precision training).
    for m in model.children():
        if not isinstance(m, nn.BatchNorm2d):
            m.float()

    model.load_state_dict(torch.load(model_name + ".pt", map_location=device))
    model.eval()
    
    for i, batch in enumerate(test_batches):
        x, y = batch['input'].float(), batch['target']
        restarts = 1
        min_norm = np.zeros((restarts, batch_size))
        for r in range(restarts):  # r, not i, to avoid shadowing the batch index
            try:
                attacker = DDN(steps=100, device=device)
                adv = attacker.attack(model, x, labels=y, targeted=False)
            except Exception:
                # Retry once on a transient failure; same call as above.
                attacker = DDN(steps=100, device=device)
                adv = attacker.attack(model, x, labels=y, targeted=False)
            delta = adv - x
            # Per-image L2 norm of the perturbation.
            norm = norms(delta).squeeze(1).squeeze(1).squeeze(1).cpu().numpy()
            min_norm[r] = norm
        min_norm = min_norm.min(axis=0)
        np.save(model_name + "/DDN.npy", min_norm)
        break  # only the first batch (the first 1000 images) is evaluated
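
Examples #1 and #2 call a norms helper that is not shown. A minimal sketch consistent with the .squeeze(1).squeeze(1).squeeze(1) chain above (per-image L2 norm, singleton dimensions kept) could be:

def norms(z):
    # Per-example L2 norm over (C, H, W); the three kept singleton
    # dimensions are what the callers squeeze back out.
    return z.view(z.shape[0], -1).norm(dim=1)[:, None, None, None]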
Example #2
def fast_adversarial_DDN(model_name):
    # Saves the minimum epsilon (L2 norm) needed to successfully attack each
    # image via the DDN attack, as an .npy file in the folder named after model_name.
    # No restarts.
    # Runs on a single batch only, since batch_size is expected to be 1000 (the first 1000 test images).
    print(model_name)
    test_batches = DataLoader(mnist_test, batch_size=batch_size, shuffle=False)
    print(device)
    # Try the primary architecture first; fall back to the alternative one
    # if the checkpoint does not match.
    try:
        model_test = net().to(device)
        model_address = model_name + ".pt"
        model_test.load_state_dict(
            torch.load(model_address, map_location=device))
    except Exception:
        model_test = Model_Torch().to(device)
        model_address = model_name + ".pt"
        model_test.load_state_dict(
            torch.load(model_address, map_location=device))

    model_test.eval()
    restarts = 1

    for i, batch in enumerate(test_batches):
        x, y = batch[0].to(device), batch[1].to(device)
        min_norm = np.zeros((restarts, batch_size))
        try:
            attacker = DDN(steps=100, device=device)
            adv = attacker.attack(model_test, x, labels=y, targeted=False)
        except Exception:
            # Retry once on a transient failure; same call as above.
            attacker = DDN(steps=100, device=device)
            adv = attacker.attack(model_test, x, labels=y, targeted=False)
        delta = adv - x
        # Per-image L2 norm of the perturbation.
        norm = norms(delta).squeeze(1).squeeze(1).squeeze(1).cpu().numpy()
        min_norm[0] = norm
        min_norm = min_norm.min(axis=0)
        np.save(model_name + "/DDN.npy", min_norm)
        # Fraction of the 1000 images whose minimal adversarial norm exceeds 2.0,
        # i.e. robust accuracy at an L2 budget of 2.0.
        print(min_norm[min_norm > 2.0].shape[0] / 1000)
        break
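
The saved DDN.npy file holds, for each image, the smallest L2 perturbation norm that flipped the prediction, so robust accuracy at any L2 budget can be read off afterwards. A hypothetical follow-up (the file path matches the save call above):

import numpy as np

min_norm = np.load(model_name + "/DDN.npy")
for eps in (0.5, 1.0, 1.5, 2.0):
    # An image survives budget eps if its minimal adversarial norm exceeds eps.
    print("robust acc @ L2 eps={}: {:.3f}".format(eps, (min_norm > eps).mean()))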
Example #3
    model = torch.nn.DataParallel(model)

optimizer = SGD(model.parameters(),
                lr=args.lr,
                momentum=args.momentum,
                weight_decay=args.weight_decay)
if args.adv == 0:
    scheduler = lr_scheduler.StepLR(optimizer,
                                    step_size=args.lr_step,
                                    gamma=args.lr_decay)
else:
    scheduler = lr_scheduler.MultiStepLR(optimizer,
                                         milestones=[60, 120, 160],
                                         gamma=0.2)

attacker = DDN(steps=args.steps, device=DEVICE)

max_loss = torch.log(torch.tensor(10.)).item()  # log(10): cross-entropy of a uniform 10-class prediction; for callback
best_acc = 0
best_epoch = 0

for epoch in range(args.epochs):

    scheduler.step()  # stepped at epoch start (pre-1.1 PyTorch convention)
    cudnn.benchmark = True
    model.train()
    requires_grad_(m, True)  # m: the underlying, un-normalized model
    accs = AverageMeter()
    losses = AverageMeter()
    attack_norms = AverageMeter()

def main():
    global args

    args = parser.parse_args()
    print(args)

    if not os.path.exists(args.save_folder):
        os.mkdir(args.save_folder)

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    image_mean = torch.tensor([0.4802, 0.4481, 0.3975]).view(1, 3, 1, 1)
    image_std = torch.tensor([0.2770, 0.2691, 0.2821]).view(1, 3, 1, 1)

    # create model
    if args.pretrained:
        print("=> using pre-trained model '{}'".format(args.arch))
        m = models.__dict__[args.arch](pretrained=True)
    else:
        print("=> creating model '{}'".format(args.arch))
        m = models.__dict__[args.arch]()

    model = utils.NormalizedModel(m, image_mean, image_std)
    model.to(device)

    if torch.cuda.device_count() > 1:
        model = torch.nn.DataParallel(model)

    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.SGD(model.parameters(),
                                args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                args.lr_step,
                                                gamma=0.1)

    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            prec1 = checkpoint['prec1']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            scheduler.last_epoch = checkpoint['epoch'] - 1
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    # Data loading code
    train_transform = transforms.Compose([
        transforms.RandomHorizontalFlip(),
        transforms.RandomGrayscale(p=0.05),
        transforms.RandomAffine(0, translate=(0.1, 0.1)),
        transforms.ToTensor()
    ])

    test_transform = transforms.Compose([transforms.ToTensor()])

    train_dataset = dataset.TinyImageNet(args.data,
                                         mode='train',
                                         transform=train_transform)
    val_dataset = dataset.TinyImageNet(args.data,
                                       mode='val',
                                       transform=test_transform)

    if args.visdom_port:
        from visdom_logger.logger import VisdomLogger
        callback = VisdomLogger(port=args.visdom_port)
    else:
        callback = None

    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               drop_last=True,
                                               num_workers=args.workers,
                                               pin_memory=True)
    val_loader = torch.utils.data.DataLoader(val_dataset,
                                             batch_size=args.batch_size,
                                             num_workers=args.workers,
                                             pin_memory=True)

    attack = DDN(steps=args.steps, device=device)

    if args.evaluate:
        validate(val_loader, model, criterion, device, 0, callback=callback)
        return

    for epoch in range(args.start_epoch, args.epochs):
        scheduler.step()
        print('Learning rate for epoch {}: {:.2e}'.format(
            epoch, optimizer.param_groups[0]['lr']))

        # train for one epoch
        train(train_loader, model, m, criterion, optimizer, attack, device,
              epoch, callback)

        # evaluate on validation set
        prec1 = validate(val_loader, model, criterion, device, epoch + 1,
                         callback)

        utils.save_checkpoint(
            state={
                'epoch': epoch + 1,
                'arch': args.arch,
                'state_dict': model.state_dict(),
                'prec1': prec1,
                'optimizer': optimizer.state_dict()
            },
            filename=os.path.join(args.save_folder,
                                  'checkpoint_{}.pth'.format(args.arch)))

        utils.save_checkpoint(
            state=model.state_dict(),
            filename=os.path.join(args.save_folder,
                                  '{}_epoch-{}.pt'.format(args.arch, epoch + 1)),
            cpu=True)
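
utils.NormalizedModel, used here and again in Examples #6 and #10, is not shown. A minimal sketch of what it presumably does: fold normalization into the forward pass, so attacks can operate directly on [0, 1] images:

import torch.nn as nn

class NormalizedModel(nn.Module):
    def __init__(self, model, mean, std):
        super().__init__()
        self.model = model
        # Buffers move with .to(device) but are not trained.
        self.register_buffer('mean', mean)
        self.register_buffer('std', std)

    def forward(self, x):
        return self.model((x - self.mean) / self.std)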
Example #5
                             transform=transforms.ToTensor(),
                             download=True)
    loader = data.DataLoader(dataset, shuffle=False, batch_size=16)

    x, y = next(iter(loader))
    x = x.to(device)
    y = y.to(device)

    print('Loading model')
    model = SmallCNN()
    model.load_state_dict(torch.load(args.model_path))
    model.eval().to(device)
    requires_grad_(model, False)  # freeze weights: attacks only need input gradients

    print('Running DDN attack')
    attacker = DDN(steps=100, device=device)
    start = time.time()
    ddn_atk = attacker.attack(model, x, labels=y, targeted=False)
    ddn_time = time.time() - start

    print('Running C&W attack')
    cwattacker = CarliniWagnerL2(device=device,
                                 image_constraints=(0, 1),
                                 num_classes=10)

    start = time.time()
    cw_atk = cwattacker.attack(model, x, labels=y, targeted=False)
    cw_time = time.time() - start

    # Save images
    all_imgs = torch.cat((x, cw_atk, ddn_atk))
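
requires_grad_, called above and in Example #3, is also assumed from the snippets' utilities; a minimal version that toggles gradient tracking on all parameters (so the attack backpropagates to the input only) would be:

def requires_grad_(model, requires_grad):
    # Enable or disable gradient tracking for every parameter of the model.
    for param in model.parameters():
        param.requires_grad_(requires_grad)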
Example #6
optimizer = torch.optim.SGD(model.parameters(),
                            lr=args.lr,
                            momentum=args.momentum,
                            weight_decay=args.weight_decay)

if args.adv == 0:
    scheduler = lr_scheduler.StepLR(optimizer,
                                    step_size=args.lr_step,
                                    gamma=args.lr_decay)
else:
    scheduler = lr_scheduler.MultiStepLR(optimizer,
                                         milestones=[60, 120, 160],
                                         gamma=0.2)

attacker = DDN(steps=args.steps, device=DEVICE)

max_loss = torch.log(torch.tensor(10.)).item()  # log(10): cross-entropy of a uniform 10-class prediction; for callback
best_acc = 0
best_epoch = 0


# augment function: attention crop and attention drop
def batch_augment(images,
                  attention_map,
                  mode='crop',
                  theta=0.5,
                  padding_ratio=0.1):
    batches, _, imgH, imgW = images.size()  # B,C,H,W

    if mode == 'crop':  # attention crop
def main():
    global args

    args = parse_args()
    print(args)
    gpu_id = args.gpu_id
    if isinstance(gpu_id, int):
        gpu_id = [gpu_id]

    print("Let's use ", len(gpu_id), " GPUs!")
    print("gpu_ids:", gpu_id)
    if not os.path.exists(args.save_folder):
        os.mkdir(args.save_folder)
    device = torch.device('cuda:%d' % gpu_id[0] if torch.cuda.is_available() else 'cpu')
    if 'inception' in args.arch.lower():
        print('Using Inception Normalization!')
        image_mean = torch.tensor([0.5, 0.5, 0.5]).view(1, 3, 1, 1)
        image_std = torch.tensor([0.5, 0.5, 0.5]).view(1, 3, 1, 1)
    else:
        print('Using Imagenet Normalization!')
        image_mean = torch.tensor([0.4802, 0.4481, 0.3975]).view(1, 3, 1, 1)
        image_std = torch.tensor([0.2770, 0.2691, 0.2821]).view(1, 3, 1, 1)

    # create model
    if args.pretrained:
        print("=> using pre-trained model '{}'".format(args.arch))
    else:
        print("=> creating model '{}'".format(args.arch))
    model_init = make_model(args.arch, 110, pretrained=args.pretrained)
    model = utils.NormalizedModel(model_init, image_mean, image_std)
    print('model_struct:', model)
    print('model parameters:', sum(param.numel() for param in model.parameters()))
    model.to(device)

    if len(gpu_id) > 1:
        model = torch.nn.DataParallel(model, device_ids=gpu_id)

    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.SGD(model.parameters(), args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, args.lr_step, gamma=0.1)

    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            prec1 = checkpoint['prec1']
            from collections import OrderedDict
            new_state_dict = OrderedDict()
            for k, v in checkpoint['state_dict'].items():
                # Strip the 'module.' prefix only if it is actually there
                # (checkpoints saved from a DataParallel model have it).
                name = k[7:] if k.startswith('module.') else k
                new_state_dict[name] = v
            model.load_state_dict(new_state_dict, strict=True)
            optimizer.load_state_dict(checkpoint['optimizer'])
            scheduler.last_epoch = checkpoint['epoch'] - 1
            print("=> loaded checkpoint '{}' (epoch {})"
                  .format(args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    # Data loading code
    input_path = args.data_path
    clean_path = os.path.join(input_path, 'IJCAI_2019_AAAC_train')
    adv_path = os.path.join(input_path, 'IJCAI_2019_AAAC_train_adv')
    img_clean = glob.glob(os.path.join(clean_path, '*', '*.jpg'))
    img_adv = glob.glob(os.path.join(adv_path, '*', '*.jpg'))
    filenames = img_clean + img_adv

    train_transform = transforms.Compose([
        transforms.RandomHorizontalFlip(),
        transforms.Resize([args.image_size, args.image_size], interpolation=PIL.Image.BILINEAR),
        transforms.RandomGrayscale(p=0.05),
        transforms.RandomAffine(0, translate=(0.1, 0.1)),
        transforms.ToTensor()
    ])

    test_transform = transforms.Compose([
        transforms.Resize([args.image_size, args.image_size], interpolation=PIL.Image.BILINEAR),
        transforms.ToTensor()
    ])

    all_dataset = dataset.AAAC_dataset(filenames, mode='train', transform=train_transform)
    train_size = int(0.8 * len(all_dataset))
    val_size = len(all_dataset) - train_size
    # Note: both splits share train_transform, so test_transform above is unused
    # and validation images also receive the training augmentations.
    train_dataset, val_dataset = torch.utils.data.random_split(all_dataset, [train_size, val_size])

    callback = None  # Visdom logging disabled in this variant

    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True, drop_last=True,
                                               num_workers=args.workers, pin_memory=True)
    val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=args.batch_size, num_workers=args.workers,
                                             pin_memory=True)

    attacker = DDN(steps=args.steps, device=device)

    if args.evaluate:
        validate(val_loader, model, criterion, device, 0, callback=callback)
        return

    for epoch in range(args.start_epoch, args.epochs):
        scheduler.step()
        print('Learning rate for epoch {}: {:.2e}'.format(epoch, optimizer.param_groups[0]['lr']))

        # train for one epoch
        train(train_loader, model, model_init, criterion, optimizer, attacker, device, epoch, callback)

        # evaluate on validation set
        prec1 = validate(val_loader, model, criterion, device, epoch + 1, callback)

        utils.save_checkpoint(
            state={'epoch': epoch + 1,
                   'arch': args.arch,
                   'state_dict': model.state_dict(),
                   'prec1': prec1,
                   'optimizer': optimizer.state_dict()},
            filename=os.path.join(args.save_folder, 'checkpoint_{}.pth'.format(args.arch)))

        utils.save_checkpoint(
            state=model.state_dict(),
            filename=os.path.join(args.save_folder, '{}_epoch-{}.pt'.format(args.arch, epoch + 1)),
            cpu=True
        )
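
The 'module.' handling in the resume block above can be factored into a small helper that works whether or not the checkpoint was saved from a DataParallel-wrapped model; a hypothetical version:

from collections import OrderedDict

def strip_module_prefix(state_dict):
    # DataParallel prefixes every key with 'module.'; strip it when present.
    return OrderedDict(
        (k[len('module.'):] if k.startswith('module.') else k, v)
        for k, v in state_dict.items())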
Example #8
        lambda x: wrapper(normalize(x), pcl=pcl),
        eps=epsilon,
        eps_iter=epsilon / 10,
        ord=norm,
        nb_iter=10)
elif args.attack == 'FGSM':
    adversary = GradientSignAttack(lambda x: wrapper(x, pcl=pcl), eps=epsilon)
    # adversary = PGDAttack(lambda x: wrapper(x, pcl=pcl), eps=epsilon, eps_iter=epsilon, nb_iter=1, ord=norm, rand_init=False)
elif args.attack == 'CW':
    adversary = CarliniWagnerL2Attack(lambda x: wrapper(x, pcl=pcl),
                                      10,
                                      binary_search_steps=2,
                                      max_iterations=500,
                                      initial_const=1e-1)
elif args.attack == 'DDN':
    adversary = DDN(steps=100, device=device)
    ddn = True  # presumably initialized to False earlier in the (elided) script
else:
    adversary = None

criterion = torch.nn.CrossEntropyLoss()
net.eval()

test_acc_adv, test_loss_adv, dist_l2, dist_linf = adv_test(
    lambda x: wrapper(x, pcl=pcl),
    test_loader,
    criterion,
    adversary,
    epsilon,
    args,
    ddn=ddn,
Example #9
model_dict5 = torch.load(weight_norm)
model5.load_state_dict(model_dict5)
model5.eval()

# Meters and attackers are created once, outside the loop, so the running
# averages accumulate over the whole test set instead of resetting per batch.
# No torch.no_grad() here: DDN needs gradients with respect to the input.
test_accs = AverageMeter()
test_accs2 = AverageMeter()
attacker = DDN(steps=100, device=DEVICE)
attacker2 = DeepFool(device=DEVICE)

for images, labels in tqdm.tqdm(test_loader, ncols=80):
    images, labels = images.to(DEVICE), labels.to(DEVICE)

    # Clean accuracy
    logits = model5(images)
    test_accs.append((logits.argmax(1) == labels).float().mean().item())

    ################ ADV ########################
    adv = attacker.attack(model5, images, labels=labels, targeted=False)
    # deepfool = attacker2.attack(model5, images, labels=labels, targeted=False)
    if adv is None:
        adv = images

    # Adversarial accuracy
    logits2 = model5(adv)
    test_accs2.append((logits2.argmax(1) == labels).float().mean().item())
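
AverageMeter (used in Examples #3, #6 and #9) is assumed from the snippets' utilities; judging by the .append(...) calls, a minimal sketch could be:

class AverageMeter:
    # Minimal running-average tracker matching the .append() usage above.
    def __init__(self):
        self.sum = 0.0
        self.count = 0

    def append(self, value):
        self.sum += value
        self.count += 1

    @property
    def avg(self):
        return self.sum / max(self.count, 1)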
Example #10
t_img = t_img.to(device)
# Sanity checks: the image is correctly classified before attacking.
assert model(t_img).argmax() == label
assert black_box_model(img) == label

# Load surrogate model
smodel = resnext50_32x4d()
smodel = NormalizedModel(smodel, image_mean, image_std)
state_dict = torch.load(args.surrogate_model_path)
smodel.load_state_dict(state_dict)
smodel.eval().to(device)

# Sanity check: image correctly labeled by surrogate classifier:
assert smodel(t_img).argmax() == label

surrogate_models = [smodel]
attacks = [DDN(100, device=device), FGM_L2(1)]  # attack ensemble run on the surrogate

adv = attack(black_box_model,
             surrogate_models,
             attacks,
             img,
             label,
             targeted=False,
             device=device)

pred_on_adv = black_box_model(adv)

print('True label: {}; Prediction on the adversarial: {}'.format(
    label, pred_on_adv))

# Compute l2 norm in range [0, 1]
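
The snippet ends at the comment. A plausible completion, assuming adv and img are uint8 arrays in [0, 255] (hypothetical, not from the source):

import numpy as np

l2 = np.linalg.norm(adv.astype(np.float32) / 255 - img.astype(np.float32) / 255)
print('L2 norm of the attack (images scaled to [0, 1]): {:.4f}'.format(l2))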