Example #1
        # Checkpoint paths for the various defense variants (only two are loaded below).
        weight_attention = '/media/unknown/Data/PLP/fast_adv/defenses/weights/cifar10_Attention/cifar10acc0.8729999780654907_120.pth'
        weight_025conv_mixatten_ALP = '/media/unknown/Data/PLP/fast_adv/defenses/weights/best/0.25Mixed+ALP_cifar10_ep_85_val_acc0.8650.pth'
        weight_smooth = '/media/unknown/Data/PLP/fast_adv/defenses/weights/best/2random_smooth_cifar10_ep_120_val_acc0.8510.pth'
        weight_05smooth = '/media/unknown/Data/PLP/fast_adv/defenses/weights/shape_0.5_random/cifar10acc0.6944999784231186_50.pth'
        weight_025smooth = '/media/unknown/Data/PLP/fast_adv/defenses/weights/best/0.25random_smooth_cifar10_ep_146_val_acc0.8070.pth'
        weight_1smooth = '/media/unknown/Data/PLP/fast_adv/defenses/weights/best/1random_smooth_cifar10_ep_107_val_acc0.5380.pth'
        # NOTE: the source referenced an undefined `weight_AT`; the attention
        # checkpoint defined above is the likely intended weight.
        print('loading weights from : ', weight_attention)
        model_dict = torch.load(weight_attention)
        model.load_state_dict(model_dict)
        model.eval()
        model_dict2 = torch.load(weight_025conv_mixatten_ALP)
        model2.load_state_dict(model_dict2)
        model2.eval()
        test_accs = AverageMeter()
        test_losses = AverageMeter()
        widgets = ['test :', Percentage(), ' ', Bar('#'), ' ', Timer(),
                   ' ', ETA(), ' ', FileTransferSpeed()]
        pbar = ProgressBar(widgets=widgets)
        with torch.no_grad():
            for batch_data in pbar(test_loader):
                images, labels = batch_data['image'].to(device), batch_data['label_idx'].to(device)
                # Gaussian noise for randomized smoothing (sigma = 0.2); image_shape
                # goes unused here because the attention forward pass is commented out
                noise = torch.randn_like(images) * 0.2
                image_shape = images + noise
                #image_shape = torch.renorm(image_shape - images, p=2, dim=0, maxnorm=1) + images
                #logits,_ = model.forward_attention(images.detach(), image_shape.detach())
                logits = model(images.detach())
                logits2 = model2(images.detach())
                # flag samples the first model misclassifies but the second gets right;
                # element-wise masks keep this valid for batch sizes > 1
                mismatch = (logits.argmax(1) != labels) & (logits2.argmax(1) == labels)
                if mismatch.any():
                    i = 0  # placeholder (e.g. for a breakpoint while inspecting disagreements)
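All three examples rely on an AverageMeter helper that is not shown. A minimal sketch of the usual pattern (the exact class in fast_adv may differ, e.g. it may expose append() instead of update()):

class AverageMeter:
    """Tracks the running average of a scalar metric."""
    def __init__(self):
        self.sum = 0.0
        self.count = 0

    def update(self, value, n=1):
        # accumulate a value observed for n samples
        self.sum += value * n
        self.count += n

    @property
    def avg(self):
        return self.sum / max(self.count, 1)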
Example #2
# The constructor line was truncated in the source; the milestones/gamma pair
# matches torch.optim.lr_scheduler.MultiStepLR, the likely intended scheduler.
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer,
                                                 milestones=[60, 120, 160],
                                                 gamma=0.2)

attacker = DDN(steps=args.steps, device=DEVICE)

max_loss = torch.log(torch.tensor(10.)).item()  # ln(10) ≈ 2.303, the loss of a uniform 10-class prediction (for callback)
best_acc = 0
best_epoch = 0

for epoch in range(args.epochs):

    scheduler.step()  # stepping at epoch start follows the pre-1.1 PyTorch convention
    cudnn.benchmark = True
    model.train()
    requires_grad_(model, True)  # source had undefined `m`; the model is the likely target
    accs = AverageMeter()
    losses = AverageMeter()
    attack_norms = AverageMeter()

    length = len(train_loader)
    for i, (images, labels) in enumerate(tqdm.tqdm(train_loader, ncols=80)):

        images, labels = images.to(DEVICE), labels.to(DEVICE)
        # clean-image loss
        #logits_clean = model.forward(images)
        logits_clean, feature_clean = model(images)
        #loss = F.cross_entropy(logits_clean, labels)

        if args.adv is not None and epoch >= args.adv:
            model.eval()
            requires_grad_(model, False)  # freeze the model while crafting attacks
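The snippet cuts off inside the adversarial branch. A hedged sketch of how a DDN-based adversarial-training step typically continues (attacker.attack follows the fast_adv DDN API; the renorm projection, the args.max_norm flag, the loss wiring, and the meter method names are assumptions modeled on that codebase):

            # craft L2 adversarial examples against the frozen model
            adv = attacker.attack(model, images, labels=labels)
            l2_norms = (adv - images).view(images.size(0), -1).norm(2, dim=1)
            attack_norms.update(l2_norms.mean().item())

            # optionally project back into an L2 ball of radius args.max_norm (assumed flag)
            if args.max_norm:
                adv = torch.renorm(adv - images, p=2, dim=0,
                                   maxnorm=args.max_norm) + images

            requires_grad_(model, True)
            model.train()
            logits_adv, feature_adv = model(adv.detach())
            loss = F.cross_entropy(logits_adv, labels)
        else:
            loss = F.cross_entropy(logits_clean, labels)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()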
Example #3
##############################################
# Center Loss for Attention Regularization
##############################################
class CenterLoss(nn.Module):
    """Mean squared distance between feature vectors and their class centers,
    averaged over the batch."""
    def __init__(self):
        super(CenterLoss, self).__init__()
        self.l2_loss = nn.MSELoss(reduction='sum')

    def forward(self, outputs, targets):
        # sum of squared errors over the batch, normalized by batch size
        return self.l2_loss(outputs, targets) / outputs.size(0)


cross_entropy_loss = nn.CrossEntropyLoss()
center_loss = CenterLoss()

# loss and metric
loss_container = AverageMeter()

accs = AverageMeter()
raw_metric = AverageMeter()
crop_metric = AverageMeter()
drop_metric = AverageMeter()
attack_norms = AverageMeter()

loss_crop_adv = AverageMeter()
loss_drop = AverageMeter()

valacc_final = 0
# raw_metric = TopKAccuracyMetric(topk=(1, 5))
# crop_metric = TopKAccuracyMetric(topk=(1, 5))
# drop_metric = TopKAccuracyMetric(topk=(1, 5))
import scipy.misc  # NOTE: scipy.misc's image helpers are removed in modern SciPy; imageio is the usual replacement
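A minimal usage sketch for CenterLoss as attention regularization. WS-DAN-style training keeps a moving-average feature center per class; feature_center, the dimensions, and the EMA factor beta below are all hypothetical:

import torch

num_classes, feat_dim = 10, 32 * 512                  # hypothetical dimensions
feature_center = torch.zeros(num_classes, feat_dim)   # hypothetical per-class centers

feature_matrix = torch.randn(16, feat_dim)            # pooled attention features for a batch
labels = torch.randint(0, num_classes, (16,))

# pull each sample toward its class center
centers_batch = feature_center[labels]
loss_center = center_loss(feature_matrix, centers_batch)

# EMA update of the centers (beta is a hypothetical smoothing factor)
beta = 5e-2
feature_center[labels] += beta * (feature_matrix.detach() - centers_batch)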