Code example #1
File: test.py  Project: steven202/robust_union
def fast_adversarial_DDN(model_name):
    # Saves the minimum epsilon value for successfully attacking each image via the DDN attack as an .npy file in the folder corresponding to model_name
    #No Restarts
    #Done for a single batch only since batch size is supposed to be set to 1000 (first 1000 images)
    print(model_name)
    print(device)
    test_batches = Batches(test_set, batch_size, shuffle=False, num_workers=2, gpu_id = torch.cuda.current_device())
    model = PreActResNet18().to(device)
    # Restore non-BatchNorm modules to float32 (the checkpoint was presumably
    # saved with mixed-precision weights).
    for m in model.children():
        if not isinstance(m, nn.BatchNorm2d):
            m.float()

    model.load_state_dict(torch.load(model_name+".pt", map_location = device))
    model.eval()
    
    for i, batch in enumerate(test_batches):
        x, y = batch['input'].float(), batch['target']
        restarts = 1
        min_norm = np.zeros((restarts, batch_size))
        # Use a separate index for restarts so the batch index is not shadowed.
        for r in range(restarts):
            try:
                attacker = DDN(steps=100, device=device)
                adv = attacker.attack(model, x, labels=y, targeted=False)
            except Exception:
                # Retry once if the attack fails (e.g. a transient CUDA error).
                attacker = DDN(steps=100, device=device)
                adv = attacker.attack(model, x, labels=y, targeted=False)
            delta = adv - x
            norm = norms(delta).squeeze(1).squeeze(1).squeeze(1).cpu().numpy()
            min_norm[r] = norm
        min_norm = min_norm.min(axis = 0)
        np.save(model_name + "/DDN.npy", min_norm)
        break
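Note: the norms helper used above is not shown in this snippet. It presumably computes the per-example L2 norm while keeping singleton spatial dimensions, which is why the result is squeezed three times; a minimal sketch under that assumption:

def norms(z):
    # Assumed helper: per-example L2 norm over all non-batch dimensions,
    # returned with shape (N, 1, 1, 1) so it broadcasts against image tensors.
    return z.view(z.shape[0], -1).norm(dim=1)[:, None, None, None]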
Code example #2
File: test.py  Project: steven202/robust_union
def fast_adversarial_DDN(model_name):
    # Saves the minimum epsilon value for successfully attacking each image via the DDN attack as an .npy file in the folder corresponding to model_name
    #No Restarts
    #Done for a single batch only since batch size is supposed to be set to 1000 (first 1000 images)
    print(model_name)
    test_batches = DataLoader(mnist_test, batch_size=batch_size, shuffle=False)
    print(device)
    # The checkpoint may correspond to either of two MNIST architectures;
    # try the first and fall back to the second if loading fails.
    try:
        model_test = net().to(device)
        model_address = model_name + ".pt"
        model_test.load_state_dict(
            torch.load(model_address, map_location=device))
    except Exception:
        model_test = Model_Torch().to(device)
        model_address = model_name + ".pt"
        model_test.load_state_dict(
            torch.load(model_address, map_location=device))

    model_test.eval()
    restarts = 1

    for i, batch in enumerate(test_batches):
        x, y = batch[0].to(device), batch[1].to(device)
        min_norm = np.zeros((restarts, batch_size))
        try:
            attacker = DDN(steps=100, device=device)
            adv = attacker.attack(model_test, x, labels=y, targeted=False)
        except Exception:
            # Retry once if the attack fails (e.g. a transient CUDA error).
            attacker = DDN(steps=100, device=device)
            adv = attacker.attack(model_test, x, labels=y, targeted=False)
        delta = (adv - x)
        norm = norms(delta).squeeze(1).squeeze(1).squeeze(1).cpu().numpy()
        min_norm[0] = norm
        min_norm = min_norm.min(axis=0)
        np.save(model_name + "/DDN.npy", min_norm)
        # ipdb.set_trace()
        # Fraction of the 1000 test images whose minimal L2 perturbation exceeds 2.0.
        print(min_norm[min_norm > 2.0].shape[0] / 1000)
        break
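Note: the saved array can later be turned into a robustness curve: for a given L2 radius, the robust fraction is the share of images whose minimum successful perturbation exceeds that radius, mirroring the min_norm > 2.0 check printed above. A small post-processing sketch (the radii are illustrative):

import numpy as np

min_norm = np.load(model_name + "/DDN.npy")   # minimum L2 perturbation per image
for eps in (0.5, 1.0, 1.5, 2.0):
    print("fraction surviving eps=%.1f: %.3f" % (eps, (min_norm > eps).mean()))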
Code example #3
    losses = AverageMeter()
    attack_norms = AverageMeter()

    length = len(train_loader)
    for i, (images, labels) in enumerate(tqdm.tqdm(train_loader, ncols=80)):

        images, labels = images.to(DEVICE), labels.to(DEVICE)
        # Loss on the clean (original) images
        #logits_clean = model.forward(images)
        logits_clean, feature_clean = model.forward(images)
        #loss = F.cross_entropy(logits_clean, labels)

        if args.adv is not None and epoch >= args.adv:
            model.eval()
            # `m` is presumably the underlying model module, defined outside this snippet.
            requires_grad_(m, False)
            adv = attacker.attack(model, images, labels)
            l2_norms = (adv - images).view(args.batch_size, -1).norm(2, 1)
            mean_norm = l2_norms.mean()
            if args.max_norm:
                adv = torch.renorm(
                    adv - images, p=2, dim=0, maxnorm=args.max_norm) + images
            attack_norms.append(mean_norm.item())
            requires_grad_(m, True)
            model.train()

            logits_adv, feature_adv = model(adv.detach())
            loss_adv = F.cross_entropy(logits_adv, labels)
            #loss=loss+ loss_adv #+ 0.5*F.mse_loss(logits_adv,logits)

        if args.shape is not None and epoch >= args.shape:
            model.eval()
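Note: requires_grad_ is defined outside this snippet. It presumably just toggles gradient tracking on the model parameters, so that during the attack gradients are only needed with respect to the inputs; a sketch under that assumption:

def requires_grad_(model, requires_grad):
    # Assumed helper: enable/disable gradient tracking for all parameters.
    for param in model.parameters():
        param.requires_grad_(requires_grad)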
Code example #4
    loader = data.DataLoader(dataset, shuffle=False, batch_size=16)

    x, y = next(iter(loader))
    x = x.to(device)
    y = y.to(device)

    print('Loading model')
    model = SmallCNN()
    model.load_state_dict(torch.load(args.model_path))
    model.eval().to(device)
    requires_grad_(model, False)

    print('Running DDN attack')
    attacker = DDN(steps=100, device=device)
    start = time.time()
    ddn_atk = attacker.attack(model, x, labels=y, targeted=False)
    ddn_time = time.time() - start

    print('Running C&W attack')
    cwattacker = CarliniWagnerL2(device=device,
                                 image_constraints=(0, 1),
                                 num_classes=10)

    start = time.time()
    cw_atk = cwattacker.attack(model, x, labels=y, targeted=False)
    cw_time = time.time() - start

    # Save images
    all_imgs = torch.cat((x, cw_atk, ddn_atk))
    save_image(all_imgs, 'images_and_attacks.png', nrow=16, pad_value=0)
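Note: a natural follow-up to the timing comparison is to report success rate and perturbation size for both attacks. A sketch of that summary, reusing model, x, y and the attack outputs from above (the summarize helper is illustrative, not part of the original script):

def summarize(name, adv, elapsed):
    with torch.no_grad():
        pred = model(adv).argmax(1)
    success = (pred != y).float().mean().item()                # untargeted success rate
    med_l2 = (adv - x).flatten(1).norm(dim=1).median().item()  # median L2 perturbation
    print('%s: success=%.2f%%, median L2=%.3f, time=%.1fs'
          % (name, 100 * success, med_l2, elapsed))

summarize('DDN', ddn_atk, ddn_time)
summarize('C&W', cw_atk, cw_time)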
Code example #5
File: cifar10_wsdan.py  Project: a-zuoxi/tianchi_f
        imh_cam = np.transpose(imh_cam, (1, 2, 0))  # move the channel dimension to the last axis
        # scipy.misc.imsave requires an older SciPy (it was removed in later
        # releases; imageio.imwrite is the usual replacement).
        scipy.misc.imsave('drop_theta=0.2.png', imh_cam)
        plt.imshow(imh_cam)
        plt.show()
        y_pred_drop, _, _ = model(drop_images)

        # Combined loss: cross-entropy on the raw and crop predictions,
        # minus a small multiple of the drop-branch cross-entropy,
        # plus the feature-center loss.
        batch_loss = cross_entropy_loss(y_pred_raw, y) / 3. + \
                     cross_entropy_loss(y_pred_crop, y) / 3. - \
                     cross_entropy_loss(y_pred_drop, y) / 3. * 0.001 + \
                     center_loss(feature_matrix, feature_center_batch)

        if args.adv is not None and epoch >= args.adv:
            model.eval()
            requires_grad_(m, False)
            adv = attacker.attack(model, X, y)
            l2_norms = (adv - X).view(args.batch_size, -1).norm(2, 1)
            mean_norm = l2_norms.mean()
            if args.max_norm:
                adv = torch.renorm(adv - X, p=2, dim=0,
                                   maxnorm=args.max_norm) + X
            attack_norms.append(mean_norm.item())
            requires_grad_(m, True)
            model.train()
            y_pred_raw_adv, feature_matrix_adv, attention_map_adv = model(
                adv.detach())

            # Update the feature center (key step)
            feature_center_batch_adv = F.normalize(feature_center[y], dim=-1)
            # Q&A
            feature_center[y] += args.beta * (feature_matrix_adv.detach() -
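Note: center_loss and the feature-center bookkeeping are defined outside this snippet. In WS-DAN-style training the center loss is typically a mean-squared distance between the pooled attention features and their normalized per-class centers; a sketch under that assumption:

import torch.nn.functional as F

def center_loss(features, centers):
    # Assumed helper: pull each example's feature matrix toward the
    # normalized center of its class, as used in the batch loss above.
    return F.mse_loss(features, centers)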
Code example #6
#model.eval()
#with torch.no_grad():
for i, (images, labels) in enumerate(tqdm.tqdm(test_loader, ncols=80)):
    images, labels = images.to(DEVICE), labels.to(DEVICE)
    logits = model5(images)
    # loss = F.cross_entropy(logits, labels)
    # print(logits)
    # Note: these meters are re-created every batch, so they only hold the
    # current batch's accuracy rather than a running average.
    test_accs = AverageMeter()
    test_losses = AverageMeter()
    test_accs.append((logits.argmax(1) == labels).float().mean().item())

    ################ADV########################
    attacker = DDN(steps=100, device=DEVICE)
    attacker2 = DeepFool(device=DEVICE)
    adv = attacker.attack(model5, images, labels=labels, targeted=False)
    # deepfool = attacker2.attack(model5, images, labels=labels, targeted=False)
    if adv is None:
        adv = images
    # if deepfool is None:
    #     deepfool = images
    test_accs2 = AverageMeter()
    test_losses2 = AverageMeter()
    logits2 = model5(adv)
    # logits3 = model5(deepfool)
    test_accs2.append((logits2.argmax(1) == labels).float().mean().item())
    #print(test_accs2)

    # test_accs3 = AverageMeter()
    # test_losses3 = AverageMeter()
    # test_accs3.append((logits3.argmax(1) == labels).float().mean().item())
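Note: AverageMeter is used throughout these examples with an append method but is never defined here. A minimal sketch of what such a running-average tracker might look like (the class body is an assumption, not the projects' actual implementation):

class AverageMeter:
    # Assumed helper: keeps a running average of appended scalar values.
    def __init__(self):
        self.sum = 0.0
        self.count = 0

    def append(self, value):
        self.sum += value
        self.count += 1

    @property
    def avg(self):
        return self.sum / self.count if self.count else 0.0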