def test(epoch, is_adv=False):
    global is_training, best_acc
    is_training = False
    net.eval()
    test_loss = 0
    correct = 0
    total = 0
    # with torch.no_grad():  # disabled: PGD needs gradients w.r.t. the inputs
    for batch_idx, (inputs, targets) in enumerate(test_loader):
        if use_cuda:
            inputs, targets = inputs.requires_grad_().cuda(), targets.cuda()

        if is_adv:
            adversary = PGDAttack(net,
                                  loss_fn=nn.CrossEntropyLoss(reduction="sum"),
                                  eps=args.eps,
                                  nb_iter=args.iter,
                                  eps_iter=args.eps / args.iter,
                                  rand_init=True,
                                  clip_min=0.0,
                                  clip_max=1.0,
                                  targeted=False)
            with ctx_noparamgrad_and_eval(net):
                inputs = adversary.perturb(inputs, targets)

        optimizer.zero_grad()
        outputs = net(inputs)
        loss = criterion(outputs, targets)

        test_loss += loss.item()
        _, predicted = torch.max(outputs.data, 1)
        total += targets.size(0)
        correct += predicted.eq(targets.data).cpu().sum().item()

        progress_bar(
            batch_idx, len(test_loader),
            'Loss: %.3f | Acc: %.3f%% (%d/%d)' %
            (test_loss / (batch_idx + 1), 100. * correct / total, correct, total))

    # Save checkpoint.
    acc = 100. * correct / total
    if acc > best_acc:
        best_acc = acc
        checkpoint(acc, epoch)

    return (test_loss / (batch_idx + 1), 100. * correct / total)
def train(epoch):
    global is_training
    is_training = True
    print('\nEpoch: %d' % epoch)
    net.train()
    train_loss = 0
    correct = 0
    total = 0
    for batch_idx, (inputs, targets) in enumerate(train_loader):
        if use_cuda:
            inputs, targets = inputs.cuda().requires_grad_(), targets.cuda()

        # Generate adversarial images for adversarial training.
        if args.adv_train:
            adversary = PGDAttack(net,
                                  loss_fn=nn.CrossEntropyLoss(reduction="sum"),
                                  eps=args.eps,
                                  nb_iter=args.iter,
                                  eps_iter=args.eps / args.iter,
                                  rand_init=True,
                                  clip_min=0.0,
                                  clip_max=1.0,
                                  targeted=False)
            with ctx_noparamgrad_and_eval(net):
                inputs = adversary.perturb(inputs, targets)

        optimizer.zero_grad()
        outputs = net(inputs)
        loss = criterion(outputs, targets)
        loss.backward()
        optimizer.step()

        train_loss += loss.item()
        _, predicted = torch.max(outputs.data, 1)
        total += targets.size(0)
        correct += predicted.eq(targets.data).cpu().sum().item()

        progress_bar(
            batch_idx, len(train_loader),
            'Loss: %.3f | Acc: %.3f%% (%d/%d)' %
            (train_loss / (batch_idx + 1), 100. * correct / total, correct, total))

    return (train_loss / (batch_idx + 1), 100. * correct / total)
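# --- Usage sketch (not part of the original script) ---------------------------
# A minimal driver loop for the train()/test() functions above. It assumes the
# usual `start_epoch` global of this kind of CIFAR training script and a
# hypothetical `args.epochs`; it only illustrates the calling convention.
def run_training_sketch():
    for epoch in range(start_epoch, args.epochs):
        train_loss, train_acc = train(epoch)
        # Evaluate on PGD-perturbed inputs when adversarially training,
        # mirroring the `is_adv` switch of test().
        test_loss, test_acc = test(epoch, is_adv=args.adv_train)
        print('epoch %d | train %.3f / %.2f%% | test %.3f / %.2f%%' %
              (epoch, train_loss, train_acc, test_loss, test_acc))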
acc_clean = AverageMeter()
acc_adv_image = AverageMeter()
acc_adv_latent = AverageMeter()

progress_bar = tqdm(testloader)
attacker = PGDAttack(predict=net,
                     eps=8.0 / 255,
                     eps_iter=2.0 / 255,
                     nb_iter=nb_iter,
                     clip_min=-1.0,
                     clip_max=1.0)

for images, _, labels in progress_bar:
    images, labels = images.cuda(), labels.cuda()
    images_adv = attacker.perturb(images, labels)

    with torch.no_grad():
        pred_clean = net(images).argmax(dim=1)
        pred_adv = net(images_adv).argmax(dim=1)

    acc_clean.update(
        (pred_clean == labels).float().mean().item() * 100.0, images.size(0))
    acc_adv_image.update(
        (pred_adv == labels).float().mean().item() * 100.0, images.size(0))

    progress_bar.set_description(
        'Clean: {acc_clean.avg:.3f} '
        'PGD-{nb_iter}-image: {acc_adv_image.avg:.3f}'.format(
            acc_clean=acc_clean,
            acc_adv_image=acc_adv_image,
            nb_iter=nb_iter))
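# `AverageMeter` is used above but not defined in this excerpt. A minimal
# sketch of the standard running-average helper it is assumed to be
# (tracks a batch-size-weighted running mean):
class AverageMeter:
    def __init__(self):
        self.sum = 0.0
        self.count = 0
        self.avg = 0.0

    def update(self, val, n=1):
        # `val` is a per-batch average; weight it by the batch size `n`.
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count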
correct_adv = 0
correct_def = 0
for i, (images, labels) in enumerate(progress_bar):
    if i < start_ind or i >= end_ind:
        continue
    images, labels = images.cuda(), labels.cuda()

    result_path = os.path.join(result_dir, 'batch_{:04d}.pt'.format(i))
    if os.path.isfile(result_path):
        # Reuse cached adversarial and reconstructed images if available.
        result_dict = torch.load(result_path)
        images_adv = result_dict['input'].cuda()
        images_def = result_dict['rec'].cuda()
    else:
        images_adv = attacker.perturb(images)
        images_def, z_def = proj_fn(images_adv)
        # torch.save({'input': images_adv,
        #             'rec': images_def,
        #             'z_rec': z_def}, result_path)

    l2_dist = per_pixel_l2_dist(images, images_adv)

    if i % 1 == 0:
        clean_path = os.path.join(vis_dir, 'batch_{:04d}_clean.png'.format(i))
        adv_path = os.path.join(vis_dir, 'batch_{:04d}_adv.png'.format(i))
        def_path = os.path.join(vis_dir, 'batch_{:04d}_def.png'.format(i))
        save_image(images, clean_path, nrow=10, padding=2)
        save_image(images_adv, adv_path, nrow=10, padding=2)
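# `per_pixel_l2_dist` is called above but not defined in this excerpt. A
# possible implementation (an assumption, not the repo's own helper) that
# reports the batch-averaged L2 distance normalized by the number of pixels:
def per_pixel_l2_dist(x, x_adv):
    diff = (x_adv - x).view(x.size(0), -1)
    # L2 norm per image, divided by sqrt(#elements) so the value is comparable
    # across image resolutions, then averaged over the batch.
    return (diff.norm(p=2, dim=1) / diff.size(1) ** 0.5).mean().item()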
def train_Ours(args, train_loader, val_loader, knownclass, Encoder, Decoder,
               NorClsfier, SSDClsfier, summary_writer, saver):
    seed = init_random_seed(args.manual_seed)

    criterionCls = nn.CrossEntropyLoss()
    criterionRec = nn.MSELoss()

    if args.parallel_train:
        Encoder = DataParallel(Encoder)
        Decoder = DataParallel(Decoder)
        NorClsfier = DataParallel(NorClsfier)
        SSDClsfier = DataParallel(SSDClsfier)

    optimizer = optim.Adam(
        list(Encoder.parameters()) + list(NorClsfier.parameters()) +
        list(SSDClsfier.parameters()) + list(Decoder.parameters()),
        lr=args.lr)

    if args.adv == 'PGDattack':
        print("**********Defense PGD Attack**********")
        from advertorch.attacks import PGDAttack
        nor_adversary = PGDAttack(predict1=Encoder,
                                  predict2=NorClsfier,
                                  nb_iter=args.adv_iter)
        rot_adversary = PGDAttack(predict1=Encoder,
                                  predict2=SSDClsfier,
                                  nb_iter=args.adv_iter)
    elif args.adv == 'FGSMattack':
        print("**********Defense FGSM Attack**********")
        from advertorch.attacks import GradientSignAttack
        nor_adversary = GradientSignAttack(predict1=Encoder,
                                           predict2=NorClsfier)
        rot_adversary = GradientSignAttack(predict1=Encoder,
                                           predict2=SSDClsfier)

    global_step = 0

    # ----------
    #  Training
    # ----------
    for epoch in range(args.n_epoch):

        Encoder.train()
        Decoder.train()
        NorClsfier.train()
        SSDClsfier.train()

        for steps, (orig, label, rot_orig, rot_label) in enumerate(train_loader):

            label = lab_conv(knownclass, label)
            orig, label = orig.cuda(), label.long().cuda()
            rot_orig, rot_label = rot_orig.cuda(), rot_label.long().cuda()

            # Craft adversarial examples without tracking parameter gradients.
            with ctx_noparamgrad_and_eval(Encoder):
                with ctx_noparamgrad_and_eval(NorClsfier):
                    with ctx_noparamgrad_and_eval(SSDClsfier):
                        adv = nor_adversary.perturb(orig, label)
                        rot_adv = rot_adversary.perturb(rot_orig, rot_label)

            latent_feat = Encoder(adv)
            norpred = NorClsfier(latent_feat)
            norlossCls = criterionCls(norpred, label)

            recon = Decoder(latent_feat)
            lossRec = criterionRec(recon, orig)

            ssdpred = SSDClsfier(Encoder(rot_adv))
            rotlossCls = criterionCls(ssdpred, rot_label)

            loss = (args.norClsWgt * norlossCls + args.rotClsWgt * rotlossCls +
                    args.RecWgt * lossRec)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            #============ tensorboard the log info ============#
            lossinfo = {
                'loss': loss.item(),
                'norlossCls': norlossCls.item(),
                'lossRec': lossRec.item(),
                'rotlossCls': rotlossCls.item(),
            }

            global_step += 1

            #============ print the log info ============#
            if (steps + 1) % args.log_step == 0:
                errors = OrderedDict([
                    ('loss', loss.item()),
                    ('norlossCls', norlossCls.item()),
                    ('lossRec', lossRec.item()),
                    ('rotlossCls', rotlossCls.item()),
                ])
                saver.print_current_errors((epoch + 1), (steps + 1), errors)

        # Evaluate performance on the validation set periodically.
        if (epoch + 1) % args.val_epoch == 0:
            # Switch models to evaluation mode.
            Encoder.eval()
            NorClsfier.eval()

            running_corrects = 0.0
            epoch_size = 0.0
            val_loss_list = []

            # Calculate accuracy on the validation set.
            for steps, (images, label) in enumerate(val_loader):
                label = lab_conv(knownclass, label)
                images, label = images.cuda(), label.long().cuda()

                adv = nor_adversary.perturb(images, label)

                with torch.no_grad():
                    logits = NorClsfier(Encoder(adv))
                    _, preds = torch.max(logits, 1)
                    running_corrects += torch.sum(preds == label.data)
                    epoch_size += images.size(0)

                    val_loss = criterionCls(logits, label)
                    val_loss_list.append(val_loss.item())

            val_loss_mean = sum(val_loss_list) / len(val_loss_list)
            val_acc = running_corrects.double() / epoch_size
            print('Val Acc: {:.4f}, Val Loss: {:.4f}'.format(
                val_acc, val_loss_mean))

            valinfo = {
                'Val Acc': val_acc.item(),
                'Val Loss': val_loss_mean,
            }
            for tag, value in valinfo.items():
                summary_writer.add_scalar(tag, value, (epoch + 1))

            orig_show = vutils.make_grid(orig, normalize=True, scale_each=True)
            recon_show = vutils.make_grid(recon, normalize=True, scale_each=True)
            summary_writer.add_image('Ori_Image', orig_show, (epoch + 1))
            summary_writer.add_image('Rec_Image', recon_show, (epoch + 1))

        if (epoch + 1) % args.model_save_epoch == 0:
            model_save_path = os.path.join(args.results_path,
                                           args.training_type, 'snapshots',
                                           args.datasetname + '-' + args.split,
                                           args.denoisemean,
                                           args.adv + str(args.adv_iter))
            mkdir(model_save_path)
            torch.save(
                Encoder.state_dict(),
                os.path.join(model_save_path,
                             "Encoder-{}.pt".format(epoch + 1)))
            torch.save(
                NorClsfier.state_dict(),
                os.path.join(model_save_path,
                             "NorClsfier-{}.pt".format(epoch + 1)))
            torch.save(
                Decoder.state_dict(),
                os.path.join(model_save_path,
                             "Decoder-{}.pt".format(epoch + 1)))

    torch.save(Encoder.state_dict(),
               os.path.join(model_save_path, "Encoder-final.pt"))
    torch.save(NorClsfier.state_dict(),
               os.path.join(model_save_path, "NorClsfier-final.pt"))
    torch.save(Decoder.state_dict(),
               os.path.join(model_save_path, "Decoder-final.pt"))
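# `lab_conv` is used throughout these routines but not defined in this excerpt.
# A plausible sketch (an assumption about its behavior, with `torch` imported
# at module level): map raw dataset labels onto contiguous indices over the
# known classes, assigning any unknown class the index len(knownclass).
def lab_conv(knownclass, label):
    mapping = {c: i for i, c in enumerate(sorted(knownclass))}
    return torch.tensor(
        [mapping.get(int(l), len(knownclass)) for l in label],
        dtype=torch.long)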
out1, out2, out3 = net(inputt)

# Vote over the batch: count how often each brand is the Top-1 prediction.
cal_prob = np.zeros(args.brand_size)
for i in range(args.batch_size):
    cal_prob[np.argmax(np.array(out1.data.cpu()), 1)[i]] += 1

Ori_attack_Top1_lable = np.argmax(cal_prob)
if Ori_attack_Top1_lable in B_lable:
    Ori_AB1_category = B_name_set[np.argmax(np.argmax(cal_prob) == B_lable)]

Total = np.sum(cal_prob)
Ori_Top1_Prop = max(cal_prob) / Total
Ori_Top2_Prop, Ori_AB2_category, Ori_attack_Top2_lable = get_Top_prob(
    cal_prob, Total)
Ori_Top3_Prop, Ori_AB3_category, Ori_attack_Top3_lable = get_Top_prob(
    cal_prob, Total)

adv_inputs = adversary.perturb(inputt, labelt)

if args.concat:
    reconstruct_image(iter, adv_inputs, adv=True)
else:
    save_image(iter, adv_inputs, adv=True)

# This section mirrors the clean-input evaluation above, but on the
# adversarial inputs.
outt, outt2, outt3 = net(adv_inputs)

cal_prob = np.zeros(args.brand_size)
for i in range(args.batch_size):
    cal_prob[np.argmax(np.array(outt.data.cpu()), 1)[i]] += 1

attack_Top1_lable = np.argmax(cal_prob)
if attack_Top1_lable in B_lable:
    OB_category = B_name_set[np.argmax(
        np.argmax(np.array(blabelt.cpu()), 1)[0] == B_lable)]
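# `get_Top_prob` is called twice in a row above to obtain the Top-2 and Top-3
# predictions, which suggests it removes the current maximum from `cal_prob`
# as a side effect. A hypothetical sketch of such a helper (`B_lable` and
# `B_name_set` are assumed to hold the known brand labels and their names):
def get_Top_prob(cal_prob, Total):
    cal_prob[np.argmax(cal_prob)] = 0  # drop the previous top class
    top_lable = np.argmax(cal_prob)
    top_prop = cal_prob[top_lable] / Total
    category = (B_name_set[np.argmax(top_lable == B_lable)]
                if top_lable in B_lable else None)
    return top_prop, category, top_lable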
def precalc_weibull(args, dataloader_train, knownclass, Encoder, NorClsfier):
    # First generate pre-softmax 'activation vectors' for all training examples.
    print(
        "Weibull: computing features for all correctly-classified training data"
    )
    activation_vectors = {}

    if args.adv == 'PGDattack':
        from advertorch.attacks import PGDAttack
        adversary = PGDAttack(predict1=Encoder,
                              predict2=NorClsfier,
                              nb_iter=args.adv_iter)
    elif args.adv == 'FGSMattack':
        from advertorch.attacks import FGSM
        adversary = FGSM(predict1=Encoder, predict2=NorClsfier)

    for _, (images, labels, _, _) in enumerate(dataloader_train):
        labels = lab_conv(knownclass, labels)
        images, labels = images.cuda(), labels.long().cuda()

        print("**********Conduct Attack**********")
        advimg = adversary.perturb(images, labels)

        with torch.no_grad():
            logits = NorClsfier(Encoder(advimg))

        correctly_labeled = (logits.data.max(1)[1] == labels)
        labels_np = labels.cpu().numpy()
        logits_np = logits.data.cpu().numpy()

        for i, label in enumerate(labels_np):
            if not correctly_labeled[i]:
                continue
            # If correctly labeled, add this to the list of activation_vectors
            # for this class.
            if label not in activation_vectors:
                activation_vectors[label] = []
            activation_vectors[label].append(logits_np[i])

    print("Computed activation_vectors for {} known classes".format(
        len(activation_vectors)))
    for class_idx in activation_vectors:
        print("Class {}: {} images".format(class_idx,
                                           len(activation_vectors[class_idx])))

    # Compute a mean activation vector for each class.
    print("Weibull computing mean activation vectors...")
    mean_activation_vectors = {}
    for class_idx in activation_vectors:
        mean_activation_vectors[class_idx] = np.array(
            activation_vectors[class_idx]).mean(axis=0)

    # Initialize one libMR Weibull object for each class.
    print("Fitting Weibull to distance distribution of each class")
    weibulls = {}
    for class_idx in activation_vectors:
        distances = []
        mav = mean_activation_vectors[class_idx]
        for v in activation_vectors[class_idx]:
            distances.append(np.linalg.norm(v - mav))
        mr = libmr.MR()
        tail_size = min(len(distances), WEIBULL_TAIL_SIZE)
        mr.fit_high(distances, tail_size)
        weibulls[class_idx] = mr
        print("Weibull params for class {}: {}".format(class_idx,
                                                       mr.get_params()))

    return activation_vectors, mean_activation_vectors, weibulls
def openset_weibull(args, dataloader_test, knownclass, Encoder, NorClsfier,
                    activation_vectors, mean_activation_vectors, weibulls,
                    mode='openset'):
    # Apply a Weibull score to every logit.
    weibull_scores = []
    logits = []
    classes = activation_vectors.keys()

    running_corrects = 0.0
    epoch_size = 0.0

    if args.adv == 'PGDattack':
        from advertorch.attacks import PGDAttack
        adversary = PGDAttack(predict1=Encoder,
                              predict2=NorClsfier,
                              nb_iter=args.adv_iter)
    elif args.adv == 'FGSMattack':
        from advertorch.attacks import FGSM
        adversary = FGSM(predict1=Encoder, predict2=NorClsfier)

    for steps, (images, labels) in enumerate(dataloader_test):
        labels = lab_conv(knownclass, labels)
        images, labels = images.cuda(), labels.long().cuda()

        print("Calculate weibull_scores in step {}/{}".format(
            steps, len(dataloader_test)))
        print("**********Conduct Attack**********")
        if mode == 'closeset':
            advimg = adversary.perturb(images, labels)
        else:
            advimg = adversary.perturb(images)

        with torch.no_grad():
            batch_logits_torch = NorClsfier(Encoder(advimg))

        batch_logits = batch_logits_torch.data.cpu().numpy()
        batch_weibull = np.zeros(shape=batch_logits.shape)

        for activation_vector in batch_logits:
            weibull_row = np.ones(len(knownclass))
            for class_idx in classes:
                mav = mean_activation_vectors[class_idx]
                dist = np.linalg.norm(activation_vector - mav)
                weibull_row[class_idx] = 1 - weibulls[class_idx].w_score(dist)
            weibull_scores.append(weibull_row)
            logits.append(activation_vector)

        if mode == 'closeset':
            _, preds = torch.max(batch_logits_torch, 1)
            # statistics
            running_corrects += torch.sum(preds == labels.data)
            epoch_size += images.size(0)

    if mode == 'closeset':
        running_corrects = running_corrects.double() / epoch_size
        print('Test Acc: {:.4f}'.format(running_corrects))

    weibull_scores = np.array(weibull_scores)
    logits = np.array(logits)
    # OpenMax-style score: higher values indicate a more likely unknown sample.
    openmax_scores = -np.log(np.sum(np.exp(logits * weibull_scores), axis=1))

    if mode == 'closeset':
        return running_corrects, np.array(openmax_scores)
    return np.array(openmax_scores)
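# --- Usage sketch (not from the original code) --------------------------------
# One way the two routines above might be combined for open-set evaluation.
# `dataloader_close`/`dataloader_open` and the use of sklearn's roc_auc_score
# are assumptions for illustration only.
def evaluate_openset_sketch(args, dataloader_train, dataloader_close,
                            dataloader_open, knownclass, Encoder, NorClsfier):
    from sklearn.metrics import roc_auc_score

    av, mav, weibulls = precalc_weibull(args, dataloader_train, knownclass,
                                        Encoder, NorClsfier)
    acc, scores_close = openset_weibull(args, dataloader_close, knownclass,
                                        Encoder, NorClsfier, av, mav, weibulls,
                                        mode='closeset')
    scores_open = openset_weibull(args, dataloader_open, knownclass,
                                  Encoder, NorClsfier, av, mav, weibulls,
                                  mode='openset')

    # Known samples are labeled 0, unknown samples 1; a higher OpenMax score
    # should indicate "unknown", so AUROC measures open-set separability.
    y_true = np.concatenate([np.zeros(len(scores_close)),
                             np.ones(len(scores_open))])
    y_score = np.concatenate([scores_close, scores_open])
    return acc, roc_auc_score(y_true, y_score)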