test_nat_acc = 0
fgsm_acc = 0
test_pgd20_acc = 0
cw_acc = 0
best_epoch = 0

for epoch in range(start_epoch, args.epochs):
    adjust_learning_rate(optimizer, epoch + 1)
    train_time, train_loss, bp_count_avg = train(
        model, train_loader, optimizer,
        adjust_tau(epoch + 1, args.dynamictau))

    ## Evaluations the same as DAT.
    loss, test_nat_acc = attack.eval_clean(model, test_loader)
    # FGSM: a single step with step size equal to epsilon.
    loss, fgsm_acc = attack.eval_robust(
        model, test_loader, perturb_steps=1, epsilon=0.031,
        step_size=0.031, loss_fn="cent", category="Madry", rand_init=True)
    # PGD-20: 20 steps at a quarter of epsilon.
    loss, test_pgd20_acc = attack.eval_robust(
        model, test_loader, perturb_steps=20, epsilon=0.031,
        step_size=0.031 / 4, loss_fn="cent", category="Madry", rand_init=True)
    # CW: 30 steps. The listing was truncated here; the trailing arguments
    # are completed to match the calls above (loss_fn="cw" is assumed for
    # the CW evaluation).
    loss, cw_acc = attack.eval_robust(
        model, test_loader, perturb_steps=30, epsilon=0.031,
        step_size=0.031 / 4, loss_fn="cw", category="Madry", rand_init=True)
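adjust_learning_rate is defined elsewhere in the script; for reference, a minimal sketch of a step-decay schedule of the kind typically used with such loops (the boundary epochs, factors, and base rate below are illustrative assumptions, not the original values):

def adjust_learning_rate(optimizer, epoch, base_lr=0.1):
    """Step-decay schedule: divide the base rate at fixed epochs.
    Boundaries (60/90) and factors are illustrative assumptions."""
    lr = base_lr
    if epoch >= 60:
        lr = base_lr * 0.1
    if epoch >= 90:
        lr = base_lr * 0.01
    # Apply the new rate to every parameter group of the optimizer.
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr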
for epoch in range(start_epoch, args.epochs):
    # Get lambda
    Lambda = adjust_Lambda(epoch + 1)

    # Adversarial training
    train_robust_loss, lr = train(epoch, model, train_loader, optimizer, Lambda)

    # Evaluations similar to DAT.
    _, test_nat_acc = attack.eval_clean(model, test_loader)
    _, test_pgd20_acc = attack.eval_robust(
        model, test_loader, perturb_steps=20, epsilon=0.031,
        step_size=0.031 / 4, loss_fn="cent", category="Madry", random=True)

    print('Epoch: [%d | %d] | Learning Rate: %f | Natural Test Acc %.2f | PGD20 Test Acc %.2f |\n'
          % (epoch, args.epochs, lr, test_nat_acc, test_pgd20_acc))

    logger_test.append([epoch + 1, test_nat_acc, test_pgd20_acc])

    # Save the best checkpoint. The listing was truncated inside the
    # save_checkpoint call; the payload below is the usual PyTorch pattern
    # (assumed, not the original dict).
    if test_pgd20_acc > best_acc:
        best_acc = test_pgd20_acc
        save_checkpoint({
            'epoch': epoch + 1,
            'state_dict': model.state_dict(),
            'test_pgd20_acc': test_pgd20_acc,
            'optimizer': optimizer.state_dict(),
        })
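save_checkpoint is not shown in the listing; it is assumed to be the usual thin wrapper over torch.save. A minimal sketch (the directory and filename defaults are assumptions):

import os
import torch

def save_checkpoint(state, checkpoint_dir='./checkpoint', filename='best_model.pth'):
    # Persist the training state so the best PGD-20 model can be restored later.
    os.makedirs(checkpoint_dir, exist_ok=True)
    torch.save(state, os.path.join(checkpoint_dir, filename))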