# NOTE(review): this line is a whitespace-mangled paste — several statements of a
# PyTorch adversarial-training epoch were collapsed onto one physical line, so it
# is NOT valid Python as written (and everything after the first inline `#` is
# silently commented out). The visible statement sequence is:
#   1. build attack args and run `attack(net, images, labels, NUM_STEPS, LR, EPS,
#      args.random_step)`, detaching the resulting adversarial images;
#   2. classify them with `net`, accumulate val_num_correct / val_num_total
#      (this part clearly belongs inside a validation loop whose `for` header is
#      not visible in this chunk);
#   3. print the adversarial validation accuracy for epoch `ep`;
#   4. checkpoint `net.state_dict()` to "results/<dataset>_<MODE>_<save_str>_best"
#      via `ch.save` whenever the accuracy beats `best_adv_acc`, and update it;
#   5. step the LR scheduler;
#   6. switch `net` to eval mode and, under `ch.no_grad()`, loop over
#      `testloader` accumulating clean test accuracy, then print the
#      epoch-complete banner again.
# The original indentation (which statements sit inside which loop body) cannot
# be recovered from this flattened text — restore it from version control rather
# than re-indenting by guesswork.
# NOTE(review): `loss = loss_fn(pred_probs, labels)` is assigned but never used
# in the visible text — presumably leftover from the training path or logged
# elsewhere; confirm against the full file before deleting.
_args = [net, images, labels, NUM_STEPS, LR, EPS, args.random_step] new_ims = attack(*_args).detach() pred_probs = net(new_ims) loss = loss_fn(pred_probs, labels) pred_classes = pred_probs.argmax(1) # Shape: (BATCH_SIZE) val_num_correct += (pred_classes == labels).float().sum() val_num_total += labels.shape[0] print("###### EPOCH {0} COMPLETE ######".format(ep)) print("Adversarial Validation Accuracy: %f" % (val_num_correct/val_num_total).cpu().item() ) print("############################") if val_num_correct/val_num_total > best_adv_acc: ch.save(net.state_dict(), "results/%s_%s_%s_best" % (args.dataset, MODE, args.save_str)) print("Saved model...") best_adv_acc = val_num_correct/val_num_total scheduler.step() net.eval() with ch.no_grad(): num_correct = 0 num_total = 0 for (images, labels) in testloader: images, labels = images.cuda(), labels.cuda() pred_probs = net(images) # Shape: (BATCH_SIZE x 10) pred_classes = pred_probs.argmax(1) # Shape: (BATCH_SIZE) num_correct += (pred_classes == labels).float().sum() num_total += labels.shape[0] print("###### EPOCH {0} COMPLETE ######".format(ep))
# NOTE(review): like the line above, this is a whitespace-mangled paste of an
# autoencoder-variant epoch tail collapsed onto one physical line — not valid
# Python as written (everything after the first inline `#` is commented out).
# The visible statement sequence is:
#   1. transform the batch with `encode(images.clone(),
#      only_decode=args.attack_latent)` and classify with `net` (this belongs
#      inside a validation loop whose `for` header is outside this chunk);
#   2. accumulate val_num_correct / val_num_total and print the adversarial
#      validation accuracy for epoch `ep`;
#   3. when accuracy beats `best_adv_acc`, checkpoint BOTH models via `ch.save`:
#      `ae.state_dict()` to "results/retrained_enc_<MODE>_<save_str>" and
#      `net.state_dict()` to "results/trained_net_<MODE>_<save_str>", then
#      update `best_adv_acc`;
#   4. switch `ae` and `net` to eval mode and, under `ch.no_grad()`, loop over
#      `testloader`, re-encoding each batch with `encode(images)` before
#      classification and accumulating `num_correct`.
# The fragment is truncated mid-test-loop (no `num_total` update or final print
# is visible here), and the original loop indentation cannot be recovered from
# this flattened text — restore the file from version control rather than
# re-indenting by guesswork.
# NOTE(review): `loss = loss_fn(pred_probs, labels)` is assigned but never used
# in the visible text — confirm against the full file before deleting.
new_ims = encode(images.clone(), only_decode=args.attack_latent) pred_probs = net(new_ims) loss = loss_fn(pred_probs, labels) pred_classes = pred_probs.argmax(1) # Shape: (BATCH_SIZE) val_num_correct += (pred_classes == labels).float().sum() val_num_total += labels.shape[0] print("###### EPOCH {0} COMPLETE ######".format(ep)) print("Adversarial Validation Accuracy: %f" % (val_num_correct / val_num_total).cpu().item()) print("############################") if val_num_correct / val_num_total > best_adv_acc: ch.save(ae.state_dict(), "results/retrained_enc_%s_%s" % (MODE, args.save_str)) ch.save(net.state_dict(), "results/trained_net_%s_%s" % (MODE, args.save_str)) print("Saved model...") best_adv_acc = val_num_correct / val_num_total ae.eval() net.eval() with ch.no_grad(): num_correct = 0 num_total = 0 for (images, labels) in testloader: images, labels = images.cuda(), labels.cuda() new_ims = encode(images) pred_probs = net(new_ims) # Shape: (BATCH_SIZE x 10) pred_classes = pred_probs.argmax(1) # Shape: (BATCH_SIZE) num_correct += (pred_classes == labels).float().sum()