Example #1
import os
import time

import torch
import torch.nn as nn
import torch.optim as optim

# prepare_cifar and eval_model_pgd come from this repository (see Example #2);
# the remaining helpers (parse_args, check_mkdir, Logger, ResNet18,
# adjust_learning_rate, train_adv_epoch) are assumed to be defined alongside.
from utils import prepare_cifar
from eval_model import eval_model_pgd


def main():
    args = parse_args()
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu_id
    gpu_num = max(len(args.gpu_id.split(',')), 1)

    model_name = 'resnet18'
    log_dir = "logs/%s_%s" % (time.strftime("%b%d-%H%M",
                                            time.localtime()), model_name)
    check_mkdir(log_dir)
    log = Logger(log_dir + '/train.log')
    log.print(args)

    device = torch.device('cuda')
    model = ResNet18().to(device)
    # CUDA_VISIBLE_DEVICES re-indexes the selected GPUs from 0, so the
    # DataParallel device ids are simply 0..gpu_num-1.
    model = nn.DataParallel(model, device_ids=list(range(gpu_num)))

    train_loader, test_loader = prepare_cifar(args.batch_size,
                                              args.test_batch_size)
    optimizer = optim.SGD(model.parameters(),
                          lr=args.lr,
                          momentum=args.momentum,
                          weight_decay=args.weight_decay)

    best_epoch, best_robust_acc = 0, 0.
    for e in range(args.epoch):
        adjust_learning_rate(optimizer, e)
        train_acc, train_robust_acc, loss = train_adv_epoch(
            model, args, train_loader, device, optimizer, e)
        if e % 3 == 0 or 74 <= e <= 80:
            test_acc, test_robust_acc, _ = eval_model_pgd(
                model, test_loader, device, args.step_size, args.epsilon,
                args.perturb_steps)
        else:
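            # Hypothetical continuation (the original excerpt ends above): on
            # epochs where the PGD evaluation is skipped, reuse the training
            # metrics so the bookkeeping below always has values.
            test_acc, test_robust_acc = train_acc, train_robust_acc
        # Hypothetical sketch, not the original code: keep the checkpoint with
        # the best robust accuracy, which is what best_epoch/best_robust_acc
        # above are initialized for.
        if test_robust_acc > best_robust_acc:
            best_epoch, best_robust_acc = e, test_robust_acc
            torch.save(model.module.state_dict(), log_dir + '/best_model.pth')
        log.print('epoch %d: test_acc %.4f, robust_acc %.4f (best %.4f @ %d)'
                  % (e, test_acc, test_robust_acc, best_robust_acc, best_epoch))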
Example #2
import torch
import torch.nn.functional as F
from utils import prepare_cifar
import tqdm
import radam
from vgg import vgg13_bn
from models import PreActResNet18
from aegleseeker import AegleSeeker
from eval_model import eval_model_pgd

device = 'cuda:0'
model = vgg13_bn()
model = AegleSeeker(model).to(device)
train_loader, test_loader = prepare_cifar(100, 100)  # train/test batch size 100
optim = radam.RAdam(model.parameters())
epsilon = 8 / 255  # standard L-inf perturbation budget for CIFAR-10

for epoch in range(100):
    with tqdm.tqdm(train_loader) as train:
        running_loss = 0.0
        running_grad = 0.0
        running_acc = 0.0
        model.train()
        for i, (x, y) in enumerate(train):
            x, y = x.to(device), y.to(device)
            # x_bu = x.detach().clone()
            for _ in range(1):
                # Add the random start first, then mark the result as a leaf
                # requiring grad so x_rg.grad is populated by backward().
                x_rg = (x.detach().clone() +
                        torch.randn_like(x) * epsilon / 2).requires_grad_(True)
                optim.zero_grad()
                pred = model(x_rg)
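                # Hypothetical continuation (the original excerpt ends above):
                # a plain cross-entropy step whose input gradient is tracked
                # alongside loss and accuracy for the tqdm statistics.
                loss = F.cross_entropy(pred, y)
                loss.backward()
                running_grad += x_rg.grad.abs().mean().item()
                optim.step()
                running_loss += loss.item()
                running_acc += (pred.argmax(1) == y).float().mean().item()
            train.set_postfix(loss=running_loss / (i + 1),
                              acc=running_acc / (i + 1),
                              grad=running_grad / (i + 1))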