Example #1
            # The snippet begins mid-way through the training loop; the
            # opening of this best-checkpoint save is reconstructed from the
            # matching checkpoint call below.
            if err < best_err:
                best_err = err
                torch.save(
                    {
                        'state_dict': [m.state_dict() for m in model],
                        'err': best_err,
                        'epoch': t,
                        'sampler_indices': sampler_indices
                    }, args.prefix + "_best.pth")

            torch.save(
                {
                    'state_dict': [m.state_dict() for m in model],
                    'err': err,
                    'epoch': t,
                    'sampler_indices': sampler_indices
                }, args.prefix + "_checkpoint.pth")

    args.print = True

    aa = torch.load(args.prefix + "_best.pth")['state_dict'][0]
    model_eval = utils.select_model(args.data, args.model)
    model_eval.load_state_dict(aa)
    print('std testing ...')
    std_err = utils.evaluate(test_loader, model_eval, t, test_log,
                             args.verbose)
    print('pgd testing ...')
    pgd_err = utils.evaluate_pgd(test_loader, model_eval, args)
    if args.method == 'BCP':
        print('verification testing ...')
        last_err = BCP.evaluate_BCP(test_loader, model_eval, args.epsilon, t,
                                    test_log, args.verbose, args, u_list)
        # last_err only exists for the BCP method, so report it inside the
        # guard to avoid a NameError otherwise.
        print('Best model evaluation:', std_err.item(), pgd_err.item(),
              last_err.item())
    else:
        print('Best model evaluation:', std_err.item(), pgd_err.item())
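
Example #1 calls helpers (utils.evaluate, utils.evaluate_pgd,
BCP.evaluate_BCP) that the snippet never defines. A minimal sketch of the
clean-error helper, inferred from its call site alone; the name, signature,
and 0-d tensor return are assumptions, not the confirmed implementation:

import torch

def evaluate(loader, model, epoch, log, verbose):
    # Hypothetical clean-error evaluation matching the call site above.
    model.eval()
    errors, n = 0, 0
    with torch.no_grad():
        for X, y in loader:
            X, y = X.cuda(), y.cuda()
            errors += (model(X).max(1)[1] != y).sum().item()
            n += y.size(0)
    err = torch.tensor(errors / n)  # 0-d tensor, so err.item() works above
    if verbose:
        print(epoch, err.item(), file=log, flush=True)
    return err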
Example #2
def main():
    args = get_args()

    if not os.path.exists(args.out_dir):
        os.mkdir(args.out_dir)
    logfile = os.path.join(args.out_dir, 'output.log')
    if os.path.exists(logfile):
        os.remove(logfile)

    logging.basicConfig(format='[%(asctime)s] - %(message)s',
                        datefmt='%Y/%m/%d %H:%M:%S',
                        level=logging.INFO,
                        filename=logfile)
    logger.info(args)

    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    torch.backends.cudnn.benchmark = True
    device = torch.device('cuda')
    train_loader, test_loader = get_loaders(args.data_dir, args.batch_size)

    epsilon = (args.epsilon / 255.) / std
    alpha = (args.alpha / 255.) / std

    model = WideResNet().cuda()
    opt = torch.optim.SGD(model.parameters(),
                          lr=args.lr,
                          momentum=args.momentum,
                          weight_decay=args.weight_decay)
    scheduler = torch.optim.lr_scheduler.MultiStepLR(opt,
                                                     milestones=[75, 90],
                                                     gamma=0.1)
    criterion = RFAT(args.beta)

    # Training
    start_train_time = time.time()
    logger.info('Epoch \t Train Loss \t Train Acc \t LR \t Time')
    for epoch in range(args.epochs):
        start_epoch_time = time.time()
        train_loss = 0
        train_acc = 0
        train_n = 0
        model.train()
        for i, (X, y) in enumerate(train_loader):
            X, y = X.cuda(), y.cuda()

            # FGSM with random start: sample delta uniformly in the
            # per-channel eps-ball, take one signed-gradient step, then
            # project back onto the valid pixel range.
            delta = torch.zeros_like(X).cuda()
            for j in range(len(epsilon)):
                delta[:, j, :, :].uniform_(-epsilon[j][0][0].item(),
                                           epsilon[j][0][0].item())
            delta.data = clamp(delta, lower_limit - X, upper_limit - X)
            delta.requires_grad = True
            output = model(X + delta)
            loss = F.cross_entropy(output, y)
            loss.backward()
            grad = delta.grad.detach()
            delta.data = clamp(delta + alpha * torch.sign(grad), -epsilon,
                               epsilon)
            delta.data[:X.size(0)] = clamp(delta[:X.size(0)], lower_limit - X,
                                           upper_limit - X)
            delta = delta.detach()

            opt.zero_grad()
            output = model(X)
            adv_output = model(X + delta)
            loss = criterion(output, adv_output, y)
            loss.backward()
            opt.step()

            train_loss += loss.item() * y.size(0)
            train_acc += (output.max(1)[1] == y).sum().item()
            train_n += y.size(0)
        epoch_time = time.time()
        lr = opt.param_groups[0]['lr']
        scheduler.step()

        logger.info('%d \t %.4f \t %.4f \t %.4f \t %.4f', epoch,
                    train_loss / train_n, train_acc / train_n * 100., lr,
                    epoch_time - start_epoch_time)

    train_time = time.time()
    best_state_dict = model.state_dict()

    torch.save(best_state_dict, os.path.join(args.out_dir, 'model.pth'))
    logger.info('Total train time: %.4f minutes',
                (train_time - start_train_time) / 60)

    # Evaluation
    model_test = WideResNet().cuda()
    model_test.load_state_dict(best_state_dict)
    model_test.float()
    model_test.eval()

    logger.info('Attack Iters \t Loss \t Acc')
    pgd_loss, pgd_acc = evaluate_standard(test_loader, model_test)
    logger.info('Natural Test : %d \t %.4f \t %.4f', 0, pgd_loss,
                pgd_acc * 100.)
    pgd_loss, pgd_acc = evaluate_fgsm(test_loader, model_test)
    logger.info('FGSM Attack : %d \t %.4f \t %.4f', 1, pgd_loss,
                pgd_acc * 100.)
    pgd_loss, pgd_acc = evaluate_pgd(test_loader, model_test, 10, 1)
    logger.info('PGD Attack : %d \t %.4f \t %.4f', 10, pgd_loss,
                pgd_acc * 100.)
    pgd_loss, pgd_acc = evaluate_pgd(test_loader, model_test, 20, 1)
    logger.info('PGD Attack : %d \t %.4f \t %.4f', 20, pgd_loss,
                pgd_acc * 100.)
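
Examples #2-#4 rely on clamp, std, lower_limit, and upper_limit without
defining them. A minimal sketch, assuming CIFAR-10 per-channel normalization
(the mean/std constants are an assumption; the snippets only show that
epsilon is divided by a per-channel std):

import torch

# Assumed CIFAR-10 normalization constants.
cifar10_mean = (0.4914, 0.4822, 0.4465)
cifar10_std = (0.2471, 0.2435, 0.2616)
mu = torch.tensor(cifar10_mean).view(3, 1, 1).cuda()
std = torch.tensor(cifar10_std).view(3, 1, 1).cuda()

# The valid pixel range [0, 1], expressed in normalized coordinates.
upper_limit = (1 - mu) / std
lower_limit = (0 - mu) / std

def clamp(X, lower_limit, upper_limit):
    # Elementwise clamp with broadcastable tensor bounds.
    return torch.max(torch.min(X, upper_limit), lower_limit)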
Example #3
def main():
    args = get_args()

    if not os.path.exists(args.out_dir):
        os.mkdir(args.out_dir)
    logfile = os.path.join(args.out_dir, 'output.log')
    if os.path.exists(logfile):
        os.remove(logfile)

    logging.basicConfig(format='[%(asctime)s] - %(message)s',
                        datefmt='%Y/%m/%d %H:%M:%S',
                        level=logging.INFO,
                        filename=logfile)
    logger.info(args)

    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)

    train_loader, test_loader = get_loaders(args.data_dir, args.batch_size)

    epsilon = (args.epsilon / 255.) / std
    alpha = (args.alpha / 255.) / std
    pgd_alpha = (2 / 255.) / std

    model = PreActResNet18().cuda()
    model.train()

    opt = torch.optim.SGD(model.parameters(),
                          lr=args.lr_max,
                          momentum=args.momentum,
                          weight_decay=args.weight_decay)
    amp_args = dict(opt_level=args.opt_level,
                    loss_scale=args.loss_scale,
                    verbosity=False)
    if args.opt_level == 'O2':
        amp_args['master_weights'] = args.master_weights
    model, opt = amp.initialize(model, opt, **amp_args)
    criterion = nn.CrossEntropyLoss()

    if args.delta_init == 'previous':
        delta = torch.zeros(args.batch_size, 3, 32, 32).cuda()

    lr_steps = args.epochs * len(train_loader)
    if args.lr_schedule == 'cyclic':
        scheduler = torch.optim.lr_scheduler.CyclicLR(
            opt,
            base_lr=args.lr_min,
            max_lr=args.lr_max,
            step_size_up=lr_steps / 2,
            step_size_down=lr_steps / 2)
    elif args.lr_schedule == 'multistep':
        scheduler = torch.optim.lr_scheduler.MultiStepLR(
            opt, milestones=[lr_steps / 2, lr_steps * 3 / 4], gamma=0.1)

    # Training
    prev_robust_acc = 0.
    start_train_time = time.time()
    logger.info('Epoch \t Seconds \t LR \t \t Train Loss \t Train Acc')
    for epoch in range(args.epochs):
        start_epoch_time = time.time()
        train_loss = 0
        train_acc = 0
        train_n = 0
        for i, (X, y) in enumerate(train_loader):
            X, y = X.cuda(), y.cuda()
            if i == 0:
                first_batch = (X, y)
            if args.delta_init != 'previous':
                delta = torch.zeros_like(X).cuda()
            if args.delta_init == 'random':
                # Use a separate loop variable: the original shadowed the
                # batch index i and mixed channel indices in the uniform_
                # bounds (epsilon[i] vs epsilon[0]).
                for j in range(len(epsilon)):
                    delta[:, j, :, :].uniform_(-epsilon[j][0][0].item(),
                                               epsilon[j][0][0].item())
                delta.data = clamp(delta, lower_limit - X, upper_limit - X)
            delta.requires_grad = True
            output = model(X + delta[:X.size(0)])
            loss = F.cross_entropy(output, y)
            with amp.scale_loss(loss, opt) as scaled_loss:
                scaled_loss.backward()
            grad = delta.grad.detach()
            delta.data = clamp(delta + alpha * torch.sign(grad), -epsilon,
                               epsilon)
            delta.data[:X.size(0)] = clamp(delta[:X.size(0)], lower_limit - X,
                                           upper_limit - X)
            delta = delta.detach()
            output = model(X + delta[:X.size(0)])
            loss = criterion(output, y)
            opt.zero_grad()
            with amp.scale_loss(loss, opt) as scaled_loss:
                scaled_loss.backward()
            opt.step()
            train_loss += loss.item() * y.size(0)
            train_acc += (output.max(1)[1] == y).sum().item()
            train_n += y.size(0)
            scheduler.step()
        if args.early_stop:
            # Check current PGD robustness of model using random minibatch
            X, y = first_batch
            pgd_delta = attack_pgd(model, X, y, epsilon, pgd_alpha, 5, 1, opt)
            with torch.no_grad():
                output = model(
                    clamp(X + pgd_delta[:X.size(0)], lower_limit, upper_limit))
            robust_acc = (output.max(1)[1] == y).sum().item() / y.size(0)
            if robust_acc - prev_robust_acc < -0.2:
                break
            prev_robust_acc = robust_acc
            best_state_dict = copy.deepcopy(model.state_dict())
        epoch_time = time.time()
        lr = scheduler.get_last_lr()[0]  # get_lr() is deprecated here
        logger.info('%d \t %.1f \t \t %.4f \t %.4f \t %.4f', epoch,
                    epoch_time - start_epoch_time, lr, train_loss / train_n,
                    train_acc / train_n)
    train_time = time.time()
    if not args.early_stop:
        best_state_dict = model.state_dict()
    torch.save(best_state_dict, os.path.join(args.out_dir, 'model.pth'))
    logger.info('Total train time: %.4f minutes',
                (train_time - start_train_time) / 60)

    # Evaluation
    model_test = PreActResNet18().cuda()
    model_test.load_state_dict(best_state_dict)
    model_test.float()
    model_test.eval()

    pgd_loss, pgd_acc = evaluate_pgd(test_loader, model_test, 50, 10)
    test_loss, test_acc = evaluate_standard(test_loader, model_test)

    logger.info('Test Loss \t Test Acc \t PGD Loss \t PGD Acc')
    logger.info('%.4f \t \t %.4f \t %.4f \t %.4f', test_loss, test_acc,
                pgd_loss, pgd_acc)
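
Example #3's early-stopping check calls attack_pgd, which is not shown. A
minimal L-inf PGD sketch matching that call site, reusing the clamp helper
and limits sketched after Example #2; the opt argument is presumably only
needed for amp loss scaling, so this plain version ignores it (an
assumption, not the confirmed helper):

import torch
import torch.nn.functional as F

def attack_pgd(model, X, y, epsilon, alpha, attack_iters, restarts, opt=None):
    # PGD with random restarts; keeps the worst-case delta per example.
    max_delta = torch.zeros_like(X)
    max_loss = torch.full((X.size(0),), -float('inf'), device=X.device)
    for _ in range(restarts):
        delta = torch.zeros_like(X)
        for j in range(len(epsilon)):
            delta[:, j, :, :].uniform_(-epsilon[j][0][0].item(),
                                       epsilon[j][0][0].item())
        delta.data = clamp(delta, lower_limit - X, upper_limit - X)
        delta.requires_grad = True
        for _ in range(attack_iters):
            loss = F.cross_entropy(model(X + delta), y)
            loss.backward()
            grad = delta.grad.detach()
            delta.data = clamp(delta + alpha * torch.sign(grad), -epsilon,
                               epsilon)
            delta.data = clamp(delta, lower_limit - X, upper_limit - X)
            delta.grad.zero_()
        with torch.no_grad():
            all_loss = F.cross_entropy(model(X + delta), y, reduction='none')
            improved = all_loss > max_loss
            max_delta[improved] = delta.detach()[improved]
            max_loss = torch.max(max_loss, all_loss)
    return max_delta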
Example #4
def main():
    args = get_args()

    if not os.path.exists(args.out_dir):
        os.mkdir(args.out_dir)
    logfile = os.path.join(args.out_dir, 'output.log')
    if os.path.exists(logfile):
        os.remove(logfile)

    logging.basicConfig(
        format='[%(asctime)s] - %(message)s',
        datefmt='%Y/%m/%d %H:%M:%S',
        level=logging.INFO,
        filename=logfile)
    logger.info(args)

    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)

    train_loader, test_loader = get_loaders(args.data_dir, args.batch_size)

    epsilon = (args.epsilon / 255.) / std

    model = PreActResNet18().cuda()
    model.train()

    opt = torch.optim.SGD(model.parameters(), lr=args.lr_max, momentum=args.momentum, weight_decay=args.weight_decay)
    amp_args = dict(opt_level=args.opt_level, loss_scale=args.loss_scale, verbosity=False)
    if args.opt_level == 'O2':
        amp_args['master_weights'] = args.master_weights
    model, opt = amp.initialize(model, opt, **amp_args)
    criterion = nn.CrossEntropyLoss()

    delta = torch.zeros(args.batch_size, 3, 32, 32).cuda()
    delta.requires_grad = True

    lr_steps = args.epochs * len(train_loader) * args.minibatch_replays
    if args.lr_schedule == 'cyclic':
        scheduler = torch.optim.lr_scheduler.CyclicLR(opt, base_lr=args.lr_min, max_lr=args.lr_max,
            step_size_up=lr_steps / 2, step_size_down=lr_steps / 2)
    elif args.lr_schedule == 'multistep':
        scheduler = torch.optim.lr_scheduler.MultiStepLR(opt, milestones=[lr_steps / 2, lr_steps * 3 / 4], gamma=0.1)

    # Training
    start_train_time = time.time()
    logger.info('Epoch \t Seconds \t LR \t \t Train Loss \t Train Acc')
    for epoch in range(args.epochs):
        start_epoch_time = time.time()
        train_loss = 0
        train_acc = 0
        train_n = 0
        for i, (X, y) in enumerate(train_loader):
            X, y = X.cuda(), y.cuda()
            # "Free" adversarial training: the same delta persists across
            # minibatches and each batch is replayed, so the model and the
            # perturbation are updated together.
            for _ in range(args.minibatch_replays):
                output = model(X + delta[:X.size(0)])
                loss = criterion(output, y)
                opt.zero_grad()
                with amp.scale_loss(loss, opt) as scaled_loss:
                    scaled_loss.backward()
                grad = delta.grad.detach()
                delta.data = clamp(delta + epsilon * torch.sign(grad), -epsilon, epsilon)
                delta.data[:X.size(0)] = clamp(delta[:X.size(0)], lower_limit - X, upper_limit - X)
                opt.step()
                delta.grad.zero_()
                scheduler.step()
            train_loss += loss.item() * y.size(0)
            train_acc += (output.max(1)[1] == y).sum().item()
            train_n += y.size(0)
        epoch_time = time.time()
        lr = scheduler.get_last_lr()[0]  # get_lr() is deprecated here
        logger.info('%d \t %.1f \t \t %.4f \t %.4f \t %.4f',
            epoch, epoch_time - start_epoch_time, lr, train_loss/train_n, train_acc/train_n)
    train_time = time.time()
    torch.save(model.state_dict(), os.path.join(args.out_dir, 'model.pth'))
    logger.info('Total train time: %.4f minutes', (train_time - start_train_time)/60)

    # Evaluation
    model_test = PreActResNet18().cuda()
    model_test.load_state_dict(model.state_dict())
    model_test.float()
    model_test.eval()

    pgd_loss, pgd_acc = evaluate_pgd(test_loader, model_test, 50, 10)
    test_loss, test_acc = evaluate_standard(test_loader, model_test)

    logger.info('Test Loss \t Test Acc \t PGD Loss \t PGD Acc')
    logger.info('%.4f \t \t %.4f \t %.4f \t %.4f', test_loss, test_acc, pgd_loss, pgd_acc)
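
All the examples report results through evaluate_standard, evaluate_fgsm,
and evaluate_pgd helpers that the snippets never define. A sketch of the
clean-accuracy helper, inferred from how its (loss, accuracy) return values
are logged above; treat it as an assumption:

import torch
import torch.nn.functional as F

def evaluate_standard(test_loader, model):
    # Hypothetical clean test loop matching the (loss, acc) call sites.
    test_loss, test_acc, n = 0., 0, 0
    model.eval()
    with torch.no_grad():
        for X, y in test_loader:
            X, y = X.cuda(), y.cuda()
            output = model(X)
            loss = F.cross_entropy(output, y)
            test_loss += loss.item() * y.size(0)
            test_acc += (output.max(1)[1] == y).sum().item()
            n += y.size(0)
    return test_loss / n, test_acc / n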
Example #5
def main():
    args = get_args()

    if not os.path.exists(args.out_dir):
        os.mkdir(args.out_dir)
    logfile = os.path.join(args.out_dir, 'output.log')
    if os.path.exists(logfile):
        os.remove(logfile)

    logging.basicConfig(format='[%(asctime)s] - %(message)s',
                        datefmt='%Y/%m/%d %H:%M:%S',
                        level=logging.INFO,
                        filename=logfile)
    logger.info(args)

    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    torch.backends.cudnn.benchmark = True
    device = torch.device('cuda')

    mnist_train = datasets.MNIST(args.data_dir,
                                 train=True,
                                 download=True,
                                 transform=transforms.ToTensor())
    mnist_test = datasets.MNIST(args.data_dir,
                                train=False,
                                download=True,
                                transform=transforms.ToTensor())
    train_loader = torch.utils.data.DataLoader(mnist_train,
                                               batch_size=args.batch_size,
                                               shuffle=True)
    test_loader = torch.utils.data.DataLoader(mnist_test,
                                              batch_size=args.batch_size,
                                              shuffle=False)

    model = SmallCNN().cuda()
    opt = torch.optim.SGD(model.parameters(),
                          lr=args.lr,
                          momentum=args.momentum)
    scheduler = torch.optim.lr_scheduler.MultiStepLR(opt,
                                                     milestones=[55, 75, 90],
                                                     gamma=0.1)
    criterion = RFAT(args.beta)

    # Training
    start_train_time = time.time()
    logger.info('Epoch \t Train Loss \t Train Acc \t LR \t Time')
    for epoch in range(args.epochs):
        start_epoch_time = time.time()
        train_loss = 0
        train_acc = 0
        train_n = 0
        model.train()
        for i, (X, y) in enumerate(train_loader):
            X, y = X.cuda(), y.cuda()

            delta = torch.zeros_like(X).uniform_(-args.epsilon,
                                                 args.epsilon).cuda()
            delta.requires_grad = True
            output = model(X + delta)
            loss = F.cross_entropy(output, y)
            loss.backward()
            grad = delta.grad.detach()
            delta.data = torch.clamp(delta + args.alpha * torch.sign(grad),
                                     -args.epsilon, args.epsilon)
            delta.data = torch.max(torch.min(1 - X, delta.data), 0 - X)
            delta = delta.detach()

            opt.zero_grad()
            output = model(X)
            adv_output = model(X + delta)
            loss = criterion(output, adv_output, y)
            loss.backward()
            opt.step()

            train_loss += loss.item() * y.size(0)
            train_acc += (output.max(1)[1] == y).sum().item()
            train_n += y.size(0)
        epoch_time = time.time()
        lr = opt.param_groups[0]['lr']
        scheduler.step()

        logger.info('%d \t %.4f \t %.4f \t %.4f \t %.4f', epoch,
                    train_loss / train_n, train_acc / train_n * 100., lr,
                    epoch_time - start_epoch_time)

    train_time = time.time()
    best_state_dict = model.state_dict()

    torch.save(best_state_dict, os.path.join(args.out_dir, 'model.pth'))
    logger.info('Total train time: %.4f minutes',
                (train_time - start_train_time) / 60)

    # Evaluation
    model_test = SmallCNN().cuda()
    model_test.load_state_dict(best_state_dict)
    model_test.float()
    model_test.eval()

    logger.info('Attack Iters \t Loss \t Acc')
    pgd_loss, pgd_acc = evaluate_standard(test_loader, model_test)
    logger.info('%d, %.4f, %.4f', 0, pgd_loss, pgd_acc * 100.)
    pgd_loss, pgd_acc = evaluate_pgd(test_loader, model_test, args.epsilon,
                                     args.alpha, 20, 1)
    logger.info('%d, %.4f, %.4f', 20, pgd_loss, pgd_acc * 100.)
    pgd_loss, pgd_acc = evaluate_pgd(test_loader, model_test, args.epsilon,
                                     args.alpha, 40, 1)
    logger.info('%d, %.4f, %.4f', 40, pgd_loss, pgd_acc * 100.)
    pgd_loss, pgd_acc = evaluate_pgd(test_loader, model_test, args.epsilon,
                                     args.alpha, 50, 5)
    logger.info('%d, %.4f, %.4f', 50, pgd_loss, pgd_acc * 100.)
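
Examples #2 and #5 train against an RFAT(args.beta) criterion called as
criterion(output, adv_output, y), but its definition is not included. A
plausible sketch, assuming a beta-weighted mix of clean and adversarial
cross-entropy; the actual loss may well differ:

import torch.nn as nn
import torch.nn.functional as F

class RFAT(nn.Module):
    # Hypothetical trade-off loss: beta weights the adversarial term against
    # the clean term. This is a guess from the call sites, not the confirmed
    # definition.
    def __init__(self, beta):
        super().__init__()
        self.beta = beta

    def forward(self, output, adv_output, y):
        clean_loss = F.cross_entropy(output, y)
        adv_loss = F.cross_entropy(adv_output, y)
        return (1 - self.beta) * clean_loss + self.beta * adv_loss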