    def test_clean_acc_jsons_fast(self):
        config = get_test_config()
        n_ex = 200
        x_test, y_test = load_cifar10(n_ex, config['data_dir'])
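        # x_test, y_test hold the first n_ex CIFAR-10 test images and labels as torch tensors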

        for norm in model_dicts.keys():
            print('Test models robust wrt {}'.format(norm))
            models = list(model_dicts[norm].keys())
            models.remove(
                'Standard'
            )  # removed temporarily to avoid an error for pytorch 1.4.0

            n_tests_passed = 0
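            # Check the clean accuracy of every model listed for this threat model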
            for model_name in models:
                model = load_model(model_name, config['model_dir'],
                                   norm).cuda().eval()

                acc = clean_accuracy(model,
                                     x_test,
                                     y_test,
                                     batch_size=config['batch_size'])

                self.assertGreater(round(acc * 100., 2), 70.0)
                success = round(acc * 100., 2) > 70.0
                n_tests_passed += success
                print(
                    '{}: clean accuracy {:.2%} (on {} examples), test passed: {}'
                    .format(model_name, acc, n_ex, success))

            print('Test is passed for {}/{} models.'.format(
                n_tests_passed, len(models)))
Example No. 2
def evaluate(description):
    load_cfg_fom_args(description)
    assert cfg.CORRUPTION.DATASET == 'cifar10'
    base_model = load_model(cfg.MODEL.ARCH, cfg.CKPT_DIR,
                       'cifar10', ThreatModel.Linf).cuda()
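    # Optionally wrap the base classifier with Dent for episodic test-time adaptation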
    if cfg.MODEL.ADAPTATION == "dent":
        assert cfg.MODEL.EPISODIC
        dent_model = Dent(base_model, cfg.OPTIM)
        logger.info(dent_model.model)
    x_test, y_test = load_cifar10(cfg.CORRUPTION.NUM_EX, cfg.DATA_DIR)
    x_test, y_test = x_test.cuda(), y_test.cuda()
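    # Evaluate the adapted model with the standard AutoAttack suite (Linf, eps=8/255)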
    adversary = AutoAttack(
        dent_model, norm='Linf', eps=8./255., version='standard',
        log_path=osp.join(cfg.SAVE_DIR, cfg.LOG_DEST))
    adversary.run_standard_evaluation(
        x_test, y_test, bs=cfg.TEST.BATCH_SIZE)
Example No. 3
    def test_clean_acc_jsons_exact(self):
        config = get_test_config()
        device = torch.device(config['device'])
        n_ex = 10000
        x_test, y_test = load_cifar10(n_ex, config['data_dir'])

        for norm in model_dicts.keys():
            print('Test models robust wrt {}'.format(norm))
            models = list(model_dicts[norm].keys())
            models.remove(
                'Standard'
            )  # removed temporarily to avoid an error for pytorch 1.4.0

            n_tests_passed = 0
            for model_name in models:
                model = load_model(model_name, config['model_dir'],
                                   norm).to(device)

                acc = clean_accuracy(model,
                                     x_test,
                                     y_test,
                                     batch_size=config['batch_size'],
                                     device=device)
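                # Compare the measured accuracy with the reference value stored in the model's JSON file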
                with open('./model_info/{}/{}.json'.format(norm, model_name),
                          'r') as model_info:
                    json_dict = json.load(model_info)

                success = abs(
                    round(acc * 100., 2) -
                    float(json_dict['clean_acc'])) <= 0.05
                print('{}: clean accuracy {:.2%}, test passed: {}'.format(
                    model_name, acc, success))
                self.assertLessEqual(
                    abs(round(acc * 100., 2) - float(json_dict['clean_acc'])),
                    0.05)
                n_tests_passed += success

            print('Test is passed for {}/{} models.'.format(
                n_tests_passed, len(models)))
Example No. 4
                        type=str,
                        default='./models',
                        help='where to store downloaded models')
    parser.add_argument('--device',
                        type=str,
                        default='cuda:0',
                        help='device to use for computations')
    args = parser.parse_args()
    return args


if __name__ == '__main__':
    args = parse_args()
    device = torch.device(args.device)

    x_test, y_test = load_cifar10(args.n_ex, args.data_dir)
    x_test, y_test = x_test.to(device), y_test.to(device)
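    # Load the requested robust model and report its clean accuracy before attacking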
    model = load_model(args.model_name, args.model_dir,
                       args.norm).to(device).eval()

    acc = clean_accuracy(model,
                         x_test,
                         y_test,
                         batch_size=args.batch_size,
                         device=device)
    print('Clean accuracy: {:.2%}'.format(acc))

    adversary = AutoAttack(model,
                           norm=args.norm,
                           eps=args.eps,
                           version='standard')
Example No. 5
import numpy as np
import torch
import copt
from copt.utils_pytorch import make_func_and_grad

from robustbench.data import load_cifar10
from robustbench.utils import load_model

import matplotlib.pyplot as plt

n_examples = 20
data_batch, target_batch = load_cifar10(n_examples=n_examples,
                                        data_dir='~/datasets')

model = load_model("Standard")
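# "Standard" is the non-robust baseline from the robustbench model zoo (Linf threat model by default)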
criterion = torch.nn.CrossEntropyLoss()

# Define the constraint set + initial point
alpha = 10.
constraint = copt.constraint.L1Ball(alpha)

for data, target in zip(data_batch, target_batch):
    data, target = data.unsqueeze(0), target.unsqueeze(0)

    # Define the loss function to be minimized, using Pytorch
    def loss_fun(delta):
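        # Negative cross-entropy: minimizing this maximizes the classifier's loss on data + delta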
        adv_input = data + delta
        return -criterion(model(adv_input), target)

    # Change the function to f_grad: returns loss_val, grad in flattened, numpy array
    f_grad = make_func_and_grad(loss_fun,
Example No. 6
import torch

from robustbench.data import load_cifar10
from robustbench.utils import load_model

import matplotlib.pyplot as plt

import chop
from chop.image import group_patches, matplotlib_imshow_batch
from chop.logging import Trace

device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

batch_size = 8

# Note that this example uses load_cifar10 from the robustbench library
data, target = load_cifar10(n_examples=batch_size, data_dir='~/datasets')
data = data.to(device)
target = target.to(device)

classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse',
           'ship', 'truck')

model = load_model(
    'Standard')  # Can be changed to any model from the robustbench model zoo
model = model.to(device)
criterion = torch.nn.CrossEntropyLoss(reduction='none')
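# reduction='none' keeps a separate loss value for each example in the batch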

# Define the constraint set + initial point
print("L2 norm constraint.")
alpha = .5
constraint = chop.constraints.L2Ball(alpha)
Example No. 7

import torch

from robustbench.data import load_cifar10
from robustbench.utils import load_model

from constopt.adversary import Adversary
from constopt.optim import PGD, PGDMadry, FrankWolfe, MomentumFrankWolfe
from constopt.constraints import LinfBall

device = torch.device("cuda" if torch.cuda.is_available() else 'cpu')
data, target = load_cifar10(n_examples=100)
model = load_model(model_name='Carmon2019Unlabeled', norm='Linf')
criterion = torch.nn.CrossEntropyLoss()
eps = 8. / 255
constraint = LinfBall(eps)
n_iter = 20

step_size_test = {
    PGD.name: 5e4 * 2.5 * constraint.alpha / n_iter,
    PGDMadry.name: 2.5 / n_iter,
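    # The Frank-Wolfe variants use their built-in step-size rule, hence no value here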
    FrankWolfe.name: None,
    MomentumFrankWolfe.name: None
}

for alg_class in PGD, PGDMadry, FrankWolfe, MomentumFrankWolfe:
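    # Run each constrained optimizer as an attack and collect its adversarial loss and perturbation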

    adv = Adversary(data.shape, constraint, alg_class, device)
    adv_loss, delta = adv.perturb(data,
                                  target,
                                  model,
                                  criterion,