Example 1
import os
import pickle

import torch
import torch.nn.functional as F
from tqdm import tqdm

# dataset, model, hook_all_conv_layer, get_channel_coverage_group_exp and
# globalcoverage are project-local and assumed to be in scope.

def get_test_coverage():
    torch.manual_seed(1)
    torch.cuda.manual_seed(1)

    # data loader and model
    train_loader, test_loader = dataset.get100(batch_size=200, num_workers=1)
    cifar100_model = model.cifar100(128, pretrained=None)
    checkpoint = torch.load('./latest.pth')
    cifar100_model.load_state_dict(checkpoint)
    cifar100_model.cuda()
    hook_all_conv_layer(cifar100_model, get_channel_coverage_group_exp)
    cifar100_model.eval()

    test_loss = 0
    correct = 0
    count = 0
    yhats = []
    labels = []
    print("collecting neuron coverage for all testing data...")

    t = tqdm(test_loader, desc="Evaluating on Test:")
    for batch_idx, (data, target) in enumerate(t):
        labels = labels + list(target)
        indx_target = target.clone()
        data, target = data.cuda(), target.cuda()
        for i in range(len(target)):
            globalcoverage.append({})
            globalcoverage[-1]["dataset"] = "test"
        # Variable(..., volatile=True) is deprecated; torch.no_grad()
        # disables gradient tracking during evaluation instead.
        with torch.no_grad():
            output = cifar100_model(data)
            test_loss += F.cross_entropy(output, target).item()
        pred = output.max(1)[1]  # get the index of the max log-probability
        yhats = yhats + list(pred.cpu().numpy())

        correct += pred.cpu().eq(indx_target).sum().item()
        count = count + len(target)
        if count % 1000 == 0:
            print("count: " + str(count))
    acc = 100. * correct / len(test_loader.dataset)

    print('acc: ' + str(acc))
    print(len(yhats))
    print(len(labels))
    with open('globalcifar100_test_yhats.pickle', 'wb') as handle:
        pickle.dump(yhats, handle, protocol=pickle.HIGHEST_PROTOCOL)
    with open('globalcifar100_test_labels.pickle', 'wb') as handle:
        pickle.dump(labels, handle, protocol=pickle.HIGHEST_PROTOCOL)
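
The helpers hook_all_conv_layer and get_channel_coverage_group_exp are project-local and not shown above. A minimal sketch of what a hook installer of this kind might look like, assuming it simply registers the coverage callback as a forward hook on every Conv2d module (the name and behavior are assumptions, not the project's actual code):

import torch.nn as nn

def hook_all_conv_layer(net, hook_fn):
    # Assumed behavior: attach hook_fn (a standard forward hook taking
    # module, input, output) to every convolutional layer so per-batch
    # activations can be recorded for coverage statistics.
    for module in net.modules():
        if isinstance(module, nn.Conv2d):
            module.register_forward_hook(hook_fn)
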
print("========================================")

# seed
args.cuda = torch.cuda.is_available()
torch.manual_seed(args.seed)
if args.cuda:
    torch.cuda.manual_seed(args.seed)

# data loader and model
assert args.type in ['cifar10', 'cifar100'], args.type
if args.type == 'cifar10':
    train_loader, test_loader = dataset.get10(batch_size=args.batch_size,
                                              num_workers=1)
    model = model.cifar10(n_channel=args.channel)
else:
    train_loader, test_loader = dataset.get100(batch_size=args.batch_size,
                                               num_workers=1)
    model = model.cifar100(n_channel=args.channel)
model = torch.nn.DataParallel(model, device_ids=range(args.ngpu))
if args.cuda:
    model.cuda()

# optimizer
optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.wd)
decreasing_lr = list(map(int, args.decreasing_lr.split(',')))
print('decreasing_lr: ' + str(decreasing_lr))
best_acc, old_file = 0, None
t_begin = time.time()
try:
    # ready to go
    for epoch in range(args.epochs):
        model.train()
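
This snippet ends just after model.train(), before decreasing_lr is ever used. A plausible continuation inside the epoch loop (assumed, not shown in the source) would decay the learning rate at the epochs it lists:

# Assumed continuation: decay the Adam learning rate at the epochs
# named in decreasing_lr (the 0.1 factor is a guess, not from the source).
if epoch in decreasing_lr:
    for param_group in optimizer.param_groups:
        param_group['lr'] *= 0.1
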
Example 3
args.ngpu = len(args.gpu)

# logger
misc.ensure_dir(args.loaddir)
misc.ensure_dir(args.savedir)
print("=================FLAGS==================")
for k, v in args.__dict__.items():
    print('{}: {}'.format(k, v))
print("========================================")

args.cuda = torch.cuda.is_available()
torch.manual_seed(args.seed)
if args.cuda:
    torch.cuda.manual_seed(args.seed)

train_loader, test_loader = dataset.get100(batch_size=args.batch_size, data_root=args.data_root, num_workers=4)

algo = {'fgsm': fgsm_gt, 'bim': ifgsm_gt, 'pgd': pgd_gt, 'wrm': wrm_gt}

attack_algo = algo[args.attack_algo] if args.attack_algo is not None else None
defend_algo = algo[args.defend_algo] if args.defend_algo is not None else None

defend_name = "None" if args.defend_algo is None else args.defend_algo
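
fgsm_gt, ifgsm_gt, pgd_gt and wrm_gt are imported from elsewhere in the project. For reference, a one-step FGSM attack along the lines of the 'fgsm' entry could look like the following sketch (the signature is an assumption; the project's real implementation may differ):

import torch.nn.functional as F

def fgsm_gt(model, data, target, eps):
    # Sketch of the fast gradient sign method: perturb the input by
    # eps in the direction of the sign of the loss gradient.
    data = data.clone().detach().requires_grad_(True)
    loss = F.cross_entropy(model(data), target)
    loss.backward()
    return (data + eps * data.grad.sign()).detach()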

if args.prune_algo == "l0proj":
    prune_algo = l0proj
elif args.prune_algo is None:
    prune_algo = None
elif args.prune_algo == "baseline":
    prune_algo = l0proj
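
l0proj is also project-local; both the "l0proj" and "baseline" branches above map to the same projection. A minimal sketch of an L0 projection, under the assumption that it keeps only the k largest-magnitude weights and zeroes the rest:

import torch

def l0proj(w, k):
    # Assumed semantics: project w onto the L0 ball of radius k by
    # keeping the k largest-magnitude entries and zeroing the others
    # (in place, assuming w is contiguous).
    flat = w.view(-1)
    if k < flat.numel():
        keep = flat.abs().topk(k).indices
        mask = torch.zeros_like(flat, dtype=torch.bool)
        mask[keep] = True
        flat[~mask] = 0.0
    return w
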
Example 4
    print('{}: {}'.format(k, v))
print("========================================")

# seed
args.cuda = torch.cuda.is_available()
torch.manual_seed(args.seed)
if args.cuda:
    torch.cuda.manual_seed(args.seed)

# data loader and model
assert args.type in ['cifar10', 'cifar100'], args.type
if args.type == 'cifar10':
    train_loader, test_loader = dataset.get10(batch_size=args.batch_size, num_workers=1)
    model = model.cifar10(n_channel=args.channel)
else:
    train_loader, test_loader = dataset.get100(batch_size=args.batch_size, num_workers=1)
    model = model.cifar100(n_channel=args.channel)
model = torch.nn.DataParallel(model, device_ids=range(args.ngpu))
if args.cuda:
    model.cuda()

# optimizer
optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.wd)
decreasing_lr = list(map(int, args.decreasing_lr.split(',')))
print('decreasing_lr: ' + str(decreasing_lr))
best_acc, old_file = 0, None
t_begin = time.time()
try:
    # ready to go
    for epoch in range(args.epochs):
        model.train()
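
Like the second example, this snippet stops immediately after model.train(). The standard shape of the inner training step that typically follows this kind of setup (a generic sketch, not taken from this project) is:

import torch.nn.functional as F

# Generic continuation of the training loop above (assumed, not from
# the original source): one optimization step per mini-batch.
for data, target in train_loader:
    if args.cuda:
        data, target = data.cuda(), target.cuda()
    optimizer.zero_grad()
    output = model(data)
    loss = F.cross_entropy(output, target)
    loss.backward()
    optimizer.step()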