# Example 1
def get_cifar10_svhn_ood_ensemble(ensemble):
    # Evaluate a deep ensemble: CIFAR-10 test accuracy (in-distribution) and
    # OOD AUROC for separating CIFAR-10 from SVHN via predictive entropy.
    _, _, _, cifar_test_dataset = get_CIFAR10()
    _, _, _, svhn_test_dataset = get_SVHN()

    dataloader, anomaly_targets = prepare_ood_datasets(cifar_test_dataset, svhn_test_dataset)
    scores = []
    accuracies = []
    with torch.no_grad():
        for data, target in dataloader:
            data = data.cuda()
            target = target.cuda()
            # Collect the outputs of every ensemble member for this batch.
            output_l = []
            for model in ensemble:
                model.eval()
                output = model(data)
                output_l.append(output)
            output_l = torch.stack(output_l)

            # Average the member probabilities (outputs are exponentiated, i.e.
            # treated as log-probabilities) and predict the most likely class.
            mean_probs = output_l.exp().mean(0)
            pred = mean_probs.argmax(1)

            # Predictive entropy of the averaged distribution is the OOD score.
            kernel_distance = -(mean_probs * torch.log(mean_probs)).sum(1)
            
            accuracy = pred.eq(target)
            accuracies.append(accuracy.cpu().numpy())
            scores.append(kernel_distance.cpu().numpy())
    scores = np.concatenate(scores)
    accuracies = np.concatenate(accuracies)
    # Classification accuracy is only meaningful on the in-distribution
    # (CIFAR-10) half of the concatenated test loader.
    accuracy = np.mean(accuracies[: len(cifar_test_dataset)])
    auroc = roc_auc_score(anomaly_targets, scores)
    return accuracy, auroc
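

# `prepare_ood_datasets` is used above but not shown in this snippet. Below is a
# minimal sketch of what it is assumed to do, inferred from how its outputs are
# used (an unshuffled loader with the in-distribution data first and binary
# anomaly targets aligned with it); the default batch size is an arbitrary
# choice for illustration.
def prepare_ood_datasets(id_dataset, ood_dataset, batch_size=128):
    from torch.utils.data import ConcatDataset, DataLoader

    # In-distribution samples come first, so accuracies[: len(id_dataset)] in
    # the caller indexes exactly the CIFAR-10 part.
    dataset = ConcatDataset([id_dataset, ood_dataset])
    dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=False)

    # 0 = in-distribution (CIFAR-10), 1 = out-of-distribution (SVHN); higher
    # entropy scores then correspond to a higher AUROC for detecting SVHN.
    anomaly_targets = torch.cat(
        (torch.zeros(len(id_dataset)), torch.ones(len(ood_dataset)))
    ).numpy()
    return dataloader, anomaly_targets
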
def get_cifar_svhn_ood(model):
    _, _, _, cifar_test_dataset = get_CIFAR10()
    _, _, _, svhn_test_dataset = get_SVHN()

    return get_auroc_ood(cifar_test_dataset, svhn_test_dataset, model)
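

# Hedged usage sketch for the two entry points above. `trained_models` is a
# hypothetical, already-populated list of trained classifiers on the GPU; since
# the ensemble function exponentiates their outputs, they are assumed to return
# log-probabilities (e.g. a final log_softmax).
ens_acc, ens_auroc = get_cifar10_svhn_ood_ensemble(trained_models)
print(f'Ensemble: accuracy={ens_acc:.4f}, OOD AUROC={ens_auroc:.4f}')

single_acc, single_auroc = get_cifar_svhn_ood(trained_models[0])
print(f'Single model: accuracy={single_acc:.4f}, OOD AUROC={single_auroc:.4f}')
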
log_dir = os.path.join(logs_root_dir, model_name)

start_epoch, optim_state_dict = rh.load_model_checkpoint(
    model, model_dir, device, hps)
model = Cifar10Wrapper(model).to(device)

msda_config = rh.create_msda_config(hps)

# load dataset
od_bs = int(hps.od_bs_factor * hps.bs)

id_config = {}
if hps.dataset == 'cifar10':
    train_loader = dl.get_CIFAR10(train=True,
                                  batch_size=hps.bs,
                                  augm_type=hps.augm,
                                  size=img_size,
                                  config_dict=id_config)
elif hps.dataset == 'semi-cifar10':
    train_loader = dl.get_CIFAR10_ti_500k(train=True,
                                          batch_size=hps.bs,
                                          augm_type=hps.augm,
                                          fraction=0.7,
                                          size=img_size,
                                          config_dict=id_config)
else:
    raise ValueError(f'Dataset {hps.dataset} not supported')

if hps.train_type.lower() in [
        'ceda', 'acet', 'advacet', 'tradesacet', 'tradesceda'
]:
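    # The body of this branch is not included in the snippet. CEDA/ACET-style
    # training additionally draws an out-distribution batch each step, which is
    # presumably where od_bs computed above comes in. A purely hypothetical
    # sketch (tiny_images_dataset stands in for whatever out-distribution data
    # the repository actually uses):
    ood_loader = torch.utils.data.DataLoader(tiny_images_dataset,
                                             batch_size=od_bs,
                                             shuffle=True,
                                             num_workers=4)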

# Example 4
                       folder,
                       checkpoint,
                       temperature,
                       device,
                       load_temp=temp,
                       dataset=dataset)
    model.to(device)

    if len(hps.gpu) > 1:
        model = nn.DataParallel(model, device_ids=device_ids)

    model.eval()
    print(f'\n\n{folder} {checkpoint}\n ')

    if dataset == 'cifar10':
        dataloader = dl.get_CIFAR10(False, batch_size=bs, augm_type='none')
    elif dataset == 'cifar100':
        dataloader = dl.get_CIFAR100(False, batch_size=bs, augm_type='none')
    else:
        raise NotImplementedError()

    acc = 0.0
    with torch.no_grad():
        for data, target in dataloader:
            data = data.to(device)
            target = target.to(device)
            out = model(data)
            _, pred = torch.max(out, dim=1)
            # Each batch adds its fraction of correct predictions; summed over
            # all batches this gives the overall test accuracy.
            acc += torch.sum(pred == target).item() / len(dataloader.dataset)

    print(f'Clean accuracy {acc}')
    device = torch.device('cuda:' + str(min(device_ids)))
    bs = bs * len(device_ids)
    big_model_bs = big_model_bs * len(device_ids)

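# Each tuple below is assumed to be (architecture, model folder, checkpoint tag,
# temperature, load_temp), matching the loading arguments shown earlier in this
# example.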
model_descriptions = [
    ('WideResNet34x10', 'cifar10_pgd', 'best_avg', None, False),
    ('WideResNet34x10', 'cifar10_apgd', 'best_avg', None, False),
    ('WideResNet34x10', 'cifar10_500k_pgd', 'best_avg', None, False),
    ('WideResNet34x10', 'cifar10_500k_apgd', 'best_avg', None, False),
    ('WideResNet34x10', 'cifar10_500k_apgd_asam', 'best_avg', None, False),
]

model_batchsize = bs * np.ones(len(model_descriptions), dtype=int)
num_examples = 16

dataloader = dl.get_CIFAR10(False, bs, augm_type='none')
num_datapoints = len(dataloader.dataset)

class_labels = dl.cifar.get_CIFAR10_labels()
eval_dir = 'Cifar10Eval/'

norm = 'l2'

if norm == 'l1':
    radii = np.linspace(15, 90, 6)
    visual_counterfactuals(model_descriptions, radii, dataloader, model_batchsize, num_examples, class_labels, device,
                           eval_dir, 'cifar10', norm='l1', stepsize=5, device_ids=device_ids)
else:
    radii = np.linspace(0.5, 3, 6)
    visual_counterfactuals(model_descriptions, radii, dataloader, model_batchsize, num_examples, class_labels, device,
                           eval_dir, 'cifar10', device_ids=device_ids)