Example #1
def eval_fgsm_bnn(model,
                  data,
                  inv_factors,
                  estimator='kfac',
                  samples=30,
                  epsilon=0.1,
                  stats=True,
                  device=torch.device('cuda'),
                  verbose=True):

    model.eval()
    # Keep a copy of the mean (MAP) weights so they can be restored after each sample.
    mean_state = copy.deepcopy(model.state_dict())
    mean_predictions = 0
    stats_dict = None

    samples = tqdm.tqdm(range(samples), disable=not verbose)
    for sample in samples:
        samples.set_postfix({'RAM': ram(), 'VRAM': vram()})
        # Draw one set of weights from the approximate posterior, evaluate it
        # under FGSM, then restore the original mean weights.
        sample_and_replace_weights(model, inv_factors, estimator)
        predictions, labels, _ = eval_fgsm(model, data, epsilon, stats=False, device=device, verbose=False)
        mean_predictions += predictions
        model.load_state_dict(mean_state)
    mean_predictions /= len(samples)

    if stats:
        acc = accuracy(mean_predictions, labels)
        ece1 = 100 * expected_calibration_error(mean_predictions, labels)[0]
        ece2 = 100 * calibration_curve(mean_predictions, labels)[0]
        nll = negative_log_likelihood(mean_predictions, labels)
        ent = predictive_entropy(mean_predictions, mean=True)
        stats_dict = {"eps": epsilon, "acc": acc, "ece1": ece1, "ece2": ece2, "nll": nll, "ent": ent}

    if verbose and stats:
        print(f"Step: {epsilon:.2f} | Adv. Entropy: {stats_dict['ent']:.2f} | Adv. Accuracy: {stats_dict['acc']:.2f}%")

    return mean_predictions, labels, stats_dict
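
For context, a minimal usage sketch; `net`, `test_loader`, and `inv_factors` are hypothetical stand-ins for a trained model, a test DataLoader, and the inverted curvature factors produced elsewhere in this codebase:

# Hypothetical setup: `net` is a trained classifier, `test_loader` a test-set
# DataLoader, and `inv_factors` the inverted curvature factors (e.g. KFAC).
preds, labels, stats = eval_fgsm_bnn(net, test_loader, inv_factors,
                                     estimator='kfac', samples=10, epsilon=0.1)
print(f"Adv. accuracy: {stats['acc']:.2f}% | NLL: {stats['nll']:.4f}")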
Example #2
def compute_factors(args: Any,
                    model: Union[torch.nn.Module, torch.nn.Sequential],
                    data: Iterable,
                    factors=None):

    model.train()
    criterion = torch.nn.CrossEntropyLoss().to(args.device)
    est_base = getattr(fisher, args.estimator.upper())
    # The EFB estimator builds on previously computed factors, passed in via `factors`.
    if args.estimator == 'efb':
        est = est_base(model, factors)
    else:
        est = est_base(model)

    for epoch in range(args.epochs):
        # Wrap in a local variable so `data` is not rebound to a tqdm object
        # (re-wrapping it each epoch would nest progress bars).
        loader = tqdm.tqdm(data,
                           desc=f"Epoch [{epoch + 1}/{args.epochs}]",
                           disable=not args.verbose)
        for batch, (images, labels) in enumerate(loader):
            loader.set_postfix({'RAM': ram(), 'VRAM': vram()})

            logits = model(images.to(args.device, non_blocking=True))
            dist = torch.distributions.Categorical(logits=logits)

            for sample in range(args.samples):
                # Sample labels from the model's own predictive distribution
                # (true Fisher) rather than using the ground-truth labels.
                labels = dist.sample()

                loss = criterion(logits, labels)
                model.zero_grad()
                loss.backward(retain_graph=True)

                est.update(images.size(0))
    return est
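
A sketch of a possible call site, assuming `args` carries the fields the function reads (`device`, `estimator`, `epochs`, `samples`, `verbose`) and that a trained `net` and a `train_loader` yielding `(images, labels)` batches exist:

import argparse
import torch

# Hypothetical namespace; only the fields compute_factors actually reads are set.
args = argparse.Namespace(device=torch.device('cuda'), estimator='kfac',
                          epochs=1, samples=10, verbose=True)
est = compute_factors(args, net, train_loader)  # returns the updated Fisher estimator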
Example #3
def eval_bnn(model,
             dataset,
             inv_factors,
             estimator='kfac',
             samples=30, stats=False,
             device=torch.device('cuda'),
             verbose=True):

    model.eval()
    mean_state = copy.deepcopy(model.state_dict())
    mean_predictions = 0
    stats_list = {"acc": [], "ece": [], "nll": [], "ent": []}

    with torch.no_grad():
        samples = tqdm.tqdm(range(samples), disable=not verbose)
        for sample in samples:
            samples.set_postfix({'RAM': ram(), 'VRAM': vram()})
            sample_and_replace_weights(model, inv_factors, estimator)
            predictions, labels = eval_nn(model, dataset, device)
            mean_predictions += predictions
            model.load_state_dict(mean_state)

            if stats:
                # Track how the ensemble statistics evolve as samples accumulate.
                running_mean = mean_predictions / (sample + 1)
                stats_list["acc"].append(accuracy(running_mean, labels))
                stats_list["ece"].append(100 * expected_calibration_error(running_mean, labels)[0])
                stats_list["nll"].append(negative_log_likelihood(running_mean, labels))
                stats_list["ent"].append(predictive_entropy(running_mean, mean=True))
        mean_predictions /= len(samples)

        if verbose:
            print(f"Accuracy: {accuracy(mean_predictions, labels):.2f}% | ECE: {100 * expected_calibration_error(mean_predictions, labels)[0]:.2f}%")

        return mean_predictions, labels, stats_list
Example #4
def test_loader():
    args = setup()

    print("Loading data")
    if args.data == 'cifar10':
        data = cifar10(args.torch_data, splits='test')
    elif args.data == 'mnist':
        data = mnist(args.torch_data, splits='test')
    elif args.data == 'tiny':
        data = imagenet(args.data_dir,
                        img_size=64,
                        batch_size=args.batch_size,
                        splits='test',
                        tiny=True)
    elif args.data == 'imagenet':
        img_size = 224
        if args.model in ['googlenet', 'inception_v3']:
            img_size = 299
        data = imagenet(args.data_dir,
                        img_size,
                        args.batch_size,
                        workers=args.workers,
                        splits='train')
    elif args.data == 'gtsrb':
        data = gtsrb(args.data_dir,
                     batch_size=args.batch_size,
                     workers=args.workers,
                     splits='train')

    for epoch in range(args.epochs):
        loader = tqdm.tqdm(data, desc=f"Epoch [{epoch + 1}/{args.epochs}]")
        for batch, (images, labels) in enumerate(loader):
            loader.set_postfix({'RAM': ram(), 'VRAM': vram()})
            # Show the batch in a 5x12 grid (assumes a batch size of 60 images).
            fig, ax = plt.subplots(5, 12)
            i = 0
            j = 0
            for img in images:
                # Channels-last for matplotlib, then min-max normalize to [0, 1].
                img = np.moveaxis(img.numpy(), 0, -1)
                img = (img - np.min(img)) / np.ptp(img)
                ax[i, j].imshow(img)
                ax[i, j].axis('off')
                j += 1
                if j > 0 and j % 12 == 0:
                    j = 0
                    i += 1
            plt.show()
            plt.close()
Example #5
def eval_nn(model, dataset, device=torch.device('cuda'), verbose=False):
    model.eval()

    with torch.no_grad():
        logits_list = torch.Tensor().to(device)
        labels_list = torch.LongTensor()

        dataset = tqdm.tqdm(dataset, disable=not verbose or len(dataset) == 1)
        for images, labels in dataset:
            dataset.set_postfix({'RAM': ram(), 'VRAM': vram()})

            logits = model(images.to(device, non_blocking=True))
            logits_list = torch.cat([logits_list, logits])
            labels_list = torch.cat([labels_list, labels])

        predictions = torch.nn.functional.softmax(logits_list, dim=1).cpu().numpy()
        labels = labels_list.numpy()

    if verbose:
        print(f"Accuracy: {accuracy(predictions, labels):.2f}% | ECE: {100 * expected_calibration_error(predictions, labels)[0]:.2f}%")

    return predictions, labels
Example #6
def eval_fgsm(model,
              data,
              epsilon=0.1,
              stats=True,
              device=torch.device('cuda'),
              verbose=True):

    model.eval()
    logits_list = torch.Tensor().to(device)
    labels_list = torch.LongTensor()
    stats_dict = None

    data = tqdm.tqdm(data, disable=not verbose or len(data) == 1)
    for images, labels in data:
        data.set_postfix({'RAM': ram(), 'VRAM': vram()})

        # Craft adversarial images with FGSM (requires gradients), then score
        # them without tracking gradients.
        adv_images = datasets.fgsm(model, images.to(device, non_blocking=True), labels.to(device, non_blocking=True),
                                   epsilon=epsilon)
        with torch.no_grad():
            adv_logits = model(adv_images)

        logits_list = torch.cat([logits_list, adv_logits])
        labels_list = torch.cat([labels_list, labels])

    adv_predictions = torch.nn.functional.softmax(logits_list, dim=1).detach().cpu().numpy()
    labels = labels_list.numpy()

    if stats:
        acc = accuracy(adv_predictions, labels)
        ece1 = 100 * expected_calibration_error(adv_predictions, labels)[0]
        ece2 = 100 * calibration_curve(adv_predictions, labels)[0]
        nll = negative_log_likelihood(adv_predictions, labels)
        ent = predictive_entropy(adv_predictions, mean=True)
        stats_dict = {"eps": epsilon, "acc": acc, "ece1": ece1, "ece2": ece2, "nll": nll, "ent": ent}

    if verbose and stats:
        print(f"Step: {epsilon:.2f} | Adv. Entropy: {stats_dict['ent']:.2f} | Adv. Accuracy: {stats_dict['acc']:.2f}%")

    return adv_predictions, labels, stats_dict
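
The `datasets.fgsm` helper is local to this codebase; for reference, a minimal sketch of the standard FGSM step it presumably wraps, x_adv = x + epsilon * sign(grad_x L):

import torch
import torch.nn.functional as F

def fgsm_reference(model, images, labels, epsilon=0.1):
    # Standard FGSM: one signed-gradient ascent step on the input.
    images = images.clone().detach().requires_grad_(True)
    loss = F.cross_entropy(model(images), labels)
    loss.backward()
    adv = images + epsilon * images.grad.sign()
    return adv.clamp(0, 1).detach()  # assumes inputs live in [0, 1]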
Example #7
def train(model, train_loader, val_loader, optimizer, criterion, epochs, learning_rate, device):
    train_loss = 0
    train_acc = 0  # initialize so the progress bar never hits a NameError
    for epoch in range(epochs):
        model.train()
        loader = tqdm.tqdm(train_loader, desc=f"Epoch [{epoch + 1}/{epochs}]")
        for batch, (images, labels) in enumerate(loader):
            loader.set_postfix({'Train loss': train_loss / ((batch + 1) + (epoch * len(train_loader))),
                                'Train acc.': train_acc if batch > 10 else 0,
                                'RAM': ram(),
                                'VRAM': vram()})

            logits = model(images.to(device, non_blocking=True))
            loss = criterion(logits, labels.to(device, non_blocking=True))

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            # Record loss/accuracy roughly ten times per epoch (guard against
            # loaders with fewer than ten batches).
            if batch % max(len(train_loader) // 10, 1) == 0:
                train_loss += loss.detach().cpu().numpy()
                train_acc = accuracy(logits.detach().cpu().numpy(), labels.numpy())

        eval_nn(model, val_loader, device, verbose=True)
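
A sketch of how train might be wired up; the model and data loaders are assumed to exist, and the optimizer/criterion choices are illustrative:

import torch

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = model.to(device)  # `model`, `train_loader`, `val_loader` assumed given
optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
criterion = torch.nn.CrossEntropyLoss().to(device)
# learning_rate is unused inside train() itself but kept for its signature.
train(model, train_loader, val_loader, optimizer, criterion,
      epochs=10, learning_rate=0.01, device=device)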