Example 1
def main():
    args = setup(seed=None)  # Disable seed to get random loss samples

    print("Preparing directories")
    filename = f"{args.prefix}{args.model}_{args.data}{args.suffix}"
    os.makedirs(os.path.join(args.root_dir, "directions"), exist_ok=True)
    directions_path = os.path.join(args.root_dir, "directions", filename)
    os.makedirs(os.path.join(args.results_dir, "loss1d" if args.loss1d else "loss2d"), exist_ok=True)
    results_path = os.path.join(args.results_dir, "loss1d" if args.loss1d else "loss2d", filename)

    print("Loading model")
    if args.model == 'lenet5':
        model = lenet5.lenet5(pretrained=args.data, device=args.device)
    elif args.model == 'resnet18' and args.data != 'imagenet':
        model = resnet.resnet18(pretrained=os.path.join(args.root_dir, 'weights', f"{args.model}_{args.data}.pth"),
                                num_classes=43 if args.data == 'gtsrb' else 10, device=args.device)
    else:
        model_class = getattr(torchvision.models, args.model)
        if args.model in ['googlenet', 'inception_v3']:
            model = model_class(pretrained=True, aux_logits=False)
        else:
            model = model_class(pretrained=True)
    model.to(args.device).eval()
    if args.parallel:
        model = torch.nn.parallel.DataParallel(model)

    print(f"Loading data")
    data_dir = os.path.join(args.torch_dir, "datasets")
    if args.data == 'cifar10':
        train_data, val_data = datasets.cifar10(data_dir, args.batch_size, args.workers, augment=False)
    elif args.data == 'mnist':
        train_data, val_data = datasets.mnist(data_dir, args.batch_size, args.workers, augment=False)
    elif args.data == 'gtsrb':
        data_dir = os.path.join(args.root_dir, "datasets", "gtsrb")
        train_data, val_data = datasets.gtsrb(data_dir, batch_size=args.batch_size, workers=args.workers)
    elif args.data == 'tiny':
        img_size = 64
        data_dir = os.path.join(args.root_dir, "datasets", "imagenet")
        train_data, val_data = datasets.imagenet(data_dir, img_size, args.batch_size, augment=False,
                                                 workers=args.workers, tiny=True)
    elif args.data == 'imagenet':
        img_size = 224
        if args.model in ['googlenet', 'inception_v3']:
            img_size = 299
        data_dir = os.path.join(args.root_dir, "datasets", "imagenet")
        train_data, val_data = datasets.imagenet(data_dir, img_size, args.batch_size, augment=False, shuffle=False)
    else:
        raise ValueError(f"Unknown dataset: {args.data}")
    cudnn.benchmark = True

    if args.loss1d:
        loss1d(args, model, train_data, val_data, directions_path, results_path)
    elif args.loss2d:
        loss2d(args, model, train_data, directions_path, results_path)
    else:
        print(f"You need to specify either --loss1d or --loss2d.")
Example 2
def main():
    args = setup()

    print("Loading model")
    """
    model_class = getattr(torchvision.models, args.model)
    if args.model in ['googlenet', 'inception_v3']:
        model = model_class(pretrained=True, aux_logits=False)
    else:
        model = model_class(pretrained=True)
    model.fc = torch.nn.Linear(model.fc.in_features, 43)
    """
    model = resnet18(num_classes=43)
    model.to(args.device).train()
    if args.parallel:
        model = torch.nn.parallel.DataParallel(model)
    train_loader, val_loader = gtsrb(args.data_dir,
                                     batch_size=args.batch_size,
                                     workers=args.workers)

    if args.optimizer == 'sgd':
        optimizer = torch.optim.SGD(model.parameters(),
                                    lr=args.lr,
                                    momentum=args.momentum,
                                    weight_decay=args.l2)
    elif args.optimizer == 'adam':
        optimizer = torch.optim.Adam(model.parameters(),
                                     lr=args.lr,
                                     weight_decay=args.l2)
    else:
        raise ValueError(f"Unknown optimizer: {args.optimizer}")
    criterion = torch.nn.CrossEntropyLoss()

    train(model, train_loader, val_loader, optimizer, criterion, args.epochs,
          args.lr, args.device)

    path = os.path.join(args.root_dir, 'weights',
                        f"{args.model}_{args.data}.pth")
    torch.save(model.state_dict(), path)
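
The `train` helper called above is not shown. A minimal sketch under the assumed signature `train(model, train_loader, val_loader, optimizer, criterion, epochs, lr, device)` could look like this; the `lr` argument is unused here and kept only for signature compatibility.

import torch


def train(model, train_loader, val_loader, optimizer, criterion, epochs, lr, device):
    # Hypothetical sketch: one optimization pass per epoch plus a validation pass.
    for epoch in range(epochs):
        model.train()
        for inputs, targets in train_loader:
            inputs, targets = inputs.to(device), targets.to(device)
            optimizer.zero_grad()
            loss = criterion(model(inputs), targets)
            loss.backward()
            optimizer.step()

        model.eval()
        correct = total = 0
        with torch.no_grad():
            for inputs, targets in val_loader:
                inputs, targets = inputs.to(device), targets.to(device)
                correct += (model(inputs).argmax(dim=1) == targets).sum().item()
                total += targets.size(0)
        print(f"Epoch {epoch + 1}/{epochs} | Val. acc.: {100. * correct / total:.2f}%")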
Example 3
def test(args, model, fig_path=""):
    print("Loading data")
    if args.data == 'cifar10':
        test_loader = datasets.cifar10(args.torch_data, splits='test')
    elif args.data == 'gtsrb':
        test_loader = datasets.gtsrb(args.data_dir,
                                     batch_size=args.batch_size,
                                     splits='test')
    elif args.data == 'mnist':
        test_loader = datasets.mnist(args.torch_data, splits='test')
    elif args.data == 'tiny':
        test_loader = datasets.imagenet(args.data_dir,
                                        img_size=64,
                                        batch_size=args.batch_size,
                                        splits='test',
                                        tiny=True)
    elif args.data == 'imagenet':
        img_size = 224
        if args.model in ['googlenet', 'inception_v3']:
            img_size = 299
        data_dir = os.path.join(args.root_dir, "datasets", "imagenet")
        test_loader = datasets.imagenet(data_dir,
                                        img_size,
                                        args.batch_size,
                                        workers=args.workers,
                                        splits="test")
    else:
        raise ValueError(f"Unknown dataset: {args.data}")

    predictions, labels = eval_nn(model, test_loader, args.device,
                                  args.verbose)

    print("Plotting results")
    plot.reliability_diagram(predictions,
                             labels,
                             path=fig_path + "_reliability.pdf")
Example 4
def adversarial_attack(args, model, estimator, results_path, fig_path):
    print("Loading data")
    if args.data == 'cifar10':
        test_loader = datasets.cifar10(args.torch_data, splits='test')
    elif args.data == 'gtsrb':
        test_loader = datasets.gtsrb(args.data_dir, batch_size=args.batch_size, splits='test')
    elif args.data == 'mnist':
        test_loader = datasets.mnist(args.torch_data, splits='test')
    elif args.data == 'tiny':
        test_loader = datasets.imagenet(args.data_dir, img_size=64, batch_size=args.batch_size, splits='test',
                                        tiny=True)
    elif args.data == 'imagenet':
        img_size = 224
        if args.model in ['googlenet', 'inception_v3']:
            img_size = 299
        test_loader = datasets.imagenet(args.data_dir, img_size, args.batch_size, workers=args.workers, splits='test')
    else:
        raise ValueError(f"Unknown dataset: {args.data}")

    if args.epsilon > 0:
        print(eval_fgsm(model, test_loader, args.epsilon, args.device)[-1])
    else:
        stats_dict = {"eps": [], "acc": [], "ece1": [], "ece2": [], "nll": [], "ent": []}
        bnn_stats_dict = {"eps": [], "acc": [], "ece1": [], "ece2": [], "nll": [], "ent": []}
        steps = np.concatenate([np.linspace(0, 0.2, 11), np.linspace(0.3, 1, 8)])
        for step in steps:
            stats = eval_fgsm(model, test_loader, step, args.device, verbose=False)[-1]
            bnn_stats = eval_fgsm_bnn(model, test_loader, estimator, args.samples, step, device=args.device)[-1]
            for (k1, v1), (k2, v2) in zip(stats.items(), bnn_stats.items()):
                stats_dict[k1].append(v1)
                bnn_stats_dict[k2].append(v2)
            np.savez(results_path + "_fgsm.npz", stats=stats_dict, bnn_stats=bnn_stats_dict)
        print(tabulate.tabulate(stats_dict, headers="keys"))
        print(tabulate.tabulate(bnn_stats_dict, headers="keys"))

        plot.adversarial_results(steps, stats_dict, bnn_stats_dict, fig_path)
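
`eval_fgsm` presumably implements the standard fast gradient sign method. The core perturbation step is sketched below; this is the textbook formulation, not necessarily the project's exact implementation.

import torch


def fgsm_perturb(model, criterion, inputs, targets, epsilon):
    # Perturb each input by epsilon in the direction of the loss gradient's sign.
    inputs = inputs.clone().detach().requires_grad_(True)
    loss = criterion(model(inputs), targets)
    loss.backward()
    with torch.no_grad():
        adv = inputs + epsilon * inputs.grad.sign()
    return adv.clamp(0.0, 1.0).detach()  # assumes inputs normalized to [0, 1]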
Example 5
def main():
    args = setup()

    print("Preparing directories")
    os.makedirs(os.path.join(args.root_dir, "factors"), exist_ok=True)
    filename = f"{args.prefix}{args.model}_{args.data}_{args.estimator}{args.suffix}"
    factors_path = os.path.join(args.root_dir, "factors", filename)

    print("Loading model")
    if args.model == 'lenet5':
        model = lenet5.lenet5(pretrained=args.data, device=args.device)
    elif args.model == 'resnet18' and args.data != 'imagenet':
        model = resnet.resnet18(pretrained=os.path.join(
            args.root_dir, 'weights', f"{args.model}_{args.data}.pth"),
                                num_classes=43 if args.data == 'gtsrb' else 10,
                                device=args.device)
    else:
        model_class = getattr(torchvision.models, args.model)
        if args.model in ['googlenet', 'inception_v3']:
            model = model_class(pretrained=True, aux_logits=False)
        else:
            model = model_class(pretrained=True)
    model.to(args.device).train()
    if args.parallel:
        model = torch.nn.parallel.DataParallel(model)

    if args.estimator != 'inf':
        print(f"Loading data")
        if args.data == 'cifar10':
            data = datasets.cifar10(args.torch_data,
                                    args.batch_size,
                                    args.workers,
                                    args.augment,
                                    splits='train')
        elif args.data == 'mnist':
            data = datasets.mnist(args.torch_data,
                                  args.batch_size,
                                  args.workers,
                                  args.augment,
                                  splits='train')
        elif args.data == 'gtsrb':
            data_dir = os.path.join(args.root_dir, "datasets", "gtsrb")
            data = datasets.gtsrb(data_dir,
                                  batch_size=args.batch_size,
                                  workers=args.workers,
                                  splits='train')
        elif args.data == 'tiny':
            img_size = 64
            data_dir = os.path.join(args.root_dir, "datasets", "imagenet")
            data = datasets.imagenet(data_dir,
                                     img_size,
                                     args.batch_size,
                                     splits='train',
                                     tiny=True)
        elif args.data == 'imagenet':
            img_size = 224
            data_dir = os.path.join(args.root_dir, "datasets", "imagenet")
            if args.model in ['googlenet', 'inception_v3']:
                img_size = 299
            data = datasets.imagenet(data_dir,
                                     img_size,
                                     args.batch_size,
                                     workers=args.workers,
                                     splits='train')
        else:
            raise ValueError(f"Unknown dataset: {args.data}")
    torch.backends.cudnn.benchmark = True

    print("Computing factors")
    if args.estimator == 'inf':
        est = compute_inf(args)
    elif args.estimator == 'efb':
        factors = torch.load(factors_path.replace("efb", "kfac") + '.pth')
        est = compute_factors(args, model, data, factors)
    else:
        est = compute_factors(args, model, data)

    print("Saving factors")
    if args.estimator == "inf":
        torch.save(est.state, f"{factors_path}{args.rank}.pth")
    elif args.estimator == "efb":
        torch.save(list(est.state.values()), factors_path + '.pth')
        torch.save(list(est.diags.values()),
                   factors_path.replace("efb", "diag") + '.pth')
    else:
        torch.save(list(est.state.values()), factors_path + '.pth')
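
`compute_factors` is not shown. For the simplest ('diag') estimator, a common formulation averages squared log-likelihood gradients over the training data; the sketch below illustrates that idea only and is not the project's implementation (KFAC, EFB, and INF require additional per-layer bookkeeping).

import torch


def diagonal_fisher(model, data_loader, criterion, device):
    # Hypothetical sketch: empirical Fisher diagonal as mean squared gradients.
    diags = [torch.zeros_like(p) for p in model.parameters()]
    n_batches = 0
    for inputs, targets in data_loader:
        inputs, targets = inputs.to(device), targets.to(device)
        model.zero_grad()
        criterion(model(inputs), targets).backward()
        for diag, p in zip(diags, model.parameters()):
            diag.add_(p.grad.detach() ** 2)
        n_batches += 1
    return [diag / n_batches for diag in diags]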
Example 6
def out_of_domain(args, model, inv_factors, results_path="", fig_path=""):
    """Evaluates the model on in- and out-of-domain data.

    Each dataset has a fixed out-of-domain counterpart, which is loaded automatically alongside the in-domain
    dataset specified in `args.data`. For each batch of in- and out-of-domain data, a forward pass through the
    provided `model` is performed and the predictions are stored under `results_path`. The same is repeated for
    the Bayesian variant of the model (Laplace approximation).

    Parameters
    ----------
    args : Todo: Check type
        The arguments provided to the script on execution.
    model : torch.nn.Module Todo: Verify
        A `torchvision` or custom neural network (a `torch.nn.Module` or `torch.nn.Sequential` instance)
    inv_factors : list
        A list of KFAC factors, eigenvectors of KFAC factors, or diagonal terms. Todo: INF
    results_path : string, optional
        The path where results (in- and out-of-domain predictions) should be stored. Results are not stored if
        argument `args.no_results` is provided.
    fig_path : string, optional
        The path where figures should be stored. Figures are only generated if argument `args.plot` is provided.
    """
    print("Loading data")
    if args.data == 'cifar10':
        in_data = datasets.cifar10(args.torch_data, splits='test')
        out_data = datasets.svhn(args.torch_data, splits='test')
    elif args.data == 'mnist':
        in_data = datasets.mnist(args.torch_data, splits='test')
        out_data = datasets.kmnist(args.torch_data, splits='test')
    elif args.data == 'gtsrb':
        in_data = datasets.gtsrb(args.data_dir,
                                 batch_size=args.batch_size,
                                 splits='test')
        out_data = datasets.cifar10(args.torch_data, splits='test')
    elif args.data == 'tiny':
        in_data = datasets.imagenet(args.data_dir,
                                    img_size=64,
                                    batch_size=args.batch_size,
                                    splits='test',
                                    tiny=True,
                                    use_cache=True)
        out_data = datasets.art(args.data_dir,
                                img_size=64,
                                batch_size=args.batch_size,
                                use_cache=True)
    elif args.data == 'imagenet':
        img_size = 224
        if args.model in ['googlenet', 'inception_v3']:
            img_size = 299
        data_dir = os.path.join(args.root_dir, "datasets", "imagenet")
        in_data = datasets.imagenet(data_dir,
                                    img_size,
                                    args.batch_size,
                                    workers=args.workers,
                                    splits='test')
        out_data = datasets.art(data_dir,
                                img_size,
                                args.batch_size,
                                workers=args.workers)
    else:
        raise ValueError(f"Unknown dataset: {args.data}")

    # Compute NN and BNN predictions on validation set of training data
    predictions, bnn_predictions, labels, stats = eval_nn_and_bnn(
        model,
        in_data,
        inv_factors,
        args.estimator,
        args.samples,
        args.stats,
        args.device,
        verbose=True)

    # Compute NN and BNN predictions on out-of-distribution data
    ood_predictions, bnn_ood_predictions, _, _ = eval_nn_and_bnn(
        model,
        out_data,
        inv_factors,
        args.estimator,
        args.samples,
        False,
        args.device,
        verbose=True)

    if not args.no_results:
        print("Saving results")
        np.savez_compressed(results_path,
                            stats=stats,
                            labels=labels,
                            predictions=predictions,
                            bnn_predictions=bnn_predictions,
                            ood_predictions=ood_predictions,
                            bnn_ood_predictions=bnn_ood_predictions)

    if args.plot:
        print("Plotting results")
        fig, ax = plt.subplots(figsize=(12, 7), tight_layout=True)
        plot.inv_ecdf_vs_pred_entropy(predictions,
                                      color='dodgerblue',
                                      linestyle='--',
                                      axis=ax)
        plot.inv_ecdf_vs_pred_entropy(ood_predictions,
                                      color='crimson',
                                      linestyle='--',
                                      axis=ax)
        plot.inv_ecdf_vs_pred_entropy(bnn_predictions,
                                      color='dodgerblue',
                                      axis=ax)
        plot.inv_ecdf_vs_pred_entropy(bnn_ood_predictions,
                                      color='crimson',
                                      axis=ax)
        ax.legend([
            f"NN {args.data.upper()} | Acc.: {accuracy(predictions, labels):.2f}%",
            f"NN OOD",
            f"BNN {args.data.upper()} | Acc.: {accuracy(bnn_predictions, labels):.2f}%",
            f"BNN OOD"
        ],
                  fontsize=16,
                  frameon=False)
        plt.savefig(fig_path + "_ecdf.pdf", forma='pdf', dpi=1200)

        plot.reliability_diagram(predictions,
                                 labels,
                                 path=fig_path + "_reliability.pdf")
        plot.reliability_diagram(bnn_predictions,
                                 labels,
                                 path=fig_path + "_bnn_reliability.pdf")

        plot.entropy_hist(predictions,
                          ood_predictions,
                          path=fig_path + "_entropy.pdf")
        plot.entropy_hist(bnn_predictions,
                          bnn_ood_predictions,
                          path=fig_path + "_bnn_entropy.pdf")
Example 7
def main():
    args = setup()

    print("Preparing directories")
    filename = f"{args.prefix}{args.model}_{args.data}{args.suffix}"
    factors_path = os.path.join(
        args.root_dir, "factors",
        f"{args.prefix}{args.model}_{args.data}_{args.estimator}{args.suffix}")
    weights_path = os.path.join(args.root_dir, "weights",
                                f"{args.model}_{args.data}.pth")
    if args.exp_id == -1:
        if not args.no_results:
            os.makedirs(os.path.join(args.results_dir, args.model, "data",
                                     args.estimator, args.optimizer),
                        exist_ok=True)
        if args.plot:
            os.makedirs(os.path.join(args.results_dir, args.model, "figures",
                                     args.estimator, args.optimizer),
                        exist_ok=True)
        results_path = os.path.join(args.results_dir, args.model, "data",
                                    args.estimator, args.optimizer, filename)
    else:
        if not args.no_results:
            os.makedirs(os.path.join(args.results_dir, args.model, "data",
                                     args.estimator, args.optimizer,
                                     str(args.exp_id)),
                        exist_ok=True)
        if args.plot:
            os.makedirs(os.path.join(args.results_dir, args.model, "figures",
                                     args.estimator, args.optimizer,
                                     str(args.exp_id)),
                        exist_ok=True)
        results_path = os.path.join(args.results_dir, args.model, "data",
                                    args.estimator, args.optimizer,
                                    str(args.exp_id), filename)

    print("Loading model")
    if args.model == 'lenet5':
        model = lenet5(pretrained=args.data, device=args.device)
    elif args.model == 'resnet18' and args.data != 'imagenet':
        model = resnet18(pretrained=weights_path,
                         num_classes=43 if args.data == 'gtsrb' else 10,
                         device=args.device)
    else:
        model_class = getattr(torchvision.models, args.model)
        if args.model in ['googlenet', 'inception_v3']:
            model = model_class(pretrained=True, aux_logits=False)
        else:
            model = model_class(pretrained=True)
    model.to(args.device).eval()
    if args.parallel:
        model = torch.nn.parallel.DataParallel(model)

    print("Loading data")
    if args.data == 'mnist':
        val_loader = datasets.mnist(args.torch_data, splits='val')
    elif args.data == 'cifar10':
        val_loader = datasets.cifar10(args.torch_data, splits='val')
    elif args.data == 'gtsrb':
        val_loader = datasets.gtsrb(args.data_dir,
                                    batch_size=args.batch_size,
                                    splits='val')
    elif args.data == 'imagenet':
        img_size = 224
        if args.model in ['googlenet', 'inception_v3']:
            img_size = 299
        data_dir = os.path.join(args.root_dir, "datasets", "imagenet")
        val_loader = datasets.imagenet(data_dir,
                                       img_size,
                                       args.batch_size,
                                       splits="val")
    else:
        raise ValueError(f"Unknown dataset: {args.data}")

    print("Loading factors")
    if args.estimator in ["diag", "kfac"]:
        factors = torch.load(factors_path + '.pth')
    elif args.estimator == 'efb':
        kfac_factors = torch.load(factors_path.replace("efb", "kfac") + '.pth')
        lambdas = torch.load(factors_path + '.pth')

        factors = list()
        eigvecs = get_eigenvectors(kfac_factors)

        for eigvec, lambda_ in zip(eigvecs, lambdas):
            factors.append((eigvec[0], eigvec[1], lambda_))
    elif args.estimator == 'inf':
        factors = torch.load(f"{factors_path}{args.rank}.pth")
    torch.backends.cudnn.benchmark = True

    norm_min = -10
    norm_max = 10
    scale_min = -10
    scale_max = 10
    if args.boundaries:
        x0 = [[norm_min, scale_min], [norm_max, scale_max],
              [norm_min, scale_max], [norm_max, scale_min],
              [norm_min / 2., scale_min], [norm_max / 2., scale_max],
              [norm_min, scale_max / 2.], [norm_max, scale_min / 2.],
              [norm_min / 2., scale_min / 2.], [norm_max / 2., scale_max / 2.],
              [norm_min / 2., scale_max / 2.], [norm_max / 2., scale_min / 2.]]
    else:
        x0 = None

    space = list()
    space.append(
        skopt.space.Real(norm_min, norm_max, name="norm", prior='uniform'))
    space.append(
        skopt.space.Real(scale_min, scale_max, name="scale", prior='uniform'))

    try:
        stats = np.load(
            results_path +
            f"_hyperopt_stats{'_layer.npy' if args.layer else '.npy'}",
            allow_pickle=True).item()
        print(f"Found {len(stats['cost'])} Previous evaluations.")
    except FileNotFoundError:
        stats = {
            "norms": [],
            "scales": [],
            "acc": [],
            "ece": [],
            "nll": [],
            "ent": [],
            "cost": []
        }

    @skopt.utils.use_named_args(dimensions=space)
    def objective(**params):
        norms = [10**params["norm"]] * len(factors)
        scales = [10**params["scale"]] * len(factors)
        print("Norm:", norms[0], "Scale:", scales[0])
        try:
            inv_factors = invert_factors(factors, norms,
                                         args.pre_scale * scales,
                                         args.estimator)
        except (RuntimeError, np.linalg.LinAlgError):
            print(f"Error: Singular matrix")
            return 200

        predictions, labels, _ = eval_bnn(model,
                                          val_loader,
                                          inv_factors,
                                          args.estimator,
                                          args.samples,
                                          stats=False,
                                          device=args.device,
                                          verbose=args.verbose)

        err = 100 - accuracy(predictions, labels)
        ece = 100 * expected_calibration_error(predictions, labels)[0]
        nll = negative_log_likelihood(predictions, labels)
        ent = predictive_entropy(predictions, mean=True)
        stats["norms"].append(norms)
        stats["scales"].append(scales)
        stats["acc"].append(100 - err)
        stats["ece"].append(ece)
        stats["nll"].append(nll)
        stats["ent"].append(ent)
        stats["cost"].append(err + ece)
        print(
            f"Err.: {err:.2f}% | ECE: {ece:.2f}% | NLL: {nll:.3f} | Ent.: {ent:.3f}"
        )
        np.save(
            results_path +
            f"_hyperopt_stats{'_layer.npy' if args.layer else '.npy'}", stats)

        return err + ece

    with warnings.catch_warnings():
        warnings.filterwarnings("ignore", category=FutureWarning)

        if args.optimizer == "gbrt":
            res = skopt.gbrt_minimize(func=objective,
                                      dimensions=space,
                                      n_calls=args.calls,
                                      x0=x0,
                                      verbose=True,
                                      n_jobs=args.workers,
                                      n_random_starts=0 if x0 else 10,
                                      acq_func='EI')

        # EI (neg. expected improvement)
        # LCB (lower confidence bound)
        # PI (neg. prob. of improvement): Usually favours exploitation over exploration
        # gp_hedge (choose probabilistically between all)
        if args.optimizer == "gp":
            res = skopt.gp_minimize(func=objective,
                                    dimensions=space,
                                    n_calls=args.calls,
                                    x0=x0,
                                    verbose=True,
                                    n_jobs=args.workers,
                                    n_random_starts=0 if x0 else 1,
                                    acq_func='gp_hedge')

        # acq_func: EI (neg. expected improvement), LCB (lower confidence bound), PI (neg. prob. of improvement)
        # xi: how much improvement one wants over the previous best values.
        # kappa: Importance of variance of predicted values. High: exploration > exploitation
        # base_estimator: RF (random forest), ET (extra trees)
        elif args.optimizer == "forest":
            res = skopt.forest_minimize(func=objective,
                                        dimensions=space,
                                        n_calls=args.calls,
                                        x0=x0,
                                        verbose=True,
                                        n_jobs=args.workers,
                                        n_random_starts=0 if x0 else 1,
                                        acq_func='EI')

        elif args.optimizer == "random":
            res = skopt.dummy_minimize(func=objective,
                                       dimensions=space,
                                       n_calls=args.calls,
                                       x0=x0,
                                       verbose=True)

        elif args.optimizer == "grid":
            space = [
                np.arange(norm_min, norm_max + 1, 10),
                np.arange(scale_min, scale_max + 1, 10)
            ]
            res = grid(func=objective, dimensions=space)
        else:
            raise ValueError(f"Unknown optimizer: {args.optimizer}")

        print(f"Minimal cost of {min(stats['cost'])} found at:")
        print("Norm:", stats['norms'][np.argmin(stats['cost'])][0], "Scale:",
              stats['scales'][np.argmin(stats['cost'])][0])

    if not args.no_results:
        print("Saving results")
        del res.specs['args']['func']
        np.save(f"{results_path}_hyperopt_stats.npy", stats)
        skopt.dump(res, f"{results_path}_hyperopt_dump.pkl")

        all_stats = {
            "norms": [],
            "scales": [],
            "acc": [],
            "ece": [],
            "nll": [],
            "ent": [],
            "cost": []
        }
        path = os.path.join(args.results_dir, args.model, "data",
                            args.estimator)
        # Aggregate the hyperopt stats saved by every run found below `path`.
        for root, _, files in os.walk(path):
            for name in files:
                if name.endswith("_hyperopt_stats.npy") or name.endswith(
                        "_hyperopt_stats_layer.npy"):
                    tmp_stats = np.load(os.path.join(root, name),
                                        allow_pickle=True).item()
                    for key, value in tmp_stats.items():
                        all_stats[key].extend(value)
        np.save(os.path.join(path, f"{filename}_best_params.npy"), [
            all_stats['norms'][np.argmin(all_stats['cost'])],
            all_stats['scales'][np.argmin(all_stats['cost'])]
        ])

    if args.plot:
        print("Plotting results")
        hyperparameters(args)
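
The `grid` helper used for `--optimizer grid` is not shown. A minimal sketch that exhaustively evaluates the objective on the Cartesian product of the per-dimension value arrays could look like the following; unlike the skopt minimizers it returns a plain tuple rather than a result object, so the `res.specs` cleanup above would not apply to it.

import itertools


def grid(func, dimensions):
    # Evaluate func at every grid point and keep the best (lowest-cost) one.
    # func is assumed to accept a list of parameter values, as produced by
    # skopt.utils.use_named_args.
    best_x, best_cost = None, float("inf")
    for point in itertools.product(*dimensions):
        cost = func(list(point))
        if cost < best_cost:
            best_x, best_cost = list(point), cost
    return best_x, best_cost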