Example #1
import torch

# `build_dataset` / `build_model` are the skeletor project's loaders and
# `compute_hessian_eigenthings` comes from pytorch-hessian-eigenthings
# (the module paths below are assumptions)
from skeletor.datasets import build_dataset
from skeletor.models import build_model
from hessian_eigenthings import compute_hessian_eigenthings


def main(args):
    trainloader, testloader = build_dataset(
        "cifar10",
        dataroot=args.dataroot,
        batch_size=args.batch_size,
        eval_batch_size=args.eval_batch_size,
        num_workers=2,
    )
    if args.fname:
        print("Loading model from %s" % args.fname)
        model = torch.load(args.fname, map_location="cpu").cuda()
    else:
        model = build_model("ResNet18", num_classes=10)
    criterion = torch.nn.CrossEntropyLoss()
    eigenvals, eigenvecs = compute_hessian_eigenthings(
        model,
        testloader,
        criterion,
        args.num_eigenthings,
        mode=args.mode,
        # power_iter_steps=args.num_steps,
        max_samples=args.max_samples,
        # momentum=args.momentum,
        full_dataset=args.full_dataset,
        use_gpu=args.cuda,
    )
    print("Eigenvecs:")
    print(eigenvecs)
    print("Eigenvals:")
    print(eigenvals)
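Here `args` comes from the project's CLI. A hypothetical argparse wiring that supplies every flag `main()` reads; the flag names are taken from the calls above, the defaults are made up:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--dataroot', default='./data')
parser.add_argument('--batch_size', type=int, default=128)
parser.add_argument('--eval_batch_size', type=int, default=100)
parser.add_argument('--fname', default='',
                    help='optional checkpoint to load instead of a fresh ResNet18')
parser.add_argument('--num_eigenthings', type=int, default=5)
parser.add_argument('--mode', default='power_iter')  # or 'lanczos' (assumed)
parser.add_argument('--max_samples', type=int, default=2048)
parser.add_argument('--full_dataset', action='store_true')
parser.add_argument('--cuda', action='store_true')
main(parser.parse_args())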
Example #2
import numpy as np


def build_single_class_dataset(name, class_ind=0, **dataset_params):
    """
    wrapper for the base skeletor dataset loader `build_dataset`
    this will take in the same arguments, but the loader will only iterate
    over examples of the given class

    I'm just going to overwrite standard cifar loading data for now
    """
    trainloader, testloader = build_dataset(name, **dataset_params)

    def _filter(loader, mode='train'):
        dataset = loader.dataset
        assert name in ['cifar10', 'svhn'],\
            'we only support cifar and svhn right now'
        if name == 'cifar10':
            data_attr = mode + '_data'  # e.g. train imgs in dataset.train_data
            label_attr = mode + '_labels'
        else:
            data_attr = 'data'
            label_attr = 'labels'
        data = getattr(dataset, data_attr)
        targets = np.array(getattr(dataset, label_attr))
        class_inds = np.where(targets == int(class_ind))
        data, targets = data[class_inds], targets[class_inds]
        setattr(dataset, data_attr, data)
        setattr(dataset, label_attr, targets)
        return loader

    return _filter(trainloader, mode='train'), _filter(testloader, mode='test')
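For illustration, a hypothetical call that restricts both loaders to class 0 of CIFAR-10 (the argument values are made up; compare the real call in Example #8 below):

train_cls0, test_cls0 = build_single_class_dataset(
    'cifar10',
    class_ind=0,
    dataroot='./data',
    batch_size=128,
    eval_batch_size=100,
    num_workers=2)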
Example #3
def test_datasets():
    print("Testing datasets")
    for name in datasets:
        trainloader, testloader = build_dataset(name,
                                                dataroot='./tests/data/',
                                                batch_size=128,
                                                eval_batch_size=100,
                                                num_workers=1)
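`datasets` is a module-level list of dataset names for this smoke test; given the wrapper in Example #2, presumably something like:

datasets = ['cifar10', 'svhn']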
Example #4
def run(ensemble, proj_df, results_dir='./logs', dataroot='./data',
        batch_size=128, eval_batch_size=100, cuda=False, num_workers=2,
        **unused):
    """
    this evaluates both the ensemble and the baseline model on the full
    test set

    we also evaluate each model and compute their individual losses, so that
    we can plot the variance around the ensemble's dashed horizontal line
        (see top of file)
    """
    trainloader, testloader = build_dataset('cifar10',
                                            dataroot=dataroot,
                                            batch_size=batch_size,
                                            eval_batch_size=eval_batch_size,
                                            num_workers=num_workers)
    ensemble_criterion = SoftmaxNLL()
    track.debug("[baseline] testing the ensemble on full dataset")
    ensemble_loss, ensemble_acc = test(testloader, ensemble,
                                       ensemble_criterion, epoch=-1,
                                       cuda=cuda, metric=False)

    # get the no-noise baseline evaluation
    proj = track.Project(results_dir)
    best_model, best_df = load_trial(proj, noise_scale=0.0)

    track.debug("[baseline] testing no-noise baseline model on full dataset")
    baseline_criterion = torch.nn.CrossEntropyLoss()
    baseline_loss, baseline_acc = test(testloader, best_model,
                                       baseline_criterion,
                                       epoch=-1, cuda=cuda, metric=False)

    # now, test each of the ensemble's models
    model_losses = []
    model_accs = []
    track.debug("[baseline] testing individual models on full dataset")
    for i, model in enumerate(ensemble.models):
        track.debug("[baseline] testing model %d of %d" %
                    (i, len(ensemble.models)))
        model_loss, model_acc = test(testloader, model,
                                     baseline_criterion,
                                     epoch=-1, cuda=cuda, metric=False)
        model_losses.append(model_loss)
        model_accs.append(model_acc)

    # we just need to track the scalar results of this evaluation
    # we can access the baseline test *curve* from the jupyter notebook (later)
    track.metric(iteration=0, ensemble_loss=ensemble_loss,
                 ensemble_acc=ensemble_acc,
                 best_baseline_loss=baseline_loss,
                 best_baseline_acc=baseline_acc,
                 model_losses=model_losses,
                 model_accs=model_accs)
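`SoftmaxNLL` is defined elsewhere in this project. A minimal sketch of one plausible implementation, assuming the ensemble's forward pass returns averaged class probabilities rather than raw logits (the class body here is an assumption, not the project's code):

import torch
import torch.nn.functional as F

class SoftmaxNLL(torch.nn.Module):
    # negative log-likelihood over the ensemble's averaged probabilities;
    # clamp avoids log(0) when a class probability underflows
    def forward(self, probs, targets):
        return F.nll_loss(torch.log(probs.clamp_min(1e-12)), targets)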
Example #5
def do_training(args):
    trainloader, testloader = build_dataset(
        args.dataset,
        dataroot=args.dataroot,
        batch_size=args.batch_size,
        eval_batch_size=args.eval_batch_size,
        num_workers=2)
    model = build_model(args.arch, num_classes=num_classes(args.dataset))
    if args.cuda:
        model = torch.nn.DataParallel(model).cuda()

    # Calculate total number of model parameters
    num_params = sum(p.numel() for p in model.parameters())
    track.metric(iteration=0, num_params=num_params)

    if args.optimizer == 'sgd':
        optimizer = SGD(model.parameters(),
                        lr=args.lr,
                        momentum=args.momentum,
                        weight_decay=args.weight_decay)
    else:
        optimizer = EKFAC(model.parameters(),
                          lr=args.lr,
                          momentum=args.momentum,
                          weight_decay=args.weight_decay,
                          eps=args.eps,
                          update_freq=args.update_freq)

    criterion = torch.nn.CrossEntropyLoss()

    best_acc = 0.0
    for epoch in range(args.epochs):
        track.debug("Starting epoch %d" % epoch)
        args.lr = adjust_learning_rate(epoch, optimizer, args.lr,
                                       args.schedule, args.gamma)
        train_loss, train_acc = train(trainloader, model, criterion, optimizer,
                                      epoch, args.cuda)
        test_loss, test_acc = test(testloader, model, criterion, epoch,
                                   args.cuda)
        track.debug('Finished epoch %d... | train loss %.3f | train acc %.3f '
                    '| test loss %.3f | test acc %.3f' %
                    (epoch, train_loss, train_acc, test_loss, test_acc))
        # Save model
        model_fname = os.path.join(track.trial_dir(),
                                   "model{}.ckpt".format(epoch))
        torch.save(model, model_fname)
        if test_acc > best_acc:
            best_acc = test_acc
            best_fname = os.path.join(track.trial_dir(), "best.ckpt")
            track.debug("New best score! Saving model")
            torch.save(model, best_fname)
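`adjust_learning_rate` is another project helper; the signature is taken from the call above. Under the usual step-schedule convention (multiply the learning rate by `gamma` at each epoch listed in `schedule`), a sketch could be:

def adjust_learning_rate(epoch, optimizer, lr, schedule, gamma):
    # decay by `gamma` whenever we reach an epoch listed in `schedule`
    if epoch in schedule:
        lr *= gamma
        for param_group in optimizer.param_groups:
            param_group['lr'] = lr
    return lr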
Example #6
def do_training(args):
    trainloader, testloader = build_dataset(
        args.dataset,
        dataroot=args.dataroot,
        batch_size=args.batch_size,
        eval_batch_size=args.eval_batch_size,
        num_workers=2)
    model = build_model(args.arch, num_classes=num_classes(args.dataset))
    if args.cuda:
        model = torch.nn.DataParallel(model).cuda()

    # Calculate total number of model parameters
    num_params = sum(p.numel() for p in model.parameters())
    track.metric(iteration=0, num_params=num_params)

    num_chunks = max(1, args.batch_size // args.max_samples_per_gpu)

    optimizer = LARS(params=model.parameters(),
                     lr=args.lr,
                     momentum=args.momentum,
                     weight_decay=args.weight_decay,
                     eta=args.eta,
                     max_epoch=args.epochs)

    criterion = torch.nn.CrossEntropyLoss()

    best_acc = 0.0
    for epoch in range(args.epochs):
        track.debug("Starting epoch %d" % epoch)
        train_loss, train_acc = train(trainloader,
                                      model,
                                      criterion,
                                      optimizer,
                                      epoch,
                                      args.cuda,
                                      num_chunks=num_chunks)
        test_loss, test_acc = test(testloader, model, criterion, epoch,
                                   args.cuda)
        track.debug('Finished epoch %d... | train loss %.3f | train acc %.3f '
                    '| test loss %.3f | test acc %.3f' %
                    (epoch, train_loss, train_acc, test_loss, test_acc))
        # Save model
        model_fname = os.path.join(track.trial_dir(),
                                   "model{}.ckpt".format(epoch))
        torch.save(model, model_fname)
        if test_acc > best_acc:
            best_acc = test_acc
            best_fname = os.path.join(track.trial_dir(), "best.ckpt")
            track.debug("New best score! Saving model")
            torch.save(model, best_fname)
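The `num_chunks` argument suggests that `train()` splits each oversized batch into chunks that fit on one GPU and accumulates gradients before the LARS step. A sketch of that inner step, under that assumption (the helper name is hypothetical):

def train_step(model, criterion, optimizer, inputs, targets, num_chunks=1):
    optimizer.zero_grad()
    total_loss = 0.0
    # run each chunk through the model separately, accumulating gradients;
    # dividing by num_chunks keeps the gradient equal to the full-batch mean
    for x, y in zip(inputs.chunk(num_chunks), targets.chunk(num_chunks)):
        loss = criterion(model(x), y) / num_chunks
        loss.backward()
        total_loss += loss.item()
    optimizer.step()
    return total_loss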
Example #7
def main(args):
    trainloader, testloader = build_dataset(
        'cifar10',
        dataroot=args.dataroot,
        batch_size=args.batch_size,
        eval_batch_size=args.eval_batch_size,
        num_workers=2)
    model = build_model('ResNet18', num_classes=10)
    criterion = torch.nn.CrossEntropyLoss()
    eigenvals, eigenvecs = compute_hessian_eigenthings(model,
                                                       testloader,
                                                       criterion,
                                                       args.num_eigenthings,
                                                       args.num_steps,
                                                       momentum=args.momentum,
                                                       use_gpu=args.cuda)
    print("Eigenvecs:")
    print(eigenvecs)
    print("Eigenvals:")
    print(eigenvals)
    track.metric(iteration=0, eigenvals=eigenvals)
Example #8
def run(ensemble,
        trial_df,
        results_dir='./logs',
        dataroot='./data',
        class_ind=0,
        batch_size=128,
        eval_batch_size=100,
        cuda=False,
        num_workers=2,
        start_epoch=160,
        end_epoch=200,
        **unused):

    trainloader, testloader = build_dataset('cifar10',
                                            dataroot=dataroot,
                                            batch_size=batch_size,
                                            eval_batch_size=eval_batch_size,
                                            num_workers=num_workers)

    # this will only iterate over examples of one class
    class_trainloader, class_testloader = build_single_class_dataset(
        'cifar10',
        class_ind=class_ind,
        dataroot=dataroot,
        batch_size=batch_size,
        eval_batch_size=eval_batch_size,
        num_workers=2)

    full_ensemble = ensemble
    track.debug("[ensemble_size] starting to test all ensembles (class = %d)" %
                class_ind)
    for i in range(len(ensemble.models)):
        ensemble_size = i + 1
        model_ind = len(ensemble.models) - 1 - i
        track.debug("[ensemble_size] starting size %d / %d ensemble" %
                    (i + 1, len(ensemble.models)))
        ensemble_loss = SoftmaxNLL()
        one_loss = CrossEntropyLoss()

        entropy_criterion = Entropy()

        ensemble = Ensemble(full_ensemble.models[model_ind:])
        single_model = full_ensemble.models[model_ind]

        # we want to do metrics for (a) the ensemble with varying sizes and
        #   (b) the individual models corresponding to that epoch
        def _test_dataset(model, testloader, criterion):
            loss, acc = test(testloader,
                             model,
                             criterion,
                             epoch=-1,
                             cuda=cuda,
                             metric=False)
            # compute the entropy of the model post-hoc as well
            entropy = test(testloader,
                           model,
                           entropy_criterion,
                           epoch=-1,
                           cuda=cuda,
                           metric=False,
                           criterion_has_labels=False,
                           compute_acc=False)
            return loss, acc, entropy

        # metrics for the both models over both datasets
        # (a) on the whole dataset
        #      (i) for the ensemble
        #      (ii)for the single model from this epoch
        # (b) on a single class
        #      (i) for the ensemble
        #      (ii)for the single model from this epoch
        stats = {}
        models = (ensemble, single_model)
        loaders = (testloader, class_testloader)
        losses = ensemble_loss, one_loss
        model_names = ['ensemble', 'single_model']
        loader_names = ['full', 'single_class']
        for mi, li in itertools.product(range(len(models)),
                                        range(len(loaders))):
            track.debug("[ensemble size: %d] Evaluating loss/acc/entropy for "
                        "%s on %s dataset" %
                        (ensemble_size, model_names[mi], loader_names[li]))
            metric = model_names[mi] + '_' + loader_names[li]
            loss, acc, entropy = _test_dataset(models[mi], loaders[li],
                                               losses[mi])
            stats[metric + '_loss'] = loss
            stats[metric + '_acc'] = acc
            stats[metric + '_entropy'] = entropy
        track.metric(ensemble_size=ensemble_size, **stats)
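`Entropy` is used above as a label-free criterion (`criterion_has_labels=False`, `compute_acc=False`). A minimal sketch, assuming it returns the mean entropy of the predicted class distribution (an assumption, not the project's code):

import torch
import torch.nn.functional as F

class Entropy(torch.nn.Module):
    # mean entropy of the softmax distribution; takes model outputs only, no labels
    def forward(self, outputs):
        log_probs = F.log_softmax(outputs, dim=1)
        return -(log_probs.exp() * log_probs).sum(dim=1).mean()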