Exemplo n.º 1
0
def get_model_and_paths(args):
    """Build the target MNIST classifier and pick a saved checkpoint.

    Instantiates the Small3 MNIST classifier on the GPU, records the
    target model's layer names on ``args``, and returns the model plus a
    single-element list holding the second checkpoint found on disk.

    Parameters:
        args: namespace with a ``target`` key into ``netdef.nets()``;
            gains a ``names`` attribute as a side effect.

    Returns:
        (model, [path]) — the classifier and a one-element checkpoint list.
    """
    model = mnist_clf.Small3().cuda()
    modeldef = netdef.nets()[args.target]
    args.names = modeldef['layer_names']
    # Checkpoint filenames embed a float score at a fixed offset:
    # chars [41:-3] strip the 'saved_models/mnist/lrg/' prefix and the
    # '.pt' suffix, so sorting by that float orders checkpoints by score.
    paths = natsorted(glob('saved_models/mnist/lrg/*.pt'))
    paths = sorted(paths, key=lambda i: float(i[41:-3]))
    # NOTE(review): prints paths from index 41 on — looks like leftover
    # debugging; confirm whether paths[41:] (vs paths) is intended.
    print (paths[41:])
    return model, [paths[1]]
Exemplo n.º 2
0
                        # Accumulate per-batch correct count and loss from the
                        # classifier built out of the sampled layers g1..g5.
                        correct, loss = train_clf(args, [g1, g2, g3, g4, g5],
                                                  data, y)
                        test_acc += correct.item()
                        test_loss += loss.item()
                # Normalize the accumulated totals over the test set.
                # NOTE(review): dividing by len(cifar_test.dataset) * batch_size
                # assumes len(...) counts batches, not samples — confirm, since
                # Dataset length is usually the sample count.
                test_loss /= len(cifar_test.dataset) * args.batch_size
                test_acc /= len(cifar_test.dataset) * args.batch_size
                print('Test Accuracy: {}, Test Loss: {}'.format(
                    test_acc, test_loss))
                # Checkpoint whenever either metric improves, then update
                # whichever best-value trackers actually improved.
                if test_loss < best_test_loss or test_acc > best_test_acc:
                    utils.save_hypernet_cifar(args,
                                              [netE, W1, W2, W3, W4, W5, netD],
                                              test_acc)
                    print('==> new best stats, saving')
                    if test_loss < best_test_loss:
                        best_test_loss = test_loss
                        args.best_loss = test_loss
                    if test_acc > best_test_acc:
                        best_test_acc = test_acc
                        args.best_acc = test_acc


if __name__ == '__main__':

    args = load_args()
    # Fetch the architecture spec for the requested model and show it.
    spec = netdef.nets()[args.model]
    pprint.pprint(spec)
    # Cache the netstat fields on args so later code avoids subscripting.
    args.stat, args.shapes = spec, spec['shapes']
    train(args)
Exemplo n.º 3
0
                    # Normalize accumulated loss/accuracy over the test set.
                    # NOTE(review): dividing by len(mnist_test.dataset) *
                    # batch_size assumes len(...) counts batches — confirm.
                    test_loss /= len(mnist_test.dataset) * args.batch_size
                    test_acc /= len(mnist_test.dataset) * args.batch_size

                    print('Test Accuracy: {}, Test Loss: {}'.format(
                        test_acc, test_loss))
                    #print ('Clf Accuracy: {}, Clf Loss: {}'.format(clf_acc, clf_loss))
                    # Track the best (lowest) loss seen so far.
                    if test_loss < best_test_loss:
                        best_test_loss, args.best_loss = test_loss, test_loss
                    # Save a checkpoint whenever accuracy improves.
                    if test_acc > best_test_acc:
                        #    best_clf_acc, args.best_clf_acc = clf_acc, clf_acc
                        utils.save_hypernet_mnist(args,
                                                  [netE, netD, W1, W2, W3],
                                                  test_acc)
                    # NOTE(review): this repeats the identical condition above;
                    # the two bodies could be merged under a single check.
                    if test_acc > best_test_acc:
                        best_test_acc, args.best_acc = test_acc, test_acc


if __name__ == '__main__':

    args = load_args()
    import arch.models_mnist_bias as models

    # Target classifier the hypernet weights will be loaded into.
    model = mnist_clf.Small2().cuda()
    # Architecture spec for the target network; print it for the log.
    spec = netdef.nets()[args.target]
    pprint.pprint(spec)
    # Cache the netstat fields on args so later code avoids subscripting.
    args.stat, args.shapes = spec, spec['shapes']
    logger = stats.init_stat_dict()
    train(args)
Exemplo n.º 4
0
            print(accs, losses)
        else:
            ckpt = torch.load(path)
            state = ckpt['state_dict']
            try:
                model.load_state_dict()
            except RuntimeError:
                model_dict = model.state_dict()
                filtered = {k: v for k, v in state.items() if k in model_dict}
                model_dict.update(filtered)
                model.load_state_dict(filtered)


if __name__ == '__main__':
    args = load_args()
    # Look up the net spec once and alias it (consistent with the other
    # entry points in this project) instead of calling netdef.nets() twice.
    modeldef = netdef.nets()[args.net]
    args.stat = modeldef
    args.shapes = modeldef['shapes']
    # Resolve the model directory: cluster scratch space or the local dir.
    if args.scratch:
        path = '/scratch/eecs-share/ratzlafn/HyperGAN/'
        if args.hyper:
            path = path + 'exp_models'
    else:
        path = './'

    # Dispatch on the requested task; unknown tasks fall through silently.
    if args.task == 'test':
        load_models(args, path)
    elif args.task == 'train':
        run_model_search(args, path)
    elif args.task == 'measure':
        run_measure(args, path)
Exemplo n.º 5
0
def load_test_hypergan():
    """Evaluate saved MNIST hypernet checkpoints and plot bias power.

    For each checkpoint: sample several full weight sets and record the
    resulting classifier accuracy, then feed a zero code vector through
    the weight generators to measure how much accuracy the generators'
    biases alone provide. Finally plots both curves over checkpoints.
    """
    args = arg.load_mnist_args()
    model = mnist_clf.Small2().cuda()
    modeldef = netdef.nets()[args.target]
    names = modeldef['layer_names']
    # Filenames embed a float score; chars [44:-3] strip the
    # 'saved_models/mnist/noadds/' prefix and '.pt' suffix, so the second
    # sort orders checkpoints by that score.
    paths = natsorted(glob('saved_models/mnist/noadds/*.pt'))
    paths = sorted(paths, key=lambda i: float(i[44:-3]))
    print(paths)

    def get_stats(l1, l2, l3, inspect):
        # Test one sampled weight set; report accuracy and the norms of
        # the layer passed via `inspect`.
        acc, loss = test_mnist(args, [l1, l2, l3], names, model)
        # Idiom: comprehension instead of an index loop; convert to an
        # array once rather than three times.
        norms = np.array([np.linalg.norm(w.detach()) for w in inspect])
        print('Acc: {}, Loss: {}, Norm: {}'.format(acc, loss,
                                                   norms.mean()))
        print('mean: {}, std: {}'.format(
            norms.mean(),
            norms.std()))
        return acc

    full, stds, bias = [], [], []
    for path in paths:
        netE, netD, W1, W2, W3 = utils.load_hypernet_mnist(args, path)
        print('Starting output test')
        # Gaussian-input test: sample 4 weight sets, track mean/std accuracy.
        accs = []
        for i in range(4):
            l1, l2, l3, codes = utils.sample_hypernet_mnist(
                args, [netE, W1, W2, W3], 32)
            accs.append(get_stats(l1, l2, l3, inspect=l2))
        accs = np.array(accs)
        stds.append(accs.std())
        full.append(accs.mean())
        # Zero-input test: generators see an all-zero code, so the output
        # is driven purely by their biases.
        print('\ntesting with 0 vector\n')
        x_dist = utils.create_d(args.ze)
        z = utils.sample_d(x_dist, args.batch_size)
        code1, code2, code3 = netE(z)
        fake1 = W1(torch.zeros_like(code1).cuda())
        fake2 = W2(torch.zeros_like(code2).cuda())
        fake3 = W3(torch.zeros_like(code3).cuda())
        bias.append(get_stats(fake1, fake2, fake3, inspect=fake2))
        # Report the norm distribution of the (non-zero) encoder codes.
        print('\nInput test\n')
        code_norms = np.array([np.linalg.norm(c.detach()) for c in code2])
        print('mean: {}, std: {}'.format(
            code_norms.mean(),
            code_norms.std()))

    plt.plot(range(len(bias)), np.array(bias), color='r', label='Zero input')
    plt.errorbar(range(len(full)),
                 np.array(full),
                 np.array(stds),
                 color='b',
                 label='Gaussian input')
    plt.legend(loc='best')
    plt.grid(True)
    plt.xlabel('training checkpoint')
    plt.ylabel('accuracy')
    plt.title('bias power over training')
    plt.show()
Exemplo n.º 6
0
                    #utils.save_clf(args, z_test, test_acc)
                    # Track the best (lowest) test loss seen so far.
                    if test_loss < best_test_loss:
                        best_test_loss = test_loss
                        args.best_loss = test_loss
                # Instantiate fresh encoder/decoder architectures and load one
                # randomly chosen sampled weight set into each.
                archE = sampleE(args).cuda()
                archD = sampleD(args).cuda()
                rand = np.random.randint(args.batch_size)
                # Eweights/Dweights are per-layer lists of batched weights;
                # zip(*...) regroups them into per-sample weight tuples.
                eweight = list(zip(*Eweights))[rand]
                dweight = list(zip(*Dweights))[rand]
                modelE = utils.weights_to_clf(eweight, archE,
                                              args.statE['layer_names'])
                modelD = utils.weights_to_clf(dweight, archD,
                                              args.statD['layer_names'])
                # Run the sampled autoencoder on this batch and save the image.
                utils.generate_image(args, batch_idx, modelE, modelD,
                                     data.cuda())


if __name__ == '__main__':
    args = load_args()
    import models.models_ae as models
    # Architecture specs for the autoencoder's encoder and decoder halves.
    encoder_spec = netdef.nets()['aeE']
    decoder_spec = netdef.nets()['aeD']
    pprint.pprint(encoder_spec)
    pprint.pprint(decoder_spec)
    # Cache the netstat fields on args so later code avoids subscripting.
    args.statE, args.statD = encoder_spec, decoder_spec
    args.shapesE = encoder_spec['shapes']
    args.shapesD = decoder_spec['shapes']
    train(args)
Exemplo n.º 7
0
# Notebook export: set up an adversarial-attack session against a model
# sampled from a trained MNIST hypernetwork, then collect test batches.
logging.disable(logging.CRITICAL)
import warnings
warnings.filterwarnings("ignore")
# IPython magics from the original notebook (autoreload, clear screen).
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
import torch
import matplotlib.pyplot as plt
import adversarial_test as adv
# NOTE(review): duplicate import below — harmless notebook artifact.
import adversarial_test as adv
args = adv.load_args()
arch = adv.get_network(args)
get_ipython().run_line_magic('clear', '')
import utils
# Load a trained hypernet checkpoint; the filename embeds its accuracy.
hypernet = utils.load_hypernet('hypermnist_0_0.984390625.pt')
import netdef
args.stat = netdef.nets()[args.net]
# Sample one concrete model from the hypernet and wrap it for foolbox.
model_base, fmodel_base = adv.sample_fmodel(args, hypernet, arch)
# NOTE(review): Misclassification and foolbox are used without a visible
# import — presumably imported earlier in the notebook; confirm.
criterion = Misclassification()
fgs = foolbox.attacks.BIM(fmodel_base, criterion)
import datagen
_, test = datagen.load_mnist(args)
# Collect the first 32 (data, target) batches from the MNIST test loader.
batch = []
y = []
for (data, target) in test:
    batch.append(data)
    y.append(target)
    if len(batch) == 32:
        break

# Stack the 32 batches into single tensors along a new leading dimension.
stack_data = torch.stack(batch)
stack_y = torch.stack(y)