import numpy as np
import torch
import matplotlib.pyplot as plt
from glob import glob
from natsort import natsorted

# project-local modules from the hypernetwork codebase; test helpers
# (test_entropy, test_f, test_acc_loss, test_mnist, plot_e,
# get_model_and_paths) are defined elsewhere in the same codebase
import arg
import mnist_clf
import netdef
import utils


def init_test_entropy(args, paths, model, n):
    entropies = []
    # generate ensemble of size n
    for i, path in enumerate(paths):
        netE, netD, W1, W2, W3 = utils.load_hypernet_mnist(args, path)
        nets = [netE.eval(), W1.eval(), W2.eval(), W3.eval()]
        l1, l2, l3, codes = utils.sample_hypernet_mnist(args, nets, n)
        # test ensemble entropy over the mean (see mean_predictive_entropy below)
        ent = test_entropy(args, [l1, l2, l3], args.names, model, n)
        x, y = plot_e(ent)
        entropies.append((x, y))
        #entropies.append(ent)
    return entropies
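

# A minimal, self-contained sketch of the "entropy over the mean" idea used in
# init_test_entropy (assumption: `probs` stands in for per-member softmax
# outputs of shape [n_members, n_samples, n_classes]; the real values come
# from test_entropy, defined elsewhere in this codebase).
def mean_predictive_entropy(probs, eps=1e-12):
    mean = probs.mean(axis=0)  # average the ensemble's softmax outputs
    # per-sample entropy (in nats) of the averaged predictive distribution
    return -(mean * np.log(mean + eps)).sum(axis=1)

# usage with made-up probabilities: 2 members, 2 samples, 2 classes
# probs = np.array([[[0.9, 0.1], [0.5, 0.5]],
#                   [[0.7, 0.3], [0.4, 0.6]]])
# mean_predictive_entropy(probs)  # -> one entropy value per sample
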
def run_range_hyper(args):
    for n in [10, 100]:
        print('Testing {} model ensemble'.format(n))
        model, paths = get_model_and_paths(args)
        for path in paths:
            accs = []
            netE, netD, W1, W2, W3 = utils.load_hypernet_mnist(args, path)
            nets = [netE.eval(), W1.eval(), W2.eval(), W3.eval()]
            for _ in range(10):  # repeat the accuracy test 10 times
                acc = test_f(args, path, [netE, W1, W2, W3], n)
                print(acc)
                #l1, l2, l3, codes = utils.sample_hypernet_mnist(args, nets, n)
                # test ensemble on mnist using majority vote (see majority_vote below)
                #acc, loss = test_acc_loss(args, [l1, l2, l3], args.names, model, n)
                accs.append(acc * 100)
            accs = np.array(accs)
            print('acc mean: {}, std: {}'.format(accs.mean(), accs.std()))
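

# Hedged sketch of the majority-vote path referenced (commented out) in
# run_range_hyper (assumption: `preds` holds hard class predictions of shape
# [n_members, n_samples]; the real pipeline would get these from the sampled
# networks).
def majority_vote(preds):
    n_classes = preds.max() + 1
    # per sample, count the votes each class receives
    counts = np.apply_along_axis(np.bincount, 0, preds, minlength=n_classes)
    return counts.argmax(axis=0)  # most-voted class per sample

# usage: 3 members, 3 samples
# preds = np.array([[1, 2, 3], [1, 2, 0], [1, 0, 3]])
# majority_vote(preds)  # -> array([1, 2, 3])
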
def init_test_diversity(args, paths, model, n):
    accs, losses, cv = [], [], []
    # generate ensemble of size n
    for i, path in enumerate(paths):
        netE, netD, W1, W2, W3 = utils.load_hypernet_mnist(args, path)
        nets = [netE.eval(), W1.eval(), W2.eval(), W3.eval()]
        l1, l2, l3, codes = utils.sample_hypernet_mnist(args, nets, n)
        # test ensemble on mnist using majority vote
        acc, loss = test_acc_loss(args, [l1, l2, l3], args.names, model, n)
        num = np.random.randint(n)
        #acc, loss = test_acc_single(args, [[l1[num]], [l2[num]], [l3[num]]], args.names, model, 1)
        m, s = get_stats(l1, l2, l3, inspect=l2)
        idx = i * 100 
        #print ('ITER {}: Acc: {}, Loss: {} Nmean: {} Nstd: {}\n'.format(idx, acc, loss, m, s))
        accs.append(acc)
        losses.append(loss)
        cv.append(s/m)
    return accs, losses, cv
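

# Why init_test_diversity tracks s/m: the coefficient of variation (std over
# mean) of the sampled weight norms is scale-free, so it compares ensemble
# diversity across checkpoints whose weights differ in overall magnitude.
# A tiny worked example with made-up norms:
# norms = np.array([2.0, 2.2, 1.8, 2.1])
# norms.std() / norms.mean()  # ~0.07 -> members are nearly identical
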
def run_normality(args):
    n = 100
    from scipy.stats import normaltest
    with torch.no_grad():
        model, paths = get_model_and_paths(args)
        for i, path in enumerate(paths):
            netE, netD, W1, W2, W3 = utils.load_hypernet_mnist(args, path)
            nets = [netE.eval(), W1.eval(), W2.eval(), W3.eval()]
            l1, l2, l3, codes = utils.sample_hypernet_mnist(args, nets, n)
            # normality test on the sampled latent codes (see demo_normaltest below)
            for code in codes:
                code = code[0]
                k2, p = normaltest(code.cpu().numpy())
                print('mean: {}, std: {}, k2: {}, p: {}'.format(
                    code.mean(), code.std(), k2, p))
                print(code.shape)
            bigcode = torch.stack(codes)
            print(bigcode.shape)
            bigcode = bigcode.view(-1).cpu().numpy()
            k2, p = normaltest(bigcode)
            print('mean: {}, std: {}, k2: {}, p: {}'.format(
                bigcode.mean(), bigcode.std(), k2, p))
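

# Self-contained sketch of the check in run_normality: scipy's normaltest is
# the D'Agostino-Pearson K^2 test, where a small p-value rejects the
# hypothesis that the sample is normal. (Assumption: synthetic arrays stand
# in for the sampled latent codes.)
def demo_normaltest():
    from scipy.stats import normaltest
    rng = np.random.default_rng(0)
    for name, sample in [('gaussian', rng.normal(size=1000)),
                         ('uniform', rng.uniform(-1, 1, size=1000))]:
        k2, p = normaltest(sample)
        # expect a large p for the gaussian draw and p near 0 for the uniform
        print('{}: k2={:.2f}, p={:.4f}'.format(name, k2, p))
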
def load_test_hypergan():
    args = arg.load_mnist_args()
    model = mnist_clf.Small2().cuda()
    modeldef = netdef.nets()[args.target]
    names = modeldef['layer_names']
    paths = natsorted(glob('saved_models/mnist/noadds/*.pt'))
    # order checkpoints by the numeric value embedded in the filename
    paths = sorted(paths, key=lambda i: float(i[44:-3]))
    print(paths)

    def get_stats(l1, l2, l3, inspect):
        acc, loss = test_mnist(args, [l1, l2, l3], names, model)
        norms = []
        for i in range(len(inspect)):
            norms.append(np.linalg.norm(inspect[i].detach()))
        print('Acc: {}, Loss: {}, Norm: {}'.format(acc, loss,
                                                   np.array(norms).mean()))
        print('mean: {}, std: {}'.format(
            np.array(norms).mean(),
            np.array(norms).std()))
        return acc

    full, stds, bias = [], [], []
    for path in paths:
        netE, netD, W1, W2, W3 = utils.load_hypernet_mnist(args, path)
        print('Starting output test')
        accs = []
        for i in range(4):
            l1, l2, l3, codes = utils.sample_hypernet_mnist(
                args, [netE, W1, W2, W3], 32)
            acc = get_stats(l1, l2, l3, inspect=l2)
            accs.append(acc)
        stds.append(np.array(accs).std())
        full.append(np.array(accs).mean())
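        # the zero-code probe below isolates the generators' input-independent
        # (bias) component; see demo_zero_input_bias after this function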
        print('\ntesting with 0 vector\n')
        x_dist = utils.create_d(args.ze)
        z = utils.sample_d(x_dist, args.batch_size)
        code1, code2, code3 = netE(z)
        fake1 = W1(torch.zeros_like(code1).cuda())
        fake2 = W2(torch.zeros_like(code2).cuda())
        fake3 = W3(torch.zeros_like(code3).cuda())
        acc = get_stats(fake1, fake2, fake3, inspect=fake2)
        bias.append(acc)
        print('\nInput test\n')
        norms = []
        for i in range(len(code2)):
            norms.append(np.linalg.norm(code2[i].detach()))
        print('mean: {}, std: {}'.format(
            np.array(norms).mean(),
            np.array(norms).std()))

    plt.plot(range(len(bias)), np.array(bias), color='r', label='Zero input')
    plt.errorbar(range(len(full)),
                 np.array(full),
                 np.array(stds),
                 color='b',
                 label='Gaussian input')
    plt.legend(loc='best')
    plt.grid(True)
    plt.xlabel('training checkpoint')
    plt.ylabel('accuracy')
    plt.title('bias power over training')
    plt.show()
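

# What the zero-vector test above isolates: with an all-zero code, an affine
# layer can only emit its bias term, so the accuracy of fake1/fake2/fake3
# measures what the generator has baked into its input-independent part.
# A toy sketch (assumption: a single nn.Linear stands in for W1):
def demo_zero_input_bias():
    gen = torch.nn.Linear(8, 4)
    with torch.no_grad():
        out = gen(torch.zeros(1, 8))
    # True: a zero input exposes exactly the layer's bias
    print(torch.allclose(out, gen.bias.unsqueeze(0)))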


def get_stats(l1, l2, l3, inspect):
    # summarize the sampled layer weights by the mean/std of their L2 norms
    norms = []
    for i in range(len(inspect)):
        norms.append(np.linalg.norm(inspect[i].detach()))
    m, s = np.array(norms).mean(), np.array(norms).std()
    return m, s


args = arg.load_mnist_args()
model = mnist_clf.Small2().cuda()
modeldef = netdef.nets()[args.target]
names = modeldef['layer_names']
paths = natsorted(glob('saved_models/mnist/noadds/*.pt'))
paths = sorted(paths, key=lambda i: float(i[44:-3]))
accs, losses, cv = [], [], []
for path in paths:
    netE, netD, W1, W2, W3 = utils.load_hypernet_mnist(args, path)
    l1, l2, l3, codes = utils.sample_hypernet_mnist(
        args, [netE.eval(), W1.eval(), W2.eval(), W3.eval()], 1)
    acc, loss = test_mnist(args, [l1, l2, l3], names, model, 1)
    m, s = get_stats(l1, l2, l3, inspect=l2)
    print('Acc: {}, Loss: {}, Nmean: {}, Nstd: {}\n'.format(acc, loss, m, s))
    accs.append(acc)
    losses.append(loss)
    cv.append(s/m)
accs = np.array(accs)
losses = np.array(losses)
cv = np.array(cv)
plt.plot(range(len(accs)), accs, c='r', label='accuracy')
plt.plot(range(len(accs)), losses, c='g', label='cross entropy')
plt.plot(range(len(accs)), cv, c='b', label='coeff variation')

plt.legend(loc='best')
plt.show()