# Example 1
def test_call():
    """Smoke test: build a GBN on MNIST dims, compile its call graph,
    and dump the outputs for one batch."""
    mnist = MNIST(source='/Users/devon/Data/mnist.pkl.gz', batch_size=27)
    gbn = test_build_GBN(dim_in=mnist.dims[mnist.name])

    X = T.matrix('x', dtype=floatX)
    results, samples = gbn(X, X, n_samples=7)

    # Compile every sample and result output into a single function.
    outputs = samples.values() + results.values()
    f = theano.function([X], outputs)

    batch = mnist.next()[mnist.name]
    # Deliberate failure so the test runner prints the compiled outputs.
    assert False, f(batch)
# Example 2
def test_infer():
    data_iter = MNIST(source='/Users/devon/Data/mnist.pkl.gz', batch_size=27)
    gbn = test_vae.test_build_GBN(dim_in=data_iter.dims[data_iter.name])

    gdir = test_build_gdir(gbn)

    X = T.matrix('x', dtype=floatX)

    inference_args = dict(n_inference_samples=13,
                          n_inference_steps=17,
                          pass_gradients=True)

    rval, constants, updates = gdir.inference(X, X, **inference_args)

    f = theano.function([X], rval.values(), updates=updates)
    x = data_iter.next()[data_iter.name]

    results, samples, full_results, updates = gdir(X, X, **inference_args)
    f = theano.function([X], results.values(), updates=updates)

    print f(x)
# Example 3
def eval_model(model_file,
               steps=50,
               data_samples=10000,
               out_path=None,
               optimizer=None,
               optimizer_args=dict(),
               batch_size=100,
               valid_scores=None,
               mode='valid',
               prior='logistic',
               center_input=True,
               n_layers=2,
               z_init='recognition_net',
               inference_method='momentum',
               inference_rate=.01,
               rate=0.,
               n_mcmc_samples=20,
               posterior_samples=20,
               inference_samples=20,
               dataset=None,
               dataset_args=None,
               extra_inference_args=dict(),
               **kwargs):

    if rate > 0:
        inference_rate = rate

    model_args = dict(prior=prior,
                      n_layers=n_layers,
                      z_init=z_init,
                      inference_method=inference_method,
                      inference_rate=inference_rate,
                      n_inference_samples=inference_samples)

    models, _ = load_model(model_file, unpack, **model_args)

    if dataset == 'mnist':
        data_iter = MNIST(batch_size=data_samples,
                          mode=mode,
                          inf=False,
                          **dataset_args)
        valid_iter = MNIST(batch_size=500,
                           mode='valid',
                           inf=False,
                           **dataset_args)
    else:
        raise ValueError()

    model = models['sbn']
    tparams = model.set_tparams()

    # ========================================================================
    print 'Setting up Theano graph for lower bound'

    X = T.matrix('x', dtype=floatX)

    if center_input:
        print 'Centering input with train dataset mean image'
        X_mean = theano.shared(data_iter.mean_image.astype(floatX),
                               name='X_mean')
        X_i = X - X_mean
    else:
        X_i = X

    x, _ = data_iter.next()
    x_v, _ = valid_iter.next()

    dx = 100
    data_samples = min(data_samples, data_iter.n)
    xs = [x[i:(i + dx)] for i in range(0, data_samples, dx)]
    N = data_samples // dx

    print(
        'Calculating final lower bound and marginal with %d data samples, %d posterior samples '
        'with %d validated inference steps' %
        (N * dx, posterior_samples, steps))

    outs_s, updates_s = model(X_i,
                              X,
                              n_inference_steps=steps,
                              n_samples=posterior_samples,
                              calculate_log_marginal=True)
    f_lower_bound = theano.function([X],
                                    [outs_s['lower_bound'], outs_s['nll']] +
                                    outs_s['lower_bounds'] + outs_s['nlls'],
                                    updates=updates_s)
    lb_t = []
    nll_t = []
    nlls_t = []
    lbs_t = []

    pbar = ProgressBar(maxval=len(xs)).start()
    for i, x in enumerate(xs):
        outs = f_lower_bound(x)
        lb, nll = outs[:2]
        outs = outs[2:]
        lbs = outs[:len(outs) / 2]
        nlls = outs[len(outs) / 2:]
        lbs_t.append(lbs)
        nlls_t.append(nlls)
        lb_t.append(lb)
        nll_t.append(nll)
        pbar.update(i)

    lb_t = np.mean(lb_t)
    nll_t = np.mean(nll_t)
    lbs_t = np.mean(lbs_t, axis=0).tolist()
    nlls_t = np.mean(nlls_t, axis=0).tolist()
    print 'Final lower bound and NLL: %.2f and %.2f' % (lb_t, nll_t)
    print lbs_t
    print nlls_t

    if out_path is not None:
        plt.savefig(out_path)
        print 'Sampling from the prior'

        np.save(path.join(out_path, 'lbs.npy'), lbs_t)
        np.save(path.join(out_path, 'nlls.npy'), nlls_t)

        py_p = model.sample_from_prior()
        f_prior = theano.function([], py_p)

        samples = f_prior()
        data_iter.save_images(samples[:, None],
                              path.join(out_path, 'samples_from_prior.png'),
                              x_limit=10)