Example #1
import numpy as np
import craystack as cs


def test_flatten_rate():
    n = 1000

    # Pre-fill the initial message with random 16-bit symbols so the rate
    # comparison below is not dominated by empty-message overhead.
    init_data = np.random.randint(1 << 16, size=8 * n, dtype='uint64')

    init_message = cs.base_message((1,))

    for datum in init_data:
        init_message = cs.Uniform(16).push(init_message, datum)

    l_init = len(cs.flatten(init_message))

    # Random Bernoulli probabilities and matching samples.
    ps = np.random.rand(n, 1)
    data = np.random.rand(n, 1) < ps

    # Encode the samples one at a time with a scalar message head.
    message = init_message
    for p, datum in zip(ps, data):
        message = cs.Bernoulli(p, 14).push(message, datum)

    l_scalar = len(cs.flatten(message))

    # Encode the same samples in a single vectorized push.
    message = init_message
    message = cs.reshape_head(message, (n, 1))
    message = cs.Bernoulli(ps, 14).push(message, data)

    l_vector = len(cs.flatten(message))

    # The vectorized encoding should use at most ~0.1% more words than the
    # scalar encoding.
    assert (l_vector - l_init) / (l_scalar - l_init) - 1 < 0.001
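    # Hedged addition, not part of the original test: decode the vectorized
    # message again as a sanity check on top of the rate comparison. This
    # assumes pop(message) returns the message together with the decoded
    # symbols, as craystack codecs do in the examples here.
    message, decoded = cs.Bernoulli(ps, 14).pop(message)
    np.testing.assert_array_equal(decoded, data)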
Example #2
import numpy as np
import craystack as cs

# `rng` and `check_codec` come from the surrounding test module.

def test_bernoulli():
    precision = 4
    shape = (2, 3, 5)
    p = rng.random(shape)
    data = np.uint64(rng.random(shape) < p)
    check_codec(shape, cs.Bernoulli(p, precision), data)
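For reference, a round-trip helper in the spirit of `check_codec` might look like the sketch below. It assumes craystack's codec interface as used in these examples (push(message, data) returns the updated message, pop(message) returns the message together with the decoded data); `roundtrip_check` is an illustrative name, not the helper from the actual test suite.

import numpy as np
import craystack as cs

def roundtrip_check(head_shape, codec, data):
    # Encode onto a fresh message, then decode and compare.
    message = cs.base_message(head_shape)
    message = codec.push(message, data)
    message, decoded = codec.pop(message)
    np.testing.assert_array_equal(decoded, data)

With such a helper, the test above amounts to a single round trip of Bernoulli samples through cs.Bernoulli(p, precision).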
Example #3
# Imports assumed for this excerpt; the remaining names used below
# (batch_size, num_batches, num_images, prior_precision, q_precision,
# bernoulli_precision, rng, ag_tuple, BinaryVAE and
# torch_fun_to_numpy_fun) are defined earlier in the full example script.
import sys

import numpy as np
import torch
from torchvision import datasets

import craystack as cs
from craystack import bb_ans

latent_dim = 40
latent_shape = (batch_size, latent_dim)
latent_size = np.prod(latent_shape)
obs_shape = (batch_size, 28 * 28)
obs_size = np.prod(obs_shape)

## Setup codecs
# VAE codec
model = BinaryVAE(hidden_dim=100, latent_dim=40)
model.load_state_dict(torch.load('vae_params'))

rec_net = torch_fun_to_numpy_fun(model.encode)
gen_net = torch_fun_to_numpy_fun(model.decode)

obs_codec = lambda p: cs.Bernoulli(p, bernoulli_precision)

# View the flat message head as a (latent, observation) pair of arrays.
def vae_view(head):
    return ag_tuple((np.reshape(head[:latent_size], latent_shape),
                     np.reshape(head[latent_size:], obs_shape)))

# BB-ANS VAE codec, applied to the (latent, observation) view of the
# message head and repeated once per batch.
vae_append, vae_pop = cs.repeat(cs.substack(
    bb_ans.VAE(gen_net, rec_net, obs_codec, prior_precision, q_precision),
    vae_view), num_batches)

## Load mnist images
images = datasets.MNIST(sys.argv[1], train=False, download=True).data.numpy()
# Binarize by sampling: each pixel is 1 with probability (pixel value / 255).
images = np.uint64(rng.random_sample(np.shape(images)) < images / 255.)
images = np.split(np.reshape(images, (num_images, -1)), num_batches)

## Encode