Example #1

import numpy as np
import torchvision
from brancher.standard_variables import NormalVariable, EmpiricalVariable, RandomIndices

# Data (the opening of this snippet was cut off; the MNIST call and the two
# constants below are reconstructed from context and may differ from the source)
number_pixels = 28 * 28        # MNIST images are 28x28
number_output_classes = 10     # digits 0-9
train = torchvision.datasets.MNIST('./data',
                                   train=False,
                                   download=True,
                                   transform=None)
dataset_size = len(train)
# NOTE: in newer torchvision these attributes are named `data` and `targets`
input_variable = np.reshape(train.train_data.numpy(),
                            newshape=(dataset_size, number_pixels, 1))
output_labels = train.train_labels.numpy()

# Data sampling model
minibatch_size = 30
minibatch_indices = RandomIndices(dataset_size=dataset_size,
                                  batch_size=minibatch_size,
                                  name="indices",
                                  is_observed=True)
x = EmpiricalVariable(input_variable,
                      indices=minibatch_indices,
                      name="x",
                      is_observed=True)
labels = EmpiricalVariable(output_labels,
                           indices=minibatch_indices,
                           name="labels",
                           is_observed=True)

# Architecture parameters
number_hidden_units = 20
b1 = NormalVariable(np.zeros((number_hidden_units, 1)), 10 * np.ones(
    (number_hidden_units, 1)), "b1")
b2 = NormalVariable(np.zeros((number_output_classes, 1)), 10 * np.ones(
    (number_output_classes, 1)), "b2")
weights1 = NormalVariable(np.zeros(
    (number_hidden_units, number_pixels)), 10 * np.ones(
        (number_hidden_units, number_pixels)), "weights1")
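The listing for this example breaks off here. A plausible continuation, patterned on the two-layer iris model of Example #3 and the MNIST forward pass in Example #6 (the second weight matrix, the ReLU nonlinearity, and the extra imports below are assumptions, not from the original source):

from brancher.variables import ProbabilisticModel
from brancher.standard_variables import CategoricalVariable
import brancher.functions as BF

weights2 = NormalVariable(np.zeros(
    (number_output_classes, number_hidden_units)), 10 * np.ones(
        (number_output_classes, number_hidden_units)), "weights2")

# Forward pass: one hidden layer (nonlinearity assumed), then categorical logits
h = BF.relu(BF.matmul(weights1, x) + b1)
final_activations = BF.matmul(weights2, h) + b2
k = CategoricalVariable(logits=final_activations, name="k")

# Probabilistic model and observations
model = ProbabilisticModel([k])
k.observe(labels)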
Example #2

import numpy as np
from sklearn import datasets

from brancher.standard_variables import (NormalVariable, CategoricalVariable,
                                         EmpiricalVariable, RandomIndices)
import brancher.functions as BF

number_regressors = 4  # the iris dataset has four input features
number_output_classes = 3
dataset_size = 50
dataset = datasets.load_iris()
ind = list(range(dataset["target"].shape[0]))
np.random.shuffle(ind)
input_variable = dataset["data"][ind[:dataset_size], :].astype("float32")
output_labels = dataset["target"][ind[:dataset_size]].astype("int32")

# Data sampling model
minibatch_size = dataset_size
minibatch_indices = RandomIndices(dataset_size=dataset_size,
                                  batch_size=minibatch_size,
                                  name="indices",
                                  is_observed=True)
x = EmpiricalVariable(input_variable,
                      indices=minibatch_indices,
                      name="x",
                      is_observed=True)
labels = EmpiricalVariable(output_labels,
                           indices=minibatch_indices,
                           name="labels",
                           is_observed=True)

# Architecture parameters
weights = NormalVariable(
    np.zeros((number_output_classes, number_regressors)), 10 * np.ones(
        (number_output_classes, number_regressors)), "weights")

# Forward pass
final_activations = BF.matmul(weights, x)
k = CategoricalVariable(logits=final_activations, name="k")
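The excerpt stops at the forward pass. The usual closing steps, following the MNIST script in Example #6 and the inference call in Example #4, would plausibly look as follows; the variational model and the hyperparameter values are illustrative assumptions, not from the original source:

from brancher.variables import ProbabilisticModel
from brancher import inference

# Probabilistic model and observations
model = ProbabilisticModel([k])
k.observe(labels)

# Mean-field variational model over the weights (learnable parameters)
Qweights = NormalVariable(np.zeros((number_output_classes, number_regressors)),
                          np.ones((number_output_classes, number_regressors)),
                          "weights", learnable=True)
model.set_posterior_model(ProbabilisticModel([Qweights]))

# Stochastic variational inference
inference.perform_inference(model,
                            number_iterations=1000,
                            number_samples=1,
                            optimizer="Adam",
                            lr=0.005)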
Example #3

dataset_size = 50
dataset = datasets.load_iris()
ind = list(range(dataset["target"].shape[0]))
np.random.shuffle(ind)
input_variable = dataset["data"][ind[:dataset_size], :].astype("float32")
output_labels = dataset["target"][ind[:dataset_size]].astype("int32")

# Data sampling model
minibatch_size = dataset_size
minibatch_indices = RandomIndices(dataset_size=dataset_size,
                                  batch_size=minibatch_size,
                                  name="indices",
                                  is_observed=True)
x = EmpiricalVariable(input_variable,
                      indices=minibatch_indices,
                      name="x",
                      is_observed=True)
labels = EmpiricalVariable(output_labels,
                           indices=minibatch_indices,
                           name="labels",
                           is_observed=True)

# Architecture parameters
weights1 = NormalVariable(
    np.zeros((number_hidden_nodes, number_regressors)), 10 * np.ones(
        (number_hidden_nodes, number_regressors)), "weights1")
weights2 = NormalVariable(
    np.zeros((number_output_classes, number_hidden_nodes)),
    10 * np.ones(
        (number_output_classes, number_hidden_nodes)), "weights2")
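The forward pass is not shown in this excerpt. By analogy with Example #2, and assuming the same imports, it would plausibly continue like this (the `h` intermediate and the ReLU nonlinearity are assumptions):

# Forward pass: hidden layer followed by categorical logits
h = BF.relu(BF.matmul(weights1, x))
final_activations = BF.matmul(weights2, h)
k = CategoricalVariable(logits=final_activations, name="k")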
Example #4
        output_log_sd = self.l3(h)
        return {"mean": output_mean, "sd": self.softplus(output_log_sd) + 0.01}


# Initialize encoder and decoders
encoder = BF.BrancherFunction(EncoderArchitecture(image_size=image_size, latent_size=latent_size))
decoder = BF.BrancherFunction(DecoderArchitecture(latent_size=latent_size, image_size=image_size))

# Generative model
z = NormalVariable(np.zeros((latent_size,)), np.ones((latent_size,)), name="z")
decoder_output = decoder(z)
x = NormalVariable(decoder_output["mean"], decoder_output["sd"], name="x")
model = ProbabilisticModel([x, z])

# Amortized variational distribution
Qx = EmpiricalVariable(dataset, batch_size=50, name="x", is_observed=True)
encoder_output = encoder(Qx)
Qz = NormalVariable(encoder_output["mean"], encoder_output["sd"], name="z")
model.set_posterior_model(ProbabilisticModel([Qx, Qz]))

# Joint-contrastive inference
inference.perform_inference(model,
                            number_iterations=5000,
                            number_samples=1,
                            optimizer="Adam",
                            lr=0.005)
loss_list = model.diagnostics["loss curve"]

# Plot results
plt.plot(loss_list)
plt.show()
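After training, new digits can be generated by sampling the generative model. A minimal sketch, assuming Brancher's `get_sample` interface (which returns one sampled value per variable name); this is not part of the original listing:

# Generate from the trained model (sketch; `get_sample` usage assumed)
sample = model.get_sample(1)
generated_image = sample["x"]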
Example #5
decoder_output3 = DeterministicVariable(decoder3(z3),
                                        name="decoder_output3")
x = BinomialVariable(total_count=1,
                     logits=decoder_output3["mean"],
                     name="x")
model = ProbabilisticModel([x, z1, z2, z3, labels])

# Amortized variational distribution

minibatch_indices = RandomIndices(dataset_size=dataset_size,
                                  batch_size=b_size,
                                  name="indices",
                                  is_observed=True)

Qx = EmpiricalVariable(dataset,
                       indices=minibatch_indices,
                       name="x",
                       is_observed=True)

Qlabels = EmpiricalVariable(output_labels,
                            indices=minibatch_indices,
                            name="labels",
                            is_observed=True)

encoder_output1 = DeterministicVariable(encoder1(Qx),
                                        name="encoder_output1")
Qz3 = NormalVariable(encoder_output1["mean"],
                     encoder_output1["sd"],
                     name="z3")
encoder_output2 = DeterministicVariable(encoder2(encoder_output1["mean"]),
                                        name="encoder_output2")
Qz2 = NormalVariable(encoder_output2["mean"],
                     encoder_output2["sd"],
                     name="z2")
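The listing breaks off after the second encoder stage. By the pattern of the two rungs above, the final stage and the posterior model would presumably read as follows; the `encoder3` name and the posterior variable list are assumptions, not from the original source:

encoder_output3 = DeterministicVariable(encoder3(encoder_output2["mean"]),
                                        name="encoder_output3")
Qz1 = NormalVariable(encoder_output3["mean"],
                     encoder_output3["sd"],
                     name="z1")
model.set_posterior_model(ProbabilisticModel([Qx, Qlabels, Qz1, Qz2, Qz3]))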
Example #6
import matplotlib.pyplot as plt
import numpy as np

from brancher.variables import DeterministicVariable, ProbabilisticModel
from brancher.standard_variables import NormalVariable, EmpiricalVariable
from brancher import inference
import brancher.functions as BF

# Data (omitted in this listing; defines `dataset`)

# Neural architectures
# Encoder (omitted; defines `encoder`)
# Decoder (omitted; defines `decoder`)

# Generative model
latent_size = (10, )
z = NormalVariable(np.zeros(latent_size), np.ones(latent_size), name="z")
decoder_output = decoder(z)
x = NormalVariable(decoder_output["mean"],
                   BF.exp(decoder_output["log_var"]),
                   name="x")
model = ProbabilisticModel([x, z])

# Amortized variational distribution
Qx = EmpiricalVariable(dataset, name="x")
encoder_output = encoder(Qx)
Qz = NormalVariable(encoder_output["mean"],
                    BF.exp(encoder_output["log_var"]),
                    name="z")
variational_model = ProbabilisticModel([Qx, Qz])
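The skeleton stops after building the variational model. The closing steps, following the pattern of Example #4, would plausibly be (the hyperparameter values are placeholders, not from the original):

# Joint-contrastive inference, as in Example #4
model.set_posterior_model(variational_model)
inference.perform_inference(model,
                            number_iterations=1000,
                            number_samples=1,
                            optimizer="Adam",
                            lr=0.001)
loss_list = model.diagnostics["loss curve"]
plt.plot(loss_list)
plt.show()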
import numpy as np
import chainer

from brancher.variables import ProbabilisticModel
from brancher.standard_variables import (NormalVariable, CategoricalVariable,
                                         EmpiricalVariable, RandomIndices)
import brancher.functions as BF
from brancher import inference
from brancher.inference import WassersteinVariationalGradientDescent as WVGD

# Data
number_pixels = 28*28
number_output_classes = 10
train, test = chainer.datasets.get_mnist()
#dataset_size = len(train)
dataset_size = 50
input_variable = np.array([np.reshape(image[0], newshape=(number_pixels, 1)) for image in train][0:dataset_size]).astype("float32")
output_labels = np.array([image[1]*np.ones((1, 1)) for image in train][0:dataset_size]).astype("int32")

# Data sampling model
minibatch_size = 50
minibatch_indices = RandomIndices(dataset_size=dataset_size, batch_size=minibatch_size, name="indices", is_observed=True)
x = EmpiricalVariable(input_variable, indices=minibatch_indices, name="x", is_observed=True)
labels = EmpiricalVariable(output_labels, indices=minibatch_indices, name="labels", is_observed=True)

# Architecture parameters
weights = NormalVariable(np.zeros((number_output_classes, number_pixels)),
                         10*np.ones((number_output_classes, number_pixels)), "weights")

# Forward pass
final_activations = BF.matmul(weights, x)
k = CategoricalVariable(softmax_p=final_activations, name="k")

# Probabilistic model
model = ProbabilisticModel([k])

# Observations
k.observe(labels)
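The script ends at the observation step. The WVGD import suggests Wasserstein variational gradient descent was run downstream, but its setup is not shown here; a minimal mean-field alternative, following the pattern of Example #4 (the variational model and hyperparameters are illustrative assumptions), would be:

# Mean-field variational model and stochastic variational inference
Qweights = NormalVariable(np.zeros((number_output_classes, number_pixels)),
                          np.ones((number_output_classes, number_pixels)),
                          "weights", learnable=True)
model.set_posterior_model(ProbabilisticModel([Qweights]))
inference.perform_inference(model,
                            number_iterations=500,
                            number_samples=1,
                            optimizer="Adam",
                            lr=0.005)
loss_list = model.diagnostics["loss curve"]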
import numpy as np

import chainer
import chainer.links as L
import chainer.functions as F

from brancher.variables import DeterministicVariable, RandomVariable, ProbabilisticModel
from brancher.standard_variables import NormalVariable, EmpiricalVariable, RandomIndices
from brancher.functions import BrancherFunction
import brancher.functions as BF

## Data ##
dataset_size = 100
number_dimensions = 1
dataset1 = np.random.normal(0, 1, (dataset_size, number_dimensions))

## Variables ##
indices = RandomIndices(dataset_size=dataset_size, batch_size=5, name="indices")
a = EmpiricalVariable(dataset1, indices=indices, name='a', is_observed=True)
b = EmpiricalVariable(dataset1, indices=indices, name='b', is_observed=True)

model = ProbabilisticModel([a, b])


## Sample ##
# Both variables share the same random indices, so each joint sample picks
# out the same rows of dataset1 for `a` and `b`; the two printouts match.
samples = model._get_sample(1)

print(samples[a])
print(samples[b])