Example #1
# Generate data: draw 20 samples of y with the latents clamped to z1=1 and z2=0
data = y.get_sample(20, input_values={z1: 1, z2: 0})
data.hist(bins=20)
plt.show()

# Condition the model on the generated data
y.observe(data)

# Variational model: independent learnable Bernoulli factors for z1 and z2
Qz1 = BernulliVariable(logits=0., name="z1", learnable=True)
Qz2 = BernulliVariable(logits=0., name="z2", learnable=True)
variational_model = ProbabilisticModel([Qz1, Qz2])
model.set_posterior_model(variational_model)

# Joint-contrastive inference
inference.perform_inference(
    model,
    inference_method=ReverseKL(gradient_estimator=Taylor1Estimator),
    number_iterations=600,
    number_samples=20,
    optimizer="SGD",
    lr=0.001)
loss_list = model.diagnostics["loss curve"]

# Plot the training loss curve
plt.plot(loss_list)
plt.show()

# Plot histograms of 200 posterior samples
model.get_posterior_sample(200).hist(bins=20)
plt.show()
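
# The model definitions this example relies on (z1, z2, y, model) are not shown above.
# A minimal sketch of a compatible generative model, built only from constructors that
# appear elsewhere on this page; the import paths and the mean 2.*z1 + z2 are assumptions,
# not the original code.
from brancher.variables import ProbabilisticModel
from brancher.standard_variables import BernulliVariable, NormalVariable

z1 = BernulliVariable(logits=0., name="z1")
z2 = BernulliVariable(logits=0., name="z2")
y = NormalVariable(2.*z1 + z2, 1., name="y")  # observed variable; its mean depends on both latents
model = ProbabilisticModel([y, z1, z2])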
Example #2
# Variational process #
#Qx0 = Normal(0, 1, "x_0", learnable=True)
#QX = MarkovProcess(Qx0, lambda t, x: Normal(0., 0.5, name="x_{}".format(t), has_bias=False, learnable=True))
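# Explicit variational chain: x_10 is pinned near its observed value, and each later x_t
# is a Normal centered on the previous variable with a learnable bias and fixed scale 0.25.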
Qx10 = Normal(float(temporal_sample[9:10].values), 0.25, "x_10")
QX = [Qx10]
for idx in range(11, 30):
    QX.append(Normal(QX[idx-11], 0.25, "x_{}".format(idx), has_bias=True, learnable=True))
QX = ProbabilisticModel(QX)

#X.set_posterior_model(process=QX)

## Perform ML inference ##
perform_inference(X,
                  posterior_model=QX,
                  inference_method=ReverseKL(),
                  number_iterations=3000,
                  number_samples=50,
                  optimizer="SGD",
                  lr=0.005)
loss_list = X.active_submodel.diagnostics["loss curve"]
plt.plot(loss_list)
plt.show()


## Sample ##
post_temporal_sample = X.get_posterior_timeseries_sample(100, query_points=100)
post_temporal_sample.plot(alpha=0.25)
temporal_sample.plot()
plt.show()
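
# The prior process X and the observed series temporal_sample are defined elsewhere and are
# not shown in this snippet. A sketch of a matching random-walk prior, following the
# MarkovProcess pattern from the commented-out lines above (the unit-normal start, the 0.25
# transition scale, and the availability of MarkovProcess are assumptions, not the original code):
x0 = Normal(0., 1., "x_0")
X = MarkovProcess(x0, lambda t, x: Normal(x, 0.25, name="x_{}".format(t)))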
Example #3
# Generative model: a latent code z is decoded into Bernoulli pixel probabilities
# (the opening of this definition is cut off in the source; a standard-normal prior is assumed).
z = NormalVariable(np.zeros((latent_size, )),
                   np.ones((latent_size, )),
                   name="z")
decoder_output = DeterministicVariable(decoder(z), name="decoder_output")
x = BinomialVariable(total_count=1, logits=decoder_output["mean"], name="x")
model = ProbabilisticModel([x, z])

# Amortized variational distribution
Qx = EmpiricalVariable(dataset, batch_size=100, name="x", is_observed=True)
encoder_output = DeterministicVariable(encoder(Qx), name="encoder_output")
Qz = NormalVariable(encoder_output["mean"], encoder_output["sd"], name="z")
model.set_posterior_model(ProbabilisticModel([Qx, Qz]))

# Joint-contrastive inference
inference.perform_inference(
    model,
    inference_method=ReverseKL(gradient_estimator=PathwiseDerivativeEstimator),
    number_iterations=1000,
    number_samples=1,
    optimizer="Adam",
    lr=0.001)
loss_list = model.diagnostics["loss curve"]

# Plot the training loss curve
plt.plot(loss_list)
plt.show()
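
# Quick check of the fitted model, reusing calls from Example #1 (sample sizes are arbitrary):
prior_draws = x.get_sample(5)                    # images drawn from the generative model
posterior_draws = model.get_posterior_sample(5)  # draws from the amortized posterior, as in Example #1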

sigmoid = lambda x: 1 / (np.exp(-x) + 1)
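# Sweep the latent range, decode each code, and collect the decoded images into a grid;
# sigmoid maps the decoder logits back to pixel probabilities.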
image_grid = []
z_range = np.linspace(-3, 3, 30)
for z1 in z_range:
    image_row = []