Example #1
# Beta/Binomial model
# (Imports added for completeness; the paths follow the brancher package
# layout used elsewhere in these examples.)
import matplotlib.pyplot as plt

from brancher.variables import ProbabilisticModel
from brancher.standard_variables import BetaVariable, BinomialVariable
from brancher import inference
from brancher.visualizations import plot_posterior

number_tosses = 1
p = BetaVariable(1., 1., "p")
k = BinomialVariable(number_tosses, probs=p, name="k")
model = ProbabilisticModel([k, p])

# Generate data
p_real = 0.8
data = model.get_sample(number_samples=30, input_values={p: p_real})

# Observe data
k.observe(data)

# Inference
inference.perform_inference(model,
                            number_iterations=1000,
                            number_samples=500,
                            lr=0.1,
                            optimizer='SGD')
loss_list = model.diagnostics["loss curve"]

# Plot loss
plt.plot(loss_list)
plt.title("Loss (negative ELBO)")
plt.show()

# Plot posterior
plot_posterior(model, variables=["p"])
plt.show()
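
# A quick check, not part of the original example: with data generated at
# p_real = 0.8, the posterior mean of "p" should land nearby. This assumes
# get_posterior_sample returns a pandas DataFrame, as its use elsewhere in
# these examples suggests.
posterior_samples = model.get_posterior_sample(1000)
print("Posterior mean of p:", posterior_samples["p"].mean())
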
## Sample ##
num_timepoints = 20
temporal_sample = Y.get_timeseries_sample(1,
                                          query_points=num_timepoints,
                                          input_values={
                                              sigma: 1.,
                                              b: 1.
                                          })
temporal_sample.plot()
plt.show()

## Observe model ##
data = temporal_sample
query_points = range(num_timepoints)
Y.observe(data, query_points)

## Perform ML inference ##
perform_inference(Y,
                  inference_method=ReverseKL(),
                  number_iterations=1000,
                  optimizer="SGD",
                  lr=0.005)
loss_list = Y.active_submodel.diagnostics["loss curve"]
plt.plot(loss_list)
plt.show()

## Sample ##
post_temporal_sample = Y.get_posterior_timeseries_sample(20, query_points=100)
post_temporal_sample.plot()
plt.show()
print("Done")
num_timepoints = 50
temporal_sample = X.get_timeseries_sample(1,
                                          query_points=num_timepoints,
                                          input_values={
                                              sigma: 1.,
                                              b: 1.,
                                              c: -0.05
                                          })
#temporal_sample.plot()
#plt.show()

## Observe model ##
data = temporal_sample
query_points = range(num_timepoints)
X.observe(data, query_points)

## Perform ML inference ##
perform_inference(X,
                  inference_method=MAP(),
                  number_iterations=1000,
                  optimizer="SGD",
                  lr=0.001)
loss_list = X.active_submodel.diagnostics["loss curve"]
plt.plot(loss_list)
plt.show()

## Sample ##
post_temporal_sample = X.get_posterior_timeseries_sample(20, query_points=100)
post_temporal_sample.plot()
plt.show()
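
# Hedged addition, mirroring the overlay used in a later example in this
# file: draw the posterior samples behind the observed series.
post_temporal_sample.plot(alpha=0.25)
temporal_sample.plot()
plt.show()
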
Example #4
# Observations
k.observe(labels)

# Variational Model
Qweights = NormalVariable(np.zeros((number_output_classes, number_pixels)),
                          0.1 * np.ones(
                              (number_output_classes, number_pixels)),
                          "weights",
                          learnable=True)
variational_model = ProbabilisticModel([Qweights])
model.set_posterior_model(variational_model)

# Inference
inference.perform_inference(model,
                            number_iterations=1000,
                            number_samples=10,
                            optimizer="Adam",
                            lr=0.005)

# Loss Curve
plt.plot(model.diagnostics["loss curve"])
plt.show()

# Test accuracy
num_images = 2000
test_size = len(test)
test_indices = RandomIndices(dataset_size=test_size,
                             batch_size=1,
                             name="test_indices",
                             is_observed=True)
test_images = EmpiricalVariable(np.reshape(test.test_data.numpy(),
                                           newshape=(test.test_data.shape[0],
                                                     number_pixels, 1)),
                                indices=test_indices,
                                name="x_test",
                                is_observed=True)

        # Importance sampling distributions. The head of this comprehension
        # was lost in extraction; the opening lines below are an assumed
        # reconstruction mirroring the "weights2" sampler that survived.
        variational_samplers = [
            ProbabilisticModel([
                NormalVariable(loc=loc1,
                               scale=0.1,
                               name="weights1",
                               learnable=True),
                NormalVariable(loc=loc2,
                               scale=0.1,
                               name="weights2",
                               learnable=True)
            ]) for loc1, loc2 in zip(initial_locations1, initial_locations2)
        ]

        # Inference
        inference_method = WVGD(variational_samplers=variational_samplers,
                                particles=particles,
                                biased=False)
        inference.perform_inference(model,
                                    inference_method=inference_method,
                                    number_iterations=1000,
                                    number_samples=100,
                                    optimizer="Adam",
                                    lr=0.0025,
                                    posterior_model=particles,
                                    pretraining_iterations=0)
        loss_list = model.diagnostics["loss curve"]

        # Test accuracy
        test_size = len(ind[dataset_size:])
        num_images = test_size * 3
        test_indices = RandomIndices(dataset_size=test_size,
                                     batch_size=1,
                                     name="test_indices",
                                     is_observed=True)
        test_images = EmpiricalVariable(
            dataset["data"][ind[dataset_size:], :].astype("float32"),
            indices=test_indices,
            name="x_test",  # assumed completion, mirroring the full call later in this file
            is_observed=True)

            if t in y_range:
                l = 1.
            else:
                l = 1.
            Qx_mean.append(RootVariable(0, x_names[t] + "_mean", learnable=True))
            Qlambda.append(RootVariable(l, x_names[t] + "_lambda", learnable=True))
            new_mu = (-1 - omega ** 2 * dt ** 2 + b * dt) * Qx[t - 2] + (2 - b * dt) * Qx[t - 1]
            Qx.append(NormalVariable(BF.sigmoid(Qlambda[t])*new_mu + (1 - BF.sigmoid(Qlambda[t]))*Qx_mean[t],
                                     np.sqrt(dt) * driving_noise, x_names[t], learnable=True))
        variational_posterior = ProbabilisticModel(Qx)
        AR_model.set_posterior_model(variational_posterior)

        # Inference #
        inference.perform_inference(AR_model,
                                    number_iterations=N_itr,
                                    number_samples=N_smpl,
                                    optimizer=optimizer,
                                    lr=lr)

        loss_list1 = AR_model.diagnostics["loss curve"]

        # ELBO
        ELBO1.append(float(AR_model.estimate_log_model_evidence(N_ELBO_smpl).detach().numpy()))
        print("PE {}".format(ELBO1[-1]))

        # Mean-field variational distribution #
        Qx = [NormalVariable(0., 1., 'x0', learnable=True)]

        for t in range(1, T):
            Qx.append(NormalVariable(0, 2., x_names[t], learnable=True))
        variational_posterior = ProbabilisticModel(Qx)
Example #7
# Generate data
data = y.get_sample(20, input_values={z1: 1, z2: 0})
data.hist(bins=20)
plt.show()

# Observe data
y.observe(data)

# Variational Model
Qz1 = BernulliVariable(logits=0., name="z1", learnable=True)
Qz2 = BernulliVariable(logits=0., name="z2", learnable=True)
variational_model = ProbabilisticModel([Qz1, Qz2])
model.set_posterior_model(variational_model)

# Joint-contrastive inference
inference.perform_inference(
    model,
    inference_method=ReverseKL(gradient_estimator=Taylor1Estimator),
    number_iterations=600,
    number_samples=20,
    optimizer="SGD",
    lr=0.001)
loss_list = model.diagnostics["loss curve"]

# Plot results
plt.plot(loss_list)
plt.show()

# Plot posterior
model.get_posterior_sample(200).hist(bins=20)
plt.show()
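
# Hedged follow-up (same DataFrame assumption as in the earlier check): for
# binary variables the column means estimate the posterior probabilities of
# z1 and z2.
posterior_samples = model.get_posterior_sample(1000)
print(posterior_samples[["z1", "z2"]].mean())
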
Example #8
#z2 = Deterministic(BF.relu(BF.matmul(W2, z1)), "z2")
#rho = Deterministic(0.1*BF.matmul(W3, z2), "rho")
rho = Deterministic(BF.matmul(V, x / 255), "rho")
k = Categorical(logits=rho, name="k")

# Observe
k.observe(labels)
model = ProbabilisticModel([k])

# Train
from brancher.inference import MaximumLikelihood
from brancher.inference import perform_inference

perform_inference(model,
                  inference_method=MaximumLikelihood(),
                  number_iterations=150,
                  optimizer="Adam",
                  lr=0.01)
loss_list = model.diagnostics["loss curve"]
plt.plot(loss_list)
print(loss_list[-1])
plt.show()

# Observe
observable_data = sample[[x.name
                          for x in x_series] + [y.name for y in y_series]]
dynamic_causal_model.observe(observable_data)

# Variational model
Qa = LogNormalVariable(0., 0.5, name="a", learnable=True)
Qb = LogNormalVariable(0., 0.5, name="b", learnable=True)
Qc = NormalVariable(0., 0.1, name="c", learnable=True)
Qd = NormalVariable(0., 0.1, name="d", learnable=True)
Qe = NormalVariable(0., 5., name="e", learnable=True)
Qxi = LogNormalVariable(0.1, 0.1, name="xi", learnable=True)
Qchi = LogNormalVariable(0.1, 0.1, name="chi", learnable=True)
variational_posterior = ProbabilisticModel([Qa, Qb, Qc, Qd, Qe, Qxi, Qchi])
dynamic_causal_model.set_posterior_model(variational_posterior)

# Inference #
inference.perform_inference(dynamic_causal_model,
                            number_iterations=100,
                            number_samples=5,
                            optimizer='Adam',
                            lr=0.01)
loss_list = dynamic_causal_model.diagnostics["loss curve"]
plt.plot(loss_list)
plt.show()

# Plot posterior
plot_posterior(dynamic_causal_model,
               variables=["a", "b", "c", "d", "e", "xi", "chi"])
plt.show()
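
# Hedged addition: a numerical summary of the approximate posterior, assuming
# get_posterior_sample returns a pandas DataFrame as in the other examples.
post_samples = dynamic_causal_model.get_posterior_sample(1000)
print(post_samples[["a", "b", "c", "d", "e", "xi", "chi"]].describe())
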
Example #10
# Importance sampling distributions
variational_samplers = [
    ProbabilisticModel(
        [NormalVariable(mu=location, sigma=0.1, name="theta", learnable=True)])
    for location in initial_locations
]

# Inference
inference_method = WVGD(variational_samplers=variational_samplers,
                        particles=particles,
                        biased=False,
                        number_post_samples=20000)
inference.perform_inference(model,
                            inference_method=inference_method,
                            number_iterations=800,
                            number_samples=50,
                            optimizer=chainer.optimizers.Adam(0.005),
                            posterior_model=particles,
                            pretraining_iterations=0)
loss_list = model.diagnostics["loss curve"]

# Local variational models
plt.plot(loss_list)
plt.show()

# Samples
print(inference_method.weights)
M = 2000
[sampler._get_sample(M) for sampler in inference_method.sampler_model]
samples = [sampler.get_sample(M) for sampler in inference_method.sampler_model]
# (Assumed completion of a call truncated in extraction: plot the ensemble of
# "theta" samples, weighted by the learned importance weights.)
ensemble_histogram(samples,
                   variable="theta",
                   weights=inference_method.weights)
plt.show()
Example #11
# (Assumed head of a statement truncated in extraction: a standard-normal
# base variable for the planar normalizing flow below.)
z = NormalVariable(torch.zeros((T, 1)),
                   torch.ones((T, 1)),
                   "z",
                   learnable=True)
Qtrz = PlanarFlow(w2, u2, b2)(PlanarFlow(w1, u1, b1)(z))

Qx = []
for t in range(0, T):
    Qx.append(DeterministicVariable(Qtrz[t], name=x_names[t]))

variational_model = ProbabilisticModel(Qx)
AR_model.set_posterior_model(variational_model)

# Inference #
inference.perform_inference(AR_model,
                            number_iterations=300,
                            number_samples=50,
                            optimizer="Adam",
                            lr=0.01)

loss_list = AR_model.diagnostics["loss curve"]

# Statistics
posterior_samples = AR_model._get_posterior_sample(2000)

x_mean = []
lower_bound = []
upper_bound = []
for xt in x:
    x_posterior_samples = posterior_samples[xt].detach().numpy().flatten()
    mean = np.mean(x_posterior_samples)
    sd = np.sqrt(np.var(x_posterior_samples))
Example #12
k.observe(labels)

# Variational model
num_particles = 5 #10
initial_locations = [np.random.normal(0., 1., (number_output_classes, number_regressors))
                     for _ in range(num_particles)]
particles = [ProbabilisticModel([RootVariable(location, name="weights", learnable=True)])
             for location in initial_locations]
initial_particles = copy.deepcopy(particles)

# Inference
inference_method = SVGD()
inference.perform_inference(model,
                            inference_method=inference_method,
                            number_iterations=3000,
                            number_samples=100,
                            optimizer="SGD",
                            lr=0.0025,
                            posterior_model=particles)
loss_list = model.diagnostics["loss curve"]

# Local variational models
plt.plot(loss_list)
plt.show()
plot_particles(initial_particles, "weights", 1, 2, c="r")
plot_particles(particles, "weights", 1, 2, c="b")
plt.show()

# Test accuracy
test_size = len(ind[dataset_size:])
num_images = test_size * 3

from brancher import geometric_ranges

x = DeterministicVariable(1., "x", is_observed=True)
y = NormalVariable(-1., 0.1, "y", is_observed=True)
z = NormalVariable(0., 0.1, "z", is_observed=True)
w = DirichletVariable(np.ones((3, 1)), "w", is_policy=True, learnable=True)
r = DeterministicVariable((w[0] * x + w[1] * y + w[2] * z),
                          "r",
                          is_reward=True,
                          is_observed=True)

model = ProbabilisticModel([w, x, y, z, r])

print(model.get_average_reward(10))

# Train control
num_itr = 3000
inference.perform_inference(model,
                            inference_method=inference.MaximumLikelihood(),
                            number_iterations=num_itr,
                            number_samples=9,
                            optimizer="Adam",
                            lr=0.01)
reward_list = model.diagnostics[
    "reward curve"]  #TODO: Very important. Solve the trained determinant problem. (it should be possible to specify which parameter is trainable)

print(model.get_sample(20)[["r"]])

plt.plot(reward_list)
plt.show()
print(model.get_average_reward(15))
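
# Hedged addition: inspect the learned policy. Since x is fixed at 1 while y
# and z have means -1 and 0, a trained policy should shift Dirichlet mass
# toward the first component.
print(model.get_sample(10)[["w"]])
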
model = ProbabilisticModel([k])

# Observations
k.observe(labels)

# Variational model
initial_weights = np.random.normal(0., 1.,
                                   (number_output_classes, number_regressors))
model.set_posterior_model(
    ProbabilisticModel(
        [RootVariable(initial_weights, name="weights", learnable=True)]))

# Inference
inference.perform_inference(model,
                            inference_method=MAP(),
                            number_iterations=3000,
                            number_samples=100,
                            optimizer="SGD",
                            lr=0.0025)
loss_list = model.diagnostics["loss curve"]
plt.plot(loss_list)
plt.show()

# Test accuracy
test_size = len(ind[dataset_size:])
num_images = test_size * 3
test_indices = RandomIndices(dataset_size=test_size,
                             batch_size=1,
                             name="test_indices",
                             is_observed=True)
test_images = EmpiricalVariable(
    dataset["data"][ind[dataset_size:], :].astype("float32"),
    indices=test_indices,
    name="x_test",  # assumed completion, mirroring the full call later in this file
    is_observed=True)

Qx = [NormalVariable(0., 1., 'x0', learnable=True)]
Qx_mean = [RootVariable(0., 'x0_mean', learnable=True)]
for t in range(1, T):
    Qx_mean.append(RootVariable(0., x_names[t] + "_mean", learnable=True))
    Qx.append(
        NormalVariable(logit_b_post * Qx[t - 1] + Qx_mean[t],
                       1.,
                       x_names[t],
                       learnable=True))
variational_posterior = ProbabilisticModel([Qb] + Qx)
AR_model.set_posterior_model(variational_posterior)

# Inference #
inference.perform_inference(AR_model,
                            number_iterations=200,
                            number_samples=100,
                            optimizer='Adam',
                            lr=0.05)

loss_list = AR_model.diagnostics["loss curve"]

# Statistics
posterior_samples = AR_model._get_posterior_sample(2000)
b_posterior_samples = posterior_samples[b].detach().numpy().flatten()
b_mean = np.mean(b_posterior_samples)
b_sd = np.sqrt(np.var(b_posterior_samples))

x_mean = []
lower_bound = []
upper_bound = []
for xt in x:
Example #16
print(sample)

# Print samples from single variable
x_sample = x.get_sample(10)
print(x_sample)

# Print samples conditional on an input
in_sample = model.get_sample(10, input_values={mu: 100.})
print(in_sample)

# Generate data
data = x_real._get_sample(number_samples=50)

# Observe data
x.observe(data[x_real][:, 0, :])

# Variational model
Qnu = LogNormalVariable(0., 1., "nu", learnable=True)
Qmu = NormalVariable(0., 1., "mu", learnable=True)
model.set_posterior_model(ProbabilisticModel([Qmu, Qnu]))

# Inference
inference.perform_inference(model,
                            number_iterations=100,
                            number_samples=50,
                            optimizer=chainer.optimizers.Adam(0.1))
loss_list = model.diagnostics["loss curve"]

# print posterior sample
post_samples = model.get_posterior_sample(10)
print(post_samples)
Example #17
                   name="z")
decoder_output = DeterministicVariable(decoder(z), name="decoder_output")
x = BinomialVariable(total_count=1, logits=decoder_output["mean"], name="x")
model = ProbabilisticModel([x, z])

# Amortized variational distribution
Qx = EmpiricalVariable(dataset, batch_size=100, name="x", is_observed=True)
encoder_output = DeterministicVariable(encoder(Qx), name="encoder_output")
Qz = NormalVariable(encoder_output["mean"], encoder_output["sd"], name="z")
model.set_posterior_model(ProbabilisticModel([Qx, Qz]))

# Joint-contrastive inference
inference.perform_inference(
    model,
    inference_method=ReverseKL(gradient_estimator=PathwiseDerivativeEstimator),
    number_iterations=1000,
    number_samples=1,
    optimizer="Adam",
    lr=0.001)
loss_list = model.diagnostics["loss curve"]

# Plot results
plt.plot(loss_list)
plt.show()

sigmoid = lambda x: 1 / (np.exp(-x) + 1)
image_grid = []
z_range = np.linspace(-3, 3, 30)
for z1 in z_range:
    image_row = []
    for z2 in z_range:

mu_real = -2.
data = model.get_sample(number_samples=20,
                        input_values={
                            mu: mu_real,
                            nu: nu_real
                        })

# Observe data
x.observe(data)

# Variational model
Qnu = LogNormalVariable(0., 1., "nu", learnable=True)
Qmu = NormalVariable(0., 1., "mu", learnable=True)
model.set_posterior_model(ProbabilisticModel([Qmu, Qnu]))

# Inference
inference.perform_inference(model,
                            number_iterations=300,
                            number_samples=100,
                            optimizer='SGD',
                            lr=0.0001)
loss_list = model.diagnostics["loss curve"]

plt.plot(loss_list)
plt.title("Loss (negative ELBO)")
plt.show()

from brancher.visualizations import plot_posterior

plot_posterior(model, variables=["mu", "nu", "x"])
plt.show()
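
# Hedged addition (same DataFrame assumption as in the earlier examples): the
# posterior mean of "mu" should fall near the generating value mu_real = -2.
print(model.get_posterior_sample(1000)["mu"].mean())
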
        # (Assumed head of a statement truncated in extraction.)
        Qz.append(
            NormalVariable(PCoperator(F(Qz[-1], W1, W2), Qalpha[-1],
                                      Qlambda[-1]),
                           0.5 * np.ones((h_size, 1)),
                           "z{}".format(t),
                           learnable=True))
    variational_model = ProbabilisticModel(Qz)
    model.set_posterior_model(variational_model)

    # Inference #
    from brancher import inference

    inference.perform_inference(model,
                                number_iterations=n_itr,
                                number_samples=3,
                                optimizer="Adam",
                                lr=0.01)

    loss_list1.append(model.diagnostics["loss curve"])

    psamples = model._get_posterior_sample(1)

    images1.append([
        np.reshape(psamples[img[t]].detach().numpy(),
                   (3, image_size, image_size)) for t in range(T)
    ])
    # plt.show()

    #### 2 Mean field ####
particles = [ProbabilisticModel([l]) for l in particle_locations]

# Importance sampling distributions
voronoi_set = VoronoiSet(particle_locations) #TODO: Bug if you use variables instead of probabilistic models
variational_samplers = [ProbabilisticModel([TruncatedNormalVariable(mu=initial_location_1, sigma=0.1,
                                                truncation_rule=lambda a: voronoi_set(a, 0),
                                                name="weights", learnable=True)]),
                        ProbabilisticModel([TruncatedNormalVariable(mu=initial_location_2, sigma=0.1,
                                                truncation_rule=lambda a: voronoi_set(a, 1),
                                                name="weights", learnable=True)])]

# Inference
inference.perform_inference(model,
                            inference_method=WVGD(biased=True),
                            number_iterations=1000,
                            number_samples=50,
                            optimizer=chainer.optimizers.Adam(0.005),
                            posterior_model=particles,
                            sampler_model=variational_samplers,
                            pretraining_iterations=0)
loss_list = model.diagnostics["loss curve"]

# Local variational models
plt.plot(loss_list)
plt.show()

# Test accuracy
num_images = 2000
test_size = len(test)
test_indices = RandomIndices(dataset_size=test_size, batch_size=1, name="test_indices", is_observed=True)
test_images = EmpiricalVariable(np.array([np.reshape(image[0], newshape=(number_pixels, 1)) for image in test]).astype("float32"),
                                indices=test_indices, name="x_test", is_observed=True)
# Variational process #
#Qx0 = Normal(0, 1, "x_0", learnable=True)
#QX = MarkovProcess(Qx0, lambda t, x: Normal(0., 0.5, name="x_{}".format(t), has_bias=False, learnable=True))
Qx10 = Normal(float(temporal_sample[9:10].values), 0.25, "x_10")
QX = [Qx10]
for idx in range(11, 30):
    QX.append(Normal(QX[idx-11], 0.25, "x_{}".format(idx), has_bias=True, learnable=True))
QX = ProbabilisticModel(QX)

#X.set_posterior_model(process=QX)

## Perform ML inference ##
perform_inference(X,
                  posterior_model=QX,
                  inference_method=ReverseKL(),
                  number_iterations=3000,
                  number_samples=50,
                  optimizer="SGD",
                  lr=0.005)
loss_list = X.active_submodel.diagnostics["loss curve"]
plt.plot(loss_list)
plt.show()


## Sample ##
post_temporal_sample = X.get_posterior_timeseries_sample(100, query_points=100)
post_temporal_sample.plot(alpha=0.25)
temporal_sample.plot()
plt.show()
                       "muy_{}".format(t + 1),
                       is_observed=True))

    mx.append(new_mx)
    my.append(new_my)
    sx.append(new_sx)
    sy.append(new_sy)

model = ProbabilisticModel(w + r + mux + muy)

# Variational model
variational_filter = ProbabilisticModel(Qmux + Qmuy)

print(model.get_average_reward(10))

# Train control
num_itr = 3000
inference.perform_inference(model,
                            posterior_model=variational_filter,
                            number_iterations=num_itr,
                            number_samples=9,
                            optimizer="Adam",
                            lr=0.01)
reward_list = model.diagnostics[
    "reward curve"]  #TODO: Very important. Solve the trained determinant problem. (it should be possible to specify which parameter is trainable)

print(model.get_sample(20)[["r"]])

plt.plot(reward_list)
plt.show()
print(model.get_average_reward(15))
Example #23
                               learnable=True))

            Qz.append(
                NormalVariable(BF.sigmoid(Qzlambda[t]) * new_z +
                               (1 - BF.sigmoid(Qzlambda[t])) * Qz_mean[t],
                               2 * driving_noise,
                               z_names[t],
                               learnable=True))

        variational_posterior = ProbabilisticModel(Qx + Qh + Qz)
        AR_model.set_posterior_model(variational_posterior)

        # Inference #
        inference.perform_inference(AR_model,
                                    number_iterations=N_itr_PC,
                                    number_samples=N_smpl,
                                    optimizer=optimizer,
                                    lr=lr)

        loss_list1 = AR_model.diagnostics["loss curve"]

        # ELBO
        #ELBO1.append(float(AR_model.estimate_log_model_evidence(N_ELBO_smpl).detach().numpy()))
        #print("PC {}".format(ELBO1[-1]))

        # MSE
        posterior_samples = AR_model._get_posterior_sample(2000)

        x_mean1 = []
        lower_bound1 = []
        upper_bound1 = []
Example #24
Qw2 = Norm(0., 1., name="w2", learnable=True)
Qw12 = Norm(0., 1., name="w12", learnable=True)
Qnu = LogNorm(0.2, 0.5, name="nu", learnable=True)
variational_model = ProbabilisticModel([Qb, Qw1, Qw2, Qw12, Qnu])
model.set_posterior_model(variational_model)

# Generate data
ground_samples = model._get_sample(1)

# Observe data
data = np.reshape(ground_samples[y].cpu().detach().numpy(), newshape=(n, 1, 1))
y.observe(data)

# Inference
inference.perform_inference(model,
                            number_iterations=5000,
                            number_samples=100,
                            optimizer='Adam')

# Plot
#plt.plot(model.diagnostics["loss curve"])
#plt.show()

n_post_samples = 1000
post_samples = model._get_posterior_sample(n_post_samples)
s_x1 = np.reshape(x1.value.cpu().detach().numpy(), newshape=(n, ))
s_x2 = np.reshape(x2.value.cpu().detach().numpy(), newshape=(n, ))
post_mean = 0.
for k in range(n_post_samples):
    s_b = float(post_samples[b].cpu().detach().numpy()[k, :])
    s_w1 = float(post_samples[w1].cpu().detach().numpy()[k, :])
    s_w2 = float(post_samples[w2].cpu().detach().numpy()[k, :])
Example #25
# (Assumed head of a statement truncated in extraction, mirroring the
# Qweights2 definition below.)
Qweights1 = NormalVariable(np.zeros((number_hidden_units, number_pixels)),
                           0.2 * np.ones((number_hidden_units, number_pixels)),
                           "weights1",
                           learnable=True)
Qweights2 = NormalVariable(np.zeros(
    (number_output_classes, number_hidden_units)),
                           0.2 * np.ones(
                               (number_output_classes, number_hidden_units)),
                           "weights2",
                           learnable=True)
variational_model = ProbabilisticModel([Qb1, Qb2, Qweights1, Qweights2])
model.set_posterior_model(variational_model)

# Inference
inference.perform_inference(model,
                            number_iterations=2000,
                            number_samples=50,
                            optimizer='Adam',
                            lr=0.005)  #0.05

# Test accuracy
num_images = 500
test_size = len(test)
test_indices = RandomIndices(dataset_size=test_size,
                             batch_size=1,
                             name="test_indices",
                             is_observed=True)
test_images = EmpiricalVariable(np.reshape(test.test_data.numpy(),
                                           newshape=(test.test_data.shape[0],
                                                     number_pixels, 1)),
                                indices=test_indices,
                                name="x_test",
Example #26
# (Assumed head of a statement truncated in extraction; a zero-mean prior on
# the bias, matching the indentation of the surviving arguments.)
b = Normal(loc=np.zeros((num_classes, 1)),
           scale=1. * np.ones((num_classes, 1)),
           name="b")
reshaped_z = BF.reshape(z, shape=(image_size * image_size * out_channels, 1))
k = Categorical(logits=BF.linear(reshaped_z, Wl, b), name="k")

k.observe(labels)

from brancher.inference import MAP
from brancher.inference import perform_inference
from brancher.variables import ProbabilisticModel

convolutional_model = ProbabilisticModel([k])

perform_inference(convolutional_model,
                  inference_method=MAP(),
                  number_iterations=1,
                  optimizer="Adam",
                  lr=0.0025)
loss_list = convolutional_model.diagnostics["loss curve"]
#plt.plot(loss_list)
#plt.show()

import torch

## PyTorch model ##
class PytorchNetwork(torch.nn.Module):
    def __init__(self):
        super(PytorchNetwork, self).__init__()