            Qx.append(NormalVariable(BF.sigmoid(Qlambda[t])*new_mu + (1 - BF.sigmoid(Qlambda[t]))*Qx_mean[t],
                                     np.sqrt(dt) * driving_noise, x_names[t], learnable=True))
        variational_posterior = ProbabilisticModel(Qx)
        AR_model.set_posterior_model(variational_posterior)
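        # Note: this structured posterior gates between the autoregressive
        # prediction (new_mu) and a freely learnable mean (Qx_mean[t]) through a
        # sigmoid-transformed Qlambda[t], so how much each time step relies on the
        # transition model is itself learned during inference.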

        # Inference #
        inference.perform_inference(AR_model,
                                    number_iterations=N_itr,
                                    number_samples=N_smpl,
                                    optimizer=optimizer,
                                    lr=lr)

        loss_list1 = AR_model.diagnostics["loss curve"]

        # ELBO
        ELBO1.append(float(AR_model.estimate_log_model_evidence(N_ELBO_smpl).detach().numpy()))
        print("PE {}".format(ELBO1[-1]))

        # Mean-field variational distribution #
        Qx = [NormalVariable(0., 1., 'x0', learnable=True)]

        for t in range(1, T):
            Qx.append(NormalVariable(0, 2., x_names[t], learnable=True))
        variational_posterior = ProbabilisticModel(Qx)
        AR_model.set_posterior_model(variational_posterior)
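        # The fully factorized (mean-field) posterior above serves as a baseline:
        # the same inference routine is rerun with it, presumably so that its ELBO
        # can be compared against the structured posterior fitted earlier.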

        # Inference #
        inference.perform_inference(AR_model,
                                    number_iterations=N_itr,
                                    number_samples=N_smpl,
                                    optimizer=optimizer,
                                    lr=lr)
        variational_posterior = ProbabilisticModel(Qx + Qh + Qz)
        AR_model.set_posterior_model(variational_posterior)

        # Inference #
        inference.perform_inference(AR_model,
                                    number_iterations=N_itr,
                                    number_samples=N_smpl,
                                    optimizer=optimizer,
                                    lr=lr)

        loss_list1 = AR_model.diagnostics["loss curve"]

        # ELBO
        ELBO1.append(
            float(
                AR_model.estimate_log_model_evidence(
                    N_ELBO_smpl).detach().numpy()))
        print("PE {}".format(ELBO1[-1]))

        # Mean field
        Qx = [NormalVariable(0., 1., 'x0', learnable=True)]
        Qh = [NormalVariable(0., 1., 'h0', learnable=True)]
        Qz = [NormalVariable(0., 1., 'z0', learnable=True)]

        for t in range(1, T):
            Qx.append(
                NormalVariable(0., driving_noise, x_names[t], learnable=True))
            Qh.append(
                NormalVariable(0., driving_noise, h_names[t], learnable=True))
            Qz.append(
                NormalVariable(0., driving_noise, z_names[t], learnable=True))
Example #3
model.set_posterior_model(ProbabilisticModel(Qpeople_means + Qgroup_means))

# Inference #
N_itr = 300
N_smpl = 50
optimizer = "SGD"
lr = 0.00001
inference.perform_inference(model,
                            number_iterations=N_itr,
                            number_samples=N_smpl,
                            optimizer=optimizer,
                            lr=lr)
loss_list2 = model.diagnostics["loss curve"]

N_ELBO = 1000
ELBO2 = model.estimate_log_model_evidence(N_ELBO)

# Structured NN distribution #
hidden_size = 5
latent_size = 5
out_size = N_groups + N_people
Qepsilon = Normal(np.zeros((latent_size, 1)),
                  np.ones((latent_size, )),
                  'epsilon',
                  learnable=True)
W1 = RootVariable(np.random.normal(0, 0.1, (hidden_size, latent_size)),
                  "W1",
                  learnable=True)
W2 = RootVariable(np.random.normal(0, 0.1, (out_size, hidden_size)),
                  "W2",
                  learnable=True)
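# W1 and W2 are the learnable weights of a small two-layer network; presumably they
# map the auxiliary latent 'epsilon' to the means of the group- and person-level
# variables downstream (the mapping itself is not shown in this fragment).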
    encoder_output3 = DeterministicVariable(encoder3(encoder_output2["mean"]), name="encoder_output3")
    Qz1 = NormalVariable(encoder_output3["mean"], encoder_output3["sd"], name="z1")
    model.set_posterior_model(ProbabilisticModel([Qx, Qz1, Qz2, Qz3]))

    # Joint-contrastive inference
    inference.perform_inference(model,
                                inference_method=ReverseKL(gradient_estimator=PathwiseDerivativeEstimator),
                                number_iterations=num_itr,
                                number_samples=1,
                                optimizer="Adam",
                                lr=0.0005)
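    # ReverseKL with PathwiseDerivativeEstimator corresponds to reparameterized
    # stochastic-gradient maximization of the ELBO (minimizing the reverse KL
    # divergence between the approximate and the true posterior).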
    loss_list1.append(np.array(model.diagnostics["loss curve"]))

    ELBO1 = []
    for n in range(N_ELBO_ITR):
        ELBO1.append(model.estimate_log_model_evidence(N_ELBO).detach().numpy())
    print("MF ELBO: {} +- {}".format(np.mean(ELBO1), np.std(ELBO1)/np.sqrt(float(N_ELBO_ITR))))

    ## Structured hierarchical model
    # Initialize encoder and decoders
    noise_inpt_size = 50
    encoder1 = BF.BrancherFunction(EncoderArchitecture1(image_size=image_size, latent_size3=latent_size3, noise_inpt_size=noise_inpt_size))
    encoder2 = BF.BrancherFunction(EncoderArchitecture2(latent_size2=latent_size2, latent_size3=latent_size3, noise_inpt_size=noise_inpt_size))
    encoder3 = BF.BrancherFunction(EncoderArchitecture3(latent_size1=latent_size1, latent_size2=latent_size2, noise_inpt_size=noise_inpt_size))

    decoder1 = BF.BrancherFunction(DecoderArchitecture1(latent_size1=latent_size1, latent_size2=latent_size2))
    decoder2 = BF.BrancherFunction(DecoderArchitecture2(latent_size2=latent_size2, latent_size3=latent_size3))
    decoder3 = BF.BrancherFunction(DecoderArchitecture3(latent_size3=latent_size3, image_size=image_size))
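    # The three encoder/decoder pairs define a ladder-style hierarchy over three
    # latent levels (latent_size1/2/3): the decoders generate top-down, while the
    # encoders (which also receive an auxiliary noise input) parameterize the
    # structured posterior bottom-up.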

    # Generative model
    z1 = NormalVariable(np.zeros((latent_size1,)), z1sd*np.ones((latent_size1,)), name="z1")
Example #5
    # Joint-contrastive inference
    inference.perform_inference(
        model,
        inference_method=ReverseKL(
            gradient_estimator=PathwiseDerivativeEstimator),
        number_iterations=num_itr,
        number_samples=1,
        optimizer="Adam",
        lr=0.0005)
    loss_list1.append(np.array(model.diagnostics["loss curve"]))

    ELBO1 = []
    for n in range(N_ELBO_ITR):
        ELBO1.append(
            model.estimate_log_model_evidence(N_ELBO).detach().numpy())
    print("MF ELBO: {} +- {}".format(
        np.mean(ELBO1),
        np.std(ELBO1) / np.sqrt(float(N_ELBO_ITR))))

    # ## Structured hierarchical model
    # # Initialize encoder and decoders
    # noise_inpt_size = 50
    # encoder1 = BF.BrancherFunction(
    #     EncoderArchitecture1(image_size=image_size, latent_size3=latent_size3, noise_inpt_size=noise_inpt_size))
    # encoder2 = BF.BrancherFunction(
    #     EncoderArchitecture2(latent_size2=latent_size2, latent_size3=latent_size3, noise_inpt_size=noise_inpt_size))
    # encoder3 = BF.BrancherFunction(
    #     EncoderArchitecture3(latent_size1=latent_size1, latent_size2=latent_size2, noise_inpt_size=noise_inpt_size))
    #
    # decoder1 = BF.BrancherFunction(DecoderArchitecture1(latent_size1=latent_size1, latent_size2=latent_size2))
Example #6
            model_output = sum([
                output * w for output, w in zip(model_output_list,
                                                inference_method.weights)
            ])

            output_label = int(np.argmax(model_output))
            scores_ne.append(1 if output_label ==
                             int(test_label.detach().numpy()) else 0)
            s += 1 if output_label == int(test_label.detach().numpy()) else 0
        #print("Accuracy Ensemble: {} %".format(100*s/float(num_images)))

        PELBO = sum([
            w * float(
                model.estimate_log_model_evidence(
                    number_samples=50000,
                    posterior_model=sampler,
                    for_gradient=False).detach().numpy()) for sampler, w in
            zip(inference_method.sampler_model, inference_method.weights)
        ])
        entropy = -sum([
            w * np.log(w) if w > 0. else 0. for w in inference_method.weights
        ])
        print("ELBO: " + str(PELBO + entropy))

        #current_results.append(100*s/float(num_images))
        current_results.append(PELBO + entropy)
    print("Exp {}: {} +- {}".format(
        N, np.mean(current_results),
        np.sqrt(np.var(current_results) / num_repetitions)))
    results.append(current_results)
    errors.append((np.mean(current_results),
                   np.sqrt(np.var(current_results) / num_repetitions)))
Example #7
            ProbabilisticModel(Qpeople_means + Qgroup_means))

        # Inference #
        N_itr = 300
        N_smpl = 50
        optimizer = "SGD"
        lr = 0.00001
        inference.perform_inference(model,
                                    number_iterations=N_itr,
                                    number_samples=N_smpl,
                                    optimizer=optimizer,
                                    lr=lr)
        loss_list2 = model.diagnostics["loss curve"]

        N_ELBO = 1000
        ELBO2 += [model.estimate_log_model_evidence(N_ELBO).detach().numpy()]

        # Structured NN distribution #
        hidden_size = 5
        latent_size = 5
        out_size = N_groups + N_people
        Qepsilon = Normal(np.zeros((latent_size, 1)),
                          np.ones((latent_size, )),
                          'epsilon',
                          learnable=True)
        W1 = RootVariable(np.random.normal(0, 0.1, (hidden_size, latent_size)),
                          "W1",
                          learnable=True)
        W2 = RootVariable(np.random.normal(0, 0.1, (out_size, hidden_size)),
                          "W2",
                          learnable=True)
# Joint-contrastive inference
num_itr = 2000
inference.perform_inference(
    model,
    inference_method=ReverseKL(gradient_estimator=PathwiseDerivativeEstimator),
    number_iterations=num_itr,
    number_samples=1,
    optimizer="Adam",
    lr=0.0005)
loss_list1 = model.diagnostics["loss curve"]

N_ELBO = 20
N_ELBO_ITR = 1
ELBO = 0
for n in range(N_ELBO_ITR):
    ELBO += (model.estimate_log_model_evidence(N_ELBO) /
             float(N_ELBO_ITR)).detach().numpy()
print(ELBO)

#
# sigmoid = lambda x: 1/(np.exp(-x) + 1)
# image_grid = []
# z_range = np.linspace(-3, 3, 30)
# for z1a in z_range:
#     image_row = []
#     for z1b in z_range:
#         sample = model.get_sample(1, input_values={z1: np.array([z1a, z1b])})
#         image = sigmoid(np.reshape(sample["decoder_output2"].values[0]["mean"], newshape=(28, 28)))
#         image_row += [image]
#     image_grid += [np.concatenate(image_row, axis=0)]
# image_grid = np.concatenate(image_grid, axis=1)
Example #9
                       x_names[t],
                       learnable=True))
variational_posterior = ProbabilisticModel(Qx)
AR_model.set_posterior_model(variational_posterior)

# Inference #
inference.perform_inference(AR_model,
                            number_iterations=N_itr,
                            number_samples=N_smpl,
                            optimizer=optimizer,
                            lr=lr)

loss_list1 = AR_model.diagnostics["loss curve"]

N_ELBO_smpl = 1000
ELBO1 = AR_model.estimate_log_model_evidence(N_ELBO_smpl)
print("PE {}".format(ELBO1))

# Statistics
posterior_samples1 = AR_model._get_posterior_sample(2000)

x_mean1 = []
lower_bound1 = []
upper_bound1 = []

sigmoid = lambda x: 1 / (1 + np.exp(-x))
lambda_list = [sigmoid(float(l._get_sample(1)[l].detach().numpy()))
               for l in Qlambda]
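# Each sigmoid-transformed Qlambda sample is the learned per-timestep gating weight,
# i.e. how strongly the fitted posterior relies on the transition-model mean rather
# than the free mean at that step.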

for xt in x:
    x_posterior_samples1 = transform(
Example #10
# Inference #
N_iter = 400
n_samples = 10
optimizer = "Adam"
lr = 0.01
inference.perform_inference(AR_model,
                            number_iterations=N_iter,
                            number_samples=n_samples,
                            optimizer=optimizer,
                            lr=lr)

loss_list = AR_model.diagnostics["loss curve"]

# ELBO
ELBO = AR_model.estimate_log_model_evidence(15000)
print("The ELBO is {}".format(ELBO))

# Statistics
posterior_samples = AR_model._get_posterior_sample(2000)
b_posterior_samples = posterior_samples[b].detach().numpy().flatten()
b_mean = np.mean(b_posterior_samples)
b_sd = np.sqrt(np.var(b_posterior_samples))
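# Summary statistics (posterior mean and standard deviation) of b, presumably the
# autoregressive coefficient of the model.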

x_mean = []
lower_bound = []
upper_bound = []
for xt in x:
    x_posterior_samples = posterior_samples[xt].detach().numpy().flatten()
    mean = np.mean(x_posterior_samples)
    sd = np.sqrt(np.var(x_posterior_samples))