Example #1

def gradient_step(i, state, model):
    # one optimiser step: write the current hyperparameters into the model,
    # evaluate the negative log marginal likelihood (NLML) and its gradients,
    # then update the optimiser state
    params = get_params(state)
    model.prior.hyp = params[0]
    model.likelihood.hyp = params[1]
    # neg_log_marg_lik, gradients = model.run_model()
    neg_log_marg_lik, gradients = model.neg_log_marg_lik()
    print(
        'iter %2d: var_f=%1.2f len_f=%1.2f, nlml=%2.2f' %
        (i, softplus(params[0][0]), softplus(params[0][1]), neg_log_marg_lik))
    return opt_update(i, gradients, state)
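
The names get_params, opt_update and opt_state used around gradient_step are never defined in this snippet. A minimal setup sketch, assuming they come from the standard jax.experimental.optimizers triple (the Adam step size and loop length here are illustrative, not taken from the original):

from jax.experimental import optimizers
# softplus is likewise assumed to be in scope (e.g. from jax.nn import softplus)

opt_init, opt_update, get_params = optimizers.adam(step_size=0.1)
opt_state = opt_init([sde_gp_model.prior.hyp, sde_gp_model.likelihood.hyp])
for k in range(20):
    opt_state = gradient_step(k, opt_state, sde_gp_model)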


# run a few inference sweeps (each call returns the current NLML and gradients)
for j in range(3):
    neg_log_marg_lik, gradients = sde_gp_model.run()
print(neg_log_marg_lik)

from jax import value_and_grad  # import needed for the call below

# evaluate the NLML and its gradients directly via the Kalman filter;
# argnums=2 differentiates w.r.t. the third positional argument, params
params = [sde_gp_model.prior.hyp, sde_gp_model.likelihood.hyp]
neg_log_marg_lik, dlZ = value_and_grad(sde_gp_model.kalman_filter, argnums=2)(
    sde_gp_model.y_train, sde_gp_model.dt_train, params, False, False, None,
    sde_gp_model.sites.site_params)
print(neg_log_marg_lik)
print(dlZ)
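
For readers unfamiliar with argnums: value_and_grad(f, argnums=2) returns f's value together with its gradient with respect to the third positional argument. A self-contained toy version of the same pattern (the function and numbers are made up for illustration):

from jax import value_and_grad

def toy_loss(x, y, params):
    return params[0] * x + params[1] * y

val, grads = value_and_grad(toy_loss, argnums=2)(1.0, 2.0, [3.0, 4.0])
print(val)    # 11.0
print(grads)  # [1.0, 2.0], i.e. d(toy_loss)/d(params)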

# print('optimising the hyperparameters ...')
# t0 = time.time()
# for j in range(20):
#     opt_state = gradient_step(j, opt_state, sde_gp_model)
# t1 = time.time()
# print('optimisation time: %2.2f secs' % (t1-t0))
Example #2

# assumed imports for this snippet (module paths are an assumption):
#   import time
#   import numpy as np
#   from sde_gp import SDEGP
#   import approximate_inference as approx_inf

elif method == 7:  # branch label assumed; the start of this if/elif chain is truncated
    inf_method = approx_inf.EP(power=0.01, intmethod='GH')

elif method == 8:
    inf_method = approx_inf.VI(intmethod='UT')
elif method == 9:
    inf_method = approx_inf.VI(intmethod='GH')

# build the state-space GP model with the chosen approximate inference method
model = SDEGP(prior=prior,
              likelihood=lik,
              t=x_train,
              y=y_train,
              t_test=x_test,
              y_test=y_test,
              approx_inf=inf_method)

# call run() a few times; in JAX the first call typically triggers JIT
# compilation, so the later calls reflect steady-state behaviour
neg_log_marg_lik, gradients = model.run()
print(gradients)
neg_log_marg_lik, gradients = model.run()
print(gradients)
neg_log_marg_lik, gradients = model.run()
print(gradients)

print('optimising the hyperparameters ...')
time_taken = np.zeros([10, 1])
for j in range(10):
    t0 = time.time()
    neg_log_marg_lik, gradients = model.run()  # one inference sweep; returns the NLML and hyperparameter gradients
    print(gradients)
    t1 = time.time()
    time_taken[j] = t1 - t0
    print('optimisation time: %2.2f secs' % (t1 - t0))
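
Since time_taken stores one timing per iteration, it can be summarised afterwards; this aggregation step is an addition for illustration, not part of the original snippet:

print('mean iteration time: %2.2f +/- %2.2f secs'
      % (time_taken.mean(), time_taken.std()))
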
Example #3

def gradient_step(i, state, mod):  # header reconstructed from Example #1; the snippet was truncated here
    params = get_params(state)
    mod.prior.hyp = params[0]
    mod.likelihood.hyp = params[1]
    neg_log_marg_lik, gradients = mod.run()
    # neg_log_marg_lik, gradients = mod.run_two_stage()  # <-- less elegant but reduces compile time
    print(
        'iter %2d: var_f=%1.2f len_f=%1.2f, nlml=%2.2f' %
        (i, softplus(params[0][0]), softplus(params[0][1]), neg_log_marg_lik))
    return opt_update(i, gradients, state)


# print('optimising the hyperparameters ...')
# t0 = time.time()
# for j in range(20):
#     opt_state = gradient_step(j, opt_state, sde_gp_model_1)
# t1 = time.time()
# print('optimisation time: %2.2f secs' % (t1-t0))

# run five inference sweeps on each model
for i in range(5):
    model_1.run()
    model_2.run()

# calculate posterior predictive distribution via filtering and smoothing at train & test locations:
print('calculating the posterior predictive distribution ...')
t0 = time.time()
posterior_mean_1, posterior_var_1, _, nlpd1 = model_1.predict()
posterior_mean_2, posterior_var_2, _, nlpd2 = model_2.predict()
t1 = time.time()
print('prediction time: %2.2f secs' % (t1 - t0))
# compare the two models: difference in one site parameter and in the posterior means
print(model_1.sites.site_params[0][100] - model_2.sites.site_params[0][100])
print(posterior_mean_1 - posterior_mean_2)

# 95% intervals: posterior mean +/- 1.96 posterior standard deviations
lb_1 = posterior_mean_1[:, 0] - 1.96 * posterior_var_1[:, 0]**0.5
ub_1 = posterior_mean_1[:, 0] + 1.96 * posterior_var_1[:, 0]**0.5
lb_2 = posterior_mean_2[:, 0] - 1.96 * posterior_var_2[:, 0]**0.5
ub_2 = posterior_mean_2[:, 0] + 1.96 * posterior_var_2[:, 0]**0.5  # completed symmetrically; the original snippet was cut off here
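
The bounds above give 95% intervals around each posterior mean. A minimal plotting sketch, assuming matplotlib is available and that the model exposes its training inputs as t_train (an assumption mirroring the y_train/dt_train attributes used in Example #1):

import matplotlib.pyplot as plt
import numpy as np

t = np.ravel(model_1.t_train)  # t_train is an assumed attribute
plt.plot(t, posterior_mean_1[:, 0], 'b-', label='posterior mean (model 1)')
plt.fill_between(t, lb_1, ub_1, alpha=0.3, label='95% interval (model 1)')
plt.legend()
plt.show()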