# Example #1
print("start beta", beta, "log likelihood:", log_like_out)

# beta_out = model.solve_for_optimal_beta(verbose=True)
# print("optimal solve complete")
# # print(model.optim_function_state.val_grad_function(beta))
# print("start beta", beta, "likelihood:", np.exp(-log_like_out))
# print("best beta", beta_out, "likelihood:", np.exp(-model.optim_function_state.value))
# print("best beta known", beta_known, "likelihood:", np.exp(-model.eval_log_like_at_new_beta(
#     beta_known)[0]))
# print("best beta incorrect", model.get_beta_vec())
#
import seaborn as sns
sns.set()
import matplotlib.pyplot as plt
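# sweep beta over a grid of values and plot the log likelihood against the
# beta used to generate the observations, as a visual sanity check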
beta_grid = np.arange(-0.1, -1, -0.05)  # named beta_grid to avoid shadowing the starting beta
like = np.zeros(len(beta_grid))
log_like = np.zeros(len(beta_grid))

for i in range(len(beta_grid)):
    # evaluate the model once per grid point; the likelihood follows from the log likelihood
    log_like[i] = -model.eval_log_like_at_new_beta(beta_grid[i])[0]
    like[i] = np.exp(log_like[i])
plt.scatter(beta_grid, log_like, label=rf'$\beta_{{true}} = {beta_known}$')
plt.legend()
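# axis labels: x is the swept beta value, y is the corresponding log likelihood
plt.xlabel(r'$\beta$')
plt.ylabel('log likelihood')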

print("actually generated with beta= ", beta_known, "like = ", np.exp(
    -model.eval_log_like_at_new_beta(beta_known)[0]))

plt.show()
# model.update_beta_vec(np.array([-16]))
# print(model.get_log_likelihood())
# ll2, grad2 = model.get_log_likelihood()
# print("LL2:", ll2, grad2)
# print("finite difference:", (ll2- log_like_out)/h)
#
#
# ls_out = model.solve_for_optimal_beta()
# print(model.optim_function_state.val_grad_function(beta))
# =======================================================
print(120 * "=", 'redo with scipy')
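# re-run the estimation, this time driving it with scipy's BFGS via the ScipyOptimiser wrapper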
optimiser = optimisers.ScipyOptimiser(method='bfgs')

model = RecursiveLogitModelEstimation(network_struct,
                                      observations_record=obs_ak,
                                      initial_beta=beta_vec,
                                      mu=1,
                                      optimiser=optimiser)
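# baseline log likelihood at the starting beta, for comparison with the optimised result below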
log_like_out, grad_out = model.get_log_likelihood()
print("start beta", beta, "log likelihood:", log_like_out)

beta_out = model.solve_for_optimal_beta(verbose=True)
# print(model.optim_function_state.val_grad_function(beta))
print("start beta", beta, "log likelihood:", log_like_out)
print("best beta", beta_out, "log likelihood:",
      model.optim_function_state.value)
print("best beta known", beta_known, "log likelihood:",
      model.eval_log_like_at_new_beta(beta_known)[0])
print("best beta incorrect", model.get_beta_vec())

# model.update_beta_vec(np.array([-16]))
# print(model.get_log_likelihood())