Code example #1
import numpy as np

# Imports assumed from the recursive logit package; adjust the module path to
# match the installed package layout.
from recursive_route_choice import (ModelDataStruct, RecursiveLogitModelEstimation,
                                    optimisers)

# `distances`, `incidence_mat` and `obs_ak` are assumed to be loaded above.
data_list = [distances]
network_struct = ModelDataStruct(data_list, incidence_mat,
                                 data_array_names_debug=("distances",))

beta = -5
beta_vec = np.array([beta])  # 4.96 diverges
optimiser = optimisers.LineSearchOptimiser(optimisers.OptimHessianType.BFGS,
                                           max_iter=40)

model = RecursiveLogitModelEstimation(network_struct, observations_record=obs_ak,
                                      initial_beta=beta_vec, mu=1,
                                      optimiser=optimiser)
log_like_out, grad_out = model.get_log_likelihood()
print("LL1:", log_like_out, grad_out)
# Forward finite-difference check of the analytic gradient
h = 0.0002
model.update_beta_vec(np.array([beta + h]))
ll2, grad2 = model.get_log_likelihood()
print("LL2:", ll2, grad2)
print("finite difference:", (ll2 - log_like_out) / h)

ls_out = model.solve_for_optimal_beta()
# Objective value and gradient evaluated at the initial beta, for reference
print(model.optim_function_state.val_grad_function(beta))
# =======================================================
print(120 * "=", 'redo with scipy')
optimiser = optimisers.ScipyOptimiser(method='bfgs')

model = RecursiveLogitModelEstimation(network_struct, observations_record=obs_ak,
                                      initial_beta=beta_vec, mu=1,
                                      optimiser=optimiser)
log_like_out, grad_out = model.get_log_likelihood()
print("LL1:", log_like_out, grad_out)
Code example #2
import numpy as np
import scipy.optimize as scopt

# Imports assumed from the recursive logit package, as in code example #1.
from recursive_route_choice import RecursiveLogitModelEstimation, optimisers

# DATA: `network_struct` and `obs_ak` are assumed to be built as in code example #1.

beta = -2
beta_vec = np.array([beta])  # 4.96 diverges
optimiser = optimisers.LineSearchOptimiser(optimisers.OptimHessianType.BFGS,
                                           max_iter=40)

model = RecursiveLogitModelEstimation(network_struct,
                                      observations_record=obs_ak,
                                      initial_beta=beta_vec,
                                      mu=1,
                                      optimiser=optimiser)
log_like_out, grad_out = model.get_log_likelihood()
print("LL1:", log_like_out, grad_out)
# Forward finite-difference check of the analytic gradient, as in example #1
h = 0.0002
model.update_beta_vec(np.array([beta + h]))
ll2, grad2 = model.get_log_likelihood()
print("LL2:", ll2, grad2)
print("finite difference:", (ll2 - log_like_out) / h)

ls_out = model.solve_for_optimal_beta()

print(model.optim_function_state.val_grad_function(beta))

# out = scopt.minimize_scalar(lambda x: model.optim_function_state.function(x)[0])
# NOTE: each lambda calls val_grad_function separately, so the value and the
# gradient are each computed twice per iterate; returning the (value, gradient)
# pair from `fun` and passing jac=True would avoid the duplicate solve.
out_bfgs = scopt.minimize(
    lambda x: model.optim_function_state.val_grad_function(x)[0],
    x0=np.array([-5]),
    method='BFGS',
    jac=lambda x: model.optim_function_state.val_grad_function(x)[1],
)
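# (Sketch, not in the original: inspect the raw scipy OptimizeResult and compare
# it against the package's own line-search estimate in ls_out.)
print("scipy BFGS beta:", out_bfgs.x, "objective:", out_bfgs.fun,
      "converged:", out_bfgs.success)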