def laplace_z(calc):
    """Estimate log Z for `calc` via the Laplace approximation.

    Finds the mode of the potential with L-BFGS-B, then uses the
    Cholesky factor of the Hessian at the mode to get half the
    log-determinant, returning

        -U(mode) + lap_c1 - (1/2) log det H(mode)

    where `pot_value` returns (value, gradient), `xd` is the module-level
    starting point, and `lap_c1` is the dimension-dependent constant
    (m/2) log(2*pi) defined elsewhere in this file.
    """
    # Locate the mode of the potential (jac=True: pot_value returns grad too).
    mode = minimize(pot_value, xd, args=calc, method='L-BFGS-B',
                    jac=True, options={'disp': False}).x
    # Hessian of the potential at the mode (previously computed via pot_hess).
    hess = calc.hessian(mode)
    # log det H = 2 * sum(log(diag(chol))); we need half of that.
    chol = np.linalg.cholesky(hess)
    half_log_det = np.sum(np.log(np.diag(chol)))
    return -pot_value(mode, calc)[0] + lap_c1 - half_log_det
# NOTE(review): this fragment is the interior of a try-block nested inside an
# (i, j) grid-search loop whose opening lines (the loops and the `try:`) are
# outside this view. Indentation below is reconstructed from a whitespace-
# mangled source and must be re-anchored against the enclosing loops — the
# statements after `except:` belong to progressively shallower scopes
# (loop level, then module level). TODO: confirm against the full file.
    # Multi-start refinement: keep the best (lowest-potential) optimum seen so
    # far across starting points `g`. `f_val`/`m` track the running best.
    f = minimize(pot_value, g, args=calc, method='L-BFGS-B', jac=True,
                 options={'disp': False})
    if (f.fun < f_val):
        f_val = f.fun
        m = f.x
    # Estimate likelihood with Laplace approximation
    # A = pot_hess(m)
    A = calc.hessian(m)
    L = np.linalg.cholesky(A)
    # log det A = 2 * sum(log(diag(L))), so this is (1/2) log det A.
    half_log_det_A = np.sum(np.log(np.diag(L)))
    # Laplace log-evidence at the mode: -U(m) + const - (1/2) log det A.
    lap = -pot_value(m, calc)[0] + lap_c1 - half_log_det_A
    # Presumably the log-likelihood relative to the potential at `xd`;
    # verify the intended normalization against the paper/derivation.
    like_values[i, j] = -lap - pot_value(xd, calc)[0]
# WARNING(review): bare except silently swallows *all* errors (including
# KeyboardInterrupt); consider narrowing to np.linalg.LinAlgError plus the
# optimizer's failure modes.
except:
    print("exception encountered in the try block")
    None  # no-op; left in place from the original source
# If minimize fails set value to previous, otherwise update previous
# (a failed iteration leaves like_values[i, j] at its zero initialization).
if like_values[i, j] == 0:
    like_values[i, j] = last_like
else:
    last_like = like_values[i, j]
# Output results (module level, after both grid loops have finished):
# report the (alpha, beta) grid point with the highest estimated likelihood.
# NOTE(review): the 2./1.4e6 rescaling of YY presumably undoes a parameter
# scaling applied when the grid was built — confirm against the grid setup.
idx = np.unravel_index(like_values.argmax(), like_values.shape)
print("Fitted alpha and beta values:")
print(XX[idx], YY[idx] * 2. / 1.4e6, like_values[idx])
# MCMC tuning parameters L = 10 #Number of leapfrog steps eps = 0.1 #Leapfrog step size # Set-up MCMC mcmc_n = 1000 temp_n = 5 inverse_temps = np.array([1., 1. / 2., 1. / 4., 1. / 8., 1. / 16.]) samples = np.empty((mcmc_n, mm)) # X-values #Initialize MCMC xx = -np.log(mm) * np.ones((temp_n, mm)) V = np.empty(temp_n) gradV = np.empty((temp_n, mm)) for j in range(temp_n): V[j], gradV[j] = pot_value(xx[j], calc) # Counts to keep track of accept rates ac = np.zeros(temp_n) pc = np.zeros(temp_n) acs = 0 pcs = 1 tic = timeit.default_timer() # MCMC algorithm for i in range(mcmc_n): for j in range(temp_n): #Initialize leapfrog integrator for HMC proposal p = np.random.normal(0., 1., mm) # fixed Multivariate Standard Gaussian