import itertools
import multiprocessing as mp

import numpy as np
from scipy.sparse import diags

# The companion modules `deft_core` and `maxent`, as well as the helpers
# `diagrams_1st_order`, `y_sampling_of_Lap`, and the constant `x_MIN`, are
# assumed to be provided elsewhere in this module/package.


def Feynman_diagrams(phi_t, R, Delta, t, N):

    # Prepare quantities for either the maxent case (t = -infinity) or the
    # finite-t case
    if not np.isfinite(t):

        G = len(phi_t)
        alpha = Delta._kernel_dim

        # Evaluate the propagator matrix on the kernel of Delta
        Delta_sparse = Delta.get_sparse_matrix()
        Delta_mat = Delta_sparse.todense() * (N / G)
        Delta_diagonalized = np.linalg.eigh(Delta_mat)
        kernel_basis = np.zeros([G, alpha])
        for i in range(alpha):
            kernel_basis[:, i] = np.asarray(Delta_diagonalized[1][:, i]).ravel()
        M_mat = diags(np.exp(-phi_t), 0).todense() * (N / G)
        M_mat_on_kernel = np.asmatrix(kernel_basis).T * M_mat * np.asmatrix(kernel_basis)
        M_inv_on_kernel = np.linalg.inv(M_mat_on_kernel)
        P_mat = np.asmatrix(kernel_basis) * M_inv_on_kernel * np.asmatrix(kernel_basis).T

        # Evaluate the vertex vector
        V = np.exp(-phi_t) * (N / G)

    else:

        G = len(phi_t)

        # Evaluate the propagator matrix (inverse of the scaled Hessian)
        H = deft_core.hessian(phi_t, R, Delta, t, N)
        A_mat = H.todense() * (N / G)
        P_mat = np.linalg.inv(A_mat)

        # Evaluate the vertex vector
        V = np.exp(-phi_t) * (N / G)

    # Calculate the Feynman-diagram correction
    correction = diagrams_1st_order(G, P_mat, V)

    # Return the correction along with placeholder sample-mean statistics
    w_sample_mean = 1.0
    w_sample_mean_std = 0.0

    return correction, w_sample_mean, w_sample_mean_std
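
# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the DEFT pipeline): a self-contained toy
# showing the kind of propagator/vertex construction performed above, plus one
# simple contraction of the vertex vector with the propagator diagonal.  The
# function name, the toy Hessian, and the single contraction are hypothetical
# stand-ins; the actual diagram set and coefficients are evaluated by
# diagrams_1st_order.
# ---------------------------------------------------------------------------
def _toy_propagator_vertex_contraction(G=20, N=100, seed=0):
    rng = np.random.default_rng(seed)
    phi = rng.normal(size=G)                       # toy field
    H = np.diag(np.exp(-phi)) + 0.1 * np.eye(G)    # toy positive-definite Hessian
    A_mat = H * (N / G)
    P_mat = np.linalg.inv(A_mat)                   # propagator = inverse scaled Hessian
    V = np.exp(-phi) * (N / G)                     # vertex vector
    # One simple contraction: sum_i V_i * P_ii
    return float(np.sum(V * np.diag(P_mat)))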
def Metropolis_Monte_Carlo(phi_t, R, Delta, t, N, num_samples, go_parallel,
                           pt_sampling):

    G = len(phi_t)
    num_thermalization_steps = 10 * G
    num_steps_per_sample = G

    phi_samples = np.zeros([G, num_samples])
    sample_index = 0

    # Prepare quantities for either the maxent case (t = -infinity) or the
    # finite-t case, and then do the Monte Carlo sampling
    if not np.isfinite(t):

        # Find the kernel basis
        alpha = Delta._kernel_dim
        Delta_sparse = Delta.get_sparse_matrix()
        Delta_mat = Delta_sparse.todense() * (N / G)
        Delta_diagonalized = np.linalg.eigh(Delta_mat)
        kernel_basis = np.zeros([G, alpha])
        for i in range(alpha):
            kernel_basis[:, i] = np.asarray(Delta_diagonalized[1][:, i]).ravel()

        # Find the coefficients of phi_t in the kernel basis
        coeffs = np.zeros(alpha)
        for i in range(alpha):
            coeffs[i] = np.dot(kernel_basis[:, i], phi_t)

        # Find the eigen-modes of the Hessian matrix
        H = maxent.hessian_per_datum_from_coeffs(coeffs, R, kernel_basis)
        A_mat = np.asmatrix(H) * N
        U_mat = np.linalg.eigh(A_mat)
        eig_vals = np.abs(np.array(U_mat[0]))
        eig_vecs = np.abs(np.array(U_mat[1]))

        # Initialize the chain at the maxent solution
        coeffs_current = coeffs
        S_current = maxent.action_per_datum_from_coeffs(
            coeffs_current, R, kernel_basis) * N

        # Do the Monte Carlo sampling
        for k in range(num_thermalization_steps +
                       num_samples * num_steps_per_sample + 1):
            # Propose a Gaussian step along a randomly chosen eigen-mode,
            # scaled by the inverse square root of its eigenvalue
            i = np.random.randint(0, alpha)
            eig_val = eig_vals[i]
            eig_vec = eig_vecs[i, :]
            step_size = np.random.normal(0, 1.0 / np.sqrt(eig_val))
            coeffs_new = coeffs_current + eig_vec * step_size
            S_new = maxent.action_per_datum_from_coeffs(
                coeffs_new, R, kernel_basis) * N
            # Metropolis accept/reject
            if np.log(np.random.uniform(0, 1)) < (S_current - S_new):
                coeffs_current = coeffs_new
                S_current = S_new
            # Record a sample every num_steps_per_sample steps after thermalization
            if (k > num_thermalization_steps) and \
                    (k % num_steps_per_sample == 0):
                phi_samples[:, sample_index] = maxent.coeffs_to_field(
                    coeffs_current, kernel_basis)
                sample_index += 1

    else:

        # Find the eigen-modes of the Hessian matrix
        H = deft_core.hessian(phi_t, R, Delta, t, N)
        A_mat = H.todense() * (N / G)
        U_mat = np.linalg.eigh(A_mat)
        eig_vals = np.abs(np.array(U_mat[0]))
        eig_vecs = np.abs(np.array(U_mat[1]))

        # Initialize the chain at phi_t
        phi_current = phi_t
        S_current = deft_core.action(phi_current, R, Delta, t, N) * (N / G)

        # Do the Monte Carlo sampling
        for k in range(num_thermalization_steps +
                       num_samples * num_steps_per_sample + 1):
            # Propose a Gaussian step along a randomly chosen eigen-mode,
            # scaled by the inverse square root of its eigenvalue
            i = np.random.randint(0, G)
            eig_val = eig_vals[i]
            eig_vec = eig_vecs[:, i]
            step_size = np.random.normal(0, 1.0 / np.sqrt(eig_val))
            phi_new = phi_current + eig_vec * step_size
            S_new = deft_core.action(phi_new, R, Delta, t, N) * (N / G)
            # Metropolis accept/reject
            if np.log(np.random.uniform(0, 1)) < (S_current - S_new):
                phi_current = phi_new
                S_current = S_new
            # Record a sample every num_steps_per_sample steps after thermalization
            if (k > num_thermalization_steps) and \
                    (k % num_steps_per_sample == 0):
                phi_samples[:, sample_index] = phi_current
                sample_index += 1

    # Return the phi samples with unit weights
    return phi_samples, np.ones(num_samples)
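
# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the DEFT pipeline): the same eigenmode
# Metropolis scheme as above, applied to a self-contained quadratic action
# S(x) = 0.5 * x^T A x with A symmetric positive-definite.  The function name
# and the toy action are hypothetical; the real sampler evaluates the
# deft_core / maxent actions instead.
# ---------------------------------------------------------------------------
def _toy_eigenmode_metropolis(A, num_samples=100, seed=0):
    rng = np.random.default_rng(seed)
    D = A.shape[0]
    eig_vals, eig_vecs = np.linalg.eigh(A)
    num_thermalization_steps = 10 * D
    num_steps_per_sample = D
    samples = np.zeros([D, num_samples])
    sample_index = 0
    x_current = np.zeros(D)
    S_current = 0.5 * x_current @ A @ x_current
    for k in range(num_thermalization_steps +
                   num_samples * num_steps_per_sample + 1):
        # Gaussian step along one randomly chosen eigenvector, scaled by the
        # inverse square root of its eigenvalue (curvature)
        i = rng.integers(0, D)
        x_new = x_current + eig_vecs[:, i] * rng.normal(0, 1.0 / np.sqrt(eig_vals[i]))
        S_new = 0.5 * x_new @ A @ x_new
        # Metropolis accept/reject
        if np.log(rng.uniform(0, 1)) < (S_current - S_new):
            x_current, S_current = x_new, S_new
        # Record a sample every num_steps_per_sample steps after thermalization
        if (k > num_thermalization_steps) and (k % num_steps_per_sample == 0):
            samples[:, sample_index] = x_current
            sample_index += 1
    return samples

# Example usage: samples = _toy_eigenmode_metropolis(np.diag([1.0, 4.0, 9.0]))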
def Laplace_approach(phi_t, R, Delta, t, N, num_samples, go_parallel,
                     pt_sampling=False):

    # Prepare quantities for either the maxent case (t = -infinity) or the
    # finite-t case
    if not np.isfinite(t):

        G = len(phi_t)
        alpha = Delta._kernel_dim
        Delta_sparse = Delta.get_sparse_matrix()
        Delta_mat = Delta_sparse.todense() * (N / G)
        Delta_diagonalized = np.linalg.eigh(Delta_mat)
        kernel_basis = np.zeros([G, alpha])
        for i in range(alpha):
            kernel_basis[:, i] = np.asarray(Delta_diagonalized[1][:, i]).ravel()
        M_mat = diags(np.exp(-phi_t), 0).todense() * (N / G)
        M_mat_on_kernel = np.asmatrix(kernel_basis).T * M_mat * np.asmatrix(kernel_basis)
        U_mat_on_kernel = np.linalg.eigh(M_mat_on_kernel)

        # Quantities used below
        y_dim = alpha
        eig_vals = np.abs(np.array(U_mat_on_kernel[0]))
        transf_matrix = np.asmatrix(kernel_basis) * U_mat_on_kernel[1]
        lambdas = np.exp(-phi_t) * (N / G)

    else:

        G = len(phi_t)
        H = deft_core.hessian(phi_t, R, Delta, t, N)
        A_mat = H.todense() * (N / G)
        U_mat = np.linalg.eigh(A_mat)

        # Quantities used below
        y_dim = G
        eig_vals = np.abs(np.array(U_mat[0]))
        transf_matrix = U_mat[1]
        lambdas = np.exp(-phi_t) * (N / G)

    # If requested, set up a pool of workers for parallel computation
    if go_parallel:
        num_cores = mp.cpu_count()
        pool = mp.Pool(processes=num_cores)

    # For each eigen-component, draw y samples according to its distribution
    if go_parallel:
        inputs = zip(itertools.repeat(num_samples), eig_vals)
        outputs = pool.map(y_sampling_of_Lap, inputs)
        y_samples = np.array(outputs)
    else:
        y_samples = np.zeros([y_dim, num_samples])
        for i in range(y_dim):
            inputs = [num_samples, eig_vals[i]]
            outputs = y_sampling_of_Lap(inputs)
            y_samples[i, :] = outputs

    # Transform y samples to x samples, clipping at x_MIN
    x_samples = np.array(transf_matrix * np.asmatrix(y_samples))
    for i in range(G):
        x_vec = x_samples[i, :]
        x_vec[x_vec < x_MIN] = x_MIN

    # Shift x samples to get phi samples
    phi_samples = np.zeros([G, num_samples])
    for k in range(num_samples):
        phi_samples[:, k] = x_samples[:, k] + phi_t

    # Calculate the weight of each sample
    x_combo = np.exp(-x_samples) - np.ones([G, num_samples]) \
        + x_samples - 0.5 * np.square(x_samples)
    dS_vals = np.array(np.asmatrix(lambdas) * np.asmatrix(x_combo)).ravel()
    phi_weights = np.exp(-dS_vals)

    # If called from posterior sampling, return the phi samples along with
    # their weights at this point
    if pt_sampling:
        return phi_samples, phi_weights

    # Calculate the sample mean of the weights and its standard error
    w_sample_mean = np.mean(phi_weights)
    w_sample_mean_std = np.std(phi_weights) / np.sqrt(num_samples)

    # Return the correction and the sample-mean statistics
    correction = np.log(w_sample_mean)
    return correction, w_sample_mean, w_sample_mean_std
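
# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the DEFT pipeline): a one-dimensional toy
# version of the importance-sampling correction computed above.  Draws are
# taken from the Gaussian defined by the quadratic (Laplace) part of a toy
# action and reweighted by exp(-dS), where dS is the non-quadratic remainder;
# the log of the mean weight then estimates log(Z_true / Z_Laplace).  The
# function name and the toy action are hypothetical; the real code samples
# each eigen-component via y_sampling_of_Lap and uses the DEFT action.
# ---------------------------------------------------------------------------
def _toy_laplace_correction(lam=5.0, num_samples=100000, seed=0):
    rng = np.random.default_rng(seed)
    # Toy action around its minimum x = 0: S(x) = lam * (exp(-x) - 1 + x);
    # its quadratic part is 0.5 * lam * x**2
    x = rng.normal(0, 1.0 / np.sqrt(lam), size=num_samples)
    dS = lam * (np.exp(-x) - 1.0 + x - 0.5 * x ** 2)
    weights = np.exp(-dS)
    w_mean = np.mean(weights)
    w_mean_std = np.std(weights) / np.sqrt(num_samples)
    return np.log(w_mean), w_mean, w_mean_std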