def CVpolyOne(traj, traj_grad, f_target, params, W_spec):
    """Degree-1 polynomial control variates: fits the CV coefficient via the
    empirical covariance between the chain and f, then returns the adjusted
    mean and its spectral variance estimate."""
    n, d = traj.shape
    if f_target == "sum":
        samples = traj.sum(axis=1).reshape(-1, 1)
    elif f_target == "sum_comps":
        samples = traj[:, params["ind"]].reshape(-1, 1)
    elif f_target == "sum_comps_squared":
        samples = np.square(traj[:, params["ind"]]).reshape(-1, 1)
    elif f_target == "sum_squared":
        samples = np.square(traj).sum(axis=1).reshape(-1, 1)
    elif f_target == "sum_4th":
        samples = (traj**4).sum(axis=1).reshape(-1, 1)
    elif f_target == "exp_sum":
        samples = np.exp(traj.sum(axis=1)).reshape(-1, 1)
    else:
        traj = np.expand_dims(traj, axis=0)
        samples = set_function(f_target, traj, [0], params)
        traj = traj[0]
        samples = samples[0]
    #equivalent covariance-based computation of the coefficient:
    #covariance = np.cov(np.concatenate((traj, samples), axis=1), rowvar=False)
    #paramCV1 = covariance[:d, d:]
    paramCV1 = (traj.T @ (samples - np.mean(samples))) / n
    #print("CV1: ", paramCV1)
    CV1 = samples - np.dot(traj_grad, paramCV1)
    mean_CV1 = np.mean(CV1, axis=0)
    var_CV1 = Spectral_var(CV1[:, 0], W_spec)
    return mean_CV1, var_CV1
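# Hedged sanity check (illustrative, not part of the original pipeline; the
# helper name is hypothetical): the matrix product used in CVpolyOne,
# traj.T @ (samples - mean) / n, coincides with the empirical covariance
# between the chain coordinates and f, i.e. the commented-out np.cov
# construction above, up to the 1/n vs 1/(n - 1) normalisation.
def _demo_cv1_coefficient():
    rng = np.random.default_rng(1)
    traj = rng.standard_normal((500, 4))            #synthetic "chain"
    f = np.square(traj).sum(axis=1, keepdims=True)  #f(x) = ||x||^2
    n, d = traj.shape
    prod = traj.T @ (f - f.mean()) / n
    cov = np.cov(np.concatenate((traj, f), axis=1), rowvar=False)[:d, d:]
    print(np.allclose(prod, cov * (n - 1) / n))     #True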
def Eval_ZVCV(traj, traj_grad, f_target, params, W_spec):
    """Evaluates the vanilla estimator together with the degree-1 and degree-2
    ZV- and CV-adjusted estimators on a single trajectory; returns the tuple
    of means and the tuple of spectral variances, in matching order."""
    if f_target == "sum":
        samples = traj.sum(axis=1).reshape(-1, 1)
    elif f_target == "sum_comps":
        samples = traj[:, params["ind"]].reshape(-1, 1)
    elif f_target == "sum_comps_squared":
        samples = np.square(traj[:, params["ind"]]).reshape(-1, 1)
    elif f_target == "sum_squared":
        samples = np.square(traj).sum(axis=1).reshape(-1, 1)
    elif f_target == "sum_4th":
        samples = (traj**4).sum(axis=1).reshape(-1, 1)
    elif f_target == "exp_sum":
        samples = np.exp(traj.sum(axis=1)).reshape(-1, 1)
    else:
        traj = np.expand_dims(traj, axis=0)
        samples = set_function(f_target, traj, [0], params)
        traj = traj[0]
        samples = samples[0]
    mean_vanilla = np.mean(samples)
    vars_vanilla = Spectral_var(samples[:, 0], W_spec)
    mean_ZV1, var_ZV1 = ZVpolyOne(traj, traj_grad, f_target, params, W_spec)
    mean_ZV2, var_ZV2 = ZVpolyTwo(traj, traj_grad, f_target, params, W_spec)
    #mean_CV1, var_CV1 = CVpolyOneUpdated(traj, traj_grad, f_target, params, W_spec)
    mean_CV1, var_CV1 = CVpolyOne(traj, traj_grad, f_target, params, W_spec)
    mean_CV2, var_CV2 = CVpolyTwo(traj, traj_grad, f_target, params, W_spec)
    return (mean_vanilla, mean_ZV1, mean_ZV2, mean_CV1, mean_CV2), \
           (vars_vanilla, var_ZV1, var_ZV2, var_CV1, var_CV2)
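# Hedged usage sketch (not executed; everything except Eval_ZVCV itself is a
# caller-side placeholder, and W_spec must be whatever spectral window object
# the project's Spectral_var expects):
#
#   traj, traj_grad = ULA(r_seed, Potential, step, N_burn, N_test, d,
#                         burn_type, main_type)
#   means, variances = Eval_ZVCV(traj, traj_grad, "sum", {"ind": 0}, W_spec)
#   #means = (vanilla, ZV-1, ZV-2, CV-1, CV-2); variances in the same order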
def ZVpolyOne(traj, traj_grad, f_target, params, W_spec):
    """Degree-1 zero-variance (ZV) control variates: fits the coefficient by
    least squares against the gradient of the potential and returns the
    adjusted mean and its spectral variance estimate."""
    n, d = traj.shape
    if f_target == "sum":
        samples = traj.sum(axis=1).reshape(-1, 1)
    elif f_target == "sum_comps":
        samples = traj[:, params["ind"]].reshape(-1, 1)
    elif f_target == "sum_comps_squared":
        samples = np.square(traj[:, params["ind"]]).reshape(-1, 1)
    elif f_target == "sum_squared":
        samples = np.square(traj).sum(axis=1).reshape(-1, 1)
    elif f_target == "sum_4th":
        samples = (traj**4).sum(axis=1).reshape(-1, 1)
    elif f_target == "exp_sum":
        samples = np.exp(traj.sum(axis=1)).reshape(-1, 1)
    else:
        traj = np.expand_dims(traj, axis=0)
        samples = set_function(f_target, traj, [0], params)
        traj = traj[0]
        samples = samples[0]
    cov1 = np.cov(traj_grad, rowvar=False)
    if d == 1:
        A = 1 / cov1
    else:
        A = np.linalg.inv(cov1)
    covariance = np.cov(np.concatenate((-traj_grad, samples), axis=1), rowvar=False)
    paramZV1 = -np.dot(A, covariance[:d, d:])
    #print("ZV1: ", paramZV1)
    ZV1 = samples - np.dot(traj_grad, paramZV1)
    mean_ZV1 = np.mean(ZV1, axis=0)
    var_ZV1 = Spectral_var(ZV1[:, 0], W_spec)
    return mean_ZV1, var_ZV1
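# Hedged, self-contained sketch (the helper name is hypothetical): for a
# standard Gaussian target with U(x) = ||x||^2 / 2 the gradient along the chain
# equals the chain itself, so the degree-1 ZV fit for f(x) = sum_i x_i is exact
# and the adjusted samples collapse to a constant. Plain variance replaces
# Spectral_var to keep the sketch dependency-free.
def _demo_zv1_exact_gaussian():
    rng = np.random.default_rng(0)
    x = rng.standard_normal((10000, 3))  #i.i.d. draws from N(0, I_3)
    grad_u = x                           #grad U(x) = x for U(x) = ||x||^2 / 2
    f = x.sum(axis=1, keepdims=True)     #f(x) = sum_i x_i
    A = np.linalg.inv(np.cov(grad_u, rowvar=False))
    covariance = np.cov(np.concatenate((-grad_u, f), axis=1), rowvar=False)
    theta = -A @ covariance[:3, 3:]      #same least-squares fit as in ZVpolyOne
    zv1 = f - grad_u @ theta
    print(f.var(), zv1.var())            #variance drops to ~0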
def CVpolyGaussian(traj, traj_grad, f_target, params, W_spec):
    """Control variates with a Gaussian family of test functions; the Jacobian
    and correction terms are fitted by GausCV."""
    n, d = traj.shape
    if f_target == "sum":
        samples = traj.sum(axis=1).reshape(-1, 1)
    elif f_target == "sum_comps":
        samples = traj[:, params["ind"]].reshape(-1, 1)
    elif f_target == "sum_squared":
        samples = np.square(traj).sum(axis=1).reshape(-1, 1)
    elif f_target == "sum_comps_squared":
        samples = np.square(traj[:, params["ind"]]).reshape(-1, 1)
    elif f_target == "sum_4th":
        samples = (traj**4).sum(axis=1).reshape(-1, 1)
    elif f_target == "exp_sum":
        samples = np.exp(traj.sum(axis=1)).reshape(-1, 1)
    else:
        traj = np.expand_dims(traj, axis=0)
        samples = set_function(f_target, traj, [0], params)
        traj = traj[0]
        samples = samples[0]
    #jac, delta = TryCV(traj, samples)  #alternative fitting routine
    jac, delta = GausCV(traj, samples)
    CV = samples - np.sum(traj_grad * jac, axis=1).reshape((-1, 1)) + delta
    mean_CV = np.mean(CV, axis=0)
    var_CV = Spectral_var(CV[:, 0], W_spec)
    return mean_CV, var_CV
def CVpolyOneGaussian(traj, traj_grad, f_target, params, W_spec):
    r"""Version of the degree-1 CV estimator with the family of $\psi$ given by
    two-dimensional Gaussians."""
    n, d = traj.shape
    if f_target == "sum":
        samples = traj.sum(axis=1).reshape(-1, 1)
    elif f_target == "sum_comps":
        samples = traj[:, params["ind"]].reshape(-1, 1)
    elif f_target == "sum_squared":
        samples = np.square(traj).sum(axis=1).reshape(-1, 1)
    elif f_target == "sum_4th":
        samples = (traj**4).sum(axis=1).reshape(-1, 1)
    elif f_target == "exp_sum":
        samples = np.exp(traj.sum(axis=1)).reshape(-1, 1)
    else:
        traj = np.expand_dims(traj, axis=0)
        samples = set_function(f_target, traj, [0], params)
        traj = traj[0]
        samples = samples[0]
    covariance = np.cov(np.concatenate((traj, samples), axis=1), rowvar=False)
    paramCV1 = covariance[:d, d:]
    CV1 = samples - np.dot(traj_grad, paramCV1)
    mean_CV1 = np.mean(CV1, axis=0)
    var_CV1 = Spectral_var(CV1[:, 0], W_spec)
    return mean_CV1, var_CV1
def Run_eval_test(intseed, method, vars_arr, Potential, W_spec, CV_dict, step,
                  N, n, d, params_test=None, f_type="posterior_mean"):
    """Generic function that runs an MCMC trajectory and computes means and
    variances for the ordinary samples and for the ESVM-, EVM- and LS-adjusted
    trajectories. Superseded by the newer Run_eval_test below."""
    sampler_type = method["sampler"]
    burn_type = method["burn_type"]
    main_type = method["main_type"]
    if sampler_type == "ULA":
        traj, traj_grad = ULA(intseed, Potential, step, N, n, d, burn_type, main_type)
    elif sampler_type == "MALA":
        traj, traj_grad, n_accepted = MALA(intseed, Potential, step, N, n, d, burn_type, main_type)
    elif sampler_type == "RWM":
        traj, traj_grad, n_accepted = RWM(intseed, Potential, step, N, n, d)
    else:
        raise NotImplementedError("choose ULA, MALA or RWM as sampler")
    #lists to save the results of the trajectory
    ints_all = []
    vars_all = []
    #initialize function values
    f_vals = set_function(f_type, [traj], vars_arr, params_test)
    #kill dimension which is not needed
    f_vals = f_vals[0]
    #usual samples, without variance reduction
    integrals, vars_spec = Eval_samples("Vanilla", f_vals, traj, traj_grad, 1, W_spec, n, d, vars_arr)
    ints_all.append(integrals)
    vars_all.append(vars_spec)
    if CV_dict["ESVM"] is not None:
        A_ZAV_1 = CV_dict["ESVM"][0]
        A_ZAV_2 = CV_dict["ESVM"][1]
        #CV - polynomials of degree 1, ESVM estimator
        integrals, vars_spec = Eval_samples("1st_order", f_vals, traj, traj_grad, A_ZAV_1, W_spec, n, d, vars_arr)
        ints_all.append(integrals)
        vars_all.append(vars_spec)
        #CV - polynomials of degree 2, ESVM estimator
        integrals, vars_spec = Eval_samples("2nd_order", f_vals, traj, traj_grad, A_ZAV_2, W_spec, n, d, vars_arr)
        ints_all.append(integrals)
        vars_all.append(vars_spec)
    if CV_dict["EVM"] is not None:
        A_ZV_1 = CV_dict["EVM"][0]
        A_ZV_2 = CV_dict["EVM"][1]
        #CV - polynomials of degree 1, EVM estimator
        integrals, vars_spec = Eval_samples("1st_order", f_vals, traj, traj_grad, A_ZV_1, W_spec, n, d, vars_arr)
        ints_all.append(integrals)
        vars_all.append(vars_spec)
        #CV - polynomials of degree 2, EVM estimator
        integrals, vars_spec = Eval_samples("2nd_order", f_vals, traj, traj_grad, A_ZV_2, W_spec, n, d, vars_arr)
        ints_all.append(integrals)
        vars_all.append(vars_spec)
    if CV_dict["LS"] is not None:
        A_LS_1 = CV_dict["LS"][0]
        A_LS_2 = CV_dict["LS"][1]
        #CV - polynomials of degree 1, LS estimator
        integrals, vars_spec = Eval_samples("1st_order", f_vals, traj, traj_grad, A_LS_1, W_spec, n, d, vars_arr)
        ints_all.append(integrals)
        vars_all.append(vars_spec)
        #CV - polynomials of degree 2, LS estimator
        integrals, vars_spec = Eval_samples("2nd_order", f_vals, traj, traj_grad, A_LS_2, W_spec, n, d, vars_arr)
        ints_all.append(integrals)
        vars_all.append(vars_spec)
    ints_all = np.asarray(ints_all)
    vars_all = np.asarray(vars_all)
    return ints_all, vars_all
def Run_eval_test(intseed, degree, sampler, methods, vars_arr, Potential,
                  test_dict, CV_dict, params_test, f_type):
    """New version of the main function: runs an MCMC trajectory and computes
    means and variances for the vanilla and the variance-reduced samples."""
    sampler_type = sampler["sampler"]
    burn_type = sampler["burn_type"]
    main_type = sampler["main_type"]
    W_spec = test_dict["W"]
    step = test_dict["step"]
    N_burn = test_dict["burn_in"]
    N_test = test_dict["n_test"]
    d = test_dict["dim"]
    if sampler_type == "ULA":
        traj, traj_grad = ULA(intseed, Potential, step, N_burn, N_test, d, burn_type, main_type)
    elif sampler_type == "MALA":
        traj, traj_grad, n_accepted = MALA(intseed, Potential, step, N_burn, N_test, d, burn_type, main_type)
    elif sampler_type == "RWM":
        traj, traj_grad, n_accepted = RWM(intseed, Potential, step, N_burn, N_test, d)
    else:
        #independent samples, pure Monte Carlo case; no burn-in period is needed here
        traj, traj_grad = MC_sampler(intseed, Potential, N_test, d)
    #dictionaries to save the results of the trajectory
    res_dict = {"Vanilla": [], "ESVM": [], "EVM": [], "LS": [], "MAX": []}
    vars_dict = {"Vanilla": [], "ESVM": [], "EVM": [], "LS": [], "MAX": []}
    #initialize function values
    f_vals = set_function(f_type, [traj], vars_arr, params_test)
    #kill dimension which is not needed
    f_vals = f_vals[0]
    #usual samples, without variance reduction
    integrals, vars_spec = eval_samples("Vanilla", f_vals, traj, traj_grad, 1, W_spec, N_test, d, vars_arr)
    res_dict["Vanilla"].append(integrals)
    vars_dict["Vanilla"].append(vars_spec)
    #set flag based upon the polynomial degree
    if degree == 2:
        flag = "2nd_order"
    else:
        flag = "kth_order"
    #main loop over the variance-reduction methods
    for method in methods:
        Coef_matr = CV_dict[method]
        integrals, vars_spec = eval_samples(flag, f_vals, traj, traj_grad, Coef_matr, W_spec, N_test, d, vars_arr)
        res_dict[method].append(integrals)
        vars_dict[method].append(vars_spec)
    return res_dict, vars_dict
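# Hedged usage sketch for the new Run_eval_test (not executed): the dictionary
# keys mirror the lookups in the function body, while the concrete values, the
# sampler settings and the coefficient matrices A_esvm / A_evm are placeholders
# that a caller must supply.
#
#   test_dict = {"W": W_spec, "step": 0.1, "burn_in": 10000,
#                "n_test": 100000, "dim": d}
#   sampler = {"sampler": "ULA", "burn_type": burn_type, "main_type": main_type}
#   CV_dict = {"ESVM": A_esvm, "EVM": A_evm}
#   res_dict, vars_dict = Run_eval_test(777, 2, sampler, ["ESVM", "EVM"],
#                                       [0], Potential, test_dict, CV_dict,
#                                       params_test=None, f_type="posterior_mean")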
def CVpolyTwo(traj, traj_grad, f_target, params, W_spec):
    """Degree-2 polynomial control variates: builds the quadratic basis,
    applies the Langevin generator to it, fits the coefficients by least
    squares and returns the adjusted mean and its spectral variance estimate."""
    n, d = traj.shape
    if f_target == "sum":
        samples = traj.sum(axis=1).reshape(-1, 1)
    elif f_target == "sum_comps":
        samples = traj[:, params["ind"]].reshape(-1, 1)
    elif f_target == "sum_comps_squared":
        samples = np.square(traj[:, params["ind"]]).reshape(-1, 1)
    elif f_target == "sum_squared":
        samples = np.square(traj).sum(axis=1).reshape(-1, 1)
    elif f_target == "sum_4th":
        samples = (traj**4).sum(axis=1).reshape(-1, 1)
    elif f_target == "exp_sum":
        samples = np.exp(traj.sum(axis=1)).reshape(-1, 1)
    else:
        traj = np.expand_dims(traj, axis=0)
        samples = set_function(f_target, traj, [0], params)
        traj = traj[0]
        samples = samples[0]
    p = int(d * (d + 3) / 2)  #size of the degree-2 basis without the constant
    #basis functions: x_j, then x_j^2, then the cross terms x_i * x_j (i > j)
    poisson = np.zeros((n, p))
    poisson[:, np.arange(d)] = traj
    poisson[:, np.arange(d, 2 * d)] = np.multiply(traj, traj)
    k = 2 * d
    for j in np.arange(d - 1):
        for i in np.arange(j + 1, d):
            poisson[:, k] = np.multiply(traj[:, i], traj[:, j])
            k = k + 1
    #Langevin generator applied to the same basis functions
    Lpoisson = np.zeros((n, p))
    Lpoisson[:, np.arange(d)] = -traj_grad
    Lpoisson[:, np.arange(d, 2 * d)] = 2 * (1. - np.multiply(traj, traj_grad))
    k = 2 * d
    for j in np.arange(d - 1):
        for i in np.arange(j + 1, d):
            Lpoisson[:, k] = -np.multiply(traj_grad[:, i], traj[:, j]) \
                             - np.multiply(traj_grad[:, j], traj[:, i])
            k = k + 1
    cov1 = np.cov(np.concatenate((poisson, -Lpoisson), axis=1), rowvar=False)
    A = np.linalg.inv(cov1[0:p, p:2 * p])
    cov2 = np.cov(np.concatenate((poisson, samples), axis=1), rowvar=False)
    B = cov2[0:p, p:]
    paramCV2 = np.dot(A, B)
    CV2 = samples + np.dot(Lpoisson, paramCV2)
    mean_CV2 = np.mean(CV2, axis=0)
    var_CV2 = Spectral_var(CV2[:, 0], W_spec)
    return mean_CV2, var_CV2
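# Hedged illustration (the helper below is hypothetical): the degree-2 basis in
# CVpolyTwo is ordered as d linear monomials x_j, then d squares x_j^2, then
# the d(d-1)/2 cross terms x_i * x_j with i > j, giving d(d+3)/2 columns.
def _demo_second_order_basis(d=3):
    names = ["x%d" % j for j in range(d)] + ["x%d^2" % j for j in range(d)]
    for j in range(d - 1):
        for i in range(j + 1, d):
            names.append("x%d*x%d" % (i, j))
    assert len(names) == d * (d + 3) // 2
    print(names)  #['x0', 'x1', 'x2', 'x0^2', 'x1^2', 'x2^2', 'x1*x0', 'x2*x0', 'x2*x1']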
def test_traj(Potential, coefs_poly_regr, step, r_seed, lag, K_max, S_max,
              N_burn, N_test, d, f_type, inds_arr, params, x0, fixed_start):
    """Runs a ULA test trajectory and returns the running vanilla estimator
    together with its variance-reduced version, where the control variates are
    built from Hermite polynomials of the ULA innovations with regression
    coefficients coefs_poly_regr."""
    X_test, Noise = ULA_light(r_seed, Potential, step, N_burn, N_test, d,
                              return_noise=True, x0=x0, fixed_start=fixed_start)
    #print(X_test[0])
    Noise = Noise.T
    test_stat_vanilla = np.zeros(N_test, dtype=float)
    test_stat_vr = np.zeros_like(test_stat_vanilla)
    #compute the number of basis polynomials
    num_basis_funcs = (K_max + 1)**d
    #evaluate the Hermite polynomials of the noise variables Z_l
    poly_vals = np.zeros((num_basis_funcs, N_test), dtype=float)
    for k in range(len(poly_vals)):
        poly_vals[k, :] = eval_hermite(k, Noise, K_max)
    #initialize function values
    f_vals_vanilla = set_function(f_type, np.expand_dims(X_test, axis=0), inds_arr, params)
    f_vals_vanilla = f_vals_vanilla[0, :, 0]
    cvfs = np.zeros_like(f_vals_vanilla)
    st_norm_moments = init_moments(K_max + S_max + 1)
    table_coefs = init_basis_polynomials(K_max, S_max, st_norm_moments, step)
    start_time = time.time()
    for i in range(1, len(cvfs)):
        #compute the a_{p-l} coefficients for at most `lag` preceding steps
        num_lags = min(lag, i)
        a_vals = np.zeros((num_lags, num_basis_funcs), dtype=float)  #control variates
        for func_order in range(num_lags):
            #compute \hat{a} of the Q function for a fixed lag
            x = X_test[i - 1 - func_order]
            x_next = x + step * Potential.gradpotential(x)  #deterministic part of the ULA step
            for k in range(1, num_basis_funcs):
                a_cur = np.ones(coefs_poly_regr.shape[1], dtype=float)
                for s in range(len(a_cur)):
                    k_vect, s_vect = get_representations(k, s, d, K_max)
                    for dim_ind in range(d):
                        a_cur[s] = a_cur[s] * P.polynomial.polyval(
                            x_next[dim_ind],
                            table_coefs[k_vect[dim_ind], s_vect[dim_ind], :])
                a_vals[-(func_order + 1), k] = np.dot(a_cur, coefs_poly_regr[func_order, :])
        #we now have the coefficients of the polynomial; it remains to pair them
        #with the Hermite evaluations, i.e. to integrate w.r.t. the Gaussian measure
        cvfs[i] = np.sum(a_vals * (poly_vals[:, i - num_lags + 1:i + 1].T))
        #save the running estimators
        test_stat_vanilla[i] = np.mean(f_vals_vanilla[1:(i + 1)])
        test_stat_vr[i] = test_stat_vanilla[i] - np.sum(cvfs[1:(i + 1)]) / i
    end_time = time.time() - start_time  #total time spent in the main loop
    return test_stat_vanilla, test_stat_vr
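# Hedged sketch (the helper name is hypothetical, and the encoding is an
# assumption about get_representations): with (K_max + 1)**d basis functions,
# a flat index k can be decoded into a d-digit base-(K_max + 1) multi-index
# holding one Hermite degree per coordinate.
def _demo_decode_multi_index(k=17, d=3, K_max=2):
    degrees = []
    for _ in range(d):
        degrees.append(k % (K_max + 1))
        k //= K_max + 1
    print(degrees)  #[2, 2, 1] for k = 17, d = 3, K_max = 2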