def CVpolyOne(traj, traj_grad, f_target, params, W_spec):
    """First-order polynomial control variates (CV-1) for the target function f_target."""
    n, d = traj.shape
    if f_target == "sum":
        samples = traj.sum(axis=1).reshape(-1, 1)
    elif f_target == "sum_comps":
        samples = traj[:, params["ind"]].reshape(-1, 1)
    elif f_target == "sum_comps_squared":
        samples = np.square(traj[:, params["ind"]]).reshape(-1, 1)
    elif f_target == "sum_squared":
        samples = np.square(traj).sum(axis=1).reshape(-1, 1)
    elif f_target == "sum_4th":
        samples = (traj**4).sum(axis=1).reshape(-1, 1)
    elif f_target == "exp_sum":
        samples = np.exp(traj.sum(axis=1)).reshape(-1, 1)
    else:
        traj = np.expand_dims(traj, axis=0)
        samples = set_function(f_target, traj, [0], params)
        traj = traj[0]
        samples = samples[0]
    # Coefficients: empirical covariance between the states and the centred function values.
    paramCV1 = (np.transpose(traj) @ (samples - np.mean(samples))) / traj.shape[0]
    CV1 = samples - np.dot(traj_grad, paramCV1)
    mean_CV1 = np.mean(CV1, axis=0)
    var_CV1 = Spectral_var(CV1[:, 0], W_spec)
    return mean_CV1, var_CV1
def eval_samples(typ, f_vals, X, X_grad, A, W_spec, n, d, vars_arr):
    """Universal function to evaluate MCMC samples with or without control functionals.

    Args:
        typ - one of "Vanilla", "1st_order", "2nd_order", "kth_order";
        f_vals - array of target function values along the chain, one column per function;
        X, X_grad - (n, d) arrays of chain states and of gradients at those states;
        A - array of trained coefficients, one row per target function;
        W_spec - spectral weights consumed by Spectral_var;
        vars_arr - indices of the target functions to evaluate.
    Returns:
        integrals - estimated means;
        vars_spec - spectral variance estimates.
    """
    if typ not in ["Vanilla", "1st_order", "2nd_order", "kth_order"]:
        raise NotImplementedError("Not implemented error in EvalSamples")
    n_vars = len(vars_arr)
    integrals = np.zeros(n_vars, dtype=np.float64)
    vars_spec = np.zeros_like(integrals)
    var_counter = 0
    for ind in range(len(vars_arr)):
        # spectral estimate for variance
        if typ == "Vanilla":
            Y = f_vals[:, ind]
        elif typ == "1st_order":
            a = A[var_counter, :]
            Y = f_vals[:, ind] + X_grad @ a
        elif typ == "2nd_order":
            b = A[var_counter, :d]
            B = A[var_counter, d:].reshape((d, d))
            Y = f_vals[:, ind] + X_grad @ b + (X_grad.dot(B + B.T) * X).sum(axis=1) + 2 * np.trace(B)
        elif typ == "kth_order":
            a = A[var_counter, :]
            a = a.reshape((-1, d))
            Y = set_Y_k_deg(a, f_vals, X, X_grad, ind)
        integrals[var_counter] = np.mean(Y)
        vars_spec[var_counter] = Spectral_var(Y, W_spec)
        var_counter = var_counter + 1
    return integrals, vars_spec
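# Hypothetical usage sketch for eval_samples (not part of the original module): it only
# illustrates how the arguments are expected to fit together. The wrapper name, the shapes
# assumed for f_vals and A, and the choice of estimator types are assumptions; it relies on
# f_vals, X, X_grad, A and W_spec already being prepared elsewhere in the pipeline.
def evaluate_all_estimators(f_vals, X, X_grad, A, W_spec):
    n, d = X.shape
    vars_arr = list(range(f_vals.shape[1]))  # evaluate every target function
    results = {}
    for typ in ("Vanilla", "1st_order"):
        # eval_samples returns (estimated means, spectral variance estimates)
        results[typ] = eval_samples(typ, f_vals, X, X_grad, A, W_spec, n, d, vars_arr)
    return results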
def ZVpolyOne(traj, traj_grad, f_target, params, W_spec):
    """First-order zero-variance (ZV-1) estimator for the target function f_target."""
    n, d = traj.shape
    if f_target == "sum":
        samples = traj.sum(axis=1).reshape(-1, 1)
    elif f_target == "sum_comps":
        samples = traj[:, params["ind"]].reshape(-1, 1)
    elif f_target == "sum_comps_squared":
        samples = np.square(traj[:, params["ind"]]).reshape(-1, 1)
    elif f_target == "sum_squared":
        samples = np.square(traj).sum(axis=1).reshape(-1, 1)
    elif f_target == "sum_4th":
        samples = (traj**4).sum(axis=1).reshape(-1, 1)
    elif f_target == "exp_sum":
        samples = np.exp(traj.sum(axis=1)).reshape(-1, 1)
    else:
        traj = np.expand_dims(traj, axis=0)
        samples = set_function(f_target, traj, [0], params)
        traj = traj[0]
        samples = samples[0]
    cov1 = np.cov(traj_grad, rowvar=False)
    if d == 1:
        A = 1 / cov1
    else:
        A = np.linalg.inv(cov1)
    covariance = np.cov(np.concatenate((-traj_grad, samples), axis=1), rowvar=False)
    paramZV1 = -np.dot(A, covariance[:d, d:])
    ZV1 = samples - np.dot(traj_grad, paramZV1)
    mean_ZV1 = np.mean(ZV1, axis=0)
    var_ZV1 = Spectral_var(ZV1[:, 0], W_spec)
    return mean_ZV1, var_ZV1
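# Self-contained toy illustration (not part of the library) of what ZVpolyOne computes.
# It assumes i.i.d. draws from N(0, I_d), whose score is grad log pi(x) = -x, so the result
# can be checked by hand: for f(x) = sum_i x_i the fitted linear control removes essentially
# all of the variance. The function name and all variables below are illustrative only.
def _toy_zv1_demo(n=10_000, d=2, seed=0):
    rng = np.random.default_rng(seed)
    traj = rng.standard_normal((n, d))      # "chain" (i.i.d. here for simplicity)
    traj_grad = -traj                        # score of the standard Gaussian
    f = traj.sum(axis=1).reshape(-1, 1)      # f(x) = sum of components, true mean 0

    # Same linear system as in ZVpolyOne: paramZV1 = -Cov(grad)^{-1} Cov(-grad, f)
    cov_g = np.cov(traj_grad, rowvar=False)
    cov_gf = np.cov(np.concatenate((-traj_grad, f), axis=1), rowvar=False)[:d, d:]
    param = -np.linalg.inv(cov_g) @ cov_gf

    zv1 = f - traj_grad @ param
    return np.var(f), np.var(zv1)            # roughly 2.0 vs. nearly 0.0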
def Eval_ZVCV(traj, traj_grad, f_target, params, W_spec):
    """Evaluate the vanilla, ZV-1, ZV-2, CV-1 and CV-2 estimators on a single trajectory."""
    if f_target == "sum":
        samples = traj.sum(axis=1).reshape(-1, 1)
    elif f_target == "sum_comps":
        samples = traj[:, params["ind"]].reshape(-1, 1)
    elif f_target == "sum_comps_squared":
        samples = np.square(traj[:, params["ind"]]).reshape(-1, 1)
    elif f_target == "sum_squared":
        samples = np.square(traj).sum(axis=1).reshape(-1, 1)
    elif f_target == "sum_4th":
        samples = (traj**4).sum(axis=1).reshape(-1, 1)
    elif f_target == "exp_sum":
        samples = np.exp(traj.sum(axis=1)).reshape(-1, 1)
    else:
        traj = np.expand_dims(traj, axis=0)
        samples = set_function(f_target, traj, [0], params)
        traj = traj[0]
        samples = samples[0]
    mean_vanilla = np.mean(samples)
    vars_vanilla = Spectral_var(samples[:, 0], W_spec)
    mean_ZV1, var_ZV1 = ZVpolyOne(traj, traj_grad, f_target, params, W_spec)
    mean_ZV2, var_ZV2 = ZVpolyTwo(traj, traj_grad, f_target, params, W_spec)
    mean_CV1, var_CV1 = CVpolyOne(traj, traj_grad, f_target, params, W_spec)
    mean_CV2, var_CV2 = CVpolyTwo(traj, traj_grad, f_target, params, W_spec)
    return (mean_vanilla, mean_ZV1, mean_ZV2, mean_CV1, mean_CV2), \
           (vars_vanilla, var_ZV1, var_ZV2, var_CV1, var_CV2)
def CVpolyGaussian(traj, traj_grad, f_target, params, W_spec):
    """Control variates with Gaussian test functions; coefficients are computed by GausCV."""
    n, d = traj.shape
    if f_target == "sum":
        samples = traj.sum(axis=1).reshape(-1, 1)
    elif f_target == "sum_comps":
        samples = traj[:, params["ind"]].reshape(-1, 1)
    elif f_target == "sum_squared":
        samples = np.square(traj).sum(axis=1).reshape(-1, 1)
    elif f_target == "sum_comps_squared":
        samples = np.square(traj[:, params["ind"]]).reshape(-1, 1)
    elif f_target == "sum_4th":
        samples = (traj**4).sum(axis=1).reshape(-1, 1)
    elif f_target == "exp_sum":
        samples = np.exp(traj.sum(axis=1)).reshape(-1, 1)
    else:
        traj = np.expand_dims(traj, axis=0)
        samples = set_function(f_target, traj, [0], params)
        traj = traj[0]
        samples = samples[0]
    jac, delta = GausCV(traj, samples)
    CV = samples - np.sum(traj_grad * jac, axis=1).reshape((-1, 1)) + delta
    mean_CV = np.mean(CV, axis=0)
    var_CV = Spectral_var(CV[:, 0], W_spec)
    return mean_CV, var_CV
def CVpolyOneGaussian(traj, traj_grad, f_target, params, W_spec):
    r"""Version of CV's with the family of $\psi$ given by 2-dimensional Gaussians."""
    n, d = traj.shape
    if f_target == "sum":
        samples = traj.sum(axis=1).reshape(-1, 1)
    elif f_target == "sum_comps":
        samples = traj[:, params["ind"]].reshape(-1, 1)
    elif f_target == "sum_squared":
        samples = np.square(traj).sum(axis=1).reshape(-1, 1)
    elif f_target == "sum_4th":
        samples = (traj**4).sum(axis=1).reshape(-1, 1)
    elif f_target == "exp_sum":
        samples = np.exp(traj.sum(axis=1)).reshape(-1, 1)
    else:
        traj = np.expand_dims(traj, axis=0)
        samples = set_function(f_target, traj, [0], params)
        traj = traj[0]
        samples = samples[0]
    covariance = np.cov(np.concatenate((traj, samples), axis=1), rowvar=False)
    paramCV1 = covariance[:d, d:]
    CV1 = samples - np.dot(traj_grad, paramCV1)
    mean_CV1 = np.mean(CV1, axis=0)
    var_CV1 = Spectral_var(CV1[:, 0], W_spec)
    return mean_CV1, var_CV1
def Train_Gauss(theta, X, X_grad, W, centers, n):
    """Train Gaussian-based control variates for the 1-dimensional Gaussian mixture example."""
    # compute control functionals
    X_matr = np.tile(X, (1, len(theta)))
    x_cur = cur_func(X[:, 0]) \
        + X_grad[:, 0] * np.sum(np.exp(-0.5 * (X_matr - centers)**2) * (X_matr - centers) * theta, axis=1) \
        + np.sum(np.exp(-0.5 * (X_matr - centers)**2) * ((X_matr - centers)**2 - 1) * theta, axis=1)
    return Spectral_var(x_cur, W)
def qform_k_sep_ESVM(a, f_vals, X, X_grad, W, ind, n, k):
    """ESVM objective for polynomial control variates of degree k.

    Args:
        a - np.array of shape (k, d), where k is the degree of the polynomial.
    Returns:
        spectral variance estimate based on the given matrix W.
    """
    d = X.shape[1]
    a = a.reshape((k, d))
    Y = set_Y_k_deg(a, f_vals, X, X_grad, ind)
    return Spectral_var(Y, W)
def qform_2_ESVM(a, f_vals, X, X_grad, W, ind, n, alpha=0.0):
    """ESVM objective for second-order polynomial control variates.

    Args:
        a - np.array of shape (d + d*d,):
            a[:d] - coefficients of the linear terms;
            a[d:] - coefficients of the quadratic terms, reshaped to a (d, d) matrix.
    """
    d = X_grad.shape[1]
    b = a[:d]
    B = a[d:].reshape((d, d))
    x_cur = f_vals[:, ind] + X_grad @ b + qform_q(B + B.T, X_grad, X) + 2 * np.trace(B)
    return Spectral_var(x_cur, W) + alpha * np.sum(B**2)
def CVpolyTwo(traj, traj_grad, f_target, params, W_spec):
    """Second-order polynomial control variates (CV-2) for the target function f_target."""
    n, d = traj.shape
    if f_target == "sum":
        samples = traj.sum(axis=1).reshape(-1, 1)
    elif f_target == "sum_comps":
        samples = traj[:, params["ind"]].reshape(-1, 1)
    elif f_target == "sum_comps_squared":
        samples = np.square(traj[:, params["ind"]]).reshape(-1, 1)
    elif f_target == "sum_squared":
        samples = np.square(traj).sum(axis=1).reshape(-1, 1)
    elif f_target == "sum_4th":
        samples = (traj**4).sum(axis=1).reshape(-1, 1)
    elif f_target == "exp_sum":
        samples = np.exp(traj.sum(axis=1)).reshape(-1, 1)
    else:
        traj = np.expand_dims(traj, axis=0)
        samples = set_function(f_target, traj, [0], params)
        traj = traj[0]
        samples = samples[0]
    # Second-order polynomial basis: d linear terms, d squared terms and d(d-1)/2 cross terms,
    # i.e. d(d+3)/2 basis functions in total.
    poisson = np.zeros((n, int(d * (d + 3) / 2)))
    poisson[:, np.arange(d)] = traj
    poisson[:, np.arange(d, 2 * d)] = np.multiply(traj, traj)
    k = 2 * d
    for j in np.arange(d - 1):
        for i in np.arange(j + 1, d):
            poisson[:, k] = np.multiply(traj[:, i], traj[:, j])
            k = k + 1
    # Corresponding derivative-based terms for each basis function, built from traj_grad.
    Lpoisson = np.zeros((n, int(d * (d + 3) / 2)))
    Lpoisson[:, np.arange(d)] = -traj_grad
    Lpoisson[:, np.arange(d, 2 * d)] = 2 * (1. - np.multiply(traj, traj_grad))
    k = 2 * d
    for j in np.arange(d - 1):
        for i in np.arange(j + 1, d):
            Lpoisson[:, k] = -np.multiply(traj_grad[:, i], traj[:, j]) \
                             - np.multiply(traj_grad[:, j], traj[:, i])
            k = k + 1
    cov1 = np.cov(np.concatenate((poisson, -Lpoisson), axis=1), rowvar=False)
    A = np.linalg.inv(cov1[0:int(d * (d + 3) / 2), int(d * (d + 3) / 2):d * (d + 3)])
    cov2 = np.cov(np.concatenate((poisson, samples), axis=1), rowvar=False)
    B = cov2[0:int(d * (d + 3) / 2), int(d * (d + 3) / 2):]
    paramCV2 = np.dot(A, B)
    CV2 = samples + np.dot(Lpoisson, paramCV2)
    mean_CV2 = np.mean(CV2, axis=0)
    var_CV2 = Spectral_var(CV2[:, 0], W_spec)
    return mean_CV2, var_CV2
def eval_samples_gmm(typ, X, X_grad, theta, W_spec, n, centers):
    """Universal function to evaluate MCMC samples with or without control functionals
    for the Gaussian mixture example.

    Args:
        typ - one of "Vanilla", "ESVM";
        theta - coefficients of the Gaussian control functionals centred at `centers`.
    Returns:
        integral - estimated mean;
        variance - spectral variance estimate.
    """
    if typ not in ["Vanilla", "ESVM"]:
        raise NotImplementedError("Not implemented error in EvalSamples")
    # spectral estimate for variance
    if typ == "Vanilla":
        Y = cur_func(X[:, 0])
    elif typ == "ESVM":
        X_matr = np.tile(X, (1, len(theta)))
        Y = cur_func(X[:, 0]) \
            + X_grad[:, 0] * np.sum(np.exp(-0.5 * (X_matr - centers)**2) * (X_matr - centers) * theta, axis=1) \
            + np.sum(np.exp(-0.5 * (X_matr - centers)**2) * ((X_matr - centers)**2 - 1) * theta, axis=1)
    integral = np.mean(Y)
    variance = Spectral_var(Y, W_spec)
    return integral, variance
def qform_1_ESVM(a, f_vals, X_grad, W, ind, n, alpha):
    """ESVM quadratic form for first-order control variates: asymptotic (spectral) variance
    estimate based on the kernel W, plus an l2 penalty on the coefficients a."""
    x_cur = f_vals[:, ind] + X_grad @ a
    return Spectral_var(x_cur, W) + alpha * np.linalg.norm(a)**2
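# Hypothetical training sketch (not part of the original module) showing one way the
# first-order ESVM objective above could be minimised; the use of scipy's BFGS, the zero
# initial point and the wrapper name are assumptions rather than this repository's actual
# training routine, and qform_1_ESVM / Spectral_var are assumed to be in scope.
def _train_1st_order_esvm(f_vals, X_grad, W, ind, alpha=0.0):
    from scipy.optimize import minimize
    d = X_grad.shape[1]
    a0 = np.zeros(d)  # start from the vanilla estimator (a = 0)
    res = minimize(qform_1_ESVM, a0,
                   args=(f_vals, X_grad, W, ind, X_grad.shape[0], alpha),
                   method="BFGS")
    return res.x      # fitted coefficients, e.g. usable as a row of A in eval_samples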