def legendre_wafp(lambdas, M=1e3, sampler=None):
    """
    Generates N (= lambdas.shape[0]) weighted approximate Fekete points by
    subsampling M randomized candidate points.
    """
    from scipy.linalg import qr
    from legendre_induced import induced_distribution_mixture_sampling

    if lambdas.ndim == 1:
        lambdas = np.reshape(lambdas, [lambdas.size, 1])

    N, d = lambdas.shape
    ab = jacobi_recurrence(lambdas.max() + 1, alpha=0., beta=0.,
                           probability=True)

    if sampler is None:
        sampler = lambda MM: induced_distribution_mixture_sampling(lambdas, MM)

    # Draw at least 2*N candidate samples
    M = int(max(M, 2 * N))
    x = sampler(M)

    # Row-normalize the Vandermonde-like matrix, then select N rows via a
    # column-pivoted QR factorization of its transpose.
    V = opolynd_eval(x, lambdas, ab)
    _, _, p = qr(V.T / np.sqrt(np.sum(V**2, axis=1)), pivoting=True,
                 mode='economic')

    return x[p[:N], :]
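# A minimal usage sketch (assumptions: numpy imported as np, and this
# bivariate total-degree index set is illustrative, not from the original
# source). legendre_wafp then returns lambdas.shape[0] points in [-1, 1]^2.
import numpy as np
from itertools import product

d, k = 2, 5
lambdas = np.array([idx for idx in product(range(k + 1), repeat=d)
                    if sum(idx) <= k])
x = legendre_wafp(lambdas, M=5000)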
def induced_distribution_quadrule(n):
    """
    Returns a Gaussian quadrature rule that would be used by
    induced_distribution.
    """
    from recurrence import jacobi_recurrence
    from quadrature import gauss_quadrature

    n = np.atleast_1d(n)
    N = n.max()

    ab = jacobi_recurrence(N + 2, alpha=0., beta=0., probability=True)
    return gauss_quadrature(ab, N + 1)
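# Sanity-check sketch (assuming numpy as np): with probability normalization
# the rule integrates against the uniform probability measure on [-1, 1], so
# the (N+1)-point rule returned here is exact through degree 2N + 1; e.g.
# the fourth moment of the uniform measure is 1/5.
import numpy as np

xg, wg = induced_distribution_quadrule(2)   # 3-point rule, exact to degree 5
assert abs(np.dot(xg**4, wg) - 0.2) < 1e-12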
def fidistinv_driver_helper(u, data):
    tol = 1e-12  # guards against roundoff at subinterval endpoints

    # Each column q of data encodes one subinterval: rows 0-1 hold the u
    # endpoints, rows 2-3 the x endpoints, rows 4-5 singularity exponents,
    # and rows 6: the M expansion coefficients.
    M = data.shape[0] - 6
    ab = recurrence.jacobi_recurrence(M + 1, alpha=-1/2., beta=-1/2.)

    edges = np.concatenate([[-np.inf], data[0, :], [data[1, -1]], [np.inf]])
    bins = np.digitize(u, edges)
    B = edges.size - 1

    x = np.zeros(u.size)
    x[bins == 1] = data[2, 0]   # u below the grid: clamp to left endpoint
    x[bins == B] = data[3, -1]  # u above the grid: clamp to right endpoint

    for qb in range(2, B):
        mask = (bins == qb)
        if not np.any(mask):
            continue
        q = qb - 2

        # Map u into the standard interval [-1, 1] for this bin
        vgrid = (u[mask] - data[0, q]) / (data[1, q] - data[0, q]) * 2. - 1.

        # Construct Vandermonde-like matrix in the Chebyshev basis
        V = opoly1d.opoly1d_eval(vgrid, list(range(M)), ab)
        temp = np.dot(V, data[6:, q])
        temp /= ((1. + vgrid)**(data[4, q]) * (1. - vgrid)**(data[5, q]))

        if data[4, q] != 0.:
            # Set LHS to be 0
            flags = np.abs(u[mask] - data[0, q]) < tol
            temp[flags] = 0.
            temp *= (data[3, q] - data[2, q])
            temp += data[2, q]
        else:
            # Set RHS to be 0
            flags = np.abs(u[mask] - data[1, q]) < tol
            temp[flags] = 0.
            temp *= (data[3, q] - data[2, q])
            temp += data[3, q]

        x[mask] = temp

    return x
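# Descriptive note (not in the original source), reading off the helper
# above: within each non-clamped bin q, with v the affine image of u into
# [-1, 1], the evaluated local model is
#
#   x(u) = c + (data[3, q] - data[2, q])
#              * sum_j data[6 + j, q] * p_j(v)
#              / ((1 + v)**data[4, q] * (1 - v)**data[5, q]),
#
# where the p_j are orthonormal Chebyshev polynomials (Jacobi with
# alpha = beta = -1/2), and the additive constant c is data[2, q] when
# data[4, q] != 0 and data[3, q] otherwise.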
def legendre_wafp_enrichment(x, lambdas, M_enrich, sampler=None):
    """
    Adds M_enrich points to the existing point set x by (approximate)
    determinant maximization.
    """
    from legendre_induced import induced_distribution_mixture_sampling

    if lambdas.ndim == 1:
        lambdas = np.reshape(lambdas, [lambdas.size, 1])

    if sampler is None:
        sampler = lambda MM: induced_distribution_mixture_sampling(lambdas, MM)

    M0 = x.shape[0]
    N, d = lambdas.shape
    ab = jacobi_recurrence(lambdas.max() + 1, alpha=0., beta=0.,
                           probability=True)

    M = max(M0, 2 * N)

    while x.shape[0] < M0 + M_enrich:
        # Gram matrix of the current row-normalized, scaled design
        V = opolynd_eval(x, lambdas, ab)
        W = (V.T / np.sqrt(np.sum(V**2, axis=1))).T * \
            np.sqrt(float(N) / float(x.shape[0]))
        G = np.dot(W.T, W)
        iG = np.linalg.inv(G)

        # Score each candidate row w by the determinant-update surrogate
        # sum_j (w_j * (G^{-1} w)_j)^2 and greedily append the maximizer.
        xs = sampler(M)
        Vs = opolynd_eval(xs, lambdas, ab)
        Ws = (Vs.T / np.sqrt(np.sum(Vs**2, axis=1))).T
        dets = np.sum((Ws * np.dot(Ws, iG))**2, axis=1)
        ind = np.argmax(dets)

        x = np.vstack([x, xs[ind, :]])

    return x
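# Usage sketch (assumptions: numpy as np, and lambdas/x as in the
# legendre_wafp example above): enrich the N-point WAFP set with 10 more
# greedily chosen points, giving an overdetermined least-squares design.
x = legendre_wafp(lambdas, M=5000)
x = legendre_wafp_enrichment(x, lambdas, 10)   # x now has N + 10 rows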
def induced_distribution(x, n, quadrule=None):
    r"""
    Computes the induced distribution function, which is

      F_n(x) = 1/2 * \int_{-1}^x p_n^2(s) ds,

    where p_n is the degree-n orthonormal Legendre polynomial. With this
    normalization, the endpoint values are F_n(-1) = 0 and F_n(1) = 1 for
    all n.

    This function is vectorized in both x and n, and supports the calling
    syntaxes

      induced_distribution(array, array)
      induced_distribution(array, scalar)

    In the first syntax, both arrays must have the same number of elements.

    If specified, the optional input quadrule is a list, [x, w], containing
    a quadrature rule that is assumed accurate for all polynomials up to
    degree 2*np.max(n.flatten()). If not given, this quadrature rule is
    generated as an appropriately sized Gaussian quadrature rule.
    """
    from recurrence import jacobi_recurrence
    from opoly1d import opoly1d_eval

    x = np.atleast_1d(x)
    n = np.atleast_1d(n)

    if np.size(x) != np.size(n):
        if n.size != 1:
            raise ValueError(
                "If n is not the same size as x, then n must be a scalar.")

    assert (n >= 0).all(), "Degrees n must be non-negative"

    N = n.max()

    if quadrule is not None:
        xg, wg = quadrule[0], quadrule[1]
    else:
        xg, wg = induced_distribution_quadrule(n)

    # Output array
    u = np.zeros(x.shape)

    ab = jacobi_recurrence(N + 2, alpha=0., beta=0., probability=True)

    # Unfortunately, I don't see an efficient way to avoid a for loop here
    if n.size > 1:
        for ind, xv in np.ndenumerate(np.asarray(x)):
            if xv <= -1:
                u[ind] = 0.
            elif xv >= 1:
                u[ind] = 1.
            else:
                # Map the quadrature nodes to [-1, xv] and integrate p_n^2
                vg = (xg + 1.) / 2. * (xv + 1) - 1.
                u[ind] = (xv + 1.) / 2 * np.dot(
                    (opoly1d_eval(vg, n[ind], ab)**2).T, wg)
    else:
        for ind, xv in np.ndenumerate(np.asarray(x)):
            if xv <= -1:
                u[ind] = 0.
            elif xv >= 1:
                u[ind] = 1.
            else:
                vg = (xg + 1.) / 2. * (xv + 1) - 1.
                u[ind] = (xv + 1.) / 2 * np.dot(
                    (opoly1d_eval(vg, n, ab)**2).T, wg)

    return u
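# Sketch (assuming numpy as np): for n = 0 the orthonormal Legendre
# polynomial is p_0 = 1, so F_0 reduces to the CDF of the uniform measure
# on [-1, 1], namely (x + 1)/2.
import numpy as np

xs = np.linspace(-1., 1., 5)
assert np.allclose(induced_distribution(xs, 0), (xs + 1.) / 2.)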
######## Step 2: run "expensive" model
u = np.zeros([Nx, M])

print("Evaluating model on mesh...")
for ind, zval in enumerate(z):
    u[:, ind] = expensive_model(zval, x)
print(u)

# Each column of u is a model run for a fixed parameter value

######## Step 3: compute PCE coefficients
print("Assembling PCE coefficients...")
ab = jacobi_recurrence(lambdas.max() + 1, alpha=0., beta=0., probability=True)
V = opolynd_eval(z, lambdas, ab)
weights = np.sqrt(float(N) / float(M)) / np.sqrt(np.sum(V**2, axis=1))

# The PCE coefficients are computed as a weighted discrete least-squares
# estimator with the weights above. (rcond=None selects NumPy's newer
# default cutoff and silences a FutureWarning.)
coeffs = np.linalg.lstsq((V.T * weights).T, (u * weights).T, rcond=None)[0].T

# Each row of coeffs contains PCE coefficients for a single x gridpoint.
# Each column of coeffs contains a particular PCE coefficient for all
# values of x.

######## Step 4: whatever postprocessing you want
print("Processing PCE coefficients...")

# Compute total sensitivities
total_sensitivities = sensitivity.pce_total_sensitivity(
    coeffs.T, lambdas, list(range(d)))
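# Sketch of additional postprocessing (assumption, not from the original
# source: the zero multi-index is the first row of lambdas, so column 0 of
# coeffs is the constant mode). For an orthonormal PCE the pointwise mean
# and variance are then:
pce_mean = coeffs[:, 0]
pce_variance = np.sum(coeffs[:, 1:]**2, axis=1)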
""" from numpy.linalg import eigh n = ab.shape[0] if n+1 < N: raise IndexError('Require N+1 recurrence coefficients for an N-point rule.') J = np.diag(ab[1:N,1], k=1) + np.diag(ab[:N,0],k=0) + np.diag(ab[1:N,1], k=-1) lamb,v = eigh(J) return lamb, v[0,:]**2 if __name__ == "__main__": from recurrence import jacobi_recurrence from opoly1d import opoly1d_eval alpha = 0 beta = 0 N = 12 ab = jacobi_recurrence(N+1, alpha=alpha, beta=beta, probability=True) x,w = gauss_quadrature(ab,N) V = opoly1d_eval(x, list(range(N)), ab) G = np.dot(np.dot(V.T, np.diag(w)), V) err = np.linalg.norm(G - np.eye(G.shape[0]))
n = ab.shape[0] if n + 1 < N: raise IndexError( 'Require N+1 recurrence coefficients for an N-point rule.') J = np.diag(ab[1:N, 1], k=1) + np.diag(ab[:N, 0], k=0) + np.diag( ab[1:N, 1], k=-1) lamb, v = eigh(J) return lamb, v[0, :]**2 if __name__ == "__main__": from recurrence import jacobi_recurrence from opoly1d import opoly1d_eval alpha = 0 beta = 0 N = 12 ab = jacobi_recurrence(N + 1, alpha=alpha, beta=beta, probability=True) x, w = gauss_quadrature(ab, N) V = opoly1d_eval(x, list(range(N)), ab) G = np.dot(np.dot(V.T, np.diag(w)), V) err = np.linalg.norm(G - np.eye(G.shape[0]))
    return qreturn


if __name__ == "__main__":
    from matplotlib import pyplot as plt
    from recurrence import jacobi_recurrence

    alpha = 0.
    beta = np.pi
    nmax = 35
    probability_measure = True

    ab = jacobi_recurrence(nmax + 1, alpha=alpha, beta=beta,
                           probability=probability_measure)

    x = np.linspace(-1, 1, 300)
    p = opoly1d_eval(x, np.arange(nmax), ab)

    plt.plot(x, p[:, :10])
    plt.title('Polynomials')

    plt.figure()
    q = qpoly1d_eval(x, nmax, ab)
    plt.plot(x, q[:, :10])
    plt.title('Q polynomials')

    # The Q polynomials should match the normalized cumulative polynomials
    err = np.linalg.norm(q - p / np.sqrt(np.cumsum(p**2, axis=1)))
    print("Q polynomial error:", err)

    plt.show()