def fit_quadrature(orth, nodes, weights, solves, retall=False,
        norms=None, **kws):
    """
    Use spectral projection to create a polynomial approximation over
    distribution space.

    Parameters
    ----------
    orth : Poly
        Orthogonal polynomial expansion. Must be orthogonal for the
        approximation to be accurate.
    nodes : array_like
        Where to evaluate the polynomial expansion and model to
        approximate.  nodes.shape==(D,K) where D is the number of
        dimensions and K is the number of nodes.
    weights : array_like
        Weights when doing numerical integration.
        weights.shape==(K,)
    solves : array_like, callable
        The model to approximate.  If array_like is provided, it must
        have len(solves)==K.  If callable, it must take a single
        argument X with len(X)==D, and return a consistent numpy
        compatible shape.
    retall : bool
        If True, return the Fourier coefficients in addition to the
        polynomial approximation.
    norms : array_like
        In the case of the three terms recurrence (TTR), the
        recurrence coefficients can be used to estimate the polynomial
        norms, which is more stable than manual calculation.
        Calculated using quadrature if not provided.
        norms.shape==(len(orth),)
    """
    orth = po.Poly(orth)
    nodes = np.asfarray(nodes)
    weights = np.asfarray(weights)

    # Evaluate the model in each node if a callable is given
    if hasattr(solves, "__call__"):
        solves = [solves(q) for q in nodes.T]
    solves = np.asfarray(solves)

    # Flatten multi-dimensional model output to two dimensions
    shape = solves.shape
    solves = solves.reshape(weights.size, solves.size/weights.size)

    ovals = orth(*nodes)
    vals1 = [(val*solves.T*weights).T for val in ovals]

    # Estimate the polynomial norms by quadrature unless provided
    if norms is None:
        vals2 = [(val**2*weights).T for val in ovals]
        norms = np.sum(vals2, 1)
    else:
        norms = np.array(norms).flatten()
        assert len(norms) == len(orth)

    coefs = (np.sum(vals1, 1).T/norms).T
    coefs = coefs.reshape(len(coefs), *shape[1:])
    Q = po.transpose(po.sum(orth*coefs.T, -1))

    if retall:
        return Q, coefs
    return Q
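# A minimal usage sketch for fit_quadrature (assuming the chaospy
# namespace `cp` with orth_ttr and generate_quadrature available; the
# one-dimensional model below is hypothetical):
#
# >>> dist = cp.Normal()
# >>> orth = cp.orth_ttr(2, dist)
# >>> nodes, weights = cp.generate_quadrature(3, dist, rule="G")
# >>> model = lambda q: q[0]**2
# >>> approx = cp.fit_quadrature(orth, nodes, weights, model)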
def orth_pcd(order, dist, eps=1.e-16, normed=False, **kws):
    """
    Create orthogonal polynomial expansion from pivoted Cholesky
    decomposition.

    Parameters
    ----------
    order : int
        Order of polynomial expansion
    dist : Dist
        Distribution space where polynomials are orthogonal
    eps : float
        Tolerance passed to the pivoted Cholesky decomposition.
    normed : bool
        If True orthonormal polynomials will be used instead of monic.
    **kws : optional
        Extra keywords passed to dist.mom

    Examples
    --------
    # >>> Z = cp.Normal()
    # >>> print cp.orth_pcd(2, Z)
    # [1.0, q0^2-1.0, q0]
    """
    raise DeprecationWarning("Obsolete. Use orth_chol instead.")

    # The code below is unreachable, but retained for reference.
    dim = len(dist)
    basis = po.basis(1, order, dim)
    C = Cov(basis, dist)
    N = len(basis)

    L, P = pcd(C, approx=1, pivot=1, tol=eps)
    Li = np.dot(P, np.linalg.inv(L.T))

    if normed:
        for i in xrange(N):
            Li[:, i] /= np.sum(Li[:, i]*P[:, i])
    E_ = -po.sum(E(basis, dist, **kws)*Li.T, -1)

    coefs = np.zeros((N+1, N+1))
    coefs[1:, 1:] = Li
    coefs[0, 0] = 1
    coefs[0, 1:] = E_

    out = {}
    out[(0,)*dim] = coefs[0]
    basis = list(basis)
    for i in xrange(N):
        I = basis[i].keys[0]
        out[I] = coefs[i+1]

    P = po.Poly(out, dim, coefs.shape[1:], float)
    return P
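# orth_pcd is deprecated; a minimal sketch of the replacement the
# warning above points to (assuming `cp` exposes orth_chol):
#
# >>> Z = cp.Normal()
# >>> orth = cp.orth_chol(2, Z)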
def lagrange_polynomial(X, sort="GR"): """ Lagrange Polynomials X : array_like Sample points where the lagrange polynomials shall be """ X = np.asfarray(X) if len(X.shape) == 1: X = X.reshape(1, X.size) dim, size = X.shape order = 1 while ber.terms(order, dim) <= size: order += 1 indices = np.array(ber.bindex(1, order, dim, sort)[:size]) s, t = np.mgrid[:size, :size] M = np.prod(X.T[s]**indices[t], -1) det = np.linalg.det(M) if det == 0: raise np.linalg.LinAlgError, "invertable matrix" v = po.basis(1, order, dim, sort)[:size] coeffs = np.zeros((size, size)) if size == 2: coeffs = np.linalg.inv(M) else: for i in xrange(size): for j in xrange(size): coeffs[i, j] += np.linalg.det(M[1:, 1:]) M = np.roll(M, -1, axis=0) M = np.roll(M, -1, axis=1) coeffs /= det return po.sum(v * (coeffs.T), 1)
def lagrange_polynomial(X, sort="GR"): """ Lagrange Polynomials X : array_like Sample points where the lagrange polynomials shall be """ X = np.asfarray(X) if len(X.shape)==1: X = X.reshape(1,X.size) dim,size = X.shape order = 1 while ber.terms(order, dim)<=size: order += 1 indices = np.array(ber.bindex(1, order, dim, sort)[:size]) s,t = np.mgrid[:size, :size] M = np.prod(X.T[s]**indices[t], -1) det = np.linalg.det(M) if det==0: raise np.linalg.LinAlgError, "invertable matrix" v = po.basis(1, order, dim, sort)[:size] coeffs = np.zeros((size, size)) if size==2: coeffs = np.linalg.inv(M) else: for i in xrange(size): for j in xrange(size): coeffs[i,j] += np.linalg.det(M[1:,1:]) M = np.roll(M, -1, axis=0) M = np.roll(M, -1, axis=1) coeffs /= det return po.sum(v*(coeffs.T), 1)
def orth_svd(order, dist, eps=1.e-300, normed=False, **kws):
    """
    Create orthogonal polynomial expansion from pivoted Cholesky
    decomposition.  If an eigenvalue of the covariance matrix falls
    below eps, the expansion is reduced to a subset.

    Parameters
    ----------
    order : int
        Order of polynomial expansion
    dist : Dist
        Distribution space where polynomials are orthogonal
    eps : float
        Threshold for when to subset the expansion.
    normed : bool
        If True, polynomial will be orthonormal.
    **kws : optional
        Extra keywords passed to dist.mom

    Examples
    --------
    # >>> Z = cp.Normal()
    # >>> print cp.orth_svd(2, Z)
    # [1.0, q0^2-1.0, q0]
    """
    raise DeprecationWarning("Obsolete")

    # The code below is unreachable, but retained for reference.
    dim = len(dist)
    if isinstance(order, po.Poly):
        basis = order
    else:
        basis = po.basis(1, order, dim)
    basis = list(basis)

    C = Cov(basis, dist, **kws)
    L, P = pcd(C, approx=0, pivot=1, tol=eps)
    N = L.shape[-1]

    # Subset the basis if the decomposition dropped columns
    if len(L) != N:
        I = [_.tolist().index(1) for _ in P]
        b_ = [0]*N
        for i in xrange(N):
            b_[i] = basis[I[i]]
        basis = b_

        C = Cov(basis, dist, **kws)
        L, P = pcd(C, approx=0, pivot=1, tol=eps)
        N = L.shape[-1]

    basis = po.Poly(basis)

    Li = rlstsq(L, P, alpha=1.e-300).T
    E_ = -po.sum(E(basis, dist, **kws)*Li.T, -1)

    coefs = np.zeros((N+1, N+1))
    coefs[1:, 1:] = Li
    coefs[0, 0] = 1
    coefs[0, 1:] = E_

    out = {}
    out[(0,)*dim] = coefs[0]
    for i in xrange(N):
        I = basis[i].keys[0]
        out[I] = coefs[i+1]

    P = po.Poly(out, dim, coefs.shape[1:], float)

    if normed:
        norm = np.sqrt(Var(P, dist, **kws))
        norm[0] = 1
        P = P/norm

    return P
def fit_regression(P, x, u, rule="LS", retall=False, **kws): """ Fit a polynomial chaos expansion using linear regression. Parameters ---------- P : Poly Polynomial chaos expansion with `P.shape=(M,)` and `P.dim=D`. x : array_like Collocation nodes with `x.shape=(D,K)`. u : array_like Model evaluations with `len(u)=K`. retall : bool If True return uhat in addition to R rule : str Regression method used. The follwong methods uses scikits-learn as backend. See `sklearn.linear_model` for more details. Key Scikit-learn Description --- ------------ ----------- Parameters Description ---------- ----------- "BARD" ARDRegression Bayesian ARD Regression n_iter=300 Maximum iterations tol=1e-3 Optimization tolerance alpha_1=1e-6 Gamma scale parameter alpha_2=1e-6 Gamma inverse scale parameter lambda_1=1e-6 Gamma shape parameter lambda_2=1e-6 Gamma inverse scale parameter threshold_lambda=1e-4 Upper pruning threshold "BR" BayesianRidge Bayesian Ridge Regression n_iter=300 Maximum iterations tol=1e-3 Optimization tolerance alpha_1=1e-6 Gamma scale parameter alpha_2=1e-6 Gamma inverse scale parameter lambda_1=1e-6 Gamma shape parameter lambda_2=1e-6 Gamma inverse scale parameter "EN" ElastiNet Elastic Net alpha=1.0 Dampening parameter rho Mixing parameter in [0,1] max_iter=300 Maximum iterations tol Optimization tolerance "ENC" ElasticNetCV EN w/Cross Validation rho Dampening parameter(s) eps=1e-3 min(alpha)/max(alpha) n_alphas Number of alphas alphas List of alphas max_iter Maximum iterations tol Optimization tolerance cv=3 Cross validation folds "LA" Lars Least Angle Regression n_nonzero_coefs Number of non-zero coefficients eps Cholesky regularization "LAC" LarsCV LAR w/Cross Validation max_iter Maximum iterations cv=5 Cross validation folds max_n_alphas Max points for residuals in cv "LAS" Lasso Least Absolute Shrinkage and Selection Operator alpha=1.0 Dampening parameter max_iter Maximum iterations tol Optimization tolerance "LASC" LassoCV LAS w/Cross Validation eps=1e-3 min(alpha)/max(alpha) n_alphas Number of alphas alphas List of alphas max_iter Maximum iterations tol Optimization tolerance cv=3 Cross validation folds "LL" LassoLars Lasso and Lars model max_iter Maximum iterations eps Cholesky regularization "LLC" LassoLarsCV LL w/Cross Validation max_iter Maximum iterations cv=5 Cross validation folds max_n_alphas Max points for residuals in cv eps Cholesky regularization "LLIC" LassoLarsIC LL w/AIC or BIC criterion "AIC" or "BIC" criterion max_iter Maximum iterations eps Cholesky regularization "OMP" OrthogonalMatchingPursuit n_nonzero_coefs Number of non-zero coefficients tol Max residual norm (instead of non-zero coef) Local methods Key Description --- ----------- "LS" Ordenary Least Squares "T" Ridge Regression/Tikhonov Regularization order Order of regularization (or custom matrix) alpha Dampning parameter (else estimated from gcv) "TC" T w/Cross Validation order Order of regularization (or custom matrix) alpha Dampning parameter (else estimated from gcv) Returns ------- R[, uhat] R : Poly Fitted polynomial with `R.shape=u.shape[1:]` and `R.dim=D`. uhat : np.ndarray The Fourier coefficients in the estimation. 
Examples -------- >>> P = cp.Poly([1, x, y]) >>> s = [[-1,-1,1,1], [-1,1,-1,1]] >>> u = [0,1,1,2] >>> print fit_regression(P, s, u) 0.5q1+0.5q0+1.0 """ x = np.array(x) if len(x.shape) == 1: x = x.reshape(1, *x.shape) u = np.array(u) Q = P(*x).T shape = u.shape[1:] u = u.reshape(u.shape[0], int(np.prod(u.shape[1:]))) rule = rule.upper() # Local rules if rule == "LS": uhat = la.lstsq(Q, u)[0].T elif rule == "T": uhat, alphas = rlstsq(Q, u, kws.get("order", 0), kws.get("alpha", None), False, True) uhat = uhat.T elif rule == "TC": uhat = rlstsq(Q, u, kws.get("order", 0), kws.get("alpha", None), True) uhat = uhat.T else: # Scikit-learn wrapper try: _ = lm except: raise NotImplementedError("sklearn not installed") if rule == "BARD": solver = lm.ARDRegression(fit_intercept=False, copy_X=False, **kws) elif rule == "BR": kws["fit_intercept"] = kws.get("fit_intercept", False) solver = lm.BayesianRidge(**kws) elif rule == "EN": kws["fit_intercept"] = kws.get("fit_intercept", False) solver = lm.ElasticNet(**kws) elif rule == "ENC": kws["fit_intercept"] = kws.get("fit_intercept", False) solver = lm.ElasticNetCV(**kws) elif rule == "LA": # success kws["fit_intercept"] = kws.get("fit_intercept", False) solver = lm.Lars(**kws) elif rule == "LAC": kws["fit_intercept"] = kws.get("fit_intercept", False) solver = lm.LarsCV(**kws) elif rule == "LAS": kws["fit_intercept"] = kws.get("fit_intercept", False) solver = lm.Lasso(**kws) elif rule == "LASC": kws["fit_intercept"] = kws.get("fit_intercept", False) solver = lm.LassoCV(**kws) elif rule == "LL": kws["fit_intercept"] = kws.get("fit_intercept", False) solver = lm.LassoLars(**kws) elif rule == "LLC": kws["fit_intercept"] = kws.get("fit_intercept", False) solver = lm.LassoLarsCV(**kws) elif rule == "LLIC": kws["fit_intercept"] = kws.get("fit_intercept", False) solver = lm.LassoLarsIC(**kws) elif rule == "OMP": solver = lm.OrthogonalMatchingPursuit(**kws) uhat = solver.fit(Q, u).coef_ u = u.reshape(u.shape[0], *shape) R = po.sum((P * uhat), -1) R = po.reshape(R, shape) if retall == 1: return R, uhat elif retall == 2: if rule == "T": return R, uhat, Q, alphas return R, uhat, Q return R
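# A minimal sketch of the Tikhonov variant with full return values
# (assuming the `cp` namespace; the nodes and evaluations mirror the
# doctest above and are otherwise hypothetical):
#
# >>> x, y = cp.variable(2)
# >>> P = cp.Poly([1, x, y])
# >>> s = [[-1, -1, 1, 1], [-1, 1, -1, 1]]
# >>> u = [0, 1, 1, 2]
# >>> R, uhat, Q, alphas = cp.fit_regression(
# ...     P, s, u, rule="T", retall=2)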
def pcm_gq(func, order, dist_out, dist_in=None, acc=None,
        orth=None, retall=False, sparse=False):
    """
    Probabilistic Collocation Method using optimal Gaussian quadrature

    Parameters
    ----------
    Required arguments

    func : callable
        The model to be approximated.  Must accept arguments on the
        form `func(z)` where `z` is a 1-dimensional array with
        `len(z)==len(dist)`.
    order : int
        The order of the polynomial approximation
    dist_out : Dist
        Distribution of the model parameters

    Optional arguments

    dist_in : Dist
        If included, the parameter space will be mapped using a
        Rosenblatt transformation from dist_out to dist_in before
        creating an expansion in terms of dist_in
    acc : int
        The order of the quadrature scheme used.
        If omitted order+1 will be used.
    orth : int, str, callable, Poly
        Orthogonal polynomial generation.

        int, str :
            orth will be passed to orth_select for selection of
            orthogonalization.  See orth_select doc for more details.
        callable :
            the return of orth(order, dist) will be used.
        Poly :
            it will be used directly.

        All polynomials must be orthogonal for the method to work
        properly.
    retall : bool
        If True, return nodes, weights and model evaluations in
        addition to the polynomial.
    sparse : bool
        If True, Smolyak sparse grid will be used instead of full
        tensor grid

    Returns
    -------
    Q[, x, w, y]

    Q : Poly
        Polynomial estimate of a given model.
    x : np.ndarray
        Nodes used in the evaluation.
    w : np.ndarray
        Quadrature weights.
    y : np.ndarray
        Model evaluations in the nodes.

    # Examples
    # --------
    # Define function:
    # >>> func = lambda z: z[1]*z[0]
    #
    # Define distribution:
    # >>> dist = cp.J(cp.Normal(), cp.Normal())
    #
    # Perform pcm:
    # >>> p, x, w, y = cp.pcm_gq(func, 2, dist, acc=3, retall=True)
    # >>> print cp.around(p, 10)
    # q0q1
    # >>> print len(w)
    # 16
    """
    if acc is None:
        acc = order+1

    if dist_in is None:
        z, w = qu.generate_quadrature(acc, dist_out, 100,
                sparse=sparse, rule="G")
        x = z
        dist = dist_out
    else:
        z, w = qu.generate_quadrature(acc, dist_in, 100,
                sparse=sparse, rule="G")
        # Rosenblatt-map the quadrature nodes into dist_out space
        x = dist_out.ppf(dist_in.cdf(z))
        dist = dist_in

    y = np.array(map(func, x.T))
    shape = y.shape
    y = y.reshape(w.size, y.size/w.size)

    if orth is None:
        if dist.dependent:
            orth = "chol"
        else:
            orth = "ttr"
    if isinstance(orth, (str, int, long)):
        orth = orth_select(orth)
    if not isinstance(orth, po.Poly):
        orth = orth(order, dist)

    # Spectral projection: Fourier coefficients by quadrature
    ovals = orth(*z)
    vals1 = [(val*y.T*w).T for val in ovals]
    vals2 = [(val**2*w).T for val in ovals]
    coef = (np.sum(vals1, 1).T/np.sum(vals2, 1)).T

    coef = coef.reshape(len(coef), *shape[1:])
    Q = po.transpose(po.sum(orth*coef.T, -1))

    if retall:
        return Q, x, w, y
    return Q
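# A minimal sketch of the Rosenblatt-mapped variant (assuming the `cp`
# namespace; the lognormal-to-normal mapping is a hypothetical choice):
#
# >>> func = lambda z: z[0]**2
# >>> Q = cp.pcm_gq(func, 2, cp.Lognormal(), dist_in=cp.Normal())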
def fit_adaptive(func, poly, dist, abserr=1.e-8, relerr=1.e-8,
        budget=0, norm=0, bufname="", retall=False):
    """Adaptive estimation of Fourier coefficients.

    Parameters
    ----------
    func : callable
        Should take a single argument `q` which is a 1D array with
        `len(q)=len(dist)`.  Must return something compatible with
        np.ndarray.
    poly : Poly
        Polynomial vector for which to create Fourier coefficients.
    dist : Dist
        A distribution to optimize the Fourier coefficients to.
    abserr : float
        Absolute error tolerance.
    relerr : float
        Relative error tolerance.
    budget : int
        Soft maximum number of function evaluations.
        0 means unlimited.
    norm : int
        Specifies the norm that is used to measure the error and
        determine convergence properties (irrelevant for single-valued
        functions).  The `norm` argument takes one of the values:

        0 : L0-norm
        1 : L0-norm on top of a paired L2-norm.  Good for complex
            numbers where each consecutive pair of the solution is
            the real and imaginary part.
        2 : L2-norm
        3 : L1-norm
        4 : L_infinity-norm
    bufname : str, optional
        Buffer evaluations to file such that fit_adaptive can be
        rerun without redoing all evaluations.
    retall : bool
        If True, returns extra values.

    Returns
    -------
    estimate[, coeffs, norms, coeff_error, norm_error]

    estimate : Poly
        The polynomial chaos expansion representation of func.
    coeffs : np.ndarray
        The Fourier coefficients.
    norms : np.ndarray
        The norm of the orthogonal polynomial squared.
    coeff_error : np.ndarray
        Estimated integration error of the coeffs.
    norm_error : np.ndarray
        Estimated integration error of the norms.

    Examples
    --------
    >>> func = lambda q: q[0]*q[1]
    >>> poly = cp.basis(0,2,2)
    >>> dist = cp.J(cp.Uniform(0,1), cp.Uniform(0,1))
    >>> res = cp.fit_adaptive(func, poly, dist, budget=100)
    >>> print res
    """
    # Check the cubature backend before any work is done
    try:
        _ = _cubature
    except NameError:
        raise NotImplementedError("cubature not installed properly")

    if bufname:
        func = lazy_eval(func, load=bufname)

    dim = len(dist)
    n = [0, 0]  # evaluation counters

    dummy_x = dist.inv(.5*np.ones(dim, dtype=np.float64))
    val = np.array(func(dummy_x), np.float64)

    xmin = np.zeros(dim, np.float64)
    xmax = np.ones(dim, np.float64)

    def f1(u, ns, *args):
        # Integrand for the squared polynomial norms
        qs = dist.inv(u.reshape(ns, dim))
        out = (poly(*qs.T)**2).T.flatten()
        return out

    dim1 = len(poly)
    val1 = np.empty(dim1, dtype=np.float64)
    err1 = np.empty(dim1, dtype=np.float64)
    _cubature(f1, dim1, xmin, xmax, (), "h", abserr, relerr, norm,
            budget, True, val1, err1)
    val1 = np.tile(val1, val.size)

    dim2 = np.prod(val.shape)*dim1
    val2 = np.empty(dim2, dtype=np.float64)
    err2 = np.empty(dim2, dtype=np.float64)

    def f2(u, ns, *args):
        # Integrand for the normalized Fourier coefficients
        n[0] += ns
        n[1] += 1
        qs = dist.inv(u.reshape(ns, dim))
        Y = np.array([func(q) for q in qs])
        Q = poly(*qs.T)
        out = np.array([Y.T*q1 for q1 in Q]).T.flatten()
        out = out/np.tile(val1, ns)
        return out

    _cubature(f2, dim2, xmin, xmax, (), "h", abserr, relerr, norm,
            budget, True, val2, err2)

    shape = (dim1,)+val.shape
    val2 = val2.reshape(shape[::-1]).T

    out = po.transpose(po.sum(poly*val2.T, -1))

    if retall:
        return out, val2, val1, err2, err1
    return out
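# A minimal sketch of the retall variant (assuming the `cp` namespace,
# with func, poly and dist as in the doctest above):
#
# >>> out = cp.fit_adaptive(func, poly, dist, budget=100, retall=True)
# >>> estimate, coeffs, norms, coeff_error, norm_error = out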
def fit_regression(P, x, u, rule="LS", retall=False, **kws): """ Fit a polynomial chaos expansion using linear regression. Parameters ---------- P : Poly Polynomial chaos expansion with `P.shape=(M,)` and `P.dim=D`. x : array_like Collocation nodes with `x.shape=(D,K)`. u : array_like Model evaluations with `len(u)=K`. retall : bool If True return uhat in addition to R rule : str Regression method used. The follwong methods uses scikits-learn as backend. See `sklearn.linear_model` for more details. Key Scikit-learn Description --- ------------ ----------- Parameters Description ---------- ----------- "BARD" ARDRegression Bayesian ARD Regression n_iter=300 Maximum iterations tol=1e-3 Optimization tolerance alpha_1=1e-6 Gamma scale parameter alpha_2=1e-6 Gamma inverse scale parameter lambda_1=1e-6 Gamma shape parameter lambda_2=1e-6 Gamma inverse scale parameter threshold_lambda=1e-4 Upper pruning threshold "BR" BayesianRidge Bayesian Ridge Regression n_iter=300 Maximum iterations tol=1e-3 Optimization tolerance alpha_1=1e-6 Gamma scale parameter alpha_2=1e-6 Gamma inverse scale parameter lambda_1=1e-6 Gamma shape parameter lambda_2=1e-6 Gamma inverse scale parameter "EN" ElastiNet Elastic Net alpha=1.0 Dampening parameter rho Mixing parameter in [0,1] max_iter=300 Maximum iterations tol Optimization tolerance "ENC" ElasticNetCV EN w/Cross Validation rho Dampening parameter(s) eps=1e-3 min(alpha)/max(alpha) n_alphas Number of alphas alphas List of alphas max_iter Maximum iterations tol Optimization tolerance cv=3 Cross validation folds "LA" Lars Least Angle Regression n_nonzero_coefs Number of non-zero coefficients eps Cholesky regularization "LAC" LarsCV LAR w/Cross Validation max_iter Maximum iterations cv=5 Cross validation folds max_n_alphas Max points for residuals in cv "LAS" Lasso Least Absolute Shrinkage and Selection Operator alpha=1.0 Dampening parameter max_iter Maximum iterations tol Optimization tolerance "LASC" LassoCV LAS w/Cross Validation eps=1e-3 min(alpha)/max(alpha) n_alphas Number of alphas alphas List of alphas max_iter Maximum iterations tol Optimization tolerance cv=3 Cross validation folds "LL" LassoLars Lasso and Lars model max_iter Maximum iterations eps Cholesky regularization "LLC" LassoLarsCV LL w/Cross Validation max_iter Maximum iterations cv=5 Cross validation folds max_n_alphas Max points for residuals in cv eps Cholesky regularization "LLIC" LassoLarsIC LL w/AIC or BIC criterion "AIC" or "BIC" criterion max_iter Maximum iterations eps Cholesky regularization "OMP" OrthogonalMatchingPursuit n_nonzero_coefs Number of non-zero coefficients tol Max residual norm (instead of non-zero coef) Local methods Key Description --- ----------- "LS" Ordenary Least Squares "T" Ridge Regression/Tikhonov Regularization order Order of regularization (or custom matrix) alpha Dampning parameter (else estimated from gcv) "TC" T w/Cross Validation order Order of regularization (or custom matrix) alpha Dampning parameter (else estimated from gcv) Returns ------- R[, uhat] R : Poly Fitted polynomial with `R.shape=u.shape[1:]` and `R.dim=D`. uhat : np.ndarray The Fourier coefficients in the estimation. 
Examples -------- >>> P = cp.Poly([1, x, y]) >>> x = [[-1,-1,1,1], [-1,1,-1,1]] >>> u = [0,1,1,2] >>> print fit_regression(P, x, u) 0.5q1+0.5q0+1.0 """ x = np.array(x) if len(x.shape)==1: x = x.reshape(1, *x.shape) u = np.array(u) Q = P(*x).T shape = u.shape[1:] u = u.reshape(u.shape[0], np.prod(u.shape[1:])) rule = rule.upper() # Local rules if rule=="LS": uhat = la.lstsq(Q, u)[0] elif rule=="T": uhat = rlstsq(Q, u, kws.get("order",0), kws.get("alpha", None), False) elif rule=="TC": uhat = rlstsq(Q, u, kws.get("order",0), kws.get("alpha", None), True) else: # Scikit-learn wrapper try: _ = lm except: raise NotImplementedError( "sklearn not installed") if rule=="BARD": solver = lm.ARDRegression(fit_intercept=False, copy_X=False, **kws) elif rule=="BR": kws["fit_intercept"] = kws.get("fit_intercept", False) solver = lm.BayesianRidge(**kws) elif rule=="EN": kws["fit_intercept"] = kws.get("fit_intercept", False) solver = lm.ElasticNet(**kws) elif rule=="ENC": kws["fit_intercept"] = kws.get("fit_intercept", False) solver = lm.ElasticNetCV(**kws) elif rule=="LA": kws["fit_intercept"] = kws.get("fit_intercept", False) solver = lm.Lars(**kws) elif rule=="LAC": kws["fit_intercept"] = kws.get("fit_intercept", False) solver = lm.LarsCV(**kws) elif rule=="LAS": kws["fit_intercept"] = kws.get("fit_intercept", False) solver = lm.Lasso(**kws) elif rule=="LASC": kws["fit_intercept"] = kws.get("fit_intercept", False) solver = lm.LassoCV(**kws) elif rule=="LL": kws["fit_intercept"] = kws.get("fit_intercept", False) solver = lm.LassoLars(**kws) elif rule=="LLC": kws["fit_intercept"] = kws.get("fit_intercept", False) solver = lm.LassoLarsCV(**kws) elif rule=="LLIC": kws["fit_intercept"] = kws.get("fit_intercept", False) solver = lm.LassoLarsIC(**kws) elif rule=="OMP": solver = lm.OrthogonalMatchingPursuit(**kws) uhat = solver.fit(Q, u).coef_ u = u.reshape(u.shape[0], *shape) R = po.sum((P*uhat.T), -1) R = po.reshape(R, shape) if retall==1: return R, uhat elif retall==2: return R, uhat, Q return R