def test_ratio(self):
    """Evaluation of orthogonal polynomial ratios."""
    alpha = -1. + 10*np.random.rand(1)[0]
    beta = -1. + 10*np.random.rand(1)[0]

    J = JacobiPolynomials(alpha=alpha, beta=beta)
    N = int(np.ceil(60*np.random.rand(1)))

    # Sample points outside [-1, 1], where the ratios are well defined
    x = (1 + 5*np.random.rand(1)) * (1 + np.random.rand(50))
    y = (1 + 5*np.random.rand(1)) * (-1 - np.random.rand(50))
    x = np.concatenate([x, y])

    P = J.eval(x, range(N+1))
    rdirect = np.zeros([x.size, N+1])
    rdirect[:, 0] = P[:, 0]
    rdirect[:, 1:] = P[:, 1:] / P[:, :-1]

    r = J.r_eval(x, range(N+1))

    delta = 1e-6
    errs = np.abs(r - rdirect)
    i, j = np.where(errs > delta)
    if i.size > 0:
        errstr = 'Failed for alpha={0:1.3f}, beta={1:1.3f}, n={2:d}, x={3:1.6f}'.format(
            alpha, beta, j[0], x[i[0]])
    else:
        errstr = ''

    self.assertAlmostEqual(np.linalg.norm(errs, ord=np.inf), 0, delta=delta, msg=errstr)
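
# Standalone sketch (meant to be run as a separate script; relies only on the r_eval and
# eval calls exercised in test_ratio above): since r_0 = p_0 and r_n = p_n / p_{n-1} for
# n >= 1, the cumulative product of the ratio columns reconstructs the polynomial values.
import numpy as np
from families import JacobiPolynomials

J = JacobiPolynomials(alpha=0.5, beta=1.5)
x = np.linspace(2.0, 3.0, 5)              # points outside [-1, 1], as in the test
r = J.r_eval(x, range(7))
P = J.eval(x, range(7))
print(np.max(np.abs(np.cumprod(r, axis=1) - P)))   # expect a value near 0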
def test_gq(self):
    """Gaussian quadrature integration accuracy."""
    alpha = -1. + 10*np.random.rand(1)[0]
    beta = -1. + 10*np.random.rand(1)[0]

    J = JacobiPolynomials(alpha=alpha, beta=beta)
    N = int(np.ceil(60*np.random.rand(1)))

    x, w = J.gauss_quadrature(N)
    w /= w.sum()  # Force probability measure

    V = J.eval(x, range(2*N))
    integrals = np.dot(w, V)
    integrals[0] -= V[0, 0]  # Exact value

    self.assertAlmostEqual(np.linalg.norm(integrals, ord=np.inf), 0.)
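
# Standalone sketch (assumes the probability-measure normalization that test_gq enforces
# above): an N-point Gauss rule integrates polynomials of degree up to 2N-1 exactly, so
# low-order moments of the uniform measure on [-1, 1] are reproduced to machine precision.
import numpy as np
from families import JacobiPolynomials

J = JacobiPolynomials(alpha=0., beta=0.)   # Legendre: uniform weight on [-1, 1]
x, w = J.gauss_quadrature(4)
w /= w.sum()                               # force a probability measure, as in test_gq
print(np.dot(w, x**2), np.dot(w, x**4))    # expect 1/3 and 1/5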
def test_idist_legendre(self):
    """Evaluation of Legendre induced distribution function."""
    J = JacobiPolynomials(alpha=0., beta=0.)

    n = int(np.ceil(25*np.random.rand(1))[0])
    M = 25
    x = -1. + 2*np.random.rand(M)

    # JacobiPolynomials method
    F1 = J.idist(x, n)

    y, w = J.gauss_quadrature(n+1)

    # Exact: integrate density
    F2 = np.zeros(F1.shape)
    for xind, xval in enumerate(x):
        yquad = (y+1)/2.*(xval+1) - 1.
        F2[xind] = np.dot(w, J.eval(yquad, n)**2) * (xval+1)/2

    self.assertAlmostEqual(np.linalg.norm(F1-F2, ord=np.inf), 0.)
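
# Standalone sketch (relies only on the idist call used above): an induced distribution
# function is a CDF, so its values should be nondecreasing and lie in [0, 1] up to
# rounding error.
import numpy as np
from families import JacobiPolynomials

J = JacobiPolynomials(alpha=0., beta=0.)
x = np.sort(-1. + 2*np.random.rand(20))
F = J.idist(x, 3)
print(F.min(), F.max(), np.all(np.diff(F) >= -1e-12))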
def test_fidist_jacobi(self):
    """Fast induced sampling routine for Jacobi polynomials."""
    alpha = np.random.random()*11 - 1.
    beta = np.random.random()*11 - 1.

    nmax = 4
    M = 10
    u = np.random.random(M)

    J = JacobiPolynomials(alpha=alpha, beta=beta)
    delta = 1e-3

    # Scalar degrees
    for n in range(nmax)[::-1]:
        x0 = J.idistinv(u, n)
        x1 = J.fidistinv(u, n)

        ind = np.where(np.abs(x0-x1) > delta)[0]
        if ind.size > 0:
            errstr = 'Failed for alpha={0:1.3f}, beta={1:1.3f}, n={2:d}, u={3:1.6f}'.format(
                alpha, beta, n, u[ind[0]])
        else:
            errstr = ''

        self.assertAlmostEqual(np.linalg.norm(x0-x1, ord=np.inf), 0., delta=delta, msg=errstr)

    # Array-valued degrees
    n = np.array(np.round(np.random.random(M)), dtype=int)
    x0 = J.idistinv(u, n)
    x1 = J.fidistinv(u, n)

    ind = np.where(np.abs(x0-x1) > delta)[0]
    if ind.size > 0:
        errstr = 'Failed for alpha={0:1.3f}, beta={1:1.3f}, n={2:d}, u={3:1.6f}'.format(
            alpha, beta, n[ind[0]], u[ind[0]])
    else:
        errstr = ''

    self.assertAlmostEqual(np.linalg.norm(x0-x1, ord=np.inf), 0., delta=delta, msg=errstr)
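
# Standalone round-trip sketch (assumption: idistinv is the functional inverse of idist,
# which is what the comparisons above exercise): pushing u through idistinv and back
# through idist should approximately recover u.
import numpy as np
from families import JacobiPolynomials

J = JacobiPolynomials(alpha=0.5, beta=0.5)
u = np.random.random(8)
n = 2
x = J.idistinv(u, n)
print(np.max(np.abs(J.idist(x, n) - u)))   # expect a small value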
def __init__(self, alpha=None, beta=None, mean=None, stdev=None, dim=None, domain=None):

    # Convert mean/stdev inputs to alpha/beta
    if mean is not None and stdev is not None:
        if alpha is not None or beta is not None:
            raise ValueError('Cannot simultaneously specify alpha/beta parameters and mean/stdev parameters')
        alpha, beta = self._convert_meanstdev_to_alphabeta(mean, stdev)

    alpha, beta = self._convert_alphabeta_to_iterable(alpha, beta)

    # Sets self.dim, self.alpha, self.beta, self.domain
    self._detect_dimension(alpha, beta, dim, domain)

    for qd in range(self.dim):
        assert self.alpha[qd] > 0 and self.beta[qd] > 0, \
            "Parameter vectors alpha and beta must have strictly positive components"
    assert self.dim > 0, "Dimension must be positive"
    assert self.domain.shape == (2, self.dim)

    ## Construct affine map transformations

    # Standard domain is [0,1]^dim
    self.standard_domain = np.ones([2, self.dim])
    self.standard_domain[0, :] = 0.

    # Low-level routines use Jacobi polynomials, which operate on [-1,1]
    # instead of the standard Beta domain of [0,1]
    self.jacobi_domain = np.ones([2, self.dim])
    self.jacobi_domain[0, :] = -1.

    self.transform_standard_dist_to_poly = AffineTransform(
        domain=self.standard_domain, image=self.jacobi_domain)

    self.transform_to_standard = AffineTransform(
        domain=self.domain, image=self.standard_domain)

    ## Construct 1D polynomial families
    Js = []
    for qd in range(self.dim):
        Js.append(JacobiPolynomials(alpha=self.beta[qd]-1., beta=self.alpha[qd]-1.))
    self.polys = TensorialPolynomials(polys1d=Js)

    self.indices = None
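
# Standalone verification sketch (assumption: the standard correspondence between the
# Beta(alpha, beta) density on [0, 1] and the Jacobi weight (1-x)^a (1+x)^b on [-1, 1],
# which motivates the (alpha, beta) -> (beta-1, alpha-1) swap in the constructor above).
import numpy as np
from scipy.stats import beta as beta_dist

alpha, beta = 2.5, 4.0
z = np.linspace(1e-3, 1. - 1e-3, 5)          # points in the standard Beta domain [0, 1]
x = 2*z - 1.                                 # affine map to the Jacobi domain [-1, 1]
jacobi_weight = (1 - x)**(beta - 1.) * (1 + x)**(alpha - 1.)
ratio = beta_dist.pdf(z, alpha, beta) / jacobi_weight
print(np.ptp(ratio))                         # expect ~0: the densities are proportional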
    Returns
    -------
    N samples iid from a discrete probability measure
    """
    assert probs.size == states.size

    p = probs / np.sum(probs)
    j = np.digitize(np.random.random(N), np.cumsum(p), right=False)
    x = states[j]

    return x


if __name__ == "__main__":

    from families import JacobiPolynomials

    alph = -0.8
    bet = np.sqrt(101)

    states = np.linspace(-1, 1, 10)
    n = 4

    J = JacobiPolynomials(alpha=alph, beta=bet)
    probs = J.idist(states, n, M=10)

    N = 5
    print(discrete_sampling(N, probs, states))
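
# Sanity sketch for the discrete_sampling routine above (hypothetical check, meant to be
# run on its own): with many samples, the empirical frequencies should approach the
# normalized input probabilities.
import numpy as np

states = np.array([0, 1, 2])
probs = np.array([0.2, 0.3, 0.5])
samples = discrete_sampling(100000, probs, states)
freqs = np.array([(samples == s).mean() for s in states])
print(np.abs(freqs - probs).max())           # expect an O(1/sqrt(N)) discrepancy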
def christoffel_function(self, x, k):
    """
    Computes the normalized (inverse) Christoffel function lambda, defined as

        lambda**2 = k / sum(p**2, axis=1),

    where p is a matrix containing evaluations of an orthonormal polynomial
    family up to degree k-1, defined by the recurrence coefficients ab.
    """
    assert k > 0

    p = self.eval(x, range(k))
    return np.sqrt(float(k) / np.sum(p**2, axis=1))


if __name__ == "__main__":

    from matplotlib import pyplot as plt
    from families import JacobiPolynomials

    J = JacobiPolynomials()
    N = 100
    k = 15

    x, w = J.gauss_quadrature(N)
    V = J.eval(x, range(k))

    plt.plot(x, V[:, :k])
    plt.show()
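
# Standalone sketch (assumptions: JacobiPolynomials inherits christoffel_function from
# the family base class above, and the quadrature weights, once normalized, represent
# the orthonormality measure): since lambda(x)**2 = k / sum_j p_j(x)**2, the weighted
# average of 1/lambda**2 should equal 1.
import numpy as np
from families import JacobiPolynomials

J = JacobiPolynomials(alpha=0., beta=0.)
k = 8
x, w = J.gauss_quadrature(k)       # exact for polynomials of degree up to 2k-1
w /= w.sum()                       # force a probability measure
lam = J.christoffel_function(x, k)
print(np.dot(w, 1.0/lam**2))       # expect a value near 1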
if __name__ == "__main__":

    from matplotlib import pyplot as plt
    from math import pi

    from scipy.stats import multivariate_normal

    from families import JacobiPolynomials, HermitePolynomials
    import variableTransformations as VT

    def fun(x):
        return 2 * x**2
        # return np.ones(len(x))

    '''
    Example 1: $\int_{-1}^1 x^2 dx$
    Use Legendre polynomials, w(x) = 1 for x in [-1, 1], 0 otherwise.
    '''
    J = JacobiPolynomials(alpha=0, beta=0)
    N = 30
    k = 30
    x, w = J.gauss_quadrature(N)
    print(np.dot(fun(x), w) * 2)  # >> outputs 1/3 but the solution is 2/3

    '''
    Example 2: $\int_{-\infty}^{\infty} e^{-x^2} x^2 dx$
    Use Hermite polynomials, w(x) = e^{-x^2}.
    '''
    H = HermitePolynomials(rho=0)
    N = 10
    k = 10
    x, w = H.gauss_quadrature(N)
    print(np.dot(fun(x), w) * np.sqrt(pi))
if __name__ == "__main__":

    import matplotlib.pyplot as plt
    from matplotlib import cm
    from mpl_toolkits.mplot3d import Axes3D

    from families import JacobiPolynomials
    import opoly1d
    import indexing
    import pdb

    d = 4
    k = 3

    J = JacobiPolynomials()
    P = TensorialPolynomials(J, d)
    # ab = opoly1d.jacobi_recurrence(k+1, alpha=0, beta=0)

    lambdas = indexing.total_degree_indices(d, k)

    if d == 2:
        N = 50
        x = np.linspace(-1, 1, N)
        X, Y = np.meshgrid(x, x)
        XX = np.concatenate((X.reshape(X.size, 1), Y.reshape(Y.size, 1)), axis=1)

        p = P.eval(XX, lambdas)
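
# Standalone check (assumption: total_degree_indices(d, k) enumerates all multi-indices
# lambda with |lambda| <= k): the number of such indices is binom(d + k, k).
from math import comb
import indexing

d, k = 4, 3
lambdas = indexing.total_degree_indices(d, k)
print(len(lambdas), comb(d + k, k))   # both should be 35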
def v(n, gamma, c):
    # Expansion v(x) = sum_i c[i] * p_i(x) in Jacobi polynomials with alpha = beta = gamma
    P = JacobiPolynomials(alpha=gamma, beta=gamma)
    return lambda x: sum(c[i] * P.eval(x, i, d=0) for i in range(len(c)))
def create_K_function(n, gamma):
    # K(x) = sqrt(sum_{i=0}^{n} p_i(x)**2) for Jacobi polynomials with alpha = beta = gamma
    P = JacobiPolynomials(alpha=gamma, beta=gamma)
    return lambda x: np.sqrt(np.sum(P.eval(x, range(n + 1), d=0)**2, axis=1))
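
# Standalone usage sketch (hypothetical coefficients; relies on the JacobiPolynomials.eval
# signature used above): evaluate the expansion v and the function K on a small grid.
import numpy as np

gamma = 0.0
c = np.array([1.0, 0.5, 0.25])
x = np.linspace(-1., 1., 5)
print(v(len(c) - 1, gamma, c)(x))         # expansion sum_i c[i] * p_i(x)
print(create_K_function(2, gamma)(x))     # K(x) = sqrt(sum_{i<=2} p_i(x)**2)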