def method3(reps, A, b, x):
    LU, p = dgesv.lup_packed(A)
    mincols, maxcols = dgesv.find_bands(LU, tol=1e-15)
    for j in range(reps):
#        dgesv.solve_decomposed_banded( LU, p, mincols, maxcols, b[j,:], x )
        dgesv.solve_decomposed_banded(LU, p, mincols, maxcols, b, x)
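# A hypothetical sanity check for method3: a minimal sketch assuming, as the call above
# suggests, that dgesv.solve_decomposed_banded() writes the solution into x in place.
# The helper name _check_method3 and the random test system are illustrative only.
def _check_method3(n=10):
    import numpy as np
    A = np.random.random((n, n)) + n * np.eye(n)   # diagonally dominant => nonsingular
    b = np.random.random(n)
    x = np.empty_like(b)
    method3(1, A, b, x)                            # factorize once, backsolve once
    assert np.allclose(x, np.linalg.solve(A, b))   # compare against NumPy's reference solver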
def main():
    stuff = RandomPileOfTestStuff(q=100)

    # From the API docs for numpy.polynomial.legendre.leggauss:
    #     Computes the sample points and weights for Gauss-Legendre quadrature.
    #     These sample points and weights will correctly integrate polynomials of degree
    #     2*deg - 1 or less over the interval [-1, 1] with the weight function f(x) = 1.
    #
    # Hence, in Galerkin methods, to exactly handle a mass matrix where neither of the terms
    # is differentiated, using affine mapping to the reference element [-1,1] (implying
    # piecewise constant Jacobian), we need to have
    #
    #   2*deg - 1 = 2*d
    #
    # i.e.
    #
    #   deg = (2*d + 1) / 2
    #
    # where d is the degree of the highest-degree polynomial present in the Galerkin basis,
    # and deg is the order of the Gauss-Legendre rule.
    #
    # Obviously, since only integer deg are available, we must round up (if rounded down,
    # 2*deg - 1 is less than the smallest needed, 2*d). Thus the actual practical result is
    #
    #   deg = ceil( (2*d + 1) / 2 ) = d + ceil( 1/2 ) = d + 1
    #
    # (Observe that a rule of this order can do one degree more than the matrix M
    #  (integrand N*N) needs. With this, we could exactly integrate x*N*N, if needed.)
    #
    # For the purposes of solving the first-order problem u' = f(u, t) by dG, the matrix is
    # not our M, but instead our C (N'*N = degree d-1 plus degree d), so
    #
    #   2*deg - 1 = 2*d - 1
    #
    # i.e.
    #
    #   deg = d
    #
    # Thus, we can solve this problem with a Gauss-Legendre rule of one order lower than in
    # the case where the matrix M is needed.
    #
#    deg = d+1
#    q,w = np.polynomial.legendre.leggauss( deg )
#    print( deg, (2*deg-1), q, w )

    # matrix, name, figure number to plot, bugcheck
    # (max(abs(bugcheck)) should evaluate to 0 for mat[2:,2:])
    data = ((stuff.K, "K", 2, lambda v: v - np.transpose(v)),
            (stuff.C, "C", 3, lambda v: v + np.transpose(v)),
            (stuff.M, "M", 4, lambda v: v - np.transpose(v)))
    for mat, name, figno, bugcheck in data:
        print(mat)

        plt.figure(figno)
        plt.subplot(1, 2, 1)
        plt.spy(mat)  # spy() doesn't work for a full matrix without any zero entries! (try stuff.M with q=2)
#        plt.imshow(mat, interpolation="nearest", cmap="Oranges")
#        plt.colorbar()
        plt.plot([0, stuff.q], [0, stuff.q], 'r--')  # mark diagonal
        plt.title(r"$\mathbf{%s}$" % name)

        if stuff.q >= 2:
            v = mat[2:, 2:]
            b = np.max(np.abs(bugcheck(v)))
            assert b == 0.0, "bugcheck fail for matrix %s; should be 0, got %g" % (name, b)

        # LU decomposition (sort of)
        #
        plt.subplot(1, 2, 2)
        A = mat.copy()
        A[0, 0] += 1.0  # K and C are rank-deficient by one; simulate effect of boundary conditions (or dG jump term)
        LU, p = dgesv.lup_packed(A)
        plt.spy(LU)
        plt.plot([0, stuff.q], [0, stuff.q], 'r--')
        plt.title(r"$\mathbf{LU}$ (packed format)")

##    L,U,p = dgesv.lup(stuff.M)
##    print( np.transpose(np.nonzero(L)) )
##    print( np.transpose(np.nonzero(U)) )
##    print( p )
##    plt.figure(3)
##    plt.subplot(1,2, 1)
##    plt.spy(L)
##    plt.plot( [0,stuff.q-1], [0,stuff.q-1], 'r--' )
###    plt.imshow(L, interpolation="nearest", cmap="Oranges")
###    plt.colorbar(orientation="horizontal")
##    plt.title(r"$\mathbf{L}$")
##    plt.subplot(1,2, 2)
##    plt.spy(U)
##    plt.plot( [0,stuff.q-1], [0,stuff.q-1], 'r--' )
###    plt.imshow(U, interpolation="nearest", cmap="Oranges")
###    plt.colorbar(orientation="horizontal")
##    plt.title(r"$\mathbf{U}$")

##    LU,p = dgesv.lup_packed(stuff.M)
##    plt.figure(4)
##    plt.spy(LU)
##    plt.plot( [0,stuff.q-1], [0,stuff.q-1], 'r--' )
##    plt.title(r"$\mathbf{LU}$ (packed format)")
##    mincols,maxcols = dgesv.find_bands(LU, 1e-15)
##    print( mincols, maxcols )

##    # old Python-based mincols, maxcols finding code
##
##    # Find the smallest column index with nonzero data on each row in L.
##    #
##    # We can use this to "sparsify" the backsolve even though the data structure is dense.
##    #
##    # This assumes that each row has at least one nonzero entry (which is always the case
##    # for an invertible matrix).
##    #
##    Lnz = np.nonzero(L)
##    mincols = []
##    rowprev = -1
##    n = len(Lnz[0])
##    i = 0
##    while i < n:
##        if Lnz[0][i] != rowprev:
##            mincols.append(Lnz[1][i])
##            rowprev = Lnz[0][i]
##        i += 1
##    mincols = np.array( mincols, dtype=np.intc, order="C" )
##    print( L )
##    print( mincols )

##    # Find the largest column index with nonzero data on each row in U.
##    #
##    # We can use this to "sparsify" the backsolve even though the data structure is dense.
##    #
##    # This assumes that each row has at least one nonzero entry (which is always the case
##    # for an invertible matrix).
##    #
##    Unz = np.nonzero(U)
##    maxcols = []
##    rowprev = -1
##    n = len(Unz[0])
##    i = n - 1
##    while i >= 0:
##        if Unz[0][i] != rowprev:
##            maxcols.append(Unz[1][i])
##            rowprev = Unz[0][i]
##        i -= 1
##    maxcols.reverse()
##    maxcols = np.array( maxcols, dtype=np.intc, order="C" )
##    print( U )
##    print( maxcols )

    # Visualize
    #
    xx = np.linspace(-1., 1., 101)
    plt.figure(1)
    plt.clf()
    for func in stuff.N:
        plt.plot(xx, func(xx))
    plt.axis('tight')
    a = plt.axis()
    plt.axis([a[0], a[1], a[2] * 1.05, a[3] * 1.05])
    plt.grid(b=True, which='both')
    plt.title('Hierarchical basis functions')
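# A hypothetical standalone check of the quadrature-order argument in the comments of main():
# a Gauss-Legendre rule with deg = d+1 points integrates the degree-2*d integrand N*N exactly,
# and deg = d points suffice for the degree-(2*d-1) integrand N'*N. The helper name
# _check_quadrature_order, the choice d = 5, and the use of a Legendre basis polynomial as a
# stand-in for a basis function are illustrative only.
def _check_quadrature_order(d=5):
    import numpy as np
    from numpy.polynomial.legendre import Legendre, leggauss
    P = Legendre.basis(d)                      # stand-in for a degree-d basis function
    cases = ((d + 1, P * P),                   # M-type integrand, degree 2*d
             (d,     P.deriv(1) * P))          # C-type integrand, degree 2*d - 1
    for deg, f in cases:
        q, w = leggauss(deg)
        approx = np.sum(w * f(q))              # Gauss-Legendre quadrature on [-1, 1]
        exact = f.integ(lbnd=-1)(1.0)          # exact integral over [-1, 1]
        assert np.allclose(approx, exact)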
def method2(reps, A, b, x):
    LU, p = dgesv.lup_packed(A)
    for j in range(reps):
#        dgesv.solve_decomposed( LU, p, b[j,:], x )
        dgesv.solve_decomposed(LU, p, b, x)
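# A hypothetical micro-benchmark contrasting the dense backsolve (method2) with the banded
# backsolve (method3, defined above), on a matrix whose LU factors really are banded.
# The helper name _bench and the tridiagonal test matrix are illustrative only.
def _bench(n=50, reps=10000):
    import time
    import numpy as np
    A = 4.0 * np.eye(n) - np.eye(n, k=1) - np.eye(n, k=-1)   # diagonally dominant tridiagonal
    b = np.random.random(n)
    x = np.empty_like(b)
    for label, method in (("dense backsolve ", method2),
                          ("banded backsolve", method3)):
        t0 = time.perf_counter()
        method(reps, A, b, x)
        print("%s: %g s for %d backsolves" % (label, time.perf_counter() - t0, reps))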
def main():
    # Up to q=24, the full script works despite warnings from quad() in dgmass().
    #
    # For evaluating the hierarchical basis functions only (no dgmass()):
    #
    #   q = 30, still sort of works, small deviations (1e-7) can be seen in the endpoint
    #           values of the few highest-order Nj
    #   q = 40, almost works, high-order Nj start getting wobbly
    #   q = 50, completely broken, out of precision
    #
    # By comparison, legtest3.py, which uses SymPy's mpmath (arbitrary precision floating
    # point), works at least up to q=300, but is very slow.
    #
    stuff = RandomPileOfTestStuff(q=24, tol=1e-3)

    # From the API docs for numpy.polynomial.legendre.leggauss:
    #     Computes the sample points and weights for Gauss-Legendre quadrature.
    #     These sample points and weights will correctly integrate polynomials of degree
    #     2*deg - 1 or less over the interval [-1, 1] with the weight function f(x) = 1.
    #
    # Hence, in Galerkin methods, to exactly handle a mass matrix where neither of the terms
    # is differentiated, using affine mapping to the reference element [-1,1] (implying
    # piecewise constant Jacobian), we need to have
    #
    #   2*deg - 1 = 2*d
    #
    # i.e.
    #
    #   deg = (2*d + 1) / 2
    #
#    deg = int(np.ceil( (2*d + 1)/2. ))
#    q,w = np.polynomial.legendre.leggauss( deg )
#    print( deg, (2*deg-1), q, w )

    print(stuff.C)
    print(np.linalg.matrix_rank(stuff.C))  # should be full rank

    plt.figure(2)
    plt.spy(stuff.C)
    plt.plot([0, stuff.q - 1], [0, stuff.q - 1], 'r--')
#    plt.imshow(M, interpolation="nearest", cmap="Oranges")
#    plt.colorbar()
    plt.title(r"$\mathbf{C}$")

##    L,U,p = dgesv.lup(stuff.C)
##    print( np.transpose(np.nonzero(L)) )
##    print( np.transpose(np.nonzero(U)) )
##    print( p )
##    plt.figure(3)
##    plt.subplot(1,2, 1)
##    plt.spy(L)
##    plt.plot( [0,stuff.q-1], [0,stuff.q-1], 'r--' )
###    plt.imshow(L, interpolation="nearest", cmap="Oranges")
###    plt.colorbar(orientation="horizontal")
##    plt.title(r"$\mathbf{L}$")
##    plt.subplot(1,2, 2)
##    plt.spy(U)
##    plt.plot( [0,stuff.q-1], [0,stuff.q-1], 'r--' )
###    plt.imshow(U, interpolation="nearest", cmap="Oranges")
###    plt.colorbar(orientation="horizontal")
##    plt.title(r"$\mathbf{U}$")

    LU, p = dgesv.lup_packed(stuff.C)
    plt.figure(4)
    plt.spy(LU)
    plt.plot([0, stuff.q - 1], [0, stuff.q - 1], 'r--')
    plt.title(r"$\mathbf{LU}$ (packed format)")

    mincols, maxcols = dgesv.find_bands(LU, 1e-15)
    print(mincols, maxcols)

##    # old Python-based mincols, maxcols finding code
##
##    # Find the smallest column index with nonzero data on each row in L.
##    #
##    # We can use this to "sparsify" the backsolve even though the data structure is dense.
##    #
##    # This assumes that each row has at least one nonzero entry (which is always the case
##    # for an invertible matrix).
##    #
##    Lnz = np.nonzero(L)
##    mincols = []
##    rowprev = -1
##    n = len(Lnz[0])
##    i = 0
##    while i < n:
##        if Lnz[0][i] != rowprev:
##            mincols.append(Lnz[1][i])
##            rowprev = Lnz[0][i]
##        i += 1
##    mincols = np.array( mincols, dtype=np.intc, order="C" )
##    print( L )
##    print( mincols )

##    # Find the largest column index with nonzero data on each row in U.
##    #
##    # We can use this to "sparsify" the backsolve even though the data structure is dense.
##    #
##    # This assumes that each row has at least one nonzero entry (which is always the case
##    # for an invertible matrix).
##    #
##    Unz = np.nonzero(U)
##    maxcols = []
##    rowprev = -1
##    n = len(Unz[0])
##    i = n - 1
##    while i >= 0:
##        if Unz[0][i] != rowprev:
##            maxcols.append(Unz[1][i])
##            rowprev = Unz[0][i]
##        i -= 1
##    maxcols.reverse()
##    maxcols = np.array( maxcols, dtype=np.intc, order="C" )
##    print( U )
##    print( maxcols )

    # Visualize
    #
    xx = np.linspace(-1., 1., 100001)  # the good thing about the fast approach... smooth curves!
    plt.figure(1)
    plt.clf()
    for func in stuff.N:
        plt.plot(xx, func(xx))
    plt.axis('tight')
    a = plt.axis()
    plt.axis([a[0], a[1], a[2] * 1.05, a[3] * 1.05])
    plt.grid(b=True, which='both')
    plt.title('Hierarchical basis functions')

    # Try some operations on the original Legendre polynomials
    #
    # As long as we keep the Polynomial objects, we can multiply them the intuitive way,
    # producing a new Polynomial:
    #
    print(stuff.P[2] * stuff.P[3])  # => poly([ 0.    0.75  0.   -3.5   0.    3.75])

    # We can also differentiate them, which is useful for constructing the mass matrix:
    #
    print(stuff.P[2].deriv(1) * stuff.P[3])  # => poly([ 0.   0.  -4.5  0.   7.5])

    # Also integration is supported.
    #
    # p.integ() returns the definite integral, as a Polynomial object, from lbnd to an
    # unspecified upper limit x, adding the integration constant k. The value of x is
    # chosen when calling the resulting object.
    #
    # Legendre polynomials are L2-orthogonal on [-1,1]:
    print(((stuff.P[2] * stuff.P[2]).integ(lbnd=-1, k=0))(1.0))  # 2/(2 n + 1); here n = 2, so this = 2/5 = 0.4
    print(((stuff.P[2] * stuff.P[3]).integ(lbnd=-1, k=0))(1.0))  # zero

    # The integral of dPn/dx * Pm over the interval is zero if:
    #
    #   - n + m is even
    #   - n < m  (and by the previous condition, also n <= m)
    #
    # These observations are based on the L2-orthogonality and the relation
    #
    #   (2 n + 1) P_n = (d/dx)( P_{n+1} - P_{n-1} )        (*)
    #
    # which can be used to get rid of the derivative. The relation (*) follows from
    # Bonnet's recursion formula,
    #
    #   (n + 1) P_{n+1} = (2 n + 1) x P_n - n P_{n-1}
    #
    # By recursive application, (*) leads to the representation
    #
    #   (d/dx) P_{n+1} = (2 n + 1) P_n + ( 2 (n - 2) + 1 ) P_{n-2} + ( 2 (n - 4) + 1 ) P_{n-4} + ...
    #
    # which is guaranteed to bottom out at P_1 and P_0 (by using P_0 = 1 and P_1 = x in (*)).
    #
    # See
    #   https://en.wikipedia.org/wiki/Legendre_polynomials#Additional_properties_of_Legendre_polynomials
    #
    print(((stuff.P[3].deriv(1) * stuff.P[3]).integ(lbnd=-1, k=0))(1.0))  # zero, n + m even
    print(((stuff.P[3].deriv(1) * stuff.P[1]).integ(lbnd=-1, k=0))(1.0))  # zero, n + m even
    print(((stuff.P[2].deriv(1) * stuff.P[3]).integ(lbnd=-1, k=0))(1.0))  # zero, n < m
    print(((stuff.P[3].deriv(1) * stuff.P[2]).integ(lbnd=-1, k=0))(1.0))  # nonzero (derivative of p3 contains p2, p0)
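# A hypothetical standalone check of the derivative representation quoted above,
#
#   (d/dx) P_{n+1} = (2 n + 1) P_n + ( 2 (n - 2) + 1 ) P_{n-2} + ( 2 (n - 4) + 1 ) P_{n-4} + ...
#
# The helper name _check_legendre_derivative and the choice n = 6 are illustrative only.
def _check_legendre_derivative(n=6):
    import numpy as np
    from numpy.polynomial.legendre import Legendre
    lhs = Legendre.basis(n + 1).deriv(1)
    rhs = Legendre([0.0])
    for k in range(n, -1, -2):                 # n, n-2, ..., down to 1 or 0
        rhs = rhs + (2 * k + 1) * Legendre.basis(k)
    xx = np.linspace(-1.0, 1.0, 11)
    assert np.allclose(lhs(xx), rhs(xx))       # both sides agree pointwise on [-1, 1]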