def test_refine():
    """Refine a spectral Function to double resolution, for both a scalar
    and a vector tensor product space.

    Also checks that a padded (dealiased) space keeps the same communicator
    decomposition as the unpadded space it was derived from.
    """
    assert comm.Get_size() < 7
    shape = (8, 9, 10)
    f0 = Basis(8, 'F', dtype='D')
    f1 = Basis(9, 'F', dtype='D')
    f2 = Basis(10, 'F', dtype='d')
    T = TensorProductSpace(comm, (f0, f1, f2), slab=True, collapse_fourier=True)

    # Scalar space: transform random data, then refine to twice the size
    u_hat = Function(T)
    u = Array(T)
    u[:] = np.random.random(u.shape)
    u_hat = u.forward(u_hat)
    Tp = T.get_dealiased(padding_factor=(2, 2, 2))
    u_ = Array(Tp)
    up_hat = Function(Tp)
    assert up_hat.commsizes == u_hat.commsizes
    u2 = u_hat.refine(2*np.array(shape))

    # Vector space: same procedure
    V = VectorTensorProductSpace(T)
    u_hat = Function(V)
    u = Array(V)
    u[:] = np.random.random(u.shape)
    u_hat = u.forward(u_hat)
    Vp = V.get_dealiased(padding_factor=(2, 2, 2))
    u_ = Array(Vp)
    up_hat = Function(Vp)
    assert up_hat.commsizes == u_hat.commsizes
    u3 = u_hat.refine(2*np.array(shape))
def test_assign(fam):
    """Check that Function.assign preserves the integral ``inner(1, u)``
    when moving coefficients to a space of different resolution, in 1D,
    2D and for a vector-valued 2D space."""
    x, y = symbols("x,y")
    for bc_in in (None, 'Dirichlet', 'Biharmonic'):
        dtype = 'D' if fam == 'F' else 'd'
        # Fourier family is always periodic, regardless of requested bc
        bc = 'periodic' if fam == 'F' else bc_in
        if bc == 'Biharmonic' and fam in ('La', 'H'):
            continue
        tol = 1e-12 if fam in ('C', 'L', 'F') else 1e-5
        N = (10, 12)
        B0 = Basis(N[0], fam, dtype=dtype, bc=bc)
        B1 = Basis(N[1], fam, dtype=dtype, bc=bc)

        # 1D: assign coefficients from the N=10 basis into the N=12 basis
        u_hat = Function(B0)
        u_hat[1:4] = 1
        ub_hat = Function(B1)
        u_hat.assign(ub_hat)
        assert abs(inner(1, u_hat) - inner(1, ub_hat)) < tol

        # 2D: assign into a refined tensor product space
        T = TensorProductSpace(comm, (B0, B1))
        u_hat = Function(T)
        u_hat[1:4, 1:4] = 1
        Tp = T.get_refined((2*N[0], 2*N[1]))
        ub_hat = Function(Tp)
        u_hat.assign(ub_hat)
        assert abs(inner(1, u_hat) - inner(1, ub_hat)) < tol

        # Vector-valued 2D space
        VT = VectorTensorProductSpace(T)
        u_hat = Function(VT)
        u_hat[:, 1:4, 1:4] = 1
        Tp = T.get_refined((2*N[0], 2*N[1]))
        VTp = VectorTensorProductSpace(Tp)
        ub_hat = Function(VTp)
        u_hat.assign(ub_hat)
        assert abs(inner((1, 1), u_hat) - inner((1, 1), ub_hat)) < tol
def test_curl2():
    # Test projection of curl
    # Chebyshev basis with homogeneous Dirichlet bc in x, Fourier in y and z
    K0 = Basis(N[0], 'C', bc=(0, 0))
    K1 = Basis(N[1], 'F', dtype='D')
    K2 = Basis(N[2], 'F', dtype='d')
    K3 = Basis(N[0], 'C')  # orthogonal Chebyshev basis (no boundary conditions)
    T = TensorProductSpace(comm, (K0, K1, K2))
    TT = TensorProductSpace(comm, (K3, K1, K2))
    X = T.local_mesh(True)
    K = T.local_wavenumbers(False)
    Tk = VectorTensorProductSpace(T)
    # Mixed space for the curl; the last component lives in the no-bc space TT
    TTk = MixedTensorProductSpace([T, T, TT])
    U = Array(Tk)
    U_hat = Function(Tk)
    curl_hat = Function(TTk)
    curl_ = Array(TTk)
    # Initialize a Taylor Green vortex
    U[0] = np.sin(X[0]) * np.cos(X[1]) * np.cos(X[2]) * (1 - X[0]**2)
    U[1] = -np.cos(X[0]) * np.sin(X[1]) * np.cos(X[2]) * (1 - X[0]**2)
    U[2] = 0
    U_hat = Tk.forward(U, U_hat)
    Uc = U_hat.copy()
    # Round-trip transform should be lossless
    U = Tk.backward(U_hat, U)
    U_hat = Tk.forward(U, U_hat)
    assert allclose(U_hat, Uc)
    # Compute curl first by computing each term individually
    curl_hat[0] = 1j * (K[1] * U_hat[2] - K[2] * U_hat[1])
    curl_[0] = T.backward(curl_hat[0], curl_[0])  # No x-derivatives, still in Dirichlet space
    dwdx_hat = project(Dx(U_hat[2], 0, 1), TT)  # Need to use space without bc
    dvdx_hat = project(Dx(U_hat[1], 0, 1), TT)  # Need to use space without bc
    dwdx = Array(TT)
    dvdx = Array(TT)
    dwdx = TT.backward(dwdx_hat, dwdx)
    dvdx = TT.backward(dvdx_hat, dvdx)
    # Fourier-direction contributions to the remaining two components
    curl_hat[1] = 1j * K[2] * U_hat[0]
    curl_hat[2] = -1j * K[1] * U_hat[0]
    curl_[1] = T.backward(curl_hat[1], curl_[1])
    curl_[2] = T.backward(curl_hat[2], curl_[2])
    # Add/subtract the x-derivative parts computed in the no-bc space
    curl_[1] -= dwdx
    curl_[2] += dvdx
    # Now do it with project
    w_hat = project(curl(U_hat), TTk)
    w = Array(TTk)
    w = TTk.backward(w_hat, w)
    assert allclose(w, curl_)
def get_context():
    """Set up context for classical (NS) solver"""
    # Triply periodic Fourier spaces; last direction real-to-complex
    V0 = C2CBasis(params.N[0], domain=(0, params.L[0]))
    V1 = C2CBasis(params.N[1], domain=(0, params.L[1]))
    V2 = R2CBasis(params.N[2], domain=(0, params.L[2]))
    T = TensorProductSpace(comm, (V0, V1, V2), **{'threads': params.threads})
    VT = VectorTensorProductSpace([T]*3)
    # Padded/dealiased spaces for evaluating the nonlinear term
    kw = {'padding_factor': 1.5 if params.dealias == '3/2-rule' else 1,
          'dealias_direct': params.dealias == '2/3-rule'}
    V0p = C2CBasis(params.N[0], domain=(0, params.L[0]), **kw)
    V1p = C2CBasis(params.N[1], domain=(0, params.L[1]), **kw)
    V2p = R2CBasis(params.N[2], domain=(0, params.L[2]), **kw)
    Tp = TensorProductSpace(comm, (V0p, V1p, V2p), **{'threads': params.threads})
    VTp = VectorTensorProductSpace([Tp]*3)
    float, complex, mpitype = datatypes(params.precision)
    FFT = T  # For compatibility - to be removed
    # Mesh variables
    X = T.local_mesh(True)
    K = T.local_wavenumbers(scaled=True)
    K2 = K[0]*K[0] + K[1]*K[1] + K[2]*K[2]
    # Set Nyquist frequency to zero on K that is, from now on, used for odd derivatives
    Kx = T.local_wavenumbers(scaled=True, eliminate_highest_freq=True)
    K_over_K2 = np.zeros((3,)+VT.local_shape())
    for i in range(3):
        # Guard against division by zero for the zero wavenumber
        K_over_K2[i] = K[i] / np.where(K2==0, 1, K2)
    # Velocity and pressure
    U = Array(VT, False)
    U_hat = Array(VT)
    P = Array(T, False)
    P_hat = Array(T)
    # Primary variable
    u = U_hat
    # RHS array
    dU = Array(VT)
    curl = Array(VT, False)
    Source = Array(VT)  # Possible source term initialized to zero
    work = work_arrays()
    hdf5file = NSWriter({"U":U[0], "V":U[1], "W":U[2], "P":P},
                        chkpoint={"current":{"U":U, "P":P}, "previous":{}},
                        filename=params.h5filename+".h5")
    # Return every local name as the solver context
    return config.AttributeDict(locals())
def test_curl_cc():
    """Verify that curl(grad(u)) vanishes in curvilinear coordinates.

    Two manufactured cases: a spherical-harmonic scalar on the unit
    sphere, and a Dirichlet/Fourier field in cylinder coordinates.
    """
    # Spherical coordinates (theta, phi) on the unit sphere
    theta, phi = sp.symbols('x,y', real=True, positive=True)
    psi = (theta, phi)
    r = 1
    rv = (r * sp.sin(theta) * sp.cos(phi),
          r * sp.sin(theta) * sp.sin(phi),
          r * sp.cos(theta))
    # Manufactured solution
    sph = sp.functions.special.spherical_harmonics.Ynm
    ue = sph(6, 3, theta, phi)
    N, M = 16, 12
    L0 = FunctionSpace(N, 'C', domain=(0, np.pi))
    F1 = FunctionSpace(M, 'F', dtype='D')
    T = TensorProductSpace(comm, (L0, F1), coordinates=(psi, rv))
    u_hat = Function(T, buffer=ue)
    du = curl(grad(u_hat))
    # curl(grad(.)) is identically zero, so the expression should reduce to
    # no terms. The original compared without asserting, which was a no-op.
    assert du.terms() == [[]]

    # Cylinder coordinates (r, theta, z)
    r, theta, z = psi = sp.symbols('x,y,z', real=True, positive=True)
    rv = (r * sp.cos(theta), r * sp.sin(theta), z)
    # Manufactured solution
    ue = (r * (1 - r) * sp.cos(4 * theta) - 1 * (r - 1)) * sp.cos(4 * z)
    N = 12
    F0 = FunctionSpace(N, 'F', dtype='D')
    F1 = FunctionSpace(N, 'F', dtype='d')
    L = FunctionSpace(N, 'L', bc='Dirichlet', domain=(0, 1))
    T = TensorProductSpace(comm, (L, F0, F1), coordinates=(psi, rv))
    T1 = T.get_orthogonal()
    V = VectorTensorProductSpace(T1)
    u_hat = Function(T, buffer=ue)
    # Projected curl(grad(u)) should be numerically zero
    du = project(curl(grad(u_hat)), V)
    assert np.linalg.norm(du) < 1e-10
def test_mixed_3D(backend, forward_output, as_scalar):
    # netcdf4 cannot store spectral (forward) output; also skip missing backends
    if (backend == 'netcdf4' and forward_output is True) or skip[backend]:
        return
    K0 = FunctionSpace(N[0], 'F', dtype='D', domain=(0, np.pi))
    K1 = FunctionSpace(N[1], 'F', dtype='d', domain=(0, 2 * np.pi))
    K2 = FunctionSpace(N[2], 'C')
    T = TensorProductSpace(comm, (K0, K1, K2))
    TT = VectorTensorProductSpace(T)
    filename = 'test3Dm_{}'.format(ex[forward_output])
    hfile = writer(filename, TT, backend=backend)
    # Spectral (Function) or physical (Array) data depending on parametrization;
    # first component set to 1 so components are distinguishable on read-back
    uf = Function(TT, val=2) if forward_output else Array(TT, val=2)
    uf[0] = 1
    # Full fields plus 2D and 1D slices of the components
    data = {
        'ux': (uf[0],
               (uf[0], [slice(None), 4, slice(None)]),
               (uf[0], [slice(None), 4, 4])),
        'uy': (uf[1],
               (uf[1], [slice(None), 4, slice(None)]),
               (uf[1], [slice(None), 4, 4])),
        'u': [uf, (uf, [slice(None), 4, slice(None)])]
    }
    # Write two time steps
    hfile.write(0, data, as_scalar=as_scalar)
    hfile.write(1, data, as_scalar=as_scalar)
    if not forward_output and backend == 'hdf5' and comm.Get_rank() == 0:
        generate_xdmf(filename + '.h5')
    if as_scalar is False:
        # Read back the full vector field and compare
        u0 = Function(TT) if forward_output else Array(TT)
        read = reader(filename, TT, backend=backend)
        read.read(u0, 'u', step=1)
        assert np.allclose(u0, uf)
    else:
        # Written as scalars; read back the first component ('u0') only
        u0 = Function(T) if forward_output else Array(T)
        read = reader(filename, T, backend=backend)
        read.read(u0, 'u0', step=1)
        assert np.allclose(u0, uf[0])
def test_curl(typecode):
    # Fourier bases in all three directions; last axis real if typecode is real
    K0 = Basis(N[0], 'F', dtype=typecode.upper())
    K1 = Basis(N[1], 'F', dtype=typecode.upper())
    K2 = Basis(N[2], 'F', dtype=typecode)
    T = TensorProductSpace(comm, (K0, K1, K2), dtype=typecode)
    X = T.local_mesh(True)
    K = T.local_wavenumbers()
    Tk = VectorTensorProductSpace(T)
    u = TrialFunction(Tk)
    v = TestFunction(Tk)
    U = Array(Tk)
    U_hat = Function(Tk)
    curl_hat = Function(Tk)
    curl_ = Array(Tk)
    # Initialize a Taylor Green vortex
    U[0] = np.sin(X[0]) * np.cos(X[1]) * np.cos(X[2])
    U[1] = -np.cos(X[0]) * np.sin(X[1]) * np.cos(X[2])
    U[2] = 0
    U_hat = Tk.forward(U, U_hat)
    Uc = U_hat.copy()
    # Round-trip transform should be lossless
    U = Tk.backward(U_hat, U)
    U_hat = Tk.forward(U, U_hat)
    assert allclose(U_hat, Uc)
    # The Taylor-Green vortex is divergence free
    divu_hat = project(div(U_hat), T)
    divu = Array(T)
    divu = T.backward(divu_hat, divu)
    assert allclose(divu, 0)
    # Reference curl computed term by term in spectral space
    curl_hat[0] = 1j * (K[1] * U_hat[2] - K[2] * U_hat[1])
    curl_hat[1] = 1j * (K[2] * U_hat[0] - K[0] * U_hat[2])
    curl_hat[2] = 1j * (K[0] * U_hat[1] - K[1] * U_hat[0])
    curl_ = Tk.backward(curl_hat, curl_)
    # Same curl through the weak form, solving the mass matrix per component
    w_hat = Function(Tk)
    w_hat = inner(v, curl(U_hat), output_array=w_hat)
    A = inner(v, u)  # mass matrices, one per vector component
    for i in range(3):
        w_hat[i] = A[i].solve(w_hat[i])
    w = Array(Tk)
    w = Tk.backward(w_hat, w)
    assert allclose(w, curl_)
    # Projecting the physical-space velocity should recover U_hat
    u_hat = Function(Tk)
    u_hat = inner(v, U, output_array=u_hat)
    for i in range(3):
        u_hat[i] = A[i].solve(u_hat[i])
    uu = Array(Tk)
    uu = Tk.backward(u_hat, uu)
    assert allclose(u_hat, U_hat)
def test_cylinder():
    """Check the LaTeX rendering of div(grad(u)) in cylinder coordinates,
    for a scalar and for a vector trial function."""
    T = get_function_space('cylinder')
    u = TrialFunction(T)
    du = div(grad(u))
    # Scalar Laplacian
    assert du.tolatex() == '\\frac{\\partial^2 u}{\\partial x^2 }+\\frac{1}{x}\\frac{\\partial u}{\\partial x }+\\frac{1}{x^{2}}\\frac{\\partial^2 u}{\\partial y^2 }+\\frac{\\partial^2 u}{\\partial z^2 }'
    V = VectorTensorProductSpace(T)
    u = TrialFunction(V)
    du = div(grad(u))
    # Vector Laplacian, one covariant basis vector per component
    assert du.tolatex() == '\\left( \\frac{\\partial^2 u^{x}}{\\partial x^2 }+\\frac{1}{x}\\frac{\\partial u^{x}}{\\partial x }+\\frac{1}{x^{2}}\\frac{\\partial^2 u^{x}}{\\partial y^2 }- \\frac{2}{x}\\frac{\\partial u^{y}}{\\partial y }- \\frac{1}{x^{2}}u^{x}+\\frac{\\partial^2 u^{x}}{\\partial z^2 }\\right) \\mathbf{b}_{x} \\\\+\\left( \\frac{\\partial^2 u^{y}}{\\partial x^2 }+\\frac{3}{x}\\frac{\\partial u^{y}}{\\partial x }+\\frac{2}{x^{3}}\\frac{\\partial u^{x}}{\\partial y }+\\frac{1}{x^{2}}\\frac{\\partial^2 u^{y}}{\\partial y^2 }+\\frac{\\partial^2 u^{y}}{\\partial z^2 }\\right) \\mathbf{b}_{y} \\\\+\\left( \\frac{\\partial^2 u^{z}}{\\partial x^2 }+\\frac{1}{x}\\frac{\\partial u^{z}}{\\partial x }+\\frac{1}{x^{2}}\\frac{\\partial^2 u^{z}}{\\partial y^2 }+\\frac{\\partial^2 u^{z}}{\\partial z^2 }\\right) \\mathbf{b}_{z} \\\\'
def test_vector_laplace(space):
    """Verify the vector identity div(grad(u)) == grad(div(u)) - curl(curl(u))
    by applying both operator matrices to a random coefficient vector."""
    T = get_function_space(space)
    V = VectorTensorProductSpace(T)
    u = TrialFunction(V)
    v = _TestFunction(V)
    laplace_form = div(grad(u))
    identity_form = grad(div(u)) - curl(curl(u))
    # Random complex coefficients
    u_hat = Function(V)
    u_hat[:] = np.random.random(u_hat.shape) + np.random.random(u_hat.shape) * 1j
    # Assemble both sides as block matrices and apply to u_hat
    lhs = BlockMatrix(inner(v, laplace_form))
    rhs = BlockMatrix(inner(v, identity_form))
    b0, b1 = Function(V), Function(V)
    b0 = lhs.matvec(u_hat, b0)
    b1 = rhs.matvec(u_hat, b1)
    assert np.linalg.norm(b0 - b1) < 1e-8
def get_context():
    """Set up context for solver.

    Builds Chebyshev (wall-normal) x Fourier x Fourier tensor product
    spaces, the mixed velocity-pressure space, work arrays and the block
    matrices for the three Runge-Kutta stages, and returns every local
    name wrapped in an AttributeDict.
    """
    collapse_fourier = False if params.dealias == '3/2-rule' else True
    family = 'C'
    # Wall-normal bases: Dirichlet (velocity), orthogonal, and pressure
    ST = Basis(params.N[0], family, bc=(0, 0), quad=params.Dquad)
    CT = Basis(params.N[0], family, quad=params.Dquad)
    CP = Basis(params.N[0], family, quad=params.Dquad)
    K0 = Basis(params.N[1], 'F', domain=(0, params.L[1]), dtype='D')
    K1 = Basis(params.N[2], 'F', domain=(0, params.L[2]), dtype='d')
    #CP.slice = lambda: slice(0, CP.N-2)
    constraints = ((3, 0, 0), (3, params.N[0]-1, 0))
    kw0 = {'threads': params.threads,
           'planner_effort': params.planner_effort["dct"],
           'slab': (params.decomposition == 'slab'),
           'collapse_fourier': collapse_fourier}
    FST = TensorProductSpace(comm, (ST, K0, K1), **kw0)  # Dirichlet
    FCT = TensorProductSpace(comm, (CT, K0, K1), **kw0)  # Regular Chebyshev N
    FCP = TensorProductSpace(comm, (CP, K0, K1), **kw0)  # Regular Chebyshev N-2
    VFS = VectorTensorProductSpace(FST)
    VCT = VectorTensorProductSpace(FCT)
    VQ = MixedTensorProductSpace([VFS, FCP])
    mask = FST.mask_nyquist() if params.mask_nyquist else None
    # Padded spaces for dealiasing the nonlinear term
    kw = {'padding_factor': 1.5 if params.dealias == '3/2-rule' else 1,
          'dealias_direct': params.dealias == '2/3-rule'}
    if params.dealias == '3/2-rule':
        # Requires new bases due to planning and transforms on different size arrays
        STp = Basis(params.N[0], family, bc=(0, 0), quad=params.Dquad)
        CTp = Basis(params.N[0], family, quad=params.Dquad)
    else:
        STp, CTp = ST, CT
    K0p = Basis(params.N[1], 'F', dtype='D', domain=(0, params.L[1]), **kw)
    K1p = Basis(params.N[2], 'F', dtype='d', domain=(0, params.L[2]), **kw)
    FSTp = TensorProductSpace(comm, (STp, K0p, K1p), **kw0)
    FCTp = TensorProductSpace(comm, (CTp, K0p, K1p), **kw0)
    VFSp = VectorTensorProductSpace(FSTp)
    VCp = MixedTensorProductSpace([FSTp, FCTp, FCTp])
    float, complex, mpitype = datatypes("double")
    # Mesh variables
    X = FST.local_mesh(True)
    x0, x1, x2 = FST.mesh()
    K = FST.local_wavenumbers(scaled=True)
    # Solution variables
    UP_hat = Function(VQ)
    UP_hat0 = Function(VQ)
    U_hat, P_hat = UP_hat
    U_hat0, P_hat0 = UP_hat0
    UP = Array(VQ)
    UP0 = Array(VQ)
    U, P = UP
    U0, P0 = UP0
    # RK parameters
    a = (8./15., 5./12., 3./4.)
    b = (0.0, -17./60., -5./12.)
    # primary variable
    u = UP_hat
    H_hat = Function(VFS)
    dU = Function(VQ)
    # np.complex was removed in NumPy 1.24; the builtin-compatible alias from
    # datatypes("double") gives the same complex dtype
    hv = np.zeros((2,)+H_hat.shape, dtype=complex)
    Source = Array(VFS)  # Note - not using VQ. Only used for constant pressure gradient
    Sk = Function(VFS)
    K2 = K[1]*K[1]+K[2]*K[2]
    # Set Nyquist frequency to zero on K that is used for odd derivatives in nonlinear terms
    Kx = FST.local_wavenumbers(scaled=True, eliminate_highest_freq=True)
    for i in range(3):
        K[i] = K[i].astype(float)
        Kx[i] = Kx[i].astype(float)
    work = work_arrays()
    u_dealias = Array(VFSp)
    curl_hat = Function(VCp)
    curl_dealias = Array(VCp)
    nu, dt, N = params.nu, params.dt, params.N
    up = TrialFunction(VQ)
    vq = TestFunction(VQ)
    ut, pt = up
    vt, qt = vq
    # One coupled block matrix per RK stage
    M = []
    for rk in range(3):
        a0 = inner(vt, (2./nu/dt/(a[rk]+b[rk]))*ut-div(grad(ut)))
        a1 = inner(vt, (2./nu/(a[rk]+b[rk]))*grad(pt))
        a2 = inner(qt, (2./nu/(a[rk]+b[rk]))*div(ut))
        M.append(BlockMatrix(a0+a1+a2))
    # Collect all matrices
    if ST.family() == 'chebyshev':
        mat = config.AttributeDict(
            dict(AB=[HelmholtzCoeff(N[0], 1., -(K2 - 2./nu/dt/(a[rk]+b[rk])), 0, ST.quad)
                     for rk in range(3)],))
    else:
        mat = config.AttributeDict(
            dict(ADD=inner_product((ST, 0), (ST, 2)),
                 BDD=inner_product((ST, 0), (ST, 0))))
    la = None
    hdf5file = CoupledRK3File(config.params.solver,
                              checkpoint={'space': VQ,
                                          'data': {'0': {'UP': [UP_hat]}}},
                              results={'space': VFS,
                                       'data': {'U': [U]}})
    # Drop the loop index so it is not captured in the returned context
    del rk
    return config.AttributeDict(locals())
def get_context():
    """Set up context for classical (NS) solver"""
    float, complex, mpitype = datatypes(params.precision)
    collapse_fourier = False if params.dealias == '3/2-rule' else True
    dim = len(params.N)
    # Last direction is real-to-complex, the others complex-to-complex
    dtype = lambda d: float if d == dim - 1 else complex
    V = [Basis(params.N[i], 'F', domain=(0, params.L[i]), dtype=dtype(i))
         for i in range(dim)]
    kw0 = {'threads': params.threads,
           'planner_effort': params.planner_effort['fft']}
    T = TensorProductSpace(comm, V, dtype=float,
                           slab=(params.decomposition == 'slab'),
                           collapse_fourier=collapse_fourier, **kw0)
    VT = VectorTensorProductSpace(T)
    # Different bases for nonlinear term, either 2/3-rule or 3/2-rule
    kw = {'padding_factor': 1.5 if params.dealias == '3/2-rule' else 1,
          'dealias_direct': params.dealias == '2/3-rule'}
    Vp = [Basis(params.N[i], 'F', domain=(0, params.L[i]), dtype=dtype(i), **kw)
          for i in range(dim)]
    Tp = TensorProductSpace(comm, Vp, dtype=float,
                            slab=(params.decomposition == 'slab'),
                            collapse_fourier=collapse_fourier, **kw0)
    VTp = VectorTensorProductSpace(Tp)
    # Mesh variables
    X = T.local_mesh(True)
    K = T.local_wavenumbers(scaled=True)
    for i in range(dim):
        X[i] = X[i].astype(float)
        K[i] = K[i].astype(float)
    K2 = np.zeros(T.shape(True), dtype=float)
    for i in range(dim):
        K2 += K[i] * K[i]
    # Set Nyquist frequency to zero on K that is, from now on, used for odd derivatives
    Kx = T.local_wavenumbers(scaled=True, eliminate_highest_freq=True)
    for i in range(dim):
        Kx[i] = Kx[i].astype(float)
    K_over_K2 = np.zeros(VT.shape(True), dtype=float)
    for i in range(dim):
        # Guard against division by zero for the zero wavenumber
        K_over_K2[i] = K[i] / np.where(K2 == 0, 1, K2)
    # Velocity and pressure. Use ndarray view for efficiency
    U = Array(VT)
    U_hat = Function(VT)
    P = Array(T)
    P_hat = Function(T)
    u_dealias = Array(VTp)
    # Primary variable
    u = U_hat
    # RHS array
    dU = Function(VT)
    curl = Array(VT)
    Source = Function(VT)  # Possible source term initialized to zero
    work = work_arrays()
    hdf5file = NSFile(config.params.solver,
                      checkpoint={'space': VT,
                                  'data': {'0': {'U': [U_hat]}}},
                      results={'space': VT,
                               'data': {'U': [U], 'P': [P]}})
    # Return every local name as the solver context
    return config.AttributeDict(locals())
def get_context():
    """Set up context for solver"""
    # Get points and weights for Chebyshev weighted integrals
    ST = ShenDirichletBasis(params.N[0], quad=params.Dquad)
    SB = ShenBiharmonicBasis(params.N[0], quad=params.Bquad)
    CT = Basis(params.N[0], quad=params.Dquad)
    ST0 = ShenDirichletBasis(params.N[0], quad=params.Dquad, plan=True)  # For 1D problem
    K0 = C2CBasis(params.N[1], domain=(0, params.L[1]))
    K1 = R2CBasis(params.N[2], domain=(0, params.L[2]))
    #CT = ST.CT  # Chebyshev transform
    FST = TensorProductSpace(comm, (ST, K0, K1), **{
        'threads': params.threads,
        'planner_effort': params.planner_effort["dct"]
    })  # Dirichlet
    FSB = TensorProductSpace(comm, (SB, K0, K1), **{
        'threads': params.threads,
        'planner_effort': params.planner_effort["dct"]
    })  # Biharmonic
    FCT = TensorProductSpace(comm, (CT, K0, K1), **{
        'threads': params.threads,
        'planner_effort': params.planner_effort["dct"]
    })  # Regular Chebyshev
    VFS = VectorTensorProductSpace([FSB, FST, FST])
    # Padded
    STp = ShenDirichletBasis(params.N[0], quad=params.Dquad)
    SBp = ShenBiharmonicBasis(params.N[0], quad=params.Bquad)
    CTp = Basis(params.N[0], quad=params.Dquad)
    K0p = C2CBasis(params.N[1], padding_factor=1.5, domain=(0, params.L[1]))
    K1p = R2CBasis(params.N[2], padding_factor=1.5, domain=(0, params.L[2]))
    FSTp = TensorProductSpace(
        comm, (STp, K0p, K1p), **{
            'threads': params.threads,
            'planner_effort': params.planner_effort["dct"]
        })
    FSBp = TensorProductSpace(
        comm, (SBp, K0p, K1p), **{
            'threads': params.threads,
            'planner_effort': params.planner_effort["dct"]
        })
    FCTp = TensorProductSpace(
        comm, (CTp, K0p, K1p), **{
            'threads': params.threads,
            'planner_effort': params.planner_effort["dct"]
        })
    VFSp = VectorTensorProductSpace([FSBp, FSTp, FSTp])
    # NOTE(review): the padded spaces above are immediately overwritten with
    # the unpadded ones here, effectively disabling padding — confirm intent
    VFSp = VFS
    FCTp = FCT
    FSTp = FST
    FSBp = FSB
    Nu = params.N[0] - 2  # Number of velocity modes in Shen basis
    Nb = params.N[0] - 4  # Number of velocity modes in Shen biharmonic basis
    u_slice = slice(0, Nu)
    v_slice = slice(0, Nb)
    float, complex, mpitype = datatypes("double")
    # Mesh variables
    X = FST.local_mesh(True)
    x0, x1, x2 = FST.mesh()
    K = FST.local_wavenumbers(scaled=True)
    # Solution variables
    U = Array(VFS, False)
    U0 = Array(VFS, False)
    U_hat = Array(VFS)
    U_hat0 = Array(VFS)
    g = Array(FST)
    # primary variable
    u = (U_hat, g)
    H_hat = Array(VFS)
    H_hat0 = Array(VFS)
    H_hat1 = Array(VFS)
    dU = Array(VFS)
    hv = Array(FST)
    hg = Array(FST)
    Source = Array(VFS, False)
    Sk = Array(VFS)
    K2 = K[1] * K[1] + K[2] * K[2]
    K_over_K2 = np.zeros((2, ) + g.shape)
    for i in range(2):
        # Guard against division by zero for the zero wavenumber
        K_over_K2[i] = K[i + 1] / np.where(K2 == 0, 1, K2)
    work = work_arrays()
    nu, dt, N = params.nu, params.dt, params.N
    K4 = K2**2
    kx = K[0][:, 0, 0]
    alfa = K2[0] - 2.0 / nu / dt
    # Collect all matrices
    mat = config.AttributeDict(
        dict(
            CDD=inner_product((ST, 0), (ST, 1)),
            AB=HelmholtzCoeff(kx, -1.0, -alfa, ST.quad),
            AC=BiharmonicCoeff(kx, nu * dt / 2., (1. - nu * dt * K2[0]),
                               -(K2[0] - nu * dt / 2. * K4[0]), quad=SB.quad),
            # Matrices for biharmonic equation
            CBD=inner_product((SB, 0), (ST, 1)),
            ABB=inner_product((SB, 0), (SB, 2)),
            BBB=inner_product((SB, 0), (SB, 0)),
            SBB=inner_product((SB, 0), (SB, 4)),
            # Matrices for Helmholtz equation
            ADD=inner_product((ST, 0), (ST, 2)),
            BDD=inner_product((ST, 0), (ST, 0)),
            BBD=inner_product((SB, 0), (ST, 0)),
            CDB=inner_product((ST, 0), (SB, 1)),
            ADD0=inner_product((ST0, 0), (ST0, 2)),
            BDD0=inner_product((ST0, 0), (ST0, 0)),
        ))
    # Solvers operate along the first (wall-normal) axis
    mat.ADD.axis = 0
    mat.BDD.axis = 0
    mat.SBB.axis = 0
    # Collect all linear algebra solvers
    la = config.AttributeDict(
        dict(HelmholtzSolverG=Helmholtz(mat.ADD, mat.BDD, -np.ones((1, 1, 1)),
                                        (K2[0] + 2.0 / nu / dt)[np.newaxis, :, :]),
             BiharmonicSolverU=Biharmonic(
                 mat.SBB, mat.ABB, mat.BBB,
                 -nu * dt / 2. * np.ones((1, 1, 1)),
                 (1. + nu * dt * K2[0])[np.newaxis, :, :],
                 (-(K2[0] + nu * dt / 2. * K4[0]))[np.newaxis, :, :]),
             HelmholtzSolverU0=old_Helmholtz(N[0], np.sqrt(2. / nu / dt), ST),
             TDMASolverD=TDMA(inner_product((ST, 0), (ST, 0)))))
    hdf5file = KMMWriter({"U": U[0], "V": U[1], "W": U[2]},
                         chkpoint={'current': {'U': U},
                                   'previous': {'U': U0}},
                         filename=params.solver + ".h5",
                         mesh={"x": x0, "y": x1, "z": x2})
    # Return every local name as the solver context
    return config.AttributeDict(locals())
"""Compute the weak-form curl of a random velocity field on a
Chebyshev (biharmonic) x Fourier x Fourier tensor product space."""
import numpy as np
from mpi4py import MPI

from shenfun.chebyshev.bases import ShenBiharmonicBasis
from shenfun.fourier.bases import R2CBasis, C2CBasis
# curl was previously imported twice; consolidated into a single import
from shenfun import (Function, TensorProductSpace, VectorTensorProductSpace,
                     curl, inner, TestFunction)

comm = MPI.COMM_WORLD
N = (32, 33, 34)

# Biharmonic basis in x, periodic Fourier in y and z (last axis real)
K0 = ShenBiharmonicBasis(N[0])
K1 = C2CBasis(N[1])
K2 = R2CBasis(N[2])
T = TensorProductSpace(comm, (K0, K1, K2))
Tk = VectorTensorProductSpace([T, T, T])
v = TestFunction(Tk)

# Random physical-space velocity and its forward transform
u_ = Function(Tk, False)
u_[:] = np.random.random(u_.shape)
u_hat = Function(Tk)
u_hat = Tk.forward(u_, u_hat)

# Weak-form curl; uh_hat supplies the precomputed spectral coefficients
w_hat = inner(v, curl(u_), uh_hat=u_hat)
def test_transform(typecode, dim):
    s = (True, )
    if comm.Get_size() > 2 and dim > 2:
        # With more than two ranks and 3D+, test both slab and pencil decomposition
        s = (True, False)
    for slab in s:
        for shape in product(*([sizes] * dim)):
            # Fourier bases; last axis real-to-complex when typecode is real
            bases = []
            for n in shape[:-1]:
                bases.append(Basis(n, 'F', dtype=typecode.upper()))
            bases.append(Basis(shape[-1], 'F', dtype=typecode))
            fft = TensorProductSpace(comm, bases, dtype=typecode, slab=slab)
            if comm.rank == 0:
                grid = [c.size for c in fft.subcomm]
                print('grid:{} shape:{} typecode:{}'.format(
                    grid, shape, typecode))
            # Forward/backward round trip should reproduce the input
            U = random_like(fft.forward.input_array)
            F = fft.forward(U)
            V = fft.backward(F)
            assert allclose(V, U)
            # Alternative method
            fft.forward.input_array[...] = U
            fft.forward(fast_transform=False)
            fft.backward(fast_transform=False)
            V = fft.backward.output_array
            assert allclose(V, U)
            # Vector space round trip
            TT = VectorTensorProductSpace(fft)
            U = Array(TT)
            V = Array(TT)
            F = Function(TT)
            U[:] = random_like(U)
            F = TT.forward(U, F)
            V = TT.backward(F, V)
            assert allclose(V, U)
            # Mixed space round trip
            TM = MixedTensorProductSpace([fft, fft])
            U = Array(TM)
            V = Array(TM)
            F = Function(TM)
            U[:] = random_like(U)
            F = TM.forward(U, F)
            V = TM.backward(F, V)
            assert allclose(V, U)
            fft.destroy()

            # Repeat with 3/2-rule padded transforms; here the spectral
            # coefficients must survive a backward/forward round trip
            padding = 1.5
            bases = []
            for n in shape[:-1]:
                bases.append(
                    Basis(n, 'F', dtype=typecode.upper(),
                          padding_factor=padding))
            bases.append(
                Basis(shape[-1], 'F', dtype=typecode,
                      padding_factor=padding))
            fft = TensorProductSpace(comm, bases, dtype=typecode)
            if comm.rank == 0:
                grid = [c.size for c in fft.subcomm]
                print('grid:{} shape:{} typecode:{}'.format(
                    grid, shape, typecode))
            U = random_like(fft.forward.input_array)
            F = fft.forward(U)
            Fc = F.copy()
            V = fft.backward(F)
            F = fft.forward(V)
            assert allclose(F, Fc)
            # Alternative method
            fft.backward.input_array[...] = F
            fft.backward()
            fft.forward()
            V = fft.forward.output_array
            assert allclose(F, V)
            fft.destroy()
# Two periodic Fourier bases on the domain [-1, 1] in each direction
K0 = Basis(N[0], 'F', dtype='D', domain=(-1., 1.))
K1 = Basis(N[1], 'F', dtype='d', domain=(-1., 1.))
T = TensorProductSpace(comm, (K0, K1))
X = T.local_mesh(True)
u = TrialFunction(T)
v = TestFunction(T)

# For nonlinear term we can use the 3/2-rule with padding
Tp = T.get_dealiased((1.5, 1.5))
# Turn on padding by commenting
#Tp = T

# Create vector spaces and a test function for the regular vector space
TV = VectorTensorProductSpace(T)
TVp = VectorTensorProductSpace(Tp)
vv = TestFunction(TV)
uu = TrialFunction(TV)

# Declare solution arrays and work arrays
UV = Array(TV)
UVp = Array(TVp)
U, V = UV  # views into vector components
UV_hat = Function(TV)
w0 = Function(TV)  # Work array spectral space
w1 = Array(TVp)    # Work array physical space

# Model constants — presumably coefficients of the coupled PDE solved
# further down in this script; confirm against the equations there
e1 = 0.00002
e2 = 0.00001
b0 = 0.03
def refine(self, N, output_array=None):
    """Return self with new number of quadrature points

    Parameters
    ----------
    N : number or sequence of numbers
        The new number of quadrature points
    output_array : Function, optional
        Existing array to fill with the refined coefficients. If not
        given, a new Function on the refined space is created.

    Note
    ----
    If N is smaller than for self, then a truncated array is returned.
    If N is greater than before, then the returned array is padded
    with zeros.

    """
    from shenfun.fourier.bases import R2CBasis
    from shenfun import VectorTensorProductSpace
    # 1D case: delegate directly to assign on a refined basis
    if self.ndim == 1:
        assert isinstance(N, Number)
        space = self.function_space()
        if output_array is None:
            refined_basis = space.get_refined(N)
            output_array = Function(refined_basis)
        output_array = self.assign(output_array)
        return output_array
    space = self.function_space()
    # Vector space: refine each component, then repack into a new vector Function
    if isinstance(space, VectorTensorProductSpace):
        if output_array is None:
            output_array = [None]*len(self)
        for i, array in enumerate(self):
            output_array[i] = array.refine(N, output_array=output_array[i])
        if isinstance(output_array, list):
            T = output_array[0].function_space()
            VT = VectorTensorProductSpace(T)
            output_array = np.array(output_array)
            output_array = Function(VT, buffer=output_array)
        return output_array
    # Multidimensional scalar case: pad/truncate one axis at a time,
    # redistributing the DistArray so the working axis is aligned locally
    axes = [bx for ax in space.axes for bx in ax]
    base = space.bases[axes[0]]
    global_shape = list(self.global_shape)  # Global shape in spectral space
    factor = N[axes[0]]/self.function_space().bases[axes[0]].N
    if isinstance(base, R2CBasis):
        # Real-to-complex axis stores N//2+1 coefficients
        global_shape[axes[0]] = int((2*global_shape[axes[0]]-2)*factor)//2+1
    else:
        global_shape[axes[0]] = int(global_shape[axes[0]]*factor)
    c1 = DistArray(global_shape,
                   subcomm=self.pencil.subcomm,
                   dtype=self.dtype,
                   alignment=self.alignment)
    # Growing the axis pads with zeros; shrinking truncates
    if self.global_shape[axes[0]] <= global_shape[axes[0]]:
        base._padding_backward(self, c1)
    else:
        base._truncation_forward(self, c1)
    for ax in axes[1:]:
        c0 = c1.redistribute(ax)
        factor = N[ax]/self.function_space().bases[ax].N
        # Get a new padded array
        base = space.bases[ax]
        if isinstance(base, R2CBasis):
            global_shape[ax] = int(base.N*factor)//2+1
        else:
            global_shape[ax] = int(global_shape[ax]*factor)
        c1 = DistArray(global_shape,
                       subcomm=c0.pencil.subcomm,
                       dtype=c0.dtype,
                       alignment=ax)
        # Copy from c0 into the resized c1
        if self.global_shape[ax] <= global_shape[ax]:
            base._padding_backward(c0, c1)
        else:
            base._truncation_forward(c0, c1)
    # Reverse transfer to get the same distribution as u_hat
    for ax in reversed(axes[:-1]):
        c1 = c1.redistribute(ax)
    if output_array is None:
        refined_space = space.get_refined(N)
        output_array = Function(refined_space, buffer=c1)
    else:
        output_array[:] = c1
    return output_array
# 200x200 periodic Fourier discretization on the domain [-1, 1]^2
N = (200, 200)
K0 = FunctionSpace(N[0], 'F', dtype='D', domain=(-1., 1.))
K1 = FunctionSpace(N[1], 'F', dtype='d', domain=(-1., 1.))
T = TensorProductSpace(comm, (K0, K1))
u = TrialFunction(T)
v = TestFunction(T)

# For nonlinear term we can use the 3/2-rule with padding
Tp = T.get_dealiased((1.5, 1.5))
# Turn on padding by commenting
#Tp = T

# Create vector spaces and a test function for the regular vector space
TV = VectorTensorProductSpace(T)
TVp = VectorTensorProductSpace(Tp)
vv = TestFunction(TV)
uu = TrialFunction(TV)

# Declare solution arrays and work arrays.
# u0 and v0 are defined elsewhere in this file — presumably sympy initial
# conditions interpolated into the two vector components; verify there.
UV = Array(TV, buffer=(u0, v0))
UVp = Array(TVp)
U, V = UV  # views into vector components
UV_hat = Function(TV)
w0 = Function(TV)  # Work array spectral space
w1 = Array(TVp)    # Work array physical space

# Model constants — presumably coefficients of the coupled PDE solved
# further down in this script; confirm against the equations there
e1 = 0.00002
e2 = 0.00001
b0 = 0.03
def get_context():
    """Set up context for solver"""
    # Get points and weights for Chebyshev weighted integrals
    assert params.Dquad == params.Bquad
    collapse_fourier = False if params.dealias == '3/2-rule' else True
    # Wall-normal bases: Dirichlet (velocity), orthogonal, and pressure
    ST = Basis(params.N[0], 'C', bc=(0, 0), quad=params.Dquad)
    CT = Basis(params.N[0], 'C', quad=params.Dquad)
    CP = Basis(params.N[0], 'C', quad=params.Dquad)
    K0 = Basis(params.N[1], 'F', domain=(0, params.L[1]), dtype='D')
    K1 = Basis(params.N[2], 'F', domain=(0, params.L[2]), dtype='d')
    # Let the pressure basis use all CT.N modes
    CP.slice = lambda: slice(0, CT.N)
    kw0 = {'threads': params.threads,
           'planner_effort': params.planner_effort["dct"],
           'slab': (params.decomposition == 'slab'),
           'collapse_fourier': collapse_fourier}
    FST = TensorProductSpace(comm, (ST, K0, K1), **kw0)  # Dirichlet
    FCT = TensorProductSpace(comm, (CT, K0, K1), **kw0)  # Regular Chebyshev N
    FCP = TensorProductSpace(comm, (CP, K0, K1), **kw0)  # Regular Chebyshev N-2
    VFS = VectorTensorProductSpace(FST)
    VCT = VectorTensorProductSpace(FCT)
    VQ = MixedTensorProductSpace([VFS, FCP])
    mask = FST.get_mask_nyquist() if params.mask_nyquist else None
    # Padded spaces for dealiasing the nonlinear term
    kw = {'padding_factor': 1.5 if params.dealias == '3/2-rule' else 1,
          'dealias_direct': params.dealias == '2/3-rule'}
    if params.dealias == '3/2-rule':
        # Requires new bases due to planning and transforms on different size arrays
        STp = Basis(params.N[0], 'C', bc=(0, 0), quad=params.Dquad)
        CTp = Basis(params.N[0], 'C', quad=params.Dquad)
    else:
        STp, CTp = ST, CT
    K0p = Basis(params.N[1], 'F', dtype='D', domain=(0, params.L[1]), **kw)
    K1p = Basis(params.N[2], 'F', dtype='d', domain=(0, params.L[2]), **kw)
    FSTp = TensorProductSpace(comm, (STp, K0p, K1p), **kw0)
    FCTp = TensorProductSpace(comm, (CTp, K0p, K1p), **kw0)
    VFSp = VectorTensorProductSpace(FSTp)
    VCp = MixedTensorProductSpace([FSTp, FCTp, FCTp])
    float, complex, mpitype = datatypes("double")
    # Constraints used to pin otherwise undetermined pressure modes
    constraints = ((3, 0, 0), (3, params.N[0]-1, 0))
    # Mesh variables
    X = FST.local_mesh(True)
    x0, x1, x2 = FST.mesh()
    K = FST.local_wavenumbers(scaled=True)
    # Solution variables
    UP_hat = Function(VQ)
    UP_hat0 = Function(VQ)
    U_hat, P_hat = UP_hat
    U_hat0, P_hat0 = UP_hat0
    UP = Array(VQ)
    UP0 = Array(VQ)
    U, P = UP
    U0, P0 = UP0
    # primary variable
    u = UP_hat
    H_hat = Function(VFS)
    H_hat0 = Function(VFS)
    H_hat1 = Function(VFS)
    dU = Function(VQ)
    Source = Array(VFS)  # Note - not using VQ. Only used for constant pressure gradient
    Sk = Function(VFS)
    K2 = K[1]*K[1]+K[2]*K[2]
    for i in range(3):
        K[i] = K[i].astype(float)
    work = work_arrays()
    u_dealias = Array(VFSp)
    curl_hat = Function(VCp)
    curl_dealias = Array(VCp)
    nu, dt, N = params.nu, params.dt, params.N
    up = TrialFunction(VQ)
    vq = TestFunction(VQ)
    ut, pt = up
    vt, qt = vq
    alfa = 2./nu/dt
    # Coupled implicit velocity-pressure system assembled as one block matrix
    a0 = inner(vt, (2./nu/dt)*ut-div(grad(ut)))
    a1 = inner(vt, (2./nu)*grad(pt))
    a2 = inner(qt, (2./nu)*div(ut))
    M = BlockMatrix(a0+a1+a2)
    # Collect all matrices
    mat = config.AttributeDict(
        dict(CDD=inner_product((ST, 0), (ST, 1)),
             AB=HelmholtzCoeff(N[0], 1., alfa-K2, 0, ST.quad),))
    la = None
    hdf5file = CoupledFile(config.params.solver,
                           checkpoint={'space': VQ,
                                       'data': {'0': {'UP': [UP_hat]},
                                                '1': {'UP': [UP_hat0]}}},
                           results={'space': VFS,
                                    'data': {'U': [U]}})
    # Return every local name as the solver context
    return config.AttributeDict(locals())
def get_context():
    """Set up context for solver"""
    # Get points and weights for Chebyshev weighted integrals
    # Both quadrature rules must agree since bases share the same points
    assert params.Dquad == params.Bquad
    ST = Basis(params.N[0], 'C', bc=(0, 0), quad=params.Dquad)        # Dirichlet
    SB = Basis(params.N[0], 'C', bc='Biharmonic', quad=params.Bquad)  # Biharmonic
    CT = Basis(params.N[0], 'C', quad=params.Dquad)                   # Regular Chebyshev
    ST0 = Basis(params.N[0], 'C', bc=(0, 0), quad=params.Dquad, plan=True)  # For 1D problem
    K0 = Basis(params.N[1], 'F', domain=(0, params.L[1]), dtype='D')
    K1 = Basis(params.N[2], 'F', domain=(0, params.L[2]), dtype='d')
    FST = TensorProductSpace(comm, (ST, K0, K1), **{
        'threads': params.threads,
        'planner_effort': params.planner_effort["dct"]
    })  # Dirichlet
    FSB = TensorProductSpace(comm, (SB, K0, K1), **{
        'threads': params.threads,
        'planner_effort': params.planner_effort["dct"]
    })  # Biharmonic
    FCT = TensorProductSpace(comm, (CT, K0, K1), **{
        'threads': params.threads,
        'planner_effort': params.planner_effort["dct"]
    })  # Regular Chebyshev
    # Wall-normal component uses the biharmonic space, the two others Dirichlet
    VFS = VectorTensorProductSpace([FSB, FST, FST])

    # Padded spaces for dealiasing the nonlinear term
    kw = {
        'padding_factor': 1.5 if params.dealias == '3/2-rule' else 1,
        'dealias_direct': params.dealias == '2/3-rule'
    }
    if params.dealias == '3/2-rule':
        # Requires new bases due to planning and transforms on different size arrays
        STp = Basis(params.N[0], 'C', bc=(0, 0), quad=params.Dquad)
        SBp = Basis(params.N[0], 'C', bc='Biharmonic', quad=params.Bquad)
        CTp = Basis(params.N[0], 'C', quad=params.Dquad)
    else:
        STp, SBp, CTp = ST, SB, CT
    K0p = Basis(params.N[1], 'F', dtype='D', domain=(0, params.L[1]), **kw)
    K1p = Basis(params.N[2], 'F', dtype='d', domain=(0, params.L[2]), **kw)
    FSTp = TensorProductSpace(
        comm, (STp, K0p, K1p), **{
            'threads': params.threads,
            'planner_effort': params.planner_effort["dct"]
        })
    FSBp = TensorProductSpace(
        comm, (SBp, K0p, K1p), **{
            'threads': params.threads,
            'planner_effort': params.planner_effort["dct"]
        })
    FCTp = TensorProductSpace(
        comm, (CTp, K0p, K1p), **{
            'threads': params.threads,
            'planner_effort': params.planner_effort["dct"]
        })
    VFSp = VectorTensorProductSpace([FSBp, FSTp, FSTp])

    Nu = params.N[0] - 2  # Number of velocity modes in Shen basis
    Nb = params.N[0] - 4  # Number of velocity modes in Shen biharmonic basis
    u_slice = slice(0, Nu)
    v_slice = slice(0, Nb)

    # NOTE: shadows the float/complex builtins on purpose; the names are
    # returned through locals() and used as dtypes throughout the solver
    float, complex, mpitype = datatypes("double")

    # Mesh variables
    X = FST.local_mesh(True)
    x0, x1, x2 = FST.mesh()
    K = FST.local_wavenumbers(scaled=True)

    # Solution variables
    U = Array(VFS)
    U0 = Array(VFS)
    U_hat = Function(VFS)
    U_hat0 = Function(VFS)
    g = Function(FST)

    # primary variable
    u = (U_hat, g)

    H_hat = Function(VFS)       # convection term and its two previous levels
    H_hat0 = Function(VFS)
    H_hat1 = Function(VFS)

    dU = Function(VFS)          # right-hand side
    hv = Function(FST)
    hg = Function(FST)
    Source = Array(VFS)
    Sk = Function(VFS)

    K2 = K[1] * K[1] + K[2] * K[2]
    K4 = K2**2

    # Set Nyquist frequency to zero on K that is used for odd derivatives in nonlinear terms
    Kx = FST.local_wavenumbers(scaled=True, eliminate_highest_freq=True)
    K_over_K2 = np.zeros((2, ) + g.shape)
    for i in range(2):
        K_over_K2[i] = K[i + 1] / np.where(K2 == 0, 1, K2)

    work = work_arrays()

    nu, dt, N = params.nu, params.dt, params.N

    alfa = K2[0] - 2.0 / nu / dt

    # Collect all matrices
    mat = config.AttributeDict(
        dict(
            CDD=inner_product((ST, 0), (ST, 1)),
            AB=HelmholtzCoeff(N[0], 1.0, -alfa, ST.quad),
            AC=BiharmonicCoeff(N[0], nu * dt / 2., (1. - nu * dt * K2[0]),
                               -(K2[0] - nu * dt / 2. * K4[0]),
                               quad=SB.quad),
            # Matrices for biharmonic equation
            CBD=inner_product((SB, 0), (ST, 1)),
            ABB=inner_product((SB, 0), (SB, 2)),
            BBB=inner_product((SB, 0), (SB, 0)),
            SBB=inner_product((SB, 0), (SB, 4)),
            # Matrices for Helmholtz equation
            ADD=inner_product((ST, 0), (ST, 2)),
            BDD=inner_product((ST, 0), (ST, 0)),
            BBD=inner_product((SB, 0), (ST, 0)),
            CDB=inner_product((ST, 0), (SB, 1)),
            ADD0=inner_product((ST0, 0), (ST0, 2)),
            BDD0=inner_product((ST0, 0), (ST0, 0)),
        ))

    # Solvers act along the first (wall-normal) axis
    mat.ADD.axis = 0
    mat.BDD.axis = 0
    mat.SBB.axis = 0

    # Collect all linear algebra solvers
    la = config.AttributeDict(
        dict(HelmholtzSolverG=Helmholtz(mat.ADD, mat.BDD, -np.ones(
            (1, 1, 1)), (K2[0] + 2.0 / nu / dt)[np.newaxis, :, :]),
             BiharmonicSolverU=Biharmonic(
                 mat.SBB, mat.ABB, mat.BBB, -nu * dt / 2. * np.ones(
                     (1, 1, 1)), (1. + nu * dt * K2[0])[np.newaxis, :, :],
                 (-(K2[0] + nu * dt / 2. * K4[0]))[np.newaxis, :, :]),
             HelmholtzSolverU0=old_Helmholtz(N[0], np.sqrt(2. / nu / dt), ST),
             TDMASolverD=TDMA(inner_product((ST, 0), (ST, 0)))))

    hdf5file = KMMWriter({
        "U": U[0],
        "V": U[1],
        "W": U[2]
    },
                         chkpoint={
                             'current': {
                                 'U': U
                             },
                             'previous': {
                                 'U': U0
                             }
                         },
                         filename=params.solver + ".h5",
                         mesh={
                             "x": x0,
                             "y": x1,
                             "z": x2
                         })

    # All locals become attributes of the returned context
    return config.AttributeDict(locals())
def get_context():
    """Set up context for solver"""
    # Get points and weights for Chebyshev weighted integrals
    # Both quadrature rules must agree since bases share the same points
    assert params.Dquad == params.Bquad
    # 3/2-rule padding transforms on different-size arrays, so the Fourier
    # axes cannot be collapsed into a single transform in that case
    collapse_fourier = False if params.dealias == '3/2-rule' else True
    ST = Basis(params.N[0], 'C', bc=(0, 0), quad=params.Dquad)        # Dirichlet
    SB = Basis(params.N[0], 'C', bc='Biharmonic', quad=params.Bquad)  # Biharmonic
    CT = Basis(params.N[0], 'C', quad=params.Dquad)                   # Regular Chebyshev
    ST0 = Basis(params.N[0], 'C', bc=(0, 0), quad=params.Dquad) # For 1D problem
    K0 = Basis(params.N[1], 'F', domain=(0, params.L[1]), dtype='D')
    K1 = Basis(params.N[2], 'F', domain=(0, params.L[2]), dtype='d')
    kw0 = {'threads': params.threads,
           'planner_effort': params.planner_effort["dct"],
           'slab': (params.decomposition == 'slab'),
           'collapse_fourier': collapse_fourier}
    FST = TensorProductSpace(comm, (ST, K0, K1), **kw0)    # Dirichlet
    FSB = TensorProductSpace(comm, (SB, K0, K1), **kw0)    # Biharmonic
    FCT = TensorProductSpace(comm, (CT, K0, K1), **kw0)    # Regular Chebyshev
    # Wall-normal component uses the biharmonic space, the two others Dirichlet
    VFS = VectorTensorProductSpace([FSB, FST, FST])
    VFST = VectorTensorProductSpace([FST, FST, FST])
    VUG = MixedTensorProductSpace([FSB, FST])
    VCT = VectorTensorProductSpace(FCT)
    mask = FST.get_mask_nyquist() if params.mask_nyquist else None

    # Padded spaces for dealiasing the nonlinear term
    kw = {'padding_factor': 1.5 if params.dealias == '3/2-rule' else 1,
          'dealias_direct': params.dealias == '2/3-rule'}
    if params.dealias == '3/2-rule':
        # Requires new bases due to planning and transforms on different size arrays
        STp = Basis(params.N[0], 'C', bc=(0, 0), quad=params.Dquad)
        SBp = Basis(params.N[0], 'C', bc='Biharmonic', quad=params.Bquad)
        CTp = Basis(params.N[0], 'C', quad=params.Dquad)
    else:
        STp, SBp, CTp = ST, SB, CT
    K0p = Basis(params.N[1], 'F', dtype='D', domain=(0, params.L[1]), **kw)
    K1p = Basis(params.N[2], 'F', dtype='d', domain=(0, params.L[2]), **kw)
    FSTp = TensorProductSpace(comm, (STp, K0p, K1p), **kw0)
    FSBp = TensorProductSpace(comm, (SBp, K0p, K1p), **kw0)
    FCTp = TensorProductSpace(comm, (CTp, K0p, K1p), **kw0)
    VFSp = VectorTensorProductSpace([FSBp, FSTp, FSTp])

    # NOTE: shadows the float/complex builtins on purpose; the names are
    # returned through locals() and used as dtypes throughout the solver
    float, complex, mpitype = datatypes("double")

    # Mesh variables
    X = FST.local_mesh(True)
    x0, x1, x2 = FST.mesh()
    K = FST.local_wavenumbers(scaled=True)

    # Solution variables
    U = Array(VFS)
    U0 = Array(VFS)
    U_hat = Function(VFS)
    U_hat0 = Function(VFS)
    g = Function(FST)

    # primary variable
    u = (U_hat, g)

    H_hat = Function(VFST)      # convection term and its two previous levels
    H_hat0 = Function(VFST)
    H_hat1 = Function(VFST)

    dU = Function(VFS)          # right-hand side
    hv = Function(FSB)
    hg = Function(FST)
    Source = Array(VFS)
    Sk = Function(VFS)

    K2 = K[1]*K[1]+K[2]*K[2]
    K4 = K2**2
    K_over_K2 = np.zeros((2,)+g.shape)
    for i in range(2):
        K_over_K2[i] = K[i+1] / np.where(K2 == 0, 1, K2)

    for i in range(3):
        K[i] = K[i].astype(float)

    work = work_arrays()
    u_dealias = Array(VFSp)
    # 1D work arrays for the mean-flow (zero wavenumber) problem
    u0_hat = np.zeros((2, params.N[0]), dtype=complex)
    h0_hat = np.zeros((2, params.N[0]), dtype=complex)
    w = np.zeros((params.N[0], ), dtype=complex)
    w1 = np.zeros((params.N[0], ), dtype=complex)

    nu, dt, N = params.nu, params.dt, params.N

    alfa = K2[0] - 2.0/nu/dt

    # Collect all matrices
    mat = config.AttributeDict(
        dict(CDD=inner_product((ST, 0), (ST, 1)),
             AB=HelmholtzCoeff(N[0], 1., -(K2 - 2.0/nu/dt), 0, ST.quad),
             AC=BiharmonicCoeff(N[0], nu*dt/2., (1. - nu*dt*K2),
                                -(K2 - nu*dt/2.*K4), 0, SB.quad),
             # Matrices for biharmonic equation
             CBD=inner_product((SB, 0), (ST, 1)),
             ABB=inner_product((SB, 0), (SB, 2)),
             BBB=inner_product((SB, 0), (SB, 0)),
             SBB=inner_product((SB, 0), (SB, 4)),
             # Matrices for Helmholtz equation
             ADD=inner_product((ST, 0), (ST, 2)),
             BDD=inner_product((ST, 0), (ST, 0)),
             BBD=inner_product((SB, 0), (ST, 0)),
             CDB=inner_product((ST, 0), (SB, 1)),
             ADD0=inner_product((ST0, 0), (ST0, 2)),
             BDD0=inner_product((ST0, 0), (ST0, 0)),))

    # Collect all linear algebra solvers
    la = config.AttributeDict(
        dict(HelmholtzSolverG=Helmholtz(mat.ADD, mat.BDD, -np.ones((1, 1, 1)),
                                        (K2+2.0/nu/dt)),
             BiharmonicSolverU=Biharmonic(mat.SBB, mat.ABB, mat.BBB,
                                          -nu*dt/2.*np.ones((1, 1, 1)),
                                          (1.+nu*dt*K2),
                                          (-(K2 + nu*dt/2.*K4))),
             HelmholtzSolverU0=Helmholtz(mat.ADD0, mat.BDD0,
                                         np.array([-1.]),
                                         np.array([2./nu/dt])),
             TDMASolverD=TDMA(inner_product((ST, 0), (ST, 0)))))

    hdf5file = KMMFile(config.params.solver,
                       checkpoint={'space': VFS,
                                   'data': {'0': {'U': [U_hat]},
                                            '1': {'U': [U_hat0]}}},
                       results={'space': VFS,
                                'data': {'U': [U]}})

    # All locals become attributes of the returned context
    return config.AttributeDict(locals())
def get_context():
    """Set up context for classical (NS) solver"""
    # NOTE: shadows the float/complex builtins on purpose; the names are
    # returned through locals() and used as dtypes throughout the solver
    float, complex, mpitype = datatypes(params.precision)
    # 3/2-rule padding transforms on different-size arrays, so the Fourier
    # axes cannot be collapsed into a single transform in that case
    collapse_fourier = False if params.dealias == '3/2-rule' else True
    dim = len(params.N)
    # Last axis holds a real-to-complex transform, the others complex-to-complex
    dtype = lambda d: float if d == dim-1 else complex
    V = [Basis(params.N[i], 'F', domain=(0, params.L[i]),
               dtype=dtype(i)) for i in range(dim)]
    kw0 = {'threads': params.threads,
           'planner_effort': params.planner_effort['fft']}
    T = TensorProductSpace(comm, V, dtype=float,
                           slab=(params.decomposition == 'slab'),
                           collapse_fourier=collapse_fourier, **kw0)
    VT = VectorTensorProductSpace(T)

    # Different bases for nonlinear term, either 2/3-rule or 3/2-rule
    kw = {'padding_factor': 1.5 if params.dealias == '3/2-rule' else 1,
          'dealias_direct': params.dealias == '2/3-rule'}
    Vp = [Basis(params.N[i], 'F', domain=(0, params.L[i]),
                dtype=dtype(i), **kw) for i in range(dim)]
    Tp = TensorProductSpace(comm, Vp, dtype=float,
                            slab=(params.decomposition == 'slab'),
                            collapse_fourier=collapse_fourier, **kw0)
    VTp = VectorTensorProductSpace(Tp)

    # Mesh variables
    X = T.local_mesh(True)
    K = T.local_wavenumbers(scaled=True)
    for i in range(dim):
        X[i] = X[i].astype(float)
        K[i] = K[i].astype(float)
    K2 = np.zeros(T.shape(True), dtype=float)
    for i in range(dim):
        K2 += K[i]*K[i]

    # Set Nyquist frequency to zero on K that is, from now on, used for odd derivatives
    Kx = T.local_wavenumbers(scaled=True, eliminate_highest_freq=True)
    for i in range(dim):
        Kx[i] = Kx[i].astype(float)

    # K/|K|^2 with the zero wavenumber guarded against division by zero
    K_over_K2 = np.zeros(VT.shape(True), dtype=float)
    for i in range(dim):
        K_over_K2[i] = K[i] / np.where(K2 == 0, 1, K2)

    # Velocity and pressure. Use ndarray view for efficiency
    U = Array(VT)
    U_hat = Function(VT)
    P = Array(T)
    P_hat = Function(T)
    u_dealias = Array(VTp)

    # Primary variable
    u = U_hat

    # RHS array
    dU = Function(VT)
    curl = Array(VT)
    Source = Function(VT) # Possible source term initialized to zero
    work = work_arrays()

    hdf5file = NSFile(config.params.solver,
                      checkpoint={'space': VT,
                                  'data': {'0': {'U': [U_hat]}}},
                      results={'space': VT,
                               'data': {'U': [U], 'P': [P]}})

    # All locals become attributes of the returned context
    return config.AttributeDict(locals())
def get_context():
    """Set up context for MHD solver."""
    # NOTE: shadows the float/complex builtins on purpose; the names are
    # returned through locals() and used as dtypes throughout the solver
    float, complex, mpitype = datatypes(params.precision)
    # 3/2-rule padding transforms on different-size arrays, so the Fourier
    # axes cannot be collapsed into a single transform in that case
    collapse_fourier = False if params.dealias == '3/2-rule' else True
    dim = len(params.N)
    # Last axis holds a real-to-complex transform, the others complex-to-complex
    dtype = lambda d: float if d == dim - 1 else complex
    V = [
        Basis(params.N[i], 'F', domain=(0, params.L[i]), dtype=dtype(i))
        for i in range(dim)
    ]
    kw0 = {
        'threads': params.threads,
        'planner_effort': params.planner_effort['fft']
    }
    T = TensorProductSpace(comm,
                           V,
                           dtype=float,
                           slab=(params.decomposition == 'slab'),
                           collapse_fourier=collapse_fourier,
                           **kw0)
    VT = VectorTensorProductSpace(T)
    VM = MixedTensorProductSpace([T] * 2 * dim)  # velocity + magnetic field
    # BUGFIX: was T.mask_nyquist(); the TensorProductSpace method is
    # get_mask_nyquist(), as used by the other contexts in this file
    mask = T.get_mask_nyquist() if params.mask_nyquist else None

    # Different bases for the nonlinear term, either 2/3-rule or 3/2-rule
    kw = {
        'padding_factor': 1.5 if params.dealias == '3/2-rule' else 1,
        'dealias_direct': params.dealias == '2/3-rule'
    }
    Vp = [
        Basis(params.N[i], 'F', domain=(0, params.L[i]), dtype=dtype(i), **kw)
        for i in range(dim)
    ]
    Tp = TensorProductSpace(comm,
                            Vp,
                            dtype=float,
                            slab=(params.decomposition == 'slab'),
                            collapse_fourier=collapse_fourier,
                            **kw0)
    VTp = VectorTensorProductSpace(Tp)
    VMp = MixedTensorProductSpace([Tp] * 2 * dim)

    # Mesh variables
    X = T.local_mesh(True)
    K = T.local_wavenumbers(scaled=True)
    for i in range(dim):
        X[i] = X[i].astype(float)
        K[i] = K[i].astype(float)
    K2 = np.zeros(T.shape(True), dtype=float)
    for i in range(dim):
        K2 += K[i] * K[i]

    # Set Nyquist frequency to zero on K that is, from now on, used for odd derivatives
    Kx = T.local_wavenumbers(scaled=True, eliminate_highest_freq=True)
    for i in range(dim):
        Kx[i] = Kx[i].astype(float)

    # K/|K|^2 with the zero wavenumber guarded against division by zero
    K_over_K2 = np.zeros(VT.shape(True), dtype=float)
    for i in range(dim):
        K_over_K2[i] = K[i] / np.where(K2 == 0, 1, K2)

    # Solution variables
    UB = Array(VM)
    P = Array(T)
    curl = Array(VT)
    UB_hat = Function(VM)
    P_hat = Function(T)
    dU = Function(VM)           # right-hand side
    Source = Array(VM)
    ub_dealias = Array(VMp)
    ZZ_hat = np.zeros((3, 3) + Tp.shape(True), dtype=complex)  # Work array

    # Create views into large data structures
    U = UB[:3]
    U_hat = UB_hat[:3]
    B = UB[3:]
    B_hat = UB_hat[3:]

    # Primary variable
    u = UB_hat

    hdf5file = MHDFile(config.params.solver,
                       checkpoint={
                           'space': VM,
                           'data': {
                               '0': {
                                   'UB': [UB_hat]
                               }
                           }
                       },
                       results={
                           'space': VM,
                           'data': {
                               'UB': [UB]
                           }
                       })

    # All locals become attributes of the returned context
    return config.AttributeDict(locals())
def get_context():
    """Set up context for MHD solver."""
    # NOTE: shadows the float/complex builtins on purpose; the names are
    # returned through locals() and used as dtypes throughout the solver
    float, complex, mpitype = datatypes(params.precision)
    # 3/2-rule padding transforms on different-size arrays, so the Fourier
    # axes cannot be collapsed into a single transform in that case
    collapse_fourier = False if params.dealias == '3/2-rule' else True
    dim = len(params.N)
    # Last axis holds a real-to-complex transform, the others complex-to-complex
    dtype = lambda d: float if d == dim-1 else complex
    V = [Basis(params.N[i], 'F', domain=(0, params.L[i]),
               dtype=dtype(i)) for i in range(dim)]
    kw0 = {'threads': params.threads,
           'planner_effort': params.planner_effort['fft']}
    T = TensorProductSpace(comm, V, dtype=float,
                           slab=(params.decomposition == 'slab'),
                           collapse_fourier=collapse_fourier, **kw0)
    VT = VectorTensorProductSpace(T)
    VM = MixedTensorProductSpace([T]*2*dim)  # velocity + magnetic field

    # Different bases for the nonlinear term, either 2/3-rule or 3/2-rule
    kw = {'padding_factor': 1.5 if params.dealias == '3/2-rule' else 1,
          'dealias_direct': params.dealias == '2/3-rule'}
    Vp = [Basis(params.N[i], 'F', domain=(0, params.L[i]),
                dtype=dtype(i), **kw) for i in range(dim)]
    Tp = TensorProductSpace(comm, Vp, dtype=float,
                            slab=(params.decomposition == 'slab'),
                            collapse_fourier=collapse_fourier, **kw0)
    VTp = VectorTensorProductSpace(Tp)
    VMp = MixedTensorProductSpace([Tp]*2*dim)

    # Mesh variables
    X = T.local_mesh(True)
    K = T.local_wavenumbers(scaled=True)
    for i in range(dim):
        X[i] = X[i].astype(float)
        K[i] = K[i].astype(float)
    K2 = np.zeros(T.shape(True), dtype=float)
    for i in range(dim):
        K2 += K[i]*K[i]

    # Set Nyquist frequency to zero on K that is, from now on, used for odd derivatives
    Kx = T.local_wavenumbers(scaled=True, eliminate_highest_freq=True)
    for i in range(dim):
        Kx[i] = Kx[i].astype(float)

    # K/|K|^2 with the zero wavenumber guarded against division by zero
    K_over_K2 = np.zeros(VT.shape(True), dtype=float)
    for i in range(dim):
        K_over_K2[i] = K[i] / np.where(K2 == 0, 1, K2)

    # Solution variables
    UB = Array(VM)
    P = Array(T)
    curl = Array(VT)
    UB_hat = Function(VM)
    P_hat = Function(T)
    dU = Function(VM)           # right-hand side
    Source = Array(VM)
    ub_dealias = Array(VMp)
    ZZ_hat = np.zeros((3, 3) + Tp.shape(True), dtype=complex) # Work array

    # Create views into large data structures
    U = UB[:3]
    U_hat = UB_hat[:3]
    B = UB[3:]
    B_hat = UB_hat[3:]

    # Primary variable
    u = UB_hat

    hdf5file = MHDFile(config.params.solver,
                       checkpoint={'space': VM,
                                   'data': {'0': {'UB': [UB_hat]}}},
                       results={'space': VM,
                                'data': {'UB': [UB]}})

    # All locals become attributes of the returned context
    return config.AttributeDict(locals())