def get_context():
    """Set up context for the MHD solver.

    Builds the Fourier tensor-product spaces (regular and dealiased),
    mesh/wavenumber arrays, solution arrays for the combined
    velocity/magnetic-field vector, and the HDF5 output file, then
    returns everything as ``config.AttributeDict(locals())``.

    NOTE: every local name defined here becomes an attribute of the
    returned context — renaming a local changes the public context.
    """
    # NOTE(review): `float` and `complex` deliberately shadow the builtins
    # with precision-dependent numpy scalar types from datatypes().
    float, complex, mpitype = datatypes(params.precision)
    # With 3/2-rule padding the Fourier axes cannot be collapsed into a
    # single multidimensional transform.
    collapse_fourier = False if params.dealias == '3/2-rule' else True
    dim = len(params.N)
    # Last axis is real-to-complex; all preceding axes are complex.
    dtype = lambda d: float if d == dim - 1 else complex
    V = [Basis(params.N[i], 'F', domain=(0, params.L[i]), dtype=dtype(i))
         for i in range(dim)]
    kw0 = {'threads': params.threads,
           'planner_effort': params.planner_effort['fft']}
    T = TensorProductSpace(comm, V, dtype=float,
                           slab=(params.decomposition == 'slab'),
                           collapse_fourier=collapse_fourier, **kw0)
    VT = VectorTensorProductSpace(T)
    # Mixed space holding velocity and magnetic field together
    # (2*dim scalar components).
    VM = MixedTensorProductSpace([T] * 2 * dim)

    mask = T.mask_nyquist() if params.mask_nyquist else None

    # Dealiased spaces for the nonlinear term: padded for the 3/2-rule,
    # direct truncation for the 2/3-rule.
    kw = {'padding_factor': 1.5 if params.dealias == '3/2-rule' else 1,
          'dealias_direct': params.dealias == '2/3-rule'}
    Vp = [Basis(params.N[i], 'F', domain=(0, params.L[i]), dtype=dtype(i), **kw)
          for i in range(dim)]
    Tp = TensorProductSpace(comm, Vp, dtype=float,
                            slab=(params.decomposition == 'slab'),
                            collapse_fourier=collapse_fourier, **kw0)
    VTp = VectorTensorProductSpace(Tp)
    VMp = MixedTensorProductSpace([Tp] * 2 * dim)

    # Mesh variables
    X = T.local_mesh(True)
    K = T.local_wavenumbers(scaled=True)
    for i in range(dim):
        X[i] = X[i].astype(float)
        K[i] = K[i].astype(float)
    # |k|^2 on the local wavenumber mesh.
    K2 = np.zeros(T.shape(True), dtype=float)
    for i in range(dim):
        K2 += K[i] * K[i]

    # Set Nyquist frequency to zero on K that is, from now on, used for
    # odd derivatives
    Kx = T.local_wavenumbers(scaled=True, eliminate_highest_freq=True)
    for i in range(dim):
        Kx[i] = Kx[i].astype(float)

    # k / |k|^2 with the zero mode guarded against division by zero.
    K_over_K2 = np.zeros(VT.shape(True), dtype=float)
    for i in range(dim):
        K_over_K2[i] = K[i] / np.where(K2 == 0, 1, K2)

    # Solution arrays in physical (Array) and spectral (Function) space.
    UB = Array(VM)
    P = Array(T)
    curl = Array(VT)
    UB_hat = Function(VM)
    P_hat = Function(T)
    dU = Function(VM)
    Source = Array(VM)
    ub_dealias = Array(VMp)
    ZZ_hat = np.zeros((3, 3) + Tp.shape(True), dtype=complex)  # Work array

    # Create views into large data structures
    U = UB[:3]
    U_hat = UB_hat[:3]
    B = UB[3:]
    B_hat = UB_hat[3:]

    # Primary variable
    u = UB_hat

    hdf5file = MHDFile(config.params.solver,
                       checkpoint={'space': VM,
                                   'data': {'0': {'UB': [UB_hat]}}},
                       results={'space': VM,
                                'data': {'UB': [UB]}})

    return config.AttributeDict(locals())
def get_context():
    """Set up context for classical (NS) solver.

    Builds the Fourier tensor-product spaces (regular and dealiased),
    mesh/wavenumber arrays, velocity/pressure arrays, and the HDF5
    output file, then returns everything as
    ``config.AttributeDict(locals())``.

    NOTE: every local name defined here becomes an attribute of the
    returned context — renaming a local changes the public context.
    """
    # NOTE(review): `float` and `complex` deliberately shadow the builtins
    # with precision-dependent numpy scalar types from datatypes().
    float, complex, mpitype = datatypes(params.precision)
    # With 3/2-rule padding the Fourier axes cannot be collapsed into a
    # single multidimensional transform.
    collapse_fourier = False if params.dealias == '3/2-rule' else True
    dim = len(params.N)
    # Last axis is real-to-complex; all preceding axes are complex.
    dtype = lambda d: float if d == dim - 1 else complex
    V = [Basis(params.N[i], 'F', domain=(0, params.L[i]), dtype=dtype(i))
         for i in range(dim)]
    kw0 = {'threads': params.threads,
           'planner_effort': params.planner_effort['fft']}
    T = TensorProductSpace(comm, V, dtype=float,
                           slab=(params.decomposition == 'slab'),
                           collapse_fourier=collapse_fourier, **kw0)
    VT = VectorTensorProductSpace(T)

    # Different bases for nonlinear term, either 2/3-rule or 3/2-rule
    kw = {'padding_factor': 1.5 if params.dealias == '3/2-rule' else 1,
          'dealias_direct': params.dealias == '2/3-rule'}
    Vp = [Basis(params.N[i], 'F', domain=(0, params.L[i]), dtype=dtype(i), **kw)
          for i in range(dim)]
    Tp = TensorProductSpace(comm, Vp, dtype=float,
                            slab=(params.decomposition == 'slab'),
                            collapse_fourier=collapse_fourier, **kw0)
    VTp = VectorTensorProductSpace(Tp)

    mask = T.mask_nyquist() if params.mask_nyquist else None

    # Mesh variables
    X = T.local_mesh(True)
    K = T.local_wavenumbers(scaled=True)
    for i in range(dim):
        X[i] = X[i].astype(float)
        K[i] = K[i].astype(float)
    # |k|^2 on the local wavenumber mesh.
    K2 = np.zeros(T.shape(True), dtype=float)
    for i in range(dim):
        K2 += K[i] * K[i]

    # Set Nyquist frequency to zero on K that is, from now on, used for
    # odd derivatives
    Kx = T.local_wavenumbers(scaled=True, eliminate_highest_freq=True)
    for i in range(dim):
        Kx[i] = Kx[i].astype(float)

    # k / |k|^2 with the zero mode guarded against division by zero.
    K_over_K2 = np.zeros(VT.shape(True), dtype=float)
    for i in range(dim):
        K_over_K2[i] = K[i] / np.where(K2 == 0, 1, K2)

    # Velocity and pressure. Use ndarray view for efficiency
    U = Array(VT)
    U_hat = Function(VT)
    P = Array(T)
    P_hat = Function(T)
    u_dealias = Array(VTp)

    # Primary variable
    u = U_hat

    # RHS array
    dU = Function(VT)
    curl = Array(VT)
    Source = Function(VT)  # Possible source term initialized to zero
    work = work_arrays()

    hdf5file = NSFile(config.params.solver,
                      checkpoint={'space': VT,
                                  'data': {'0': {'U': [U_hat]}}},
                      results={'space': VT,
                               'data': {'U': [U], 'P': [P]}})

    return config.AttributeDict(locals())
def get_context():
    """Set up context for solver.

    Channel-flow variant with a Chebyshev wall-normal direction and two
    Fourier directions. Builds the coupled velocity/pressure block space
    VQ, the dealiased spaces, the block matrix M for the implicit solve,
    and the HDF5 output file, then returns everything as
    ``config.AttributeDict(locals())``.

    NOTE: every local name defined here becomes an attribute of the
    returned context — renaming a local changes the public context.
    """
    # Get points and weights for Chebyshev weighted integrals
    assert params.Dquad == params.Bquad
    # With 3/2-rule padding the Fourier axes cannot be collapsed into a
    # single multidimensional transform.
    collapse_fourier = False if params.dealias == '3/2-rule' else True
    # Wall-normal Chebyshev bases: Dirichlet (ST) and regular (CT, CP).
    ST = Basis(params.N[0], 'C', bc=(0, 0), quad=params.Dquad)
    CT = Basis(params.N[0], 'C', quad=params.Dquad)
    CP = Basis(params.N[0], 'C', quad=params.Dquad)
    # Fourier bases: first is complex-to-complex ('D'), last real ('d').
    K0 = Basis(params.N[1], 'F', domain=(0, params.L[1]), dtype='D')
    K1 = Basis(params.N[2], 'F', domain=(0, params.L[2]), dtype='d')
    # NOTE(review): pressure basis is given the full slice 0..CT.N even
    # though it is labelled "Chebyshev N-2" below — presumably intentional
    # for the coupled formulation; confirm against the solver.
    CP.slice = lambda: slice(0, CT.N)

    kw0 = {'threads': params.threads,
           'planner_effort': params.planner_effort["dct"],
           'slab': (params.decomposition == 'slab'),
           'collapse_fourier': collapse_fourier}
    FST = TensorProductSpace(comm, (ST, K0, K1), **kw0)    # Dirichlet
    FCT = TensorProductSpace(comm, (CT, K0, K1), **kw0)    # Regular Chebyshev N
    FCP = TensorProductSpace(comm, (CP, K0, K1), **kw0)    # Regular Chebyshev N-2
    VFS = VectorTensorProductSpace(FST)
    VCT = VectorTensorProductSpace(FCT)
    # Coupled velocity + pressure block space.
    VQ = MixedTensorProductSpace([VFS, FCP])

    mask = FST.mask_nyquist() if params.mask_nyquist else None

    # Padded
    kw = {'padding_factor': 1.5 if params.dealias == '3/2-rule' else 1,
          'dealias_direct': params.dealias == '2/3-rule'}
    if params.dealias == '3/2-rule':
        # Requires new bases due to planning and transforms on different
        # size arrays
        STp = Basis(params.N[0], 'C', bc=(0, 0), quad=params.Dquad)
        CTp = Basis(params.N[0], 'C', quad=params.Dquad)
    else:
        STp, CTp = ST, CT
    K0p = Basis(params.N[1], 'F', dtype='D', domain=(0, params.L[1]), **kw)
    K1p = Basis(params.N[2], 'F', dtype='d', domain=(0, params.L[2]), **kw)
    FSTp = TensorProductSpace(comm, (STp, K0p, K1p), **kw0)
    FCTp = TensorProductSpace(comm, (CTp, K0p, K1p), **kw0)
    VFSp = VectorTensorProductSpace(FSTp)
    VCp = MixedTensorProductSpace([FSTp, FCTp, FCTp])

    # NOTE(review): `float` and `complex` deliberately shadow the builtins
    # with numpy scalar types; this solver is fixed to double precision.
    float, complex, mpitype = datatypes("double")

    # Constraints on component 3 (pressure) of the block system.
    constraints = ((3, 0, 0), (3, params.N[0] - 1, 0))

    # Mesh variables
    X = FST.local_mesh(True)
    x0, x1, x2 = FST.mesh()
    K = FST.local_wavenumbers(scaled=True)

    # Solution variables
    UP_hat = Function(VQ)
    UP_hat0 = Function(VQ)
    U_hat, P_hat = UP_hat
    U_hat0, P_hat0 = UP_hat0

    UP = Array(VQ)
    UP0 = Array(VQ)
    U, P = UP
    U0, P0 = UP0

    # primary variable
    u = UP_hat

    # Convection-term history arrays (spectral).
    H_hat = Function(VFS)
    H_hat0 = Function(VFS)
    H_hat1 = Function(VFS)

    dU = Function(VQ)
    Source = Array(VFS)  # Note - not using VQ. Only used for constant pressure gradient
    Sk = Function(VFS)

    # |k|^2 over the two Fourier directions only.
    K2 = K[1] * K[1] + K[2] * K[2]

    # Set Nyquist frequency to zero on K that is used for odd derivatives
    # in nonlinear terms
    Kx = FST.local_wavenumbers(scaled=True, eliminate_highest_freq=True)
    for i in range(3):
        K[i] = K[i].astype(float)
        Kx[i] = Kx[i].astype(float)

    work = work_arrays()
    u_dealias = Array(VFSp)
    curl_hat = Function(VCp)
    curl_dealias = Array(VCp)

    nu, dt, N = params.nu, params.dt, params.N

    # Weak forms for the coupled implicit system:
    # (2/(nu*dt)) u - div(grad(u)) + (2/nu) grad(p) = rhs,  (2/nu) div(u) = 0.
    up = TrialFunction(VQ)
    vq = TestFunction(VQ)

    ut, pt = up
    vt, qt = vq

    alfa = 2. / nu / dt
    a0 = inner(vt, (2. / nu / dt) * ut - div(grad(ut)))
    a1 = inner(vt, (2. / nu) * grad(pt))
    a2 = inner(qt, (2. / nu) * div(ut))
    M = BlockMatrix(a0 + a1 + a2)

    # Collect all matrices
    mat = config.AttributeDict(
        dict(CDD=inner_product((ST, 0), (ST, 1)),
             AB=HelmholtzCoeff(N[0], 1., alfa - K2, 0, ST.quad),))

    la = None

    hdf5file = CoupledFile(config.params.solver,
                           checkpoint={'space': VQ,
                                       'data': {'0': {'UP': [UP_hat]},
                                                '1': {'UP': [UP_hat0]}}},
                           results={'space': VFS,
                                    'data': {'U': [U]}})

    return config.AttributeDict(locals())
def get_context():
    """Set up context for solver.

    KMM-style channel-flow variant: biharmonic equation for the
    wall-normal velocity and Helmholtz equations for the remaining
    unknowns. Builds the Chebyshev/Fourier spaces (regular and
    dealiased), mesh/wavenumber arrays, solution arrays, the matrix
    collection `mat`, the linear solvers `la`, and the HDF5 output file,
    then returns everything as ``config.AttributeDict(locals())``.

    NOTE: every local name defined here becomes an attribute of the
    returned context — renaming a local changes the public context.
    """
    # Get points and weights for Chebyshev weighted integrals
    assert params.Dquad == params.Bquad
    # With 3/2-rule padding the Fourier axes cannot be collapsed into a
    # single multidimensional transform.
    collapse_fourier = False if params.dealias == '3/2-rule' else True
    # Wall-normal bases: Dirichlet (ST), biharmonic (SB), regular (CT).
    ST = Basis(params.N[0], 'C', bc=(0, 0), quad=params.Dquad)
    SB = Basis(params.N[0], 'C', bc='Biharmonic', quad=params.Bquad)
    CT = Basis(params.N[0], 'C', quad=params.Dquad)
    ST0 = Basis(params.N[0], 'C', bc=(0, 0), quad=params.Dquad)  # For 1D problem
    # Fourier bases: first is complex-to-complex ('D'), last real ('d').
    K0 = Basis(params.N[1], 'F', domain=(0, params.L[1]), dtype='D')
    K1 = Basis(params.N[2], 'F', domain=(0, params.L[2]), dtype='d')
    kw0 = {'threads': params.threads,
           'planner_effort': params.planner_effort["dct"],
           'slab': (params.decomposition == 'slab'),
           'collapse_fourier': collapse_fourier}
    FST = TensorProductSpace(comm, (ST, K0, K1), **kw0)    # Dirichlet
    FSB = TensorProductSpace(comm, (SB, K0, K1), **kw0)    # Biharmonic
    FCT = TensorProductSpace(comm, (CT, K0, K1), **kw0)    # Regular Chebyshev
    # Velocity space: biharmonic wall-normal component, Dirichlet others.
    VFS = MixedTensorProductSpace([FSB, FST, FST])
    VFST = MixedTensorProductSpace([FST, FST, FST])
    VUG = MixedTensorProductSpace([FSB, FST])
    VCT = VectorTensorProductSpace(FCT)

    mask = FST.mask_nyquist() if params.mask_nyquist else None

    # Padded
    kw = {'padding_factor': 1.5 if params.dealias == '3/2-rule' else 1,
          'dealias_direct': params.dealias == '2/3-rule'}
    if params.dealias == '3/2-rule':
        # Requires new bases due to planning and transforms on different
        # size arrays
        STp = Basis(params.N[0], 'C', bc=(0, 0), quad=params.Dquad)
        SBp = Basis(params.N[0], 'C', bc='Biharmonic', quad=params.Bquad)
        CTp = Basis(params.N[0], 'C', quad=params.Dquad)
    else:
        STp, SBp, CTp = ST, SB, CT
    K0p = Basis(params.N[1], 'F', dtype='D', domain=(0, params.L[1]), **kw)
    K1p = Basis(params.N[2], 'F', dtype='d', domain=(0, params.L[2]), **kw)
    FSTp = TensorProductSpace(comm, (STp, K0p, K1p), **kw0)
    FSBp = TensorProductSpace(comm, (SBp, K0p, K1p), **kw0)
    FCTp = TensorProductSpace(comm, (CTp, K0p, K1p), **kw0)
    VFSp = MixedTensorProductSpace([FSBp, FSTp, FSTp])

    # NOTE(review): `float` and `complex` deliberately shadow the builtins
    # with numpy scalar types; this solver is fixed to double precision.
    float, complex, mpitype = datatypes("double")

    # Mesh variables
    X = FST.local_mesh(True)
    x0, x1, x2 = FST.mesh()
    K = FST.local_wavenumbers(scaled=True)

    # Solution variables
    U = Array(VFS)
    U0 = Array(VFS)
    U_hat = Function(VFS)
    U_hat0 = Function(VFS)
    g = Function(FST)

    # primary variable
    u = (U_hat, g)

    # Convection-term history arrays (spectral).
    H_hat = Function(VFST)
    H_hat0 = Function(VFST)
    H_hat1 = Function(VFST)

    dU = Function(VFS)
    hv = Function(FSB)
    hg = Function(FST)
    Source = Array(VFS)
    Sk = Function(VFS)

    # |k|^2 and |k|^4 over the two Fourier directions only.
    K2 = K[1]*K[1]+K[2]*K[2]
    K4 = K2**2

    # Set Nyquist frequency to zero on K that is used for odd derivatives
    # in nonlinear terms
    Kx = FST.local_wavenumbers(scaled=True, eliminate_highest_freq=True)
    # k / |k|^2 for the two Fourier wavenumbers, zero mode guarded.
    K_over_K2 = np.zeros((2,)+g.shape)
    for i in range(2):
        K_over_K2[i] = K[i+1] / np.where(K2 == 0, 1, K2)
    for i in range(3):
        K[i] = K[i].astype(float)
        Kx[i] = Kx[i].astype(float)

    work = work_arrays()
    u_dealias = Array(VFSp)
    # 1D work arrays for the wavenumber-zero (mean-flow) problem.
    u0_hat = np.zeros((2, params.N[0]), dtype=complex)
    h0_hat = np.zeros((2, params.N[0]), dtype=complex)
    w = np.zeros((params.N[0], ), dtype=complex)
    w1 = np.zeros((params.N[0], ), dtype=complex)

    nu, dt, N = params.nu, params.dt, params.N

    alfa = K2[0] - 2.0/nu/dt

    # Collect all matrices
    mat = config.AttributeDict(
        dict(CDD=inner_product((ST, 0), (ST, 1)),
             AB=HelmholtzCoeff(N[0], 1., -(K2 - 2.0/nu/dt), 0, ST.quad),
             AC=BiharmonicCoeff(N[0], nu*dt/2., (1. - nu*dt*K2),
                                -(K2 - nu*dt/2.*K4), 0, SB.quad),
             # Matrices for biharmonic equation
             CBD=inner_product((SB, 0), (ST, 1)),
             ABB=inner_product((SB, 0), (SB, 2)),
             BBB=inner_product((SB, 0), (SB, 0)),
             SBB=inner_product((SB, 0), (SB, 4)),
             # Matrices for Helmholtz equation
             ADD=inner_product((ST, 0), (ST, 2)),
             BDD=inner_product((ST, 0), (ST, 0)),
             BBD=inner_product((SB, 0), (ST, 0)),
             CDB=inner_product((ST, 0), (SB, 1)),
             ADD0=inner_product((ST0, 0), (ST0, 2)),
             BDD0=inner_product((ST0, 0), (ST0, 0)),))

    # Preassembled linear solvers for the implicit steps.
    la = config.AttributeDict(
        dict(HelmholtzSolverG=Helmholtz(mat.ADD, mat.BDD,
                                        -np.ones((1, 1, 1)),
                                        (K2+2.0/nu/dt)),
             BiharmonicSolverU=Biharmonic(mat.SBB, mat.ABB, mat.BBB,
                                          -nu*dt/2.*np.ones((1, 1, 1)),
                                          (1.+nu*dt*K2),
                                          (-(K2 + nu*dt/2.*K4))),
             HelmholtzSolverU0=Helmholtz(mat.ADD0, mat.BDD0,
                                         np.array([-1.]),
                                         np.array([2./nu/dt])),
             TDMASolverD=TDMA(inner_product((ST, 0), (ST, 0)))))

    hdf5file = KMMFile(config.params.solver,
                       checkpoint={'space': VFS,
                                   'data': {'0': {'U': [U_hat]},
                                            '1': {'U': [U_hat0]}}},
                       results={'space': VFS,
                                'data': {'U': [U]}})

    return config.AttributeDict(locals())