Example #1
def test_helmholtz3D(family, axis):
    """Assemble the 3D Helmholtz operator along one non-periodic axis and
    verify its matvec and solve against the individual matrices."""
    # cla/lla are module-level aliases for the Chebyshev/Legendre linear-algebra modules
    la = lla
    if family == 'chebyshev':
        la = cla
    N = (8, 9, 10)
    SD = Basis(N[allaxes3D[axis][0]], family=family, bc=(0, 0))  # non-periodic basis, homogeneous Dirichlet
    K1 = Basis(N[allaxes3D[axis][1]], family='F', dtype='D')     # complex Fourier
    K2 = Basis(N[allaxes3D[axis][2]], family='F', dtype='d')     # real Fourier
    subcomms = mpi4py_fft.pencil.Subcomm(MPI.COMM_WORLD, [0, 1, 1])
    bases = [0] * 3
    bases[allaxes3D[axis][0]] = SD
    bases[allaxes3D[axis][1]] = K1
    bases[allaxes3D[axis][2]] = K2
    T = TensorProductSpace(subcomms, bases, axes=allaxes3D[axis])
    u = TrialFunction(T)
    v = TestFunction(T)
    if family == 'chebyshev':
        mat = inner(v, div(grad(u)))     # Chebyshev: keep the second-derivative (strong) form
    else:
        mat = inner(grad(v), grad(u))    # Legendre: integrate by parts (weak form)

    H = la.Helmholtz(*mat)
    u = Function(T)
    s = SD.sl[SD.slice()]
    u[s] = np.random.random(u[s].shape) + 1j * np.random.random(u[s].shape)
    f = Function(T)
    f = H.matvec(u, f)

    g0 = Function(T)
    g1 = Function(T)
    M = {d.get_key(): d for d in mat}
    g0 = M['ADDmat'].matvec(u, g0)  # stiffness contribution
    g1 = M['BDDmat'].matvec(u, g1)  # mass contribution

    assert np.linalg.norm(f - (g0 + g1)) < 1e-12, np.linalg.norm(f - (g0 + g1))

    uc = Function(T)
    uc = H(uc, f)  # solve H*uc = f; should recover the original u
    assert np.linalg.norm(uc - u) < 1e-12
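
A minimal pytest driver sketch (not from the original suite): the 'legendre' family is assumed from the lla alias, and the axis values index the orderings in the module-level allaxes3D.

import pytest

@pytest.mark.parametrize('family', ('chebyshev', 'legendre'))
@pytest.mark.parametrize('axis', (0, 1, 2))
def test_helmholtz3D_param(family, axis):
    # Delegate to test_helmholtz3D above for every family/axis combination
    test_helmholtz3D(family, axis)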
Example #2
def test_PDMA(quad):
    """Solve (A + B)*u_hat = f_hat in a Chebyshev biharmonic basis and check
    the pentadiagonal (PDMA) solver against a dense direct solve."""
    # N, PDMA and solve are expected at module level (N: number of modes,
    # solve: a dense solver such as scipy.linalg.solve)
    SB = Basis(N, 'C', bc='Biharmonic', quad=quad, plan=True)
    u = TrialFunction(SB)
    v = TestFunction(SB)
    points, weights = SB.points_and_weights(N)
    fj = Array(SB, buffer=np.random.randn(N))
    f_hat = Function(SB)
    f_hat = inner(v, fj, output_array=f_hat)

    A = inner(v, div(grad(u)))  # Laplacian (stiffness-type) matrix
    B = inner(v, u)             # mass matrix
    s = SB.slice()

    H = A + B  # combined operator

    P = PDMA(A, B, A.scale, B.scale, solver='cython')  # pentadiagonal solver for the same system

    u_hat = Function(SB)
    u_hat[s] = solve(H.diags().toarray()[s, s], f_hat[s])  # dense reference solution

    u_hat2 = Function(SB)
    u_hat2 = P(u_hat2, f_hat)  # PDMA solution

    assert np.allclose(u_hat2, u_hat)
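
A pytest driver sketch for the example above; the quadrature names 'GC' (Chebyshev-Gauss) and 'GL' (Chebyshev-Gauss-Lobatto) are assumed to be the valid options for the Chebyshev basis.

import pytest

@pytest.mark.parametrize('quad', ('GC', 'GL'))
def test_PDMA_param(quad):
    # Run the PDMA check for both quadrature rules
    test_PDMA(quad)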
Example #3
def get_context():
    """Set up the context for the coupled velocity-pressure solver: bases,
    tensor product spaces, wavenumbers, work arrays and the block matrix of
    the implicitly treated terms. Relies on the module-level params, comm
    and config objects."""

    # All Chebyshev bases must use the same quadrature points and weights
    assert params.Dquad == params.Bquad
    collapse_fourier = params.dealias != '3/2-rule'
    ST = Basis(params.N[0], 'C', bc=(0, 0), quad=params.Dquad)
    CT = Basis(params.N[0], 'C', quad=params.Dquad)
    CP = Basis(params.N[0], 'C', quad=params.Dquad)
    K0 = Basis(params.N[1], 'F', domain=(0, params.L[1]), dtype='D')
    K1 = Basis(params.N[2], 'F', domain=(0, params.L[2]), dtype='d')
    CP.slice = lambda: slice(0, CT.N)

    kw0 = {'threads': params.threads,
           'planner_effort': params.planner_effort["dct"],
           'slab': (params.decomposition == 'slab'),
           'collapse_fourier': collapse_fourier}
    FST = TensorProductSpace(comm, (ST, K0, K1), **kw0)    # Dirichlet
    FCT = TensorProductSpace(comm, (CT, K0, K1), **kw0)    # Regular Chebyshev N
    FCP = TensorProductSpace(comm, (CP, K0, K1), **kw0)    # Regular Chebyshev N-2
    VFS = VectorTensorProductSpace(FST)       # vector space for the velocity
    VCT = VectorTensorProductSpace(FCT)
    VQ = MixedTensorProductSpace([VFS, FCP])  # coupled velocity-pressure space

    mask = FST.get_mask_nyquist() if params.mask_nyquist else None

    # Padded
    kw = {'padding_factor': 1.5 if params.dealias == '3/2-rule' else 1,
          'dealias_direct': params.dealias == '2/3-rule'}
    if params.dealias == '3/2-rule':
        # Requires new bases due to planning and transforms on different size arrays
        STp = Basis(params.N[0], 'C', bc=(0, 0), quad=params.Dquad)
        CTp = Basis(params.N[0], 'C', quad=params.Dquad)
    else:
        STp, CTp = ST, CT
    K0p = Basis(params.N[1], 'F', dtype='D', domain=(0, params.L[1]), **kw)
    K1p = Basis(params.N[2], 'F', dtype='d', domain=(0, params.L[2]), **kw)
    FSTp = TensorProductSpace(comm, (STp, K0p, K1p), **kw0)
    FCTp = TensorProductSpace(comm, (CTp, K0p, K1p), **kw0)
    VFSp = VectorTensorProductSpace(FSTp)
    VCp = MixedTensorProductSpace([FSTp, FCTp, FCTp])

    # Note: this deliberately rebinds the builtin names float/complex for the rest of the function
    float, complex, mpitype = datatypes("double")

    # Constraints on the pressure block (component 3 of VQ): pin two of its coefficients to zero
    constraints = ((3, 0, 0),
                   (3, params.N[0]-1, 0))

    # Mesh variables
    X = FST.local_mesh(True)
    x0, x1, x2 = FST.mesh()
    K = FST.local_wavenumbers(scaled=True)

    # Solution variables
    UP_hat = Function(VQ)
    UP_hat0 = Function(VQ)
    U_hat, P_hat = UP_hat
    U_hat0, P_hat0 = UP_hat0

    UP = Array(VQ)
    UP0 = Array(VQ)
    U, P = UP
    U0, P0 = UP0

    # primary variable
    u = UP_hat

    H_hat = Function(VFS)
    H_hat0 = Function(VFS)
    H_hat1 = Function(VFS)

    dU = Function(VQ)
    Source = Array(VFS) # Note - not using VQ. Only used for constant pressure gradient
    Sk = Function(VFS)

    K2 = K[1]*K[1]+K[2]*K[2]

    for i in range(3):
        K[i] = K[i].astype(float)

    work = work_arrays()
    u_dealias = Array(VFSp)
    curl_hat = Function(VCp)
    curl_dealias = Array(VCp)

    nu, dt, N = params.nu, params.dt, params.N

    up = TrialFunction(VQ)
    vq = TestFunction(VQ)

    ut, pt = up
    vt, qt = vq

    alfa = 2./nu/dt
    a0 = inner(vt, alfa*ut - div(grad(ut)))  # Helmholtz operator on the velocity
    a1 = inner(vt, (2./nu)*grad(pt))         # pressure gradient coupling
    a2 = inner(qt, (2./nu)*div(ut))          # continuity equation
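    # Schematically, a0 + a1 + a2 assemble the coupled block system
    #   alfa*u - div(grad(u)) + (2/nu)*grad(p) = rhs   (momentum rows, tested with vt)
    #   (2/nu)*div(u) = 0                              (continuity rows, tested with qt)
    # with alfa = 2/(nu*dt); the right-hand side is built elsewhere in the solver.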

    M = BlockMatrix(a0+a1+a2)

    # Collect all matrices
    mat = config.AttributeDict(
        dict(CDD=inner_product((ST, 0), (ST, 1)),
             AB=HelmholtzCoeff(N[0], 1., alfa-K2, 0, ST.quad),))

    la = None

    hdf5file = CoupledFile(config.params.solver,
                           checkpoint={'space': VQ,
                                       'data': {'0': {'UP': [UP_hat]},
                                                '1': {'UP': [UP_hat0]}}},
                           results={'space': VFS,
                                    'data': {'U': [U]}})

    return config.AttributeDict(locals())
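
A brief usage sketch (hypothetical): since get_context returns config.AttributeDict(locals()), every local defined above is assumed to be reachable as an attribute of the returned object.

context = get_context()
U_hat = context.U_hat        # velocity in spectral space
P_hat = context.P_hat        # pressure in spectral space
M = context.M                # block matrix for the implicit coupled system
hdf5file = context.hdf5file  # checkpoint/result writer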