Example #1
def test_interpolating_vector_field():
    n = 32
    array_vx = np.array([[(i + j) / n for j in range(n + 1)]
                         for i in range(n + 1)])
    missing = -9999.0
    array_vx[0, 0] = missing
    array_vx = np.flipud(array_vx)

    array_vy = np.array([[(j - i) / n for j in range(n + 1)]
                         for i in range(n + 1)])
    array_vy[-1, -1] = -9999.0
    array_vy = np.flipud(array_vy)

    vx = make_rio_dataset(array_vx, missing)
    vy = make_rio_dataset(array_vy, missing)

    mesh = make_domain(48,
                       48,
                       xmin=1 / 4,
                       ymin=1 / 4,
                       width=1 / 2,
                       height=1 / 2)
    x, y = firedrake.SpatialCoordinate(mesh)
    V = firedrake.VectorFunctionSpace(mesh, family='CG', degree=1)
    u = firedrake.interpolate(firedrake.as_vector((x + y, x - y)), V)
    v = icepack.interpolate((vx, vy), V)

    assert firedrake.norm(u - v) / firedrake.norm(u) < 1e-10
Example #2
def test_vector_field():
    Nx, Ny = 16, 16
    mesh2d = firedrake.UnitSquareMesh(Nx, Ny)
    mesh3d = firedrake.ExtrudedMesh(mesh2d, layers=1)
    x, y, z = firedrake.SpatialCoordinate(mesh3d)

    V3D = firedrake.VectorFunctionSpace(mesh3d,
                                        "CG",
                                        2,
                                        vfamily="GL",
                                        vdegree=5,
                                        dim=2)
    u3d = firedrake.interpolate(firedrake.as_vector((1 - z**4, 0)), V3D)
    u_avg = depth_average(u3d)

    V2D = firedrake.VectorFunctionSpace(mesh2d, "CG", 2)
    x, y = firedrake.SpatialCoordinate(mesh2d)
    u2d = firedrake.interpolate(firedrake.as_vector((4 / 5, 0)), V2D)

    assert norm(u_avg - u2d) / norm(u2d) < 1 / (Nx * Ny)**2

    V0 = firedrake.VectorFunctionSpace(mesh3d,
                                       "CG",
                                       2,
                                       vfamily="GL",
                                       vdegree=0,
                                       dim=2)
    u_lift = lift3d(u_avg, V0)
    assert norm(depth_average(u_lift) - u2d) / norm(u2d) < 1 / (Nx * Ny)**2
Example #3
def setup_2d_recovery(dirname):

    L = 100.
    W = 100.

    deltax = L / 5.
    deltay = W / 5.
    ncolumnsx = int(L / deltax)
    ncolumnsy = int(W / deltay)

    mesh = PeriodicRectangleMesh(ncolumnsx,
                                 ncolumnsy,
                                 L,
                                 W,
                                 direction='y',
                                 quadrilateral=True)
    x, y = SpatialCoordinate(mesh)

    # spaces
    VDG0 = FunctionSpace(mesh, "DG", 0)
    VCG1 = FunctionSpace(mesh, "CG", 1)
    VDG1 = FunctionSpace(mesh, "DG", 1)
    Vu = FunctionSpace(mesh, "RTCF", 1)
    VuCG1 = VectorFunctionSpace(mesh, "CG", 1)
    VuDG1 = VectorFunctionSpace(mesh, "DG", 1)

    # set up initial conditions
    np.random.seed(0)
    expr = np.random.randn() + np.random.randn() * x

    # our actual theta and rho and v
    rho_CG1_true = Function(VCG1).interpolate(expr)
    v_CG1_true = Function(VuCG1).interpolate(as_vector([expr, expr]))

    # make the initial fields by projecting expressions into the lowest order spaces
    rho_DG0 = Function(VDG0).interpolate(expr)
    rho_CG1 = Function(VCG1)
    v_Vu = Function(Vu).project(as_vector([expr, expr]))
    v_CG1 = Function(VuCG1)

    # make the recoverers and do the recovery
    rho_recoverer = Recoverer(rho_DG0,
                              rho_CG1,
                              VDG=VDG1,
                              boundary_method=Boundary_Method.dynamics)
    v_recoverer = Recoverer(v_Vu,
                            v_CG1,
                            VDG=VuDG1,
                            boundary_method=Boundary_Method.dynamics)

    rho_recoverer.project()
    v_recoverer.project()

    rho_diff = errornorm(rho_CG1, rho_CG1_true) / norm(rho_CG1_true)
    v_diff = errornorm(v_CG1, v_CG1_true) / norm(v_CG1_true)

    return (rho_diff, v_diff)
Example #4
def run_tracer(setup):

    # Get initial conditions from shared config
    state = setup.state
    mesh = state.mesh
    dt = state.dt
    output = state.output

    x = SpatialCoordinate(state.mesh)
    H = 0.1
    parameters = ShallowWaterParameters(H=H)
    Omega = parameters.Omega
    g = parameters.g
    umax = setup.umax
    R = setup.radius
    fexpr = 2 * Omega * x[2] / R

    # Need to create a new state containing parameters
    state = State(mesh, dt=dt, output=output, parameters=parameters)

    # Equations
    eqns = LinearShallowWaterEquations(state,
                                       setup.family,
                                       setup.degree,
                                       fexpr=fexpr)
    tracer_eqn = AdvectionEquation(state, state.spaces("DG"), "tracer")

    # Specify initial prognostic fields
    u0 = state.fields("u")
    D0 = state.fields("D")
    tracer0 = state.fields("tracer", D0.function_space())
    tracer_end = Function(D0.function_space())

    # Expressions for initial fields corresponding to Williamson 2 test case
    Dexpr = H - ((R * Omega * umax) * (x[2] * x[2] / (R * R))) / g
    u0.project(setup.uexpr)
    D0.interpolate(Dexpr)
    tracer0.interpolate(setup.f_init)
    tracer_end.interpolate(setup.f_end)

    # set up transport schemes
    transport_schemes = [ForwardEuler(state, "D")]

    # Set up tracer transport
    tracer_transport = [(tracer_eqn, SSPRK3(state))]

    # build time stepper
    stepper = CrankNicolson(state,
                            eqns,
                            transport_schemes,
                            auxiliary_equations_and_schemes=tracer_transport)

    stepper.run(t=0, tmax=setup.tmax)

    error = norm(state.fields("tracer") - tracer_end) / norm(tracer_end)

    return error
Example #5
def test_2D_cartesian_recovery(geometry, element, mesh, expr):

    family = "RTCF" if element == "quadrilateral" else "BDM"

    # horizontal base spaces
    cell = mesh.ufl_cell().cellname()

    # DG1
    DG1_elt = FiniteElement("DG", cell, 1, variant="equispaced")
    DG1 = FunctionSpace(mesh, DG1_elt)
    vec_DG1 = VectorFunctionSpace(mesh, DG1_elt)

    # spaces
    DG0 = FunctionSpace(mesh, "DG", 0)
    CG1 = FunctionSpace(mesh, "CG", 1)
    Vu = FunctionSpace(mesh, family, 1)
    vec_CG1 = VectorFunctionSpace(mesh, "CG", 1)

    # our actual theta and rho and v
    rho_CG1_true = Function(CG1).interpolate(expr)
    v_CG1_true = Function(vec_CG1).interpolate(as_vector([expr, expr]))

    # make the initial fields by projecting expressions into the lowest order spaces
    rho_DG0 = Function(DG0).interpolate(expr)
    rho_CG1 = Function(CG1)
    v_Vu = Function(Vu).project(as_vector([expr, expr]))
    v_CG1 = Function(vec_CG1)

    # make the recoverers and do the recovery
    rho_recoverer = Recoverer(rho_DG0,
                              rho_CG1,
                              VDG=DG1,
                              boundary_method=Boundary_Method.dynamics)
    v_recoverer = Recoverer(v_Vu,
                            v_CG1,
                            VDG=vec_DG1,
                            boundary_method=Boundary_Method.dynamics)

    rho_recoverer.project()
    v_recoverer.project()

    rho_diff = errornorm(rho_CG1, rho_CG1_true) / norm(rho_CG1_true)
    v_diff = errornorm(v_CG1, v_CG1_true) / norm(v_CG1_true)

    tolerance = 1e-7
    error_message = ("""
                     Incorrect recovery for {variable} with {boundary} boundary method
                     on {geometry} 2D Cartesian plane with {element} elements
                     """)
    assert rho_diff < tolerance, error_message.format(variable='rho',
                                                      boundary='dynamics',
                                                      geometry=geometry,
                                                      element=element)
    assert v_diff < tolerance, error_message.format(variable='v',
                                                    boundary='dynamics',
                                                    geometry=geometry,
                                                    element=element)
Example #6
def solve_something(mesh):
    V = fd.FunctionSpace(mesh, "CG", 1)
    u = fd.Function(V)
    v = fd.TestFunction(V)

    x, y = fd.SpatialCoordinate(mesh)
    # f = fd.sin(x) * fd.sin(y) + x**2 + y**2
    # uex = x**4 * y**4
    uex = fd.sin(x) * fd.sin(y)  #*(x*y)**3
    # def source(xs, ys):
    #     return 1/((x-xs)**2+(y-ys)**2 + 0.1)
    # uex = source(0, 0)
    uex = uex - fd.assemble(uex * fd.dx) / fd.assemble(1 * fd.dx(domain=mesh))
    # f = fd.conditional(fd.ge(abs(x)-abs(y), 0), 1, 0)
    from firedrake import inner, grad, dx, ds, div, sym
    eps = fd.Constant(0.0)
    f = uex - div(grad(uex)) + eps * div(grad(div(grad(uex))))
    n = fd.FacetNormal(mesh)
    g = inner(grad(uex), n)
    g1 = inner(grad(div(grad(uex))), n)
    g2 = div(grad(uex))
    # F = 0.1 * inner(u, v) * dx + inner(grad(u), grad(v)) * dx + inner(grad(grad(u)), grad(grad(v))) * dx - f * v * dx - g * v * ds
    F = (inner(u, v) * dx + inner(grad(u), grad(v)) * dx
         - f * v * dx - g * v * ds)
    F += eps * inner(div(grad(u)), div(grad(v))) * dx
    F += eps * g1 * v * ds
    F -= eps * g2 * inner(grad(v), n) * ds
    # f = -div(grad(uex))
    # F = inner(grad(u), grad(v)) * dx - f * v * dx

    # bc = fd.DirichletBC(V, uex, "on_boundary")
    bc = None
    fd.solve(F == 0,
             u,
             bcs=bc,
             solver_parameters={
                 "ksp_type": "cg",
                 "ksp_atol": 1e-13,
                 "ksp_rtol": 1e-13,
                 "ksp_dtol": 1e-13,
                 "ksp_stol": 1e-13,
                 "pc_type": "jacobi",
                 "pc_factor_mat_solver_type": "mumps",
                 "snes_type": "ksponly",
                 "ksp_converged_reason": None
             })
    print("||u-uex||             =", fd.norm(u - uex))
    print("||grad(u-uex)||       =", fd.norm(grad(u - uex)))
    print("||grad(grad(u-uex))|| =", fd.norm(grad(grad(u - uex))))
    err = fd.Function(
        fd.TensorFunctionSpace(mesh, "DG",
                               V.ufl_element().degree() - 2)).interpolate(
                                   grad(grad(u - uex)))
    # err = fd.Function(fd.FunctionSpace(mesh, "DG", V.ufl_element().degree())).interpolate(u-uex)
    fd.File(outdir + "sln.pvd").write(u)
    fd.File(outdir + "err.pvd").write(err)
Example #7
def run_advection_diffusion(tmpdir):

    # Mesh, state and equation
    L = 10
    mesh = PeriodicIntervalMesh(20, L)
    dt = 0.02
    tmax = 1.0

    diffusion_params = DiffusionParameters(kappa=0.75, mu=5)
    output = OutputParameters(dirname=str(tmpdir), dumpfreq=25)
    state = State(mesh, dt=dt, output=output)
    V = state.spaces("DG", "DG", 1)
    Vu = VectorFunctionSpace(mesh, "CG", 1)

    equation = AdvectionDiffusionEquation(
        state, V, "f", Vu=Vu, diffusion_parameters=diffusion_params)

    problem = [(equation, ((SSPRK3(state), transport), (BackwardEuler(state),
                                                        diffusion)))]

    # Initial conditions
    x = SpatialCoordinate(mesh)
    xc_init = 0.25 * L
    xc_end = 0.75 * L
    umax = 0.5 * L / tmax

    # Get minimum distance on periodic interval to xc
    x_init = conditional(
        sqrt((x[0] - xc_init)**2) < 0.5 * L, x[0] - xc_init,
        L + x[0] - xc_init)

    x_end = conditional(
        sqrt((x[0] - xc_end)**2) < 0.5 * L, x[0] - xc_end, L + x[0] - xc_end)

    f_init = 5.0
    f_end = f_init / 2.0
    f_width_init = L / 10.0
    f_width_end = f_width_init * 2.0
    f_init_expr = f_init * exp(-(x_init / f_width_init)**2)
    f_end_expr = f_end * exp(-(x_end / f_width_end)**2)

    state.fields('f').interpolate(f_init_expr)
    state.fields('u').interpolate(as_vector([Constant(umax)]))
    f_end = state.fields('f_end', V).interpolate(f_end_expr)

    # Time stepper
    timestepper = PrescribedTransport(state, problem)
    timestepper.run(0, tmax=tmax)

    error = norm(state.fields('f') - f_end) / norm(f_end)

    return error
Example #8
def test_dry_compressible(tmpdir):

    dirname = str(tmpdir)
    state, check_state = run_dry_compressible(dirname)

    for variable in ['u', 'rho', 'theta']:
        new_variable = state.fields(variable)
        check_variable = check_state.fields(variable)
        error = norm(new_variable - check_variable) / norm(check_variable)

        # Slack values chosen to be robust to different platforms
        assert error < 1e-10, f'Values for {variable} in ' + \
            'Dry Compressible test do not match KGO values'
Example #9
def test_interpolating_scalar_field():
    n = 32
    array = np.array([[(i + j) / n for j in range(n + 1)] for i in range(n + 1)])
    missing = -9999.0
    array[0, 0] = missing
    array = np.flipud(array)
    dataset = make_rio_dataset(array, missing)

    mesh = make_domain(48, 48, xmin=1 / 4, ymin=1 / 4, width=1 / 2, height=1 / 2)
    x, y = firedrake.SpatialCoordinate(mesh)
    Q = firedrake.FunctionSpace(mesh, "CG", 1)
    p = firedrake.interpolate(x + y, Q)
    q = icepack.interpolate(dataset, Q)

    assert firedrake.norm(p - q) / firedrake.norm(p) < 1e-10
Example #10
def test_1D_recovery(geometry, mesh, expr):

    # horizontal base spaces
    cell = mesh.ufl_cell().cellname()

    # DG1
    DG1_elt = FiniteElement("DG", cell, 1, variant="equispaced")
    DG1 = FunctionSpace(mesh, DG1_elt)

    # spaces
    DG0 = FunctionSpace(mesh, "DG", 0)
    CG1 = FunctionSpace(mesh, "CG", 1)

    # our actual theta and rho and v
    rho_CG1_true = Function(CG1).interpolate(expr)

    # make the initial fields by projecting expressions into the lowest order spaces
    rho_DG0 = Function(DG0).interpolate(expr)
    rho_CG1 = Function(CG1)

    # make the recoverers and do the recovery
    rho_recoverer = Recoverer(rho_DG0, rho_CG1, VDG=DG1, boundary_method=Boundary_Method.dynamics)

    rho_recoverer.project()

    rho_diff = errornorm(rho_CG1, rho_CG1_true) / norm(rho_CG1_true)

    tolerance = 1e-7
    error_message = ("""
                     Incorrect recovery for {variable} with {boundary} boundary method
                     on {geometry} 1D domain
                     """)
    assert rho_diff < tolerance, error_message.format(variable='rho', boundary='dynamics', geometry=geometry)
Example #11
def test_nearest_neighbor_interpolation():
    n = 32
    array = np.array([[(i + j) / n for j in range(n + 1)] for i in range(n + 1)])
    missing = -9999.0
    array[0, 0] = missing
    array = np.flipud(array)
    dataset = make_rio_dataset(array, missing)

    mesh = make_domain(48, 48, xmin=1 / 4, ymin=1 / 4, width=1 / 2, height=1 / 2)
    x, y = firedrake.SpatialCoordinate(mesh)
    Q = firedrake.FunctionSpace(mesh, "CG", 1)
    p = firedrake.interpolate(x + y, Q)
    q = icepack.interpolate(dataset, Q, method="nearest")

    relative_error = firedrake.norm(p - q) / firedrake.norm(p)
    assert (relative_error > 1e-10) and (relative_error < 1 / n)
Example #12
def callback(inverse_solver):
    E, R = inverse_solver.objective, inverse_solver.regularization
    misfit = firedrake.assemble(E) / area
    regularization = firedrake.assemble(R) / area
    q = inverse_solver.parameter
    error = firedrake.norm(q - q_true) / np.sqrt(area)
    print(misfit, regularization, error, flush=True)
Example #13
def test_eigenvalues():
    nx, ny = 32, 32
    mesh = firedrake.UnitSquareMesh(nx, ny)
    x, y = firedrake.SpatialCoordinate(mesh)

    V = firedrake.VectorFunctionSpace(mesh, family='CG', degree=2)
    u = interpolate(as_vector((x, 0)), V)

    Q = firedrake.FunctionSpace(mesh, family='DG', degree=2)
    ε = sym(grad(u))
    Λ1, Λ2 = eigenvalues(ε)
    λ1 = firedrake.project(Λ1, Q)
    λ2 = firedrake.project(Λ2, Q)

    assert norm(λ1 - Constant(1)) < norm(u) / (nx * ny)
    assert norm(λ2) < norm(u) / (nx * ny)
Example #14
def norm_weighted(u, k):
    """Computes the weighted H^1 norm of u.

    Inputs:

    u - a Firedrake Function

    k - positive real - the wavenumber.

    Output:

    positive real - the weighted H^1 norm of u.
    """

    return np.sqrt(norm(u, norm_type="H1")**2.0
                   + (k**2.0 - 1.0) * norm(u, norm_type="L2")**2.0)
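A minimal usage sketch of the helper above; the mesh, wavenumber and test field below are illustrative assumptions, not part of the original snippet:

import numpy as np
from firedrake import (UnitSquareMesh, FunctionSpace, SpatialCoordinate,
                       Function, sin, pi, norm)

# Build a small assumed test problem and evaluate the weighted H^1 norm,
# i.e. sqrt(||u||_{H^1}^2 + (k^2 - 1) * ||u||_{L^2}^2).
mesh = UnitSquareMesh(16, 16)
V = FunctionSpace(mesh, "CG", 1)
x, y = SpatialCoordinate(mesh)
u = Function(V).interpolate(sin(pi * x) * sin(pi * y))
k = 10.0
print(norm_weighted(u, k))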
Example #15
def test_scalar_field():
    Nx, Ny = 16, 16
    mesh2d = firedrake.UnitSquareMesh(Nx, Ny)
    mesh3d = firedrake.ExtrudedMesh(mesh2d, layers=1)
    x, y, z = firedrake.SpatialCoordinate(mesh3d)

    Q3D = firedrake.FunctionSpace(mesh3d,
                                  family='CG',
                                  degree=2,
                                  vfamily='GL',
                                  vdegree=5)
    q3d = firedrake.interpolate((x**2 + y**2) * (1 - z**4), Q3D)
    q_avg = depth_average(q3d)

    p3d = firedrake.interpolate(x**2 + y**2, Q3D)
    p_avg = depth_average(p3d, weight=1 - z**4)

    Q2D = firedrake.FunctionSpace(mesh2d, family='CG', degree=2)
    x, y = firedrake.SpatialCoordinate(mesh2d)
    q2d = firedrake.interpolate(4 * (x**2 + y**2) / 5, Q2D)

    assert q_avg.ufl_domain() is mesh2d
    assert norm(q_avg - q2d) / norm(q2d) < 1 / (Nx * Ny)**2
    assert norm(p_avg - q2d) / norm(q2d) < 1 / (Nx * Ny)**2

    Q0 = firedrake.FunctionSpace(mesh3d,
                                 family='CG',
                                 degree=2,
                                 vfamily='GL',
                                 vdegree=0)
    q_lift = lift3d(q_avg, Q0)
    assert norm(depth_average(q_lift) - q2d) / norm(q2d) < 1 / (Nx * Ny)**2
Example #16
def test_checkpointing(tmpdir):

    dirname_1 = str(tmpdir) + '/checkpointing_1'
    dirname_2 = str(tmpdir) + '/checkpointing_2'
    state_1, stepper_1, dt = setup_checkpointing(dirname_1)
    state_2, stepper_2, dt = setup_checkpointing(dirname_2)

    # ------------------------------------------------------------------------ #
    # Run for 4 time steps and store values
    # ------------------------------------------------------------------------ #

    initialise_fields(state_1)
    stepper_1.run(t=0.0, tmax=4 * dt)

    # ------------------------------------------------------------------------ #
    # Start again, run for 2 time steps, checkpoint and then run for 2 more
    # ------------------------------------------------------------------------ #

    initialise_fields(state_2)
    stepper_2.run(t=0.0, tmax=2 * dt)

    # Wipe fields, then pickup
    state_2.fields('u').project(as_vector([-10.0, 0.0]))
    state_2.fields('rho').interpolate(Constant(0.0))
    state_2.fields('theta').interpolate(Constant(0.0))

    stepper_2.run(t=2 * dt, tmax=4 * dt, pickup=True)

    # ------------------------------------------------------------------------ #
    # Compare fields against saved values for run without checkpointing
    # ------------------------------------------------------------------------ #

    # Pick up from both stored checkpoint files
    # This is the best way to compare fields from different meshes
    for field_name in ['u', 'rho', 'theta']:
        with DumbCheckpoint(dirname_1 + '/chkpt', mode=FILE_READ) as chkpt:
            field_1 = Function(state_1.fields(field_name).function_space(),
                               name=field_name)
            chkpt.load(field_1)
            # These are preserved in the comments for when we can use CheckpointFile
            # mesh = chkpt.load_mesh(name='firedrake_default_extruded')
            # field_1 = chkpt.load_function(mesh, name=field_name)
        with DumbCheckpoint(dirname_2 + '/chkpt', mode=FILE_READ) as chkpt:
            field_2 = Function(state_1.fields(field_name).function_space(),
                               name=field_name)
            chkpt.load(field_2)
            # These are preserved in the comments for when we can use CheckpointFile
            # field_2 = chkpt.load_function(mesh, name=field_name)

        error = norm(field_1 - field_2)
        assert error < 1e-15, \
            f'Checkpointed field {field_name} is not equal to non-checkpointed field'
Example #17
def getEps(dC, p, dt, K, norm_type=np.inf):
    if len(dC) == 0:
        return np.array([])
    if norm_type == np.inf:
        eps = []
        for dCi in dC[p:]:
            with dCi.dat.vec_ro as dCv:
                norm_inf = dCv.norm(PETSc.NormType.NORM_INFINITY)
                eps.append(norm_inf)
        eps = np.array(eps) * dt * K
    elif norm_type == 2:
        eps = np.array([fd.norm(dCi) * dt * K for dCi in dC[p:]])
    return eps
Example #18
def test_diagnostic_solver_convergence():
    model = icepack.models.ShallowIce(penalty=penalty_low)

    for degree in range(1, 4):
        delta_x, error = [], []
        for N in range(0, 8 - 1 * (degree - 1), 1):
            mesh = make_mesh(R_mesh, N)

            Q = firedrake.FunctionSpace(mesh, 'CG', degree)
            V = firedrake.VectorFunctionSpace(mesh, 'CG', degree)

            h_expr = Bueler_profile(mesh, R)
            u_exact = interpolate(exact_u(h_expr, Q), V)

            h = interpolate(h_expr, Q)
            s = interpolate(h_expr, Q)
            u = firedrake.Function(V)

            solver = icepack.solvers.FlowSolver(model)
            u_num = solver.diagnostic_solve(
                velocity=u,
                thickness=h,
                surface=s,
                fluidity=A
            )
            error.append(norm(u_exact - u_num) / norm(u_exact))
            delta_x.append(mesh.cell_sizes.dat.data_ro.min())

            print(delta_x[-1], error[-1])

            assert assemble(model.scale(velocity=u_num, thickness=h)) > 0

        log_delta_x = np.log2(np.array(delta_x))
        log_error = np.log2(np.array(error))
        slope, intercept = np.polyfit(log_delta_x, log_error, 1)

        print('log(error) ~= {:g} * log(dx) + {:g}'.format(slope, intercept))
        assert slope > 0.9
Example #19
def heat(n, deg, time_stages, stage_type="deriv", splitting=IA):
    N = 2**n
    msh = UnitIntervalMesh(N)

    params = {
        "snes_type": "ksponly",
        "ksp_type": "preonly",
        "mat_type": "aij",
        "pc_type": "lu"
    }

    V = FunctionSpace(msh, "CG", deg)
    x, = SpatialCoordinate(msh)

    t = Constant(0.0)
    dt = Constant(2.0 / N)

    uexact = exp(-t) * sin(pi * x)
    rhs = expand_derivatives(diff(uexact, t)) - div(grad(uexact))

    butcher_tableau = GaussLegendre(time_stages)

    u = project(uexact, V)

    v = TestFunction(V)

    F = (inner(Dt(u), v) * dx + inner(grad(u), grad(v)) * dx -
         inner(rhs, v) * dx)

    bc = DirichletBC(V, Constant(0), "on_boundary")

    stepper = TimeStepper(F,
                          butcher_tableau,
                          t,
                          dt,
                          u,
                          bcs=bc,
                          solver_parameters=params,
                          stage_type=stage_type,
                          splitting=splitting)

    while (float(t) < 1.0):
        if (float(t) + float(dt) > 1.0):
            dt.assign(1.0 - float(t))
        stepper.advance()
        t.assign(float(t) + float(dt))

    return errornorm(uexact, u) / norm(uexact)
Example #20
def regularization_form(r):
    mesh = UnitSquareMesh(2 ** r, 2 ** r)
    x = SpatialCoordinate(mesh)

    S = VectorFunctionSpace(mesh, "CG", 1)
    beta = 4.0
    reg_solver = RegularizationSolver(S, mesh, beta=beta, gamma=0.0, dx=dx)

    # Exact solution with free Neumann boundary conditions for this domain
    u_exact = Function(S)
    u_exact_component = cos(x[0] * pi * 2) * cos(x[1] * pi * 2)
    u_exact.interpolate(as_vector((u_exact_component, u_exact_component)))
    f = Function(S)
    theta = TestFunction(S)
    f_component = (1 + beta * 8 * pi * pi) * u_exact_component
    f.interpolate(as_vector((f_component, f_component)))
    rhs_form = inner(f, theta) * dx

    velocity = Function(S)
    rhs = assemble(rhs_form)
    reg_solver.solve(velocity, rhs)
    File("solution_vel_unitsquare.pvd").write(velocity)
    return norm(project(u_exact - velocity, S))
Example #21
    def _estimate_error(self):
        """Assuming that the RK stages have been evaluated, estimates
        the temporal truncation error by taking the norm of the
        difference between the new solutions computed by the two
        methods.  Typically will not be called by the end user."""
        dtc = float(self.dt)
        delb = self.delb

        if self.num_fields == 1:
            ks = self.ks
            self.error_func.dat.data[:] = 0.0
            for i in range(self.num_stages):
                self.error_func += dtc * delb[i] * ks[i]
        else:
            k = self.stages
            for i in range(self.num_fields):
                self.error_func.dat.data[i][:] = 0.0
            for s in range(self.num_stages):
                for i in range(self.num_fields):
                    self.error_func.dat.data[i][:] += \
                        dtc * delb[s] * k.dat.data[self.num_fields*s+i][:]

        return norm(self.error_func)
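The estimate above reduces to ||dt * sum_i (b_i - b*_i) * k_i||, where b and b* are the weight rows of the main and embedded Butcher tableaux. A standalone scalar sketch with a Heun/Euler embedded pair (the stage values are hypothetical):

import numpy as np

dt = 0.1
b_main = np.array([0.5, 0.5])       # Heun (2nd-order) weights
b_embedded = np.array([1.0, 0.0])   # embedded Euler (1st-order) weights
delb = b_main - b_embedded          # corresponds to `delb` in the method above
ks = np.array([1.0, 0.905])         # hypothetical stage derivatives k_1, k_2
error_estimate = abs(dt * np.dot(delb, ks))
print(error_estimate)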
Example #22
def test_2D_dirichlet_regions():
    # Can't do mesh refinement because MeshHierarchy ruins the domain tags
    mesh = Mesh("./2D_mesh.msh")
    dim = mesh.geometric_dimension()
    x = SpatialCoordinate(mesh)

    S = VectorFunctionSpace(mesh, "CG", 1)
    beta = 1.0
    reg_solver = RegularizationSolver(
        S, mesh, beta=beta, gamma=0.0, dx=dx, design_domain=0
    )

    # Exact solution with free Neumann boundary conditions for this domain
    u_exact = Function(S)
    u_exact_component = (-cos(x[0] * pi * 2) + 1) * (-cos(x[1] * pi * 2) + 1)

    u_exact.interpolate(
        as_vector(tuple(u_exact_component for _ in range(dim)))
    )
    f = Function(S)
    f_component = (
        -beta
        * (
            4.0 * pi * pi * cos(2 * pi * x[0]) * (-cos(2 * pi * x[1]) + 1)
            + 4.0 * pi * pi * cos(2 * pi * x[1]) * (-cos(2 * pi * x[0]) + 1)
        )
        + u_exact_component
    )
    f.interpolate(as_vector(tuple(f_component for _ in range(dim))))

    theta = TestFunction(S)
    rhs_form = inner(f, theta) * dx

    velocity = Function(S)
    rhs = assemble(rhs_form)
    reg_solver.solve(velocity, rhs)
    assert norm(project(domainify(u_exact, x) - velocity, S)) < 1e-3
Example #23
def test_cond_evap(tmpdir, process):

    dirname = str(tmpdir)
    state, mv_true, mc_true, theta_d_true, mc_init = run_cond_evap(
        dirname, process)

    water_v = state.fields('vapour_mixing_ratio')
    water_c = state.fields('cloud_liquid_mixing_ratio')
    theta_vd = state.fields('theta')
    theta_d = Function(theta_vd.function_space())
    theta_d.interpolate(
        theta_vd / (1 + water_v * state.parameters.R_v / state.parameters.R_d))

    # Check that water vapour is approximately equal to saturation amount
    assert norm(water_v - mv_true) / norm(mv_true) < 0.01, \
        f'Final vapour field is incorrect for {process}'

    # Check that cloud has been created / removed
    denom = norm(mc_true) if process == "condensation" else norm(mc_init)
    assert norm(water_c - mc_true) / denom < 0.1, \
        f'Final cloud field is incorrect for {process}'

    # Check that theta perturbation has correct sign and size
    assert norm(theta_d - theta_d_true) / norm(theta_d_true) < 0.01, \
        f'Latent heating is incorrect for {process}'

    # Check that total moisture conserved
    filename = path.join(dirname, "cond_evap/diagnostics.nc")
    data = Dataset(filename, "r")

    water = data.groups["vapour_mixing_ratio_plus_cloud_liquid_mixing_ratio"]
    total = water.variables["total"]
    water_t_0 = total[0]
    water_t_T = total[-1]

    assert abs(water_t_0 - water_t_T) / water_t_0 < 1e-12, \
        f'Total amount of water should be conserved by {process}'
Example #24
def test_3D_dirichlet_regions():
    # Can't do mesh refinement because MeshHierarchy ruins the domain tags
    mesh = Mesh("./3D_mesh.msh")
    dim = mesh.geometric_dimension()
    x = SpatialCoordinate(mesh)
    solver_parameters_amg = {
        "ksp_type": "cg",
        "ksp_converged_reason": None,
        "ksp_rtol": 1e-7,
        "pc_type": "hypre",
        "pc_hypre_type": "boomeramg",
        "pc_hypre_boomeramg_max_iter": 5,
        "pc_hypre_boomeramg_coarsen_type": "PMIS",
        "pc_hypre_boomeramg_agg_nl": 2,
        "pc_hypre_boomeramg_strong_threshold": 0.95,
        "pc_hypre_boomeramg_interp_type": "ext+i",
        "pc_hypre_boomeramg_P_max": 2,
        "pc_hypre_boomeramg_relax_type_all": "sequential-Gauss-Seidel",
        "pc_hypre_boomeramg_grid_sweeps_all": 1,
        "pc_hypre_boomeramg_truncfactor": 0.3,
        "pc_hypre_boomeramg_max_levels": 6,
    }

    S = VectorFunctionSpace(mesh, "CG", 1)
    beta = 1.0
    reg_solver = RegularizationSolver(
        S,
        mesh,
        beta=beta,
        gamma=0.0,
        dx=dx,
        design_domain=0,
        solver_parameters=solver_parameters_amg,
    )

    # Exact solution with free Neumann boundary conditions for this domain
    u_exact = Function(S)
    u_exact_component = (
        (-cos(x[0] * pi * 2) + 1)
        * (-cos(x[1] * pi * 2) + 1)
        * (-cos(x[2] * pi * 2) + 1)
    )

    u_exact.interpolate(
        as_vector(tuple(u_exact_component for _ in range(dim)))
    )
    f = Function(S)
    f_component = (
        -beta
        * (
            4.0
            * pi
            * pi
            * cos(2 * pi * x[0])
            * (-cos(2 * pi * x[1]) + 1)
            * (-cos(2 * pi * x[2]) + 1)
            + 4.0
            * pi
            * pi
            * cos(2 * pi * x[1])
            * (-cos(2 * pi * x[0]) + 1)
            * (-cos(2 * pi * x[2]) + 1)
            + 4.0
            * pi
            * pi
            * cos(2 * pi * x[2])
            * (-cos(2 * pi * x[1]) + 1)
            * (-cos(2 * pi * x[0]) + 1)
        )
        + u_exact_component
    )
    f.interpolate(as_vector(tuple(f_component for _ in range(dim))))

    theta = TestFunction(S)
    rhs_form = inner(f, theta) * dx

    velocity = Function(S)
    rhs = assemble(rhs_form)
    reg_solver.solve(velocity, rhs)
    error = norm(
        project(
            domainify(u_exact, x) - velocity,
            S,
            solver_parameters=solver_parameters_amg,
        )
    )
    assert error < 5e-2
Example #25
def test_3D_cartesian_recovery(geometry, element, mesh, expr):

    family = "RTCF" if element == "quadrilateral" else "BDM"

    # horizontal base spaces
    cell = mesh._base_mesh.ufl_cell().cellname()
    u_hori = FiniteElement(family, cell, 1)
    w_hori = FiniteElement("DG", cell, 0)

    # vertical base spaces
    u_vert = FiniteElement("DG", interval, 0)
    w_vert = FiniteElement("CG", interval, 1)

    # build elements
    u_element = HDiv(TensorProductElement(u_hori, u_vert))
    w_element = HDiv(TensorProductElement(w_hori, w_vert))
    theta_element = TensorProductElement(w_hori, w_vert)
    v_element = u_element + w_element

    # DG1
    DG1_hori = FiniteElement("DG", cell, 1, variant="equispaced")
    DG1_vert = FiniteElement("DG", interval, 1, variant="equispaced")
    DG1_elt = TensorProductElement(DG1_hori, DG1_vert)
    DG1 = FunctionSpace(mesh, DG1_elt)
    vec_DG1 = VectorFunctionSpace(mesh, DG1_elt)

    # spaces
    DG0 = FunctionSpace(mesh, "DG", 0)
    CG1 = FunctionSpace(mesh, "CG", 1)
    Vt = FunctionSpace(mesh, theta_element)
    Vt_brok = FunctionSpace(mesh, BrokenElement(theta_element))
    Vu = FunctionSpace(mesh, v_element)
    vec_CG1 = VectorFunctionSpace(mesh, "CG", 1)

    # our actual theta and rho and v
    rho_CG1_true = Function(CG1).interpolate(expr)
    theta_CG1_true = Function(CG1).interpolate(expr)
    v_CG1_true = Function(vec_CG1).interpolate(as_vector([expr, expr, expr]))
    rho_Vt_true = Function(Vt).interpolate(expr)

    # make the initial fields by projecting expressions into the lowest order spaces
    rho_DG0 = Function(DG0).interpolate(expr)
    rho_CG1 = Function(CG1)
    theta_Vt = Function(Vt).interpolate(expr)
    theta_CG1 = Function(CG1)
    v_Vu = Function(Vu).project(as_vector([expr, expr, expr]))
    v_CG1 = Function(vec_CG1)
    rho_Vt = Function(Vt)

    # make the recoverers and do the recovery
    rho_recoverer = Recoverer(rho_DG0, rho_CG1, VDG=DG1, boundary_method=Boundary_Method.dynamics)
    theta_recoverer = Recoverer(theta_Vt, theta_CG1, VDG=DG1, boundary_method=Boundary_Method.dynamics)
    v_recoverer = Recoverer(v_Vu, v_CG1, VDG=vec_DG1, boundary_method=Boundary_Method.dynamics)
    rho_Vt_recoverer = Recoverer(rho_DG0, rho_Vt, VDG=Vt_brok, boundary_method=Boundary_Method.physics)

    rho_recoverer.project()
    theta_recoverer.project()
    v_recoverer.project()
    rho_Vt_recoverer.project()

    rho_diff = errornorm(rho_CG1, rho_CG1_true) / norm(rho_CG1_true)
    theta_diff = errornorm(theta_CG1, theta_CG1_true) / norm(theta_CG1_true)
    v_diff = errornorm(v_CG1, v_CG1_true) / norm(v_CG1_true)
    rho_Vt_diff = errornorm(rho_Vt, rho_Vt_true) / norm(rho_Vt_true)

    tolerance = 1e-7
    error_message = ("""
                     Incorrect recovery for {variable} with {boundary} boundary method
                     on {geometry} 3D Cartesian domain with {element} elements
                     """)
    assert rho_diff < tolerance, error_message.format(variable='rho', boundary='dynamics',
                                                      geometry=geometry, element=element)
    assert v_diff < tolerance, error_message.format(variable='v', boundary='dynamics',
                                                    geometry=geometry, element=element)
    assert theta_diff < tolerance, error_message.format(variable='theta', boundary='dynamics',
                                                        geometry=geometry, element=element)
    assert rho_Vt_diff < tolerance, error_message.format(variable='rho', boundary='physics',
                                                         geometry=geometry, element=element)
Example #26
def run(state, transport_scheme, tmax, f_end):
    timestepper = PrescribedTransport(state, transport_scheme)
    timestepper.run(0, tmax)
    return norm(state.fields("f") - f_end) / norm(f_end)
Example #27
def compute_norm(f, norm_type=np.inf):
    if norm_type == np.inf:
        with f.dat.vec_ro as vu:
            return vu.norm(PETSc.NormType.NORM_INFINITY)
    elif norm_type == 2:
        return fd.norm(f)
Example #28
def test_interpolating_to_mesh():
    # Make the mesh the square `[1/4, 3/4] x [1/4, 3/4]`
    nx, ny = 32, 32
    mesh = firedrake.UnitSquareMesh(nx, ny)
    x, y = firedrake.SpatialCoordinate(mesh)
    Vc = mesh.coordinates.function_space()
    f = firedrake.interpolate(
        firedrake.as_vector((x / 2 + 1 / 4, y / 2 + 1 / 4)), Vc)
    mesh.coordinates.assign(f)

    # Set up the geometry of the gridded data set
    x0 = (0, 0)
    n = 32
    dx = 1.0 / n
    transform = rasterio.transform.from_origin(west=0.0,
                                               north=1.0,
                                               xsize=dx,
                                               ysize=dx)

    # Interpolate a scalar field
    array = np.array([[dx * (i + j) for j in range(n + 1)]
                      for i in range(n + 1)])
    missing = -9999.0
    array[0, 0] = missing
    array = np.flipud(array)

    memfile = rasterio.MemoryFile(ext='.tif')
    opts = {
        'driver': 'GTiff',
        'count': 1,
        'width': n,
        'height': n,
        'transform': transform,
        'nodata': -9999
    }

    with memfile.open(**opts) as dataset:
        dataset.write(array, indexes=1)
    dataset = memfile.open()

    Q = firedrake.FunctionSpace(mesh, family='CG', degree=1)
    p = firedrake.interpolate(x + y, Q)
    q = icepack.interpolate(dataset, Q)

    assert firedrake.norm(p - q) / firedrake.norm(p) < 1 / n

    # Interpolate a vector field
    array_vx = np.copy(array)
    array_vy = np.array([[dx * (j - i) for j in range(n + 1)]
                         for i in range(n + 1)])
    array_vy[-1, -1] = -9999.0
    array_vy = np.flipud(array_vy)

    memfile_vx, memfile_vy = rasterio.MemoryFile(), rasterio.MemoryFile()
    with memfile_vx.open(**opts) as vx, memfile_vy.open(**opts) as vy:
        vx.write(array_vx, indexes=1)
        vy.write(array_vy, indexes=1)
    vx, vy = memfile_vx.open(), memfile_vy.open()

    V = firedrake.VectorFunctionSpace(mesh, family='CG', degree=1)
    u = firedrake.interpolate(firedrake.as_vector((x + y, x - y)), V)
    v = icepack.interpolate((vx, vy), V)

    assert firedrake.norm(u - v) / firedrake.norm(u) < 1 / n
Example #29
def residual_phi(phi):
    return fd.norm(
        fd.assemble(
            d2(nabla_phi_bar(phi)) * inner(grad(phi), grad(sigma)) * dx))
Example #30
def nlspace_solve(problem: InfDimProblem,
                  params=None,
                  results=None,
                  descent_output_dir=None):
    """
    Solve the optimization problem
        min      J(phi)
        phi in V
        under the constraints
        g_i(phi)=0  for all i=0..p-1
        h_i(phi)<=0 for all i=0..q-1

    Usage
    -----
    results=nlspace_solve(problem: InfDimProblem, params: dict, results:dict)

    Inputs
    ------
    problem : an `~InfDimProblem` object corresponding to the optimization
                  problem above.

    params  : (optional) a dictionary containing algorithm parameters
              (see below).

    results : (optional) a previous output of the `nlspace_solve` function.
              The optimization will resume from the last entry of the
              dictionary, `results['phi'][-1]`.
              Useful to restart an optimization after an interruption.
    descent_output_dir : Plot the descent direction in the given directory

    Output
    ------
    results : dictionary containing
        results['J']       : values of the objective function along the path
                             (J(phi_0),...,J(phi_n))
        results['G']       : equality constraint values
                             (G(phi_0),...,G(phi_n))
        results['H']       : inequality constraints values
                             (H(phi_0),...,H(phi_n))
        results['muls']    : lagrange multiplier values
                             (mu(phi_0),...,mu(phi_n))


    Optional algorithm parameters
    -----------------------------

    params['alphaJ']   : (default 1) scaling coefficient for the null space
        step xiJ decreasing the objective function

    params['alphaC']   : (default 1) scaling coefficient for the Gauss Newton
        step xiC decreasing the violation of the constraints

    params['alphas']   : (optional) vector of dimension
        problem.nconstraints + problem.nineqconstraints containing
        proportionality coefficients scaling the Gauss Newton direction xiC for
        each of the constraints

    params['debug'] : Tunes the verbosity of the output (default 0).
                      Set params['debug']=-1 to display only the final result.
                      Set params['debug']=-2 to suppress all output.

    params['dt'] : (default : `1.0`). Pseudo time-step expressed in a time unit.
        Used to modulate the optimization convergence/oscillatory behavior.

    params['hmin'] : (default : `1.0`). Minimum mesh edge length. TODO: replace this
        with dt in the calculation of the tolerances `eps`.

    params['K']: tunes the distance at which inactive inequality constraints
        are felt. Constraints are felt from a distance K*params['dt']

    params['maxit']    : Maximal number of iterations (default : 4000)

    params['maxtrials']: (default 3) maximum number of trial time steps taken
        per iteration until the merit function decreases

    params['tol']      : (default 1e-7) Algorithm stops when
            ||phi_{n+1}-phi_n||<params['tol']
        or after params['maxit'] iterations.

    params['tol_merit'] : (default 0) a new iterate phi_{n+1} is accepted if
        merit(phi_{n+1})<(1+sign(merit(phi_n)*params['tol_merit']))*merit(phi_n)

    params['tol_qp'] : (default 1e-20) the tolerance for the qp solver cvxopt

    params['show_progress_qp'] : (default False) If true, then the output of
        cvxopt will be displayed between iterations.

    params['monitor_time'] : (default False) If True, print the time taken
        per optimization iteration.

    """

    params = set_parameters(params)

    alphas = np.asarray(
        params.get(
            "alphas",
            [1] * (len(problem.eqconstraints) + len(problem.ineqconstraints)),
        ))

    if descent_output_dir:
        descent_pvd = File(f"{descent_output_dir}/descent_direction.pvd")

    results = {
        "phi": [],
        "J": [],
        "G": [],
        "H": [],
        "muls": [],
        "merit": [],
    }

    phi = problem.x0()

    (J, G, H) = problem.eval(phi)

    normdx = 1  # current value for x_{n+1}-x_n

    new_phi = Function(phi.function_space())
    orig_phi = Function(phi.function_space())
    while normdx > params["tol"] and len(results["J"]) < params["maxit"]:
        with MPITimer(phi.comm) as timings:

            results["J"].append(J)
            results["G"].append(G)
            results["H"].append(H)

            if problem.accept():
                break

            it = len(results["J"]) - 1
            display("\n", params["debug"], 1)
            display(
                f"{it}. J=" + format(J, ".4g") + " " + "G=[" +
                ",".join(format(phi, ".4g") for phi in G[:10]) + "] " + "H=[" +
                ",".join(format(phi, ".4g") for phi in H[:10]) + "] " +
                " ||dx||_V=" + format(normdx, ".4g"),
                params["debug"],
                0,
            )

            # Returns the gradients (in the primal space). They are
            # firedrake.Function's
            (dJ, dG, dH) = problem.eval_gradients(phi)
            dC = dG + dH

            H = np.asarray(H)
            G = np.asarray(G)
            C = np.concatenate((G, H))

            # Obtain the tolerances for the inequality constraints and the indices
            # for the violated constraints
            eps = getEps(
                dC,
                problem.n_eqconstraints,
                params["dt"],
                params["K"],
                norm_type=params["normalisation_norm"],
            )
            tildeEps = getTilde(C, problem.n_eqconstraints, eps=eps)
            print(f"eps: {eps}")
            # Obtain the violated constraints
            tilde = getTilde(C, problem.n_eqconstraints)

            p_matrix = p_matrix_eval(dC, tildeEps)
            q_vector = q_vector_eval(dJ, dC, tildeEps)
            qp_results = solve_dual_problem(
                p_matrix,
                q_vector,
                tildeEps,
                problem.n_eqconstraints,
                show_progress_qp=params["show_progress_qp"],
                tol_qp=params["tol_qp"],
            )
            muls = np.zeros(len(C))
            oldmuls = np.zeros(len(C))
            hat = np.asarray([False] * len(C))

            if qp_results:
                muls[tildeEps] = np.asarray(qp_results["x"]).flatten()
                oldmuls = muls.copy()
                hat = np.asarray([True] * len(C))
                hat[problem.n_eqconstraints:] = (muls[problem.n_eqconstraints:]
                                                 > 30 * params["tol_qp"])
                if params.get("disable_dual", False):
                    hat = tildeEps

                dCdCT = dCdCT_eval(dC, hat)
                dCdCTinv = invert_dCdCT(dCdCT, params["debug"])
                muls = np.zeros(len(C))

                dCdJ = dCdJ_eval(dJ, dC, hat)
                muls[hat] = -dCdCTinv.dot(dCdJ[hat])

                if not np.all(muls[problem.n_eqconstraints:] >= 0):
                    display(
                        "Warning, the active set has not been predicted " +
                        "correctly Using old lagrange multipliers",
                        params["debug"],
                        level=1,
                        color="orange_4a",
                    )
                    hat = np.asarray([True] * len(C))
                    muls = oldmuls.copy()

            results["muls"].append(muls)
            display(f"Lagrange multipliers: {muls[:10]}",
                    params["debug"],
                    level=5)
            xiJ = xiJ_eval(dJ, dC, muls, hat)

            # Set of constraints union of active and new violated constraints.
            indicesEps = np.logical_or(tilde, hat)
            dCdCT = dCdCT_eval(dC, indicesEps)
            dCtdCtTinv = invert_dCdCT(dCdCT, params["debug"])

            xiC = xiC_eval(C, dC, dCtdCtTinv, alphas, indicesEps)

            # TODO Consider this? AC = min(0.9, alphaC * dt / max(compute_norm(xiC), 1e-9))
            AJ = params["alphaJ"]
            AC = params["alphaC"]

            # Make updates with merit function
            if xiC:
                problem.delta_x.assign(
                    Constant(-AJ) * xiJ - Constant(AC) * xiC)
            else:
                problem.delta_x.assign(Constant(-AJ) * xiJ)
            normdx = fd.norm(problem.delta_x)

            merit_eval_new = partial(merit_eval, muls, indicesEps, dCtdCtTinv)
            merit = merit_eval_new(AJ, J, AC, C)
            results["merit"].append(merit)
            if len(results["merit"]) > 3:
                print(
                    f"Merit oscillation: {(results['merit'][-1] - results['merit'][-2]) * (results['merit'][-2] - results['merit'][-3])}"
                )

            if descent_output_dir:
                descent_pvd.write(problem.delta_x)

            orig_phi.assign(phi)
            new_phi, newJ, newG, newH = line_search(
                problem,
                orig_phi,
                new_phi,
                merit_eval_new,
                merit,
                AJ,
                AC,
                dt=params["dt"],
                maxtrials=params["maxtrials"],
                tol_merit=params["tol_merit"],
                debug=params["debug"],
            )
            phi.assign(new_phi)
            (J, G, H) = (newJ, newG, newH)

        if params["monitor_time"]:
            print(
                f"Max time per iteration: {timings.max_time}, min time per iteration: {timings.min_time}"
            )

    results["J"].append(J)
    results["G"].append(G)
    results["H"].append(H)

    display("\n", params["debug"], -1)
    display("Optimization completed.", params["debug"], -1)
    return results
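A hedged usage sketch based only on the docstring above; constructing the `InfDimProblem` instance is project-specific and omitted, and the parameter values are illustrative rather than recommended:

# problem = InfDimProblem(...)  # assumed to be built elsewhere
params = {
    "alphaJ": 1.0,            # scaling of the objective-decreasing step xiJ
    "alphaC": 1.0,            # scaling of the constraint-correcting step xiC
    "dt": 0.1,                # pseudo time-step
    "K": 0.1,                 # range at which inactive inequality constraints are felt
    "maxit": 200,             # iteration cap
    "maxtrials": 3,           # line-search trials per step
    "tol": 1e-7,              # stop when ||phi_{n+1} - phi_n|| < tol
    "tol_merit": 0.0,         # merit-decrease acceptance tolerance
    "show_progress_qp": False,
    "monitor_time": True,
}

results = nlspace_solve(problem, params=params)
print("final objective:", results["J"][-1])
print("final multipliers:", results["muls"][-1])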