# Use FEniCS to formulate the FEM problem. A is the matrix, b is the rhs.
u_D = Constant(0.0)

tol = 1E-14


def boundary_D(x, on_boundary):

    return on_boundary and (near(x[0], 0, tol) or near(x[0], 1.0, tol))


bc = DirichletBC(V, u_D, boundary_D)
u = TrialFunction(V)
v = TestFunction(V)
f = Expression("10*exp(-(pow(x[0] - 0.5, 2) + pow(x[1] - 0.5, 2) \
                 + pow(x[2] - 0.5, 2)) / 0.02)", degree=6)
g = Expression("sin(5.0*x[0])*sin(5.0*x[1])", degree=6)
a = dot(grad(u), grad(v)) * dx
L = f * v * dx + g * v * ds
A = PETScMatrix()
b = PETScVector()
assemble_system(a, L, bc, A_tensor=A, b_tensor=b)

A = A.mat()
b = b.vec()
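# A minimal sketch (not part of the original snippet) of handing the extracted
# petsc4py objects straight to a Krylov solver; the solver and preconditioner
# choices below are illustrative assumptions.
from petsc4py import PETSc

x = b.duplicate()                # solution vector with the same layout as b
ksp = PETSc.KSP().create(A.getComm())
ksp.setOperators(A)
ksp.setType('cg')                # assemble_system keeps A symmetric, so CG applies
ksp.getPC().setType('gamg')      # algebraic multigrid preconditioner (assumed available)
ksp.setFromOptions()
ksp.solve(b, x)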

# =========================================================================

# Construct the alist for systems on levels from fine to coarse
# construct the transfer operators first
ruse = [None] * (nl - 1)
# Example #2
# No-slip boundary condition for velocity
# x1 = 0, x1 = 1 and around the dolphin


@function.expression.numba_eval
def noslip_eval(values, x, cell):
    values[:, 0] = 0.0
    values[:, 1] = 0.0


# Extract subdomain facet arrays
mf = sub_domains.array()
mf0 = np.where(mf == 0)
mf1 = np.where(mf == 1)

noslip_expr = Expression(noslip_eval, shape=(2, ))
noslip = interpolate(noslip_expr, W.sub(0).collapse())
bc0 = DirichletBC(W.sub(0), noslip, mf0[0])

# Inflow boundary condition for velocity
# x0 = 1


@function.expression.numba_eval
def inflow_eval(values, x, cell):
    values[:, 0] = -np.sin(x[:, 1] * np.pi)
    values[:, 1] = 0.0


inflow_expr = Expression(inflow_eval, shape=(2, ))
inflow = interpolate(inflow_expr, W.sub(0).collapse())
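# A plausible continuation (an assumption, not part of the original snippet):
# apply the inflow condition on the facets marked 1, mirroring bc0 above.
bc1 = DirichletBC(W.sub(0), inflow, mf1[0])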
# Example #3
def test_karman(num_steps=2, lcar=0.1, show=False):
    mesh = create_mesh(lcar)

    W_element = VectorElement('Lagrange', mesh.ufl_cell(), 2)
    P_element = FiniteElement('Lagrange', mesh.ufl_cell(), 1)
    WP = FunctionSpace(mesh, W_element * P_element)

    W = WP.sub(0)
    # P = WP.sub(1)

    mesh_eps = 1.0e-12

    # Define mesh and boundaries.
    # pylint: disable=no-self-use
    class LeftBoundary(SubDomain):
        def inside(self, x, on_boundary):
            return on_boundary and x[0] < x0 + mesh_eps

    left_boundary = LeftBoundary()

    class RightBoundary(SubDomain):
        def inside(self, x, on_boundary):
            return on_boundary and x[0] > x1 - mesh_eps

    right_boundary = RightBoundary()

    class LowerBoundary(SubDomain):
        def inside(self, x, on_boundary):
            return on_boundary and x[1] < y0 + mesh_eps

    lower_boundary = LowerBoundary()

    class UpperBoundary(SubDomain):
        def inside(self, x, on_boundary):
            return on_boundary and x[1] > y1 - mesh_eps

    upper_boundary = UpperBoundary()

    class ObstacleBoundary(SubDomain):
        def inside(self, x, on_boundary):
            return (on_boundary and x0 + mesh_eps < x[0] < x1 - mesh_eps
                    and y0 + mesh_eps < x[1] < y1 - mesh_eps)

    obstacle_boundary = ObstacleBoundary()

    # Boundary conditions for the velocity.
    # Proper inflow and outflow conditions are a matter of voodoo. See for
    # example Gresho/Sani, or
    #
    #     Boundary conditions for open boundaries for the incompressible
    #     Navier-Stokes equation;
    #     B.C.V. Johansson;
    #     J. Comp. Phys. 105, 233-251 (1993).
    #
    # The latter in particular suggests for the inflow:
    #
    #     u = u0,
    #     d^r v / dx^r = v_r,
    #     div(u) = 0,
    #
    # where u and v are the velocities in normal and tangential directions,
    # respectively, and r\in{0,1,2}. The setting r=0 essentially means to set
    # (u,v) statically at the left boundary, r=1 means to set u and control
    # dv/dn, which is what we do here (namely implicitly by dv/dn=0).
    # At the outflow,
    #
    #     d^j u / dx^j = 0,
    #     d^q v / dx^q = 0,
    #     p = p0,
    #
    # is suggested with j=q+1. Choosing q=0, j=1 means setting the tangential
    # component of the outflow to 0, and letting the normal component du/dn=0
    # (again, this is achieved implicitly by the weak formulation).
    #
    inflow = Expression('%e * (%e - x[1]) * (x[1] - %e) / %e' %
                        (entrance_velocity, y1, y0, (0.5 * (y1 - y0))**2),
                        degree=2)
    outflow = Expression('%e * (%e - x[1]) * (x[1] - %e) / %e' %
                         (entrance_velocity, y1, y0, (0.5 * (y1 - y0))**2),
                         degree=2)
    u_bcs = [
        DirichletBC(W, (0.0, 0.0), upper_boundary),
        DirichletBC(W, (0.0, 0.0), lower_boundary),
        DirichletBC(W, (0.0, 0.0), obstacle_boundary),
        DirichletBC(W.sub(0), inflow, left_boundary),
        #
        DirichletBC(W.sub(0), outflow, right_boundary),
    ]
    # dudt_bcs = [
    #     DirichletBC(W, (0.0, 0.0), upper_boundary),
    #     DirichletBC(W, (0.0, 0.0), lower_boundary),
    #     DirichletBC(W, (0.0, 0.0), obstacle_boundary),
    #     DirichletBC(W.sub(0), 0.0, left_boundary),
    #     # DirichletBC(W.sub(1), 0.0, right_boundary),
    #     ]

    # If there is a penetration boundary (i.e., n.u!=0), then the pressure must
    # be set somewhere to make sure that the Navier-Stokes problem remains
    # consistent.
    # When solving Stokes with no Dirichlet conditions whatsoever, the pressure
    # tends to 0 at the outlet. This is natural since there the liquid can
    # flow out at whatever rate it needs to, under no pressure at all.
    # Hence, at outlets, set the pressure to 0.
    p_bcs = [
        # DirichletBC(P, 0.0, right_boundary)
    ]

    # Getting vortices is not easy. If we take the actual viscosity of water,
    # they don't appear.
    mu = 0.002
    # mu = materials.water.dynamic_viscosity(T=293.0)

    # For starting off, solve the Stokes equation.
    u0, p0 = flow.stokes.solve(WP,
                               u_bcs + p_bcs,
                               mu,
                               f=Constant((0.0, 0.0)),
                               verbose=False,
                               tol=1.0e-13,
                               max_iter=10000)
    u0.rename('velocity', 'velocity')
    p0.rename('pressure', 'pressure')

    rho = materials.water.density(T=293.0)
    # stepper = flow.navier_stokes.Chorin()
    # stepper = flow.navier_stokes.IPCS()
    stepper = flow.navier_stokes.Rotational()

    W2 = u0.function_space()
    P2 = p0.function_space()
    u_bcs = [
        DirichletBC(W2, (0.0, 0.0), upper_boundary),
        DirichletBC(W2, (0.0, 0.0), lower_boundary),
        DirichletBC(W2, (0.0, 0.0), obstacle_boundary),
        DirichletBC(W2.sub(0), inflow, left_boundary),
        #
        DirichletBC(W2.sub(0), outflow, right_boundary),
    ]
    # TODO setting the outflow _and_ the pressure at the outlet is actually
    #      not necessary. Even without the pressure Dirichlet conditions, the
    #      pressure correction system should be consistent.
    p_bcs = [DirichletBC(P2, 0.0, right_boundary)]

    # Report Reynolds number.
    # https://en.wikipedia.org/wiki/Reynolds_number#Sphere_in_a_fluid
    reynolds = entrance_velocity * obstacle_diameter * rho / mu
    print('Reynolds number:  %e' % reynolds)

    dt = 1.0e-5
    dt_max = 1.0
    t = 0.0

    with XDMFFile(mpi_comm_world(), 'karman.xdmf') as xdmf_file:
        xdmf_file.parameters['flush_output'] = True
        xdmf_file.parameters['rewrite_function_mesh'] = False

        k = 0
        while k < num_steps:
            k += 1
            print()
            print('t = %f' % t)
            if show:
                plot(u0)
                plot(p0)
                xdmf_file.write(u0, t)
                xdmf_file.write(p0, t)

            u1, p1 = stepper.step(Constant(dt), {0: u0},
                                  p0,
                                  u_bcs,
                                  p_bcs,
                                  Constant(rho),
                                  Constant(mu),
                                  f={
                                      0: Constant((0.0, 0.0)),
                                      1: Constant((0.0, 0.0))
                                  },
                                  verbose=False,
                                  tol=1.0e-10)
            u0.assign(u1)
            p0.assign(p1)

            # Adaptive stepsize control based solely on the velocity field.
            # CFL-like condition for the time step. This should really use
            # some sort of average of the velocity in the current step and in
            # the target step.
            #
            # More on step-size control for Navier--Stokes:
            #
            #     Adaptive time step control for the incompressible
            #     Navier-Stokes equations;
            #     Volker John, Joachim Rang;
            #     Comput. Methods Appl. Mech. Engrg. 199 (2010) 514-524;
            #     <http://www.wias-berlin.de/people/john/ELECTRONIC_PAPERS/JR10.CMAME.pdf>.
            #
            # Section 3.3 in that paper notes that time-adaptivity for
            # theta-schemes is too costly. They resort to DIRK and Rosenbrock
            # methods instead.
            #
            begin('Step size adaptation...')
            ux, uy = u0.split()
            unorm = project(sqrt(ux**2 + uy**2),
                            FunctionSpace(mesh, 'Lagrange', 2),
                            form_compiler_parameters={'quadrature_degree': 4})
            unorm = norm(unorm.vector(), 'linf')

            # print('||u||_inf = %e' % unorm)
            # Some smooth step-size adaption.
            target_dt = 1.0 * mesh.hmax() / unorm
            print('current dt: %e' % dt)
            print('target dt:  %e' % target_dt)
            # alpha is the aggressiveness factor. The distance between the
            # current step size and the target step size is reduced by
            # |1-alpha|. Hence, if alpha==1 then dt_next==target_dt. Otherwise
            # target_dt is approached more slowly.
            alpha = 0.5
            dt = min(
                dt_max,
                # At most double the step size from step to step.
                dt * min(2.0, 1.0 + alpha * (target_dt - dt) / dt))
            print('next dt:    %e' % dt)
            t += dt
            end()

    return
# Example #4
num_steps = 160  # number of time steps
dt = T / num_steps  # time step size
alpha = 0.3  # parameter alpha
beta = 0.0625  # parameter beta
theta = 0.125  # parameter theta

# Create mesh and define function space
nx = ny = nz = 8
mesh = UnitCubeMesh(nx, ny, nz)

V = FunctionSpace(mesh, 'P', 1)

# Define boundary condition
u_D = Expression('1 + x[0]*x[0] + alpha*x[1]*x[1] + theta*x[2]*x[2] + beta*t',
                 degree=2,
                 alpha=alpha,
                 theta=theta,
                 beta=beta,
                 t=0)


def boundary(x, on_boundary):
    return on_boundary


bc = DirichletBC(V, u_D, boundary)

# Define initial value
u_n = interpolate(u_D, V)
# u_n = project(u_D, V)

# Define variational problem
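# A minimal sketch of the variational problem, following the standard FEniCS
# heat-equation tutorial; the source term f below is an assumption chosen to
# match the manufactured solution u_D above.
u = TrialFunction(V)
v = TestFunction(V)
f = Constant(beta - 2 - 2 * alpha - 2 * theta)

F = u * v * dx + dt * dot(grad(u), grad(v)) * dx - (u_n + dt * f) * v * dx
a, L = lhs(F), rhs(F)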
# Example #5
#
# .. index::
#    single: interpolating functions; (in Cahn-Hilliard demo)
#
# Initial conditions are created by using the evaluate method
# then interpolated into a finite element space::


@function.expression.numba_eval
def init_cond(values, x, cell):
    values[:, 0] = 0.63 + 0.02 * (0.5 - random.random())
    values[:, 1] = 0.0


# Create initial conditions and interpolate
u_init = Expression(init_cond, shape=(2, ))
u.interpolate(u_init)

# The first line creates an ``Expression`` from the ``init_cond`` evaluation
# function. The following line makes ``u`` an interpolant of ``u_init``
# (since ``u`` is a finite element function, it may not be able to represent
# a given function exactly, but the function can be approximated by
# interpolating it in a finite element space).
#
# .. index:: automatic differentiation
#
# The chemical potential :math:`df/dc` is computed using automatic
# differentiation::

# Compute the chemical potential df/dc
c = variable(c)
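# A minimal sketch of the automatic differentiation step, following the
# standard FEniCS Cahn-Hilliard demo; the quartic free energy below is the
# demo's choice and an assumption with respect to this snippet.
f = 100 * c**2 * (1 - c)**2
dfdc = diff(f, c)
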
def _test_nonlinear_solver_sparse(callback_type):
    from dolfin import Function
    from rbnics.backends.dolfin import NonlinearSolver

    # Create mesh and define function space
    mesh = IntervalMesh(132, 0, 2 * pi)
    V = FunctionSpace(mesh, "Lagrange", 1)

    # Define Dirichlet boundary (x = 0 or x = 2 * pi)
    def boundary(x):
        return x[0] < 0 + DOLFIN_EPS or x[0] > 2 * pi - 10 * DOLFIN_EPS

    # Define exact solution
    exact_solution_expression = Expression("x[0] + sin(2*x[0])",
                                           element=V.ufl_element())
    exact_solution = project(exact_solution_expression, V)

    # Define variational problem
    du = TrialFunction(V)
    v = TestFunction(V)
    u = Function(V)
    g = Expression(
        "4 * sin(2 * x[0]) * (pow(x[0] + sin(2 * x[0]), 2) + 1)" +
        " - 2 * (x[0] + sin(2 * x[0])) * pow(2 * cos(2 * x[0]) + 1," + " 2)",
        element=V.ufl_element())
    r = inner((1 + u**2) * grad(u), grad(v)) * dx - g * v * dx
    j = derivative(r, u, du)
    x = inner(du, v) * dx

    # Assemble inner product matrix
    X = assemble(x)

    # Define initial guess
    def initial_guess():
        initial_guess_expression = Expression("0.1 + 0.9 * x[0]",
                                              element=V.ufl_element())
        return project(initial_guess_expression, V)

    # Define boundary condition
    bc = [DirichletBC(V, exact_solution_expression, boundary)]

    # Define callback function depending on callback type
    assert callback_type in ("form callbacks", "tensor callbacks")
    if callback_type == "form callbacks":

        def callback(arg):
            return arg
    elif callback_type == "tensor callbacks":

        def callback(arg):
            return assemble(arg)

    # Define problem wrapper
    class ProblemWrapper(NonlinearProblemWrapper):
        # Residual function
        def residual_eval(self, solution):
            return callback(r)

        # Jacobian function
        def jacobian_eval(self, solution):
            return callback(j)

        # Define boundary condition
        def bc_eval(self):
            return bc

        # Define custom monitor to plot the solution
        def monitor(self, solution):
            if matplotlib.get_backend() != "agg":
                plot(solution, title="u")
                plt.show(block=False)
                plt.pause(1)
            else:
                print("||u|| = " + str(solution.vector().norm("l2")))

    # Solve the nonlinear problem
    problem_wrapper = ProblemWrapper()
    solution = u
    assign(solution, initial_guess())
    solver = NonlinearSolver(problem_wrapper, solution)
    solver.set_parameters({
        "linear_solver": "mumps",
        "maximum_iterations": 20,
        "report": True
    })
    solver.solve()

    # Compute the error
    error = Function(V)
    error.vector().add_local(+solution.vector().get_local())
    error.vector().add_local(-exact_solution.vector().get_local())
    error.vector().apply("")
    error_norm = error.vector().inner(X * error.vector())
    print("Sparse error (" + callback_type + "):", error_norm)
    assert isclose(error_norm, 0., atol=1.e-5)
    return (error_norm, V, u, r, j, X, initial_guess, exact_solution)
# Example #7
        num_steps = np.rint(Tend / float(dt))

        # Generate mesh
        mesh = Mesh("./../../meshes/circle_0.xml")
        n = nx
        while n > 1:
            mesh = refine(mesh)
            n /= 2

        output_field = XDMFFile(mesh.mpi_comm(),
                                outdir + "psi_h_nx" + str(nx) + ".xdmf")

        # Velocity and initial condition
        V = VectorFunctionSpace(mesh, "DG", 3)
        uh = Function(V)
        uh.assign(Expression(("-Uh*x[1]", "Uh*x[0]"), Uh=Uh, degree=3))

        psi0_expression = GaussianPulse(center=(xc, yc),
                                        sigma=float(sigma),
                                        U=[Uh, Uh],
                                        time=0.0,
                                        height=1.0,
                                        degree=3)

        # Generate particles
        x = RandomCircle(Point(x0, y0), r).generate([pres, pres])
        s = np.zeros((len(x), 1), dtype=np.float_)

        # Initialize particles with position x and scalar property s at the mesh
        p = particles(x, [s], mesh)
        property_idx = 1  # Scalar quantity is stored at slot 1
# Example #8
def RunJob(Tb, mu_value, path):
    runtimeInit = clock()

    tfile = File(path + '/t6t.pvd')
    mufile = File(path + "/mu.pvd")
    ufile = File(path + '/velocity.pvd')
    gradpfile = File(path + '/gradp.pvd')
    pfile = File(path + '/pstar.pvd')
    parameters = open(path + '/parameters', 'w', 0)
    vmeltfile = File(path + '/vmelt.pvd')
    rhofile = File(path + '/rhosolid.pvd')

    for name in dir():
        ev = str(eval(name))
        if name[0] != '_' and ev[0] != '<':
            parameters.write(name + ' = ' + ev + '\n')

    temp_values = [27. + 273, Tb + 273, 1300. + 273, 1305. + 273]
    dTemp = temp_values[3] - temp_values[0]
    temp_values = [x / dTemp for x in temp_values]  # non-dimensionalise temperatures

    mu_a = mu_value  # taken from the Blankenbach paper; can be changed

    Ep = b / dTemp

    mu_bot = exp(-Ep * (temp_values[3] * dTemp - 1573) + cc) * mu_a

    Ra = rho_0 * alpha * g * dTemp * h**3 / (kappa_0 * mu_a)
    w0 = rho_0 * alpha * g * dTemp * h**2 / mu_a
    tau = h / w0
    p0 = mu_a * w0 / h

    print(mu_a, mu_bot, Ra, w0, p0)

    vslipx = 1.6e-09 / w0
    vslip = Constant((vslipx, 0.0))  # nondimensional
    noslip = Constant((0.0, 0.0))

    dt = 3.E11 / tau
    tEnd = 3.E13 / tau  # non-dimensionalising times

    class PeriodicBoundary(SubDomain):
        def inside(self, x, on_boundary):
            return left(x, on_boundary)

        def map(self, x, y):
            y[0] = x[0] - MeshWidth
            y[1] = x[1]

    pbc = PeriodicBoundary()

    class TempExp(Expression):
        def eval(self, value, x):
            if x[1] >= LAB(x):
                value[0] = temp_values[0] + (temp_values[1] - temp_values[0]
                                             ) * (MeshHeight -
                                                  x[1]) / (MeshHeight - LAB(x))
            else:
                value[0] = temp_values[3] - (
                    temp_values[3] - temp_values[2]) * (x[1]) / (LAB(x))

    class FluidTemp(Expression):
        def eval(self, value, x):
            if value[0] < 1295:
                value[0] = 1295

    mesh = RectangleMesh(Point(0.0, 0.0), Point(MeshWidth, MeshHeight), nx, ny)

    Svel = VectorFunctionSpace(mesh, 'CG', 2, constrained_domain=pbc)
    Spre = FunctionSpace(mesh, 'CG', 1, constrained_domain=pbc)
    Stemp = FunctionSpace(mesh, 'CG', 1, constrained_domain=pbc)
    Smu = FunctionSpace(mesh, 'CG', 1, constrained_domain=pbc)
    Sgradp = VectorFunctionSpace(mesh, 'CG', 2, constrained_domain=pbc)
    Srho = FunctionSpace(mesh, 'CG', 1, constrained_domain=pbc)
    S0 = MixedFunctionSpace([Svel, Spre, Stemp])

    u = Function(S0)
    v, p, T = split(u)
    v_t, p_t, T_t = TestFunctions(S0)

    T0 = interpolate(TempExp(), Stemp)

    muExp = Expression(
        'exp(-Ep * (T_val * dTemp - 1573) + cc * x[2] / meshHeight)',
        Smu.ufl_element(),
        Ep=Ep,
        dTemp=dTemp,
        cc=cc,
        meshHeight=MeshHeight,
        T_val=T0)

    mu = interpolate(muExp, Smu)

    rhosolid = Function(Srho)
    deltarho = Function(Srho)

    v0 = Function(Svel)
    vmelt = Function(Svel)

    v_theta = (1. - theta) * v0 + theta * v

    T_theta = (1. - theta) * T + theta * T0

    r_v = (inner(sym(grad(v_t)), 2.*mu*sym(grad(v))) \
        - div(v_t)*p \
        - T*v_t[1] )*dx

    r_p = p_t * div(v) * dx

    r_T = (T_t*((T - T0) \
        + dt*inner(v_theta, grad(T_theta))) \
        + (dt/Ra)*inner(grad(T_t), grad(T_theta)) )*dx
    #           + k_s*(Tf-T_theta)*dt

    Tf = T0.interpolate(FluidTemp())
    # Tf = T0.interpolate(Expression('value[0] >= 1295.0 ? value[0] : 1295.0'))

    # Tf.interpolate(Expression('value[0] >= 1295 ? value[0] : 1295'))
    # project(Expression('value[0] >= 1295 ? value[0] : 1295'), Tf)
    # TODO: set Tf = T in regions where T >= 1295 C (an arbitrary choice for
    # the LAB isotherm) and keep Tf at a constant 1295 C elsewhere. Once that
    # works, the commented k_s*(Tf - T_theta)*dt term above can be added so
    # that it is only non-zero where the solid temperature T is cooler than
    # 1295 C. Eventually, Tf should be solved for as a function of time in
    # those cooler regions.
    r = r_v + r_p + r_T

    bcv0 = DirichletBC(S0.sub(0), noslip, top)
    bcv1 = DirichletBC(S0.sub(0), vslip, bottom)
    bcp0 = DirichletBC(S0.sub(1), Constant(0.0), bottom)
    bct0 = DirichletBC(S0.sub(2), Constant(temp_values[0]), top)
    bct1 = DirichletBC(S0.sub(2), Constant(temp_values[3]), bottom)

    bcs = [bcv0, bcv1, bcp0, bct0, bct1]

    t = 0
    count = 0
    while (t < tEnd):
        solve(r == 0, u, bcs)
        t += dt
        nV, nP, nT = u.split()
        gp = grad(nP)
        rhosolid = rho_0 * (1 - alpha * (nT * dTemp - 1573))
        deltarho = rhosolid - rhomelt
        yvec = Constant((0.0, 1.0))
        vmelt = nV * w0 - darcy * (gp * p0 / h - deltarho * yvec * g)
        if (count % 100 == 0):
            pfile << nP
            ufile << nV
            tfile << nT
            mufile << mu
            gradpfile << project(grad(nP), Sgradp)
            mufile << project(mu * mu_a, Smu)
            rhofile << project(rhosolid, Srho)
            vmeltfile << project(vmelt, Svel)
        count += 1
        assign(T0, nT)
        assign(v0, nV)
        mu.interpolate(muExp)

    print('Case mu=%g, Tb=%g complete.' % (mu_a, Tb), ' Run time =',
          clock() - runtimeInit, 's')
# Incident plane wave
theta = np.pi / 8


@function.expression.numba_eval
def ui_eval(values, x, cell_idx):
    values[:, 0] = np.exp(1.0j * k0 *
                          (np.cos(theta) * x[:, 0] + np.sin(theta) * x[:, 1]))


# Test and trial function space
V = FunctionSpace(mesh, ("Lagrange", deg))

# Prepare Expression as FE function
ui = interpolate(Expression(ui_eval), V)

# Define variational problem
u = TrialFunction(V)
v = TestFunction(V)
g = dot(grad(ui), n) + 1j * k0 * ui
a = inner(grad(u), grad(v)) * dx - k0**2 * inner(u, v) * dx + \
    1j * k0 * inner(u, v) * ds
L = inner(g, v) * ds

# Compute solution
u = Function(V)
solve(a == L, u, [])

# Save solution in XDMF format (to be viewed in Paraview, for example)
with XDMFFile(MPI.comm_world,
# Example #10
    nx = 50
    ny = 50
    nz = 10

    model = Model()
    model.generate_uniform_mesh(nx,
                                ny,
                                nz,
                                xmin=0,
                                xmax=L,
                                ymin=0,
                                ymax=L,
                                generate_pbcs=True)

    Surface = Expression('- x[0] * tan(alpha)',
                         alpha=alpha,
                         element=model.Q.ufl_element())
    Bed = Expression('- x[0] * tan(alpha) - 1000.0',
                     alpha=alpha,
                     element=model.Q.ufl_element())
    Beta2 = Expression('1000 + 1000 * sin(2*pi*x[0]/L) * sin(2*pi*x[1]/L)',
                       alpha=alpha,
                       L=L,
                       element=model.Q.ufl_element())

    model.set_geometry(Surface, Bed, deform=True)
    model.set_parameters(IceParameters())
    model.initialize_variables()

    nonlin_solver_params = default_nonlin_solver_params()
    nonlin_solver_params['newton_solver']['linear_solver'] = 'mumps'
Qbar_E = FiniteElement("DGT", mesh.ufl_cell(), k)

Q_Rho = FunctionSpace(mesh, Q_E_Rho)
Qbar = FunctionSpace(mesh, Qbar_E)

phih, phih0 = Function(Q_Rho), Function(Q_Rho)
phibar = Function(Qbar)

# Advective velocity
# Swirling deformation advection (see LeVeque)
ux = 'pow(sin(pi*x[0]), 2) * sin(2*pi*x[1])'
vy = '-pow(sin(pi*x[1]), 2) * sin(2*pi*x[0])'
gt_plus = '0.5 * cos(pi*t)'
gt_min = '-0.5 * cos(pi*t)'

u_expr = Expression((ux+'*'+gt_min, vy+'*'+gt_min), degree=2, t=0.)
u_expre_neg = Expression((ux+'*'+gt_plus, vy+'*'+gt_plus), degree=2, t=0.)

# Mesh velocity
umesh = Function(Vcg)

# Advective velocity
uh = Function(V)
uh.assign(u_expr)

# Total velocity
uadvect = uh - umesh

# Now throw in the particles
x = RandomRectangle(Point(xmin, ymin), Point(xmax, ymax)).generate([pres, pres])
s = assign_particle_values(x, CosineHill(radius=0.25, center=[0.25, 0.5],
# Example #12
def supg(mesh, convection, diffusion, element_degree):
    '''For each cell, this function returns the expression

    .. math::

        \\begin{align*}
        \\tau &= \\frac{h}{2\\|b\\|}
        \\left(\\frac{1}{\\tanh Pe} - \\frac{1}{Pe}\\right)\\\\
        & = \\frac{h^2}{4\\varepsilon} \\frac{1}{Pe}
        \\left(\\frac{1}{\\tanh Pe} - \\frac{1}{Pe}\\right)
        \\end{align*}

    with the element diameter in the direction of the convection vector
    :math:`b` and the Péclet number :math:`Pe = \\frac{\\|b\\|
    h}{2\\varepsilon}`; see (3) in :cite:`sold2`.
    Note that :math:`\\tau` does not have a singularity for :math:`\\|b\\|=0`
    since

    .. math::

        \\frac{1}{\\tanh Pe} - \\frac{1}{Pe} = \\frac{1}{3}Pe + O(Pe^3)

    for :math:`Pe\\approx 0`. This Taylor expansion (with a few more terms) is
    made use of in the code.
    '''
    cppcode = '''#include <dolfin/mesh/Vertex.h>

class SupgStab : public Expression {
public:
double epsilon;
int p;
std::shared_ptr<GenericFunction> convection;
std::shared_ptr<Mesh> mesh;

SupgStab(): Expression()
{}

void eval(
  Array<double>& tau,
  const Array<double>& x,
  const ufc::cell& c
  ) const
{
  Array<double> v(x.size());
  convection->eval(v, x, c);
  double conv_norm = 0.0;
  for (uint i = 0; i < v.size(); ++i)
    conv_norm += v[i]*v[i];
  conv_norm = sqrt(conv_norm);

  // Bail out early if there's no convection; avoids NaNs.
  if (conv_norm < 1.0e-10) {
    tau[0] = 0.0;
    return;
  }

  Cell cell(*mesh, c.index);

  // // The alternative for the lazy:
  // const double h = cell.circumradius();

  // Compute the directed diameter of the cell, cf. :cite:`sold2`.
  //
  //    diam(cell, s) = 2*||s|| / sum_{nodes n_i} |s.\\grad\\psi|
  //
  // where \\psi is the P_1 basis function of n_i.
  //
  const double area = cell.volume();
  const unsigned int* vertices = cell.entities(0);
  assert(vertices);
  double sum = 0.0;
  for (int i=0; i<3; i++) {
    for (int j=i+1; j<3; j++) {
      // Get edge coords.
      const dolfin::Vertex v0(*mesh, vertices[i]);
      const dolfin::Vertex v1(*mesh, vertices[j]);
      const Point p0 = v0.point();
      const Point p1 = v1.point();
      const double e0 = p0[0] - p1[0];
      const double e1 = p0[1] - p1[1];

      // Note that
      //
      //     \\grad\\psi = ortho_edge / edgelength / height
      //               = ortho_edge / (2*area)
      //
      // so
      //
      //   (v.\\grad\\psi) = (v.ortho_edge) / (2*area).
      //
      // Move the constant factors out of the summation.
      //
      // It would be really nice if we could just do
      //    edge.dot((-v[1], v[0]))
      // but unfortunately, edges just dot with other edges.
      sum += fabs(e1*v[0] - e0*v[1]);
    }
  }
  const double h = 4 * conv_norm * area / sum;

  // Just a little sanity check here.
  assert(h <= cell.circumradius());

  const double Pe = 0.5*conv_norm * h/(p*epsilon);
  assert(Pe > 0.0);

  // We'd like to compute `xi = (1.0/tanh(Pe) - 1.0/Pe) / Pe`. This expression
  // can hardly be evaluated for small Pe, see
  // <https://stackoverflow.com/a/43279491/353337>. Hence, use its Taylor
  // expansion around 0.
  const double xi = Pe > 1.0e-5 ?
      (1.0/tanh(Pe) - 1.0/Pe) / Pe :
      1.0/3.0 - Pe*Pe / 45.0 + 2.0/945.0 * Pe*Pe*Pe*Pe;
  // const double xi =  (Pe > 1.0 ? 1.0 - 1.0/Pe : 0.0) / Pe;

  // Note that for small Pe, xi is approximately 1/3, i.e. approximately
  // independent of the convection.
  tau[0] = h*h / 4 / epsilon / p * xi;

  if (tau[0] > 1.0e3)
  {
    std::cout << "tau   = " << tau[0] << std::endl;
    std::cout << "||b|| = " << conv_norm << std::endl;
    std::cout << "Pe    = " << Pe << std::endl;
    std::cout << "h     = " << h << std::endl;
    std::cout << "xi    = " << xi << std::endl;
    throw 1;
  }

  return;
}
};
'''
    # TODO set degree
    tau = Expression(cppcode, degree=1)
    tau.convection = convection
    tau.mesh = mesh
    tau.epsilon = diffusion
    tau.p = element_degree
    return tau
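# A minimal NumPy sketch (an illustration, not part of the original code) of
# the same stabilization parameter for a given directed cell diameter h,
# convection norm b_norm, diffusion epsilon, and element degree p.
import numpy as np


def supg_tau(h, b_norm, epsilon, p):
    # Pe = ||b|| h / (2 p epsilon), cf. the C++ code above
    Pe = 0.5 * b_norm * h / (p * epsilon)
    if Pe > 1.0e-5:
        xi = (1.0 / np.tanh(Pe) - 1.0 / Pe) / Pe
    else:
        # Taylor expansion of (1/tanh(Pe) - 1/Pe)/Pe around Pe = 0 to avoid
        # cancellation for small Peclet numbers
        xi = 1.0 / 3.0 - Pe**2 / 45.0 + 2.0 / 945.0 * Pe**4
    return h**2 / (4.0 * epsilon * p) * xi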
    if edgefields:
        fh.write("CELL_DATA {0}\n".format(elems.shape[0]))
        for n,f in edgefields:
            PUTFIELD(n,f)
            
    fh.close()





if __name__=="__main__":
    from dolfin import UnitSquareMesh, Function, FunctionSpace, VectorFunctionSpace, TensorFunctionSpace, Expression

    mesh = UnitSquareMesh(10,10)
    S=FunctionSpace(mesh,"DG",0)
    V=VectorFunctionSpace(mesh,"DG",0)
    T=TensorFunctionSpace(mesh,"DG",0)
    Tsym = TensorFunctionSpace(mesh,"DG",0,symmetry=True)

    s = Function(S)
    s.interpolate(Expression('x[0]',element=S.ufl_element()))
    v = Function(V)
    v.interpolate(Expression(('x[0]','x[1]'),element=V.ufl_element()))
    t = Function(T)
    t.interpolate(Expression(( ('x[0]','1.0'),('2.0','x[1]')),element=T.ufl_element()))
    ts = Function(Tsym)
    ts.interpolate(Expression(( ('x[0]','1.0'),('x[1]',)),element=Tsym.ufl_element()))
    
    write_vtk_f("test.vtk",cellfunctions={'s':s,'v':v,'t':t,'tsym':ts})
# Example #14
def _test_time_stepping_1_sparse(callback_type, integrator_type):
    # Create mesh and define function space
    mesh = IntervalMesh(132, 0, 2*pi)
    V = FunctionSpace(mesh, "Lagrange", 1)

    # Define Dirichlet boundary (x = 0 or x = 2*pi)
    def boundary(x):
        return x[0] < 0 + DOLFIN_EPS or x[0] > 2*pi - 10*DOLFIN_EPS
        
    # Define time step
    dt = 0.01
    T = 1.

    # Define exact solution
    exact_solution_expression = Expression("sin(x[0]+t)", t=0, element=V.ufl_element())
    # ... and interpolate it at the final time
    exact_solution_expression.t = T
    exact_solution = project(exact_solution_expression, V)

    # Define exact solution dot
    exact_solution_dot_expression = Expression("cos(x[0]+t)", t=0, element=V.ufl_element())
    # ... and interpolate it at the final time
    exact_solution_dot_expression.t = T
    exact_solution_dot = project(exact_solution_dot_expression, V)

    # Define variational problem
    du = TrialFunction(V)
    du_dot = TrialFunction(V)
    v = TestFunction(V)
    u = Function(V)
    u_dot = Function(V)
    g = Expression("sin(x[0]+t) + cos(x[0]+t)", t=0., element=V.ufl_element())
    r_u = inner(grad(u), grad(v))*dx
    j_u = derivative(r_u, u, du)
    r_u_dot = inner(u_dot, v)*dx
    j_u_dot = derivative(r_u_dot, u_dot, du_dot)
    r = r_u_dot + r_u - g*v*dx
    x = inner(du, v)*dx
    def bc(t):
        exact_solution_expression.t = t
        return [DirichletBC(V, exact_solution_expression, boundary)]

    # Assemble inner product matrix
    X = assemble(x)
    
    # Define callback function depending on callback type
    assert callback_type in ("form callbacks", "tensor callbacks")
    if callback_type == "form callbacks":
        def callback(arg):
            return arg
    elif callback_type == "tensor callbacks":
        def callback(arg):
            return assemble(arg)
            
    # Define problem wrapper
    class SparseProblemWrapper(TimeDependentProblem1Wrapper):
        # Residual and jacobian functions
        def residual_eval(self, t, solution, solution_dot):
            g.t = t
            return callback(r)
        def jacobian_eval(self, t, solution, solution_dot, solution_dot_coefficient):
            return callback(Constant(solution_dot_coefficient)*j_u_dot + j_u)
            
        # Define boundary condition
        def bc_eval(self, t):
            return bc(t)
            
        # Define initial condition
        def ic_eval(self):
            exact_solution_expression.t = 0.
            return project(exact_solution_expression, V)
            
        # Define custom monitor to plot the solution
        def monitor(self, t, solution, solution_dot):
            if matplotlib.get_backend() != "agg":
                plt.subplot(1, 2, 1).clear()
                plot(solution, title="u at t = " + str(t))
                plt.subplot(1, 2, 2).clear()
                plot(solution_dot, title="u_dot at t = " + str(t))
                plt.show(block=False)
                plt.pause(DOLFIN_EPS)
            else:
                print("||u|| at t = " + str(t) + ": " + str(solution.vector().norm("l2")))
                print("||u_dot|| at t = " + str(t) + ": " + str(solution_dot.vector().norm("l2")))

    # Solve the time dependent problem
    sparse_problem_wrapper = SparseProblemWrapper()
    (sparse_solution, sparse_solution_dot) = (u, u_dot)
    sparse_solver = SparseTimeStepping(sparse_problem_wrapper, sparse_solution, sparse_solution_dot)
    sparse_solver.set_parameters({
        "initial_time": 0.0,
        "time_step_size": dt,
        "final_time": T,
        "exact_final_time": "stepover",
        "integrator_type": integrator_type,
        "problem_type": "linear",
        "linear_solver": "mumps",
        "monitor": sparse_problem_wrapper.monitor,
        "report": True
    })
    all_sparse_solutions_time, all_sparse_solutions, all_sparse_solutions_dot = sparse_solver.solve()
    assert len(all_sparse_solutions_time) == int(T/dt + 1)
    assert len(all_sparse_solutions) == int(T/dt + 1)
    assert len(all_sparse_solutions_dot) == int(T/dt + 1)

    # Compute the error
    sparse_error = Function(V)
    sparse_error.vector().add_local(+ sparse_solution.vector().get_local())
    sparse_error.vector().add_local(- exact_solution.vector().get_local())
    sparse_error.vector().apply("")
    sparse_error_norm = sparse_error.vector().inner(X*sparse_error.vector())
    sparse_error_dot = Function(V)
    sparse_error_dot.vector().add_local(+ sparse_solution_dot.vector().get_local())
    sparse_error_dot.vector().add_local(- exact_solution_dot.vector().get_local())
    sparse_error_dot.vector().apply("")
    sparse_error_dot_norm = sparse_error_dot.vector().inner(X*sparse_error_dot.vector())
    print("SparseTimeStepping error (" + callback_type + ", " + integrator_type + "):", sparse_error_norm, sparse_error_dot_norm)
    assert isclose(sparse_error_norm, 0., atol=1.e-4)
    assert isclose(sparse_error_dot_norm, 0., atol=1.e-4)
    return ((sparse_error_norm, sparse_error_dot_norm), V, dt, T, u, u_dot, g, r, j_u, j_u_dot, X, exact_solution_expression, exact_solution, exact_solution_dot)
# Example #15
    def output_term(self,
                    term='LHS',
                    norm='none',
                    units='rescaled',
                    output_label=False):

        # cast params as Constants so that, even if they are set to 0, FEniCS
        # still understands what is being integrated
        mu, M = Constant(self.physics.mu), Constant(self.physics.M)
        lam = Constant(self.physics.lam)
        mn, mf = Constant(self.mn), Constant(self.mf)

        D = self.physics.D

        if units == 'rescaled':
            resc = 1.
            str_phi = '\\hat{\\phi}'
            str_nabla2 = '\\hat{\\nabla}^2'
            str_m2 = '\\left( \\frac{\\mu}{m_n} \\right)^2'
            str_lambdaphi3 = '\\lambda\\left(\\frac{m_f}{m_n}\\right)^2\\hat{\\phi}^3'
            str_rho = '\\frac{m_n^{D-2}}{m_f}\\frac{\\hat{\\rho}}{M}'

        elif units == 'physical':
            resc = self.mn**2 * self.mf
            str_phi = '\\phi'
            str_nabla2 = '\\nabla^2'
            str_m2 = '\\mu^2'
            str_lambdaphi3 = '\\lambda\\phi^3'
            str_rho = '\\frac{\\rho}{M}'

        else:
            message = "Invalid choice of units: valid choices are 'physical' or 'rescaled'."
            raise ValueError(message)

        phi = self.phi

        # define r for use in the computation of the Laplacian
        r = Expression('x[0]', degree=self.fem.func_degree)

        if term == 'LHS':  # I expand manually the Laplacian into (D-1)/r df/dr + d2f/dr2
            Term = Constant(D-1.)/r * phi.dx(0) + phi.dx(0).dx(0) \
                 + (mu/mn)**2*phi - lam*(mf/mn)**2*phi**3
            label = r"$%s%s + %s%s - %s$" % (str_nabla2, str_phi, str_m2,
                                             str_phi, str_lambdaphi3)
        elif term == 'RHS':
            Term = (mn**(D - 2.) / (mf * M)) * self.source.rho
            label = r"$%s$" % (str_rho)
        elif term == 1:
            Term = Constant(D - 1.) / r * phi.dx(0) + phi.dx(0).dx(0)
            label = r"$%s%s$" % (str_nabla2, str_phi)
        elif term == 2:
            Term = +(mu / mn)**2 * phi
            label = r"$%s%s$" % (str_m2, str_phi)
        elif term == 3:
            Term = -lam * (mf / mn)**2 * phi**3
            label = r"$-%s$" % (str_lambdaphi3)
        elif term == 4:
            Term = (mn**(D - 2.) / (mf * M)) * self.source.rho
            label = r"$%s$" % (str_rho)
        # rescale if needed to get physical units
        Term *= resc

        Term = project(Term, self.fem.dS, self.physics.D, self.fem.func_degree)

        # 'none' = return function, not norm
        if norm == 'none':
            result = Term
            # from here on return a norm. This nested if is to preserve the structure of the original
            # built-in FEniCS norm function
        elif norm == 'linf':
            # infinity norm, i.e. max abs value at vertices
            result = rD_norm(Term.vector(),
                             self.physics.D,
                             self.fem.func_degree,
                             norm_type=norm)
        else:
            result = rD_norm(Term,
                             self.physics.D,
                             self.fem.func_degree,
                             norm_type=norm)

        if output_label:
            return result, label
        else:
            return result
# Example #16
def get_lhs_block_form_2(block_V):
    block_u = BlockTrialFunction(block_V)
    block_v = BlockTestFunction(block_V)
    (u1, u2) = block_split(block_u)
    (v1, v2) = block_split(block_v)
    block_form = [[None, None], [None, None]]
    # (1, 1) block
    shape_1 = block_V[0].ufl_element().value_shape()
    if len(shape_1) is 0:
        f1 = Expression("2*x[0] + 4*x[1]*x[1]", degree=2)
        block_form[0][0] = f1*u1*v1.dx(0)*dx
    elif len(shape_1) is 1 and shape_1[0] is 2:
        f1 = Expression(("2*x[0] + 4*x[1]*x[1]", "3*x[0] + 5*x[1]*x[1]"), degree=2)
        block_form[0][0] = (f1[0]*u1[0]*v1[0].dx(0) + f1[1]*u1[1].dx(1)*v1[1])*dx
    elif len(shape_1) is 1 and shape_1[0] is 3:
        f1 = Expression(("2*x[0] + 4*x[1]*x[1]", "3*x[0] + 5*x[1]*x[1]", "7*x[0] + 11*x[1]*x[1]"), degree=2)
        block_form[0][0] = (f1[0]*u1[0]*v1[0].dx(0) + f1[1]*u1[1].dx(1)*v1[1] + f1[2]*u1[2].dx(0)*v1[2].dx(1))*dx
    elif len(shape_1) is 2:
        f1 = Expression((("2*x[0] + 4*x[1]*x[1]", "3*x[0] + 5*x[1]*x[1]"),
                         ("7*x[0] + 11*x[1]*x[1]", "13*x[0] + 17*x[1]*x[1]")), degree=2)
        block_form[0][0] = (f1[0, 0]*u1[0, 0]*v1[0, 0].dx(0) + f1[0, 1]*u1[0, 1].dx(1)*v1[0, 1] + f1[1, 0]*u1[1, 0].dx(0)*v1[1, 0].dx(1) + f1[1, 1]*u1[1, 1].dx(0)*v1[1, 1])*dx
    # (2, 2) block
    shape_2 = block_V[1].ufl_element().value_shape()
    if len(shape_2) is 0:
        f2 = Expression("2*x[1] + 4*x[0]*x[0]", degree=2)
        block_form[1][1] = f2*u2*v2.dx(0)*dx
    elif len(shape_2) is 1 and shape_2[0] is 2:
        f2 = Expression(("2*x[1] + 4*x[0]*x[0]", "3*x[1] + 5*x[0]*x[0]"), degree=2)
        block_form[1][1] = (f2[0]*u2[0]*v2[0].dx(0) + f2[1]*u2[1].dx(1)*v2[1])*dx
    elif len(shape_2) is 1 and shape_2[0] is 3:
        f2 = Expression(("2*x[1] + 4*x[0]*x[0]", "3*x[1] + 5*x[0]*x[0]", "7*x[1] + 11*x[0]*x[0]"), degree=2)
        block_form[1][1] = (f2[0]*u2[0]*v2[0].dx(0) + f2[1]*u2[1].dx(1)*v2[1] + f2[2]*u2[2].dx(0)*v2[2].dx(1))*dx
    elif len(shape_2) is 2:
        f2 = Expression((("2*x[1] + 4*x[0]*x[0]", "3*x[1] + 5*x[0]*x[0]"),
                         ("7*x[1] + 11*x[0]*x[0]", "13*x[1] + 17*x[0]*x[0]")), degree=2)
        block_form[1][1] = (f2[0, 0]*u2[0, 0]*v2[0, 0].dx(0) + f2[0, 1]*u2[0, 1].dx(1)*v2[0, 1] + f2[1, 0]*u2[1, 0].dx(0)*v2[1, 0].dx(1) + f2[1, 1]*u2[1, 1].dx(0)*v2[1, 1])*dx
    # (1, 2) and (2, 1) blocks
    if len(shape_1) is 0:
        if len(shape_2) is 0:
            block_form[0][1] = f1*u2*v1.dx(0)*dx
            block_form[1][0] = f2*u1*v2.dx(0)*dx
        elif len(shape_2) is 1 and shape_2[0] is 2:
            block_form[0][1] = f1*u2[0]*v1.dx(0)*dx + f1*u2[1]*v1.dx(1)*dx
            block_form[1][0] = (f2[0]*u1*v2[0].dx(0) + f2[1]*u1.dx(1)*v2[1])*dx
        elif len(shape_2) is 1 and shape_2[0] is 3:
            block_form[0][1] = f1*u2[0]*v1.dx(0)*dx + f1*u2[1]*v1.dx(1)*dx + f1*u2[2]*v1*dx
            block_form[1][0] = (f2[0]*u1*v2[0].dx(0) + f2[1]*u1.dx(1)*v2[1] + f2[2]*u1.dx(0)*v2[2].dx(1))*dx
        elif len(shape_2) is 2:
            block_form[0][1] = f1*u2[0, 0]*v1.dx(0)*dx + f1*u2[1, 1]*v1.dx(0)*dx
            block_form[1][0] = (f2[0, 0]*u1*v2[0, 0].dx(0) + f2[0, 1]*u1.dx(1)*v2[0, 1] + f2[1, 0]*u1.dx(0)*v2[1, 0].dx(1) + f2[1, 1]*u1.dx(0)*v2[1, 1])*dx
    elif len(shape_1) is 1 and shape_1[0] is 2:
        if len(shape_2) is 0:
            block_form[0][1] = (f1[0]*u2*v1[0].dx(0) + f1[1]*u2.dx(1)*v1[1])*dx
            block_form[1][0] = f2*u1[0]*v2.dx(0)*dx + f2*u1[1]*v2.dx(0)*dx
        elif len(shape_2) is 1 and shape_2[0] is 2:
            block_form[0][1] = (f1[0]*u2[0]*v1[0].dx(0) + f1[1]*u2[1].dx(1)*v1[1])*dx
            block_form[1][0] = (f2[0]*u1[0]*v2[0].dx(0) + f2[1]*u1[1].dx(1)*v2[1])*dx
        elif len(shape_2) is 1 and shape_2[0] is 3:
            block_form[0][1] = (f1[0]*u2[0]*v1[0].dx(0) + f1[1]*u2[1].dx(1)*v1[1] + f1[0]*u2[2]*v1[0])*dx
            block_form[1][0] = (f2[0]*u1[0]*v2[0].dx(0) + f2[1]*u1[1].dx(1)*v2[1] + f2[2]*u1[0].dx(0)*v2[2].dx(1))*dx
        elif len(shape_2) is 2:
            block_form[0][1] = (f1[0]*u2[0, 0]*v1[0].dx(0) + f1[1]*u2[1, 1].dx(1)*v1[1])*dx
            block_form[1][0] = (f2[0, 0]*u1[0]*v2[0, 0].dx(0) + f2[0, 1]*u1[0].dx(1)*v2[0, 1] + f2[1, 0]*u1[1].dx(0)*v2[1, 0].dx(1) + f2[1, 1]*u1[0].dx(0)*v2[1, 1])*dx
    elif len(shape_1) is 1 and shape_1[0] is 3:
        if len(shape_2) is 0:
            block_form[0][1] = (f1[0]*u2*v1[0].dx(0) + f1[1]*u2.dx(1)*v1[1] + f1[2]*u2.dx(0)*v1[2].dx(1))*dx
            block_form[1][0] = f2*u1[0]*v2.dx(0)*dx + f2*u1[1]*v2.dx(1)*dx + f2*u1[2]*v2*dx
        elif len(shape_2) is 1 and shape_2[0] is 2:
            block_form[0][1] = (f1[0]*u2[0]*v1[0].dx(0) + f1[1]*u2[1].dx(1)*v1[1] + f1[2]*u2[0].dx(0)*v1[2].dx(1))*dx
            block_form[1][0] = (f2[0]*u1[0]*v2[0].dx(0) + f2[1]*u1[1].dx(1)*v2[1] + f2[1]*u1[2].dx(1)*v2[1])*dx
        elif len(shape_2) is 1 and shape_2[0] is 3:
            block_form[0][1] = (f1[0]*u2[0]*v1[0].dx(0) + f1[1]*u2[1].dx(1)*v1[1] + f1[2]*u2[2].dx(0)*v1[2].dx(1))*dx
            block_form[1][0] = (f2[0]*u1[0]*v2[0].dx(0) + f2[1]*u1[1].dx(1)*v2[1] + f2[2]*u1[2].dx(0)*v2[2].dx(1))*dx
        elif len(shape_2) is 2:
            block_form[0][1] = (f1[0]*u2[0, 0]*v1[0].dx(0) + f1[1]*u2[1, 0].dx(1)*v1[1] + f1[2]*u2[0, 1].dx(0)*v1[2].dx(1) + f1[0]*u2[1, 1]*v1[0].dx(1))*dx
            block_form[1][0] = (f2[0, 0]*u1[0]*v2[0, 0].dx(0) + f2[0, 1]*u1[1].dx(1)*v2[0, 1] + f2[1, 0]*u1[2].dx(0)*v2[1, 0].dx(1) + f2[1, 1]*u1[0].dx(0)*v2[1, 1])*dx
    elif len(shape_1) is 2:
        if len(shape_2) is 0:
            block_form[0][1] = (f1[0, 0]*u2*v1[0, 0].dx(0) + f1[0, 1]*u2.dx(1)*v1[0, 1] + f1[1, 0]*u2.dx(0)*v1[1, 0].dx(1) + f1[1, 1]*u2.dx(0)*v1[1, 1])*dx
            block_form[1][0] = f2*u1[0, 0]*v2.dx(0)*dx + f2*u1[1, 1]*v2.dx(1)*dx
        elif len(shape_2) is 1 and shape_2[0] is 2:
            block_form[0][1] = (f1[0, 0]*u2[0]*v1[0, 0].dx(0) + f1[0, 1]*u2[0].dx(1)*v1[0, 1] + f1[1, 0]*u2[1].dx(0)*v1[1, 0].dx(1) + f1[1, 1]*u2[1].dx(0)*v1[1, 1])*dx
            block_form[1][0] = (f2[0]*u1[0, 0]*v2[0].dx(0) + f2[1]*u1[1, 1].dx(1)*v2[1])*dx
        elif len(shape_2) is 1 and shape_2[0] is 3:
            block_form[0][1] = (f1[0, 0]*u2[0]*v1[0, 0].dx(0) + f1[0, 1]*u2[1].dx(1)*v1[0, 1] + f1[1, 0]*u2[2].dx(0)*v1[1, 0].dx(1) + f1[1, 1]*u2[0].dx(0)*v1[1, 1])*dx
            block_form[1][0] = (f2[0]*u1[0, 0]*v2[0].dx(0) + f2[1]*u1[1, 0].dx(1)*v2[1] + f2[2]*u1[0, 1].dx(0)*v2[2].dx(1) + f2[0]*u1[1, 1]*v2[0].dx(1))*dx
        elif len(shape_2) is 2:
            block_form[0][1] = (f1[0, 0]*u2[0, 0]*v1[0, 0].dx(0) + f1[0, 1]*u2[0, 1].dx(1)*v1[0, 1] + f1[1, 0]*u2[1, 0].dx(0)*v1[1, 0].dx(1) + f1[1, 1]*u2[1, 1].dx(0)*v1[1, 1])*dx
            block_form[1][0] = (f2[0, 0]*u1[0, 0]*v2[0, 0].dx(0) + f2[0, 1]*u1[0, 1].dx(1)*v2[0, 1] + f2[1, 0]*u1[1, 0].dx(0)*v2[1, 0].dx(1) + f2[1, 1]*u1[1, 1].dx(0)*v2[1, 1])*dx
    return block_form
# Example #17
    def compute_derrick(self):

        D = self.physics.D

        r = Expression('x[0]', degree=self.fem.func_degree)
        # r^(D-1)
        rD = Expression('pow(x[0],D-1)', D=D, degree=self.fem.func_degree)

        mu, M = Constant(self.physics.mu), Constant(self.physics.M)
        lam = Constant(self.physics.lam)
        mn = Constant(self.mn)

        r_values, Phi_values = get_values(self.Phi, output_mesh=True)

        # Beyond some radius, the numerical value of the potential energy drops
        # below machine precision on its biggest component (at those radii the
        # biggest component is the vacuum energy). Analytically, the integral over
        # that and larger radii should be close to 0, so to avoid integrating
        # numerical noise (and blowing it up by r^D) we restrict integration to
        # the submesh where the potential energy is resolved within machine
        # precision.

        # find radius at which potential energy drops below machine precision
        vacuum_energy = self.physics.mu**4 / (4. * self.physics.lam)
        eV = lam / 4. * self.Phi**4 - mu**2 / 2. * self.Phi**2 + Constant(
            vacuum_energy)
        eV_values = self.physics.lam / 4. * Phi_values**4 - self.physics.mu**2 / 2. * Phi_values**2 + vacuum_energy
        eV_idx_wrong = np.where(eV_values < d.DOLFIN_EPS * vacuum_energy)[0][0]
        eV_r_wrong = r_values[eV_idx_wrong]

        # define a submesh where the potential energy density is resolved
        class eV_Resolved(SubDomain):
            def inside(self, x, on_boundary):
                return x[0] < eV_r_wrong

        eV_resolved = eV_Resolved()
        eV_subdomain = d.CellFunction('size_t', self.fem.mesh.mesh)
        eV_subdomain.set_all(0)
        eV_resolved.mark(eV_subdomain, 1)
        eV_submesh = d.SubMesh(self.fem.mesh.mesh, eV_subdomain, 1)

        # integrate potential energy density
        E_V = d.assemble(eV * rD * dx(eV_submesh))
        E_V /= self.mn**D  # get physical distances - integral now has mass dimension 4 - D

        # kinetic energy - here we are limited by the machine precision on the gradient
        # the numerical value of the field is limited by the machine precision on the VEV, which
        # we are going to use as threshold
        eK_idx_wrong = np.where(
            abs(Phi_values - self.physics.Vev) < d.DOLFIN_EPS *
            self.physics.Vev)[0][0]
        eK_r_wrong = r_values[eK_idx_wrong]

        # define a submesh where the kinetic energy density is resolved
        class eK_Resolved(SubDomain):
            def inside(self, x, on_boundary):
                return x[0] < eK_r_wrong

        eK_resolved = eK_Resolved()
        eK_subdomain = d.CellFunction('size_t', self.fem.mesh.mesh)
        eK_subdomain.set_all(0)
        eK_resolved.mark(eK_subdomain, 1)
        eK_submesh = d.SubMesh(self.fem.mesh.mesh, eK_subdomain, 1)

        # integrate kinetic energy density
        eK = Constant(0.5) * self.grad_Phi**2
        E_K = d.assemble(eK * rD * dx(eK_submesh))
        E_K /= self.mn**D  # get physical distances - integral now has mass dimension 4 - D

        # matter coupling energy
        erho = self.source.rho / M * self.Phi
        E_rho = d.assemble(
            erho * rD *
            dx)  # rescaled rho, and so the integral, has mass dimension 4 - D

        # integral terms of Derrick's theorem
        derrick1 = (D - 2.) * E_K + D * (E_V + E_rho)
        derrick4 = 2. * (D - 2.) * E_K

        # non-integral terms of Derrick's theorem - these have mass dimension 4 - D
        derrick2 = self.source.Rho_bar * self.source.Rs**D * \
                   self.Phi(self.fem.mesh.rs) / self.physics.M
        derrick3 = self.source.Rho_bar * self.source.Rs**(D+1.) * \
                   self.grad_Phi(self.fem.mesh.rs) / self.physics.M

        self.derrick = [derrick1, derrick2, derrick3, derrick4]
# Example #18
 def generate_shape_expression():
     return Expression(
         NormalInflow._shape_expression_schema, degree=2, amplitude=1.0
     )
 def initial_guess():
     initial_guess_expression = Expression("0.1 + 0.9 * x[0]",
                                           element=V.ufl_element())
     return project(initial_guess_expression, V)
# Example #20
def problem_coscos():
    """cosine example.
    """
    def mesh_generator(n):
        mesh = UnitSquareMesh(n, n, "left/right")
        dim = mesh.topology().dim()
        domains = MeshFunction("size_t", mesh, dim)
        domains.set_all(0)
        dx = Measure("dx", subdomain_data=domains)
        boundaries = MeshFunction("size_t", mesh, dim - 1)
        boundaries.set_all(0)
        ds = Measure("ds", subdomain_data=boundaries)
        return mesh, dx, ds

    x = sympy.DeferredVector("x")

    # Choose the solution (specifically, its parameters) such that the boundary
    # conditions are fulfilled exactly, namely:
    #
    #    sol(x) = 0   for x[0] == 0, and
    #    dot(n, grad(sol)) = 0    everywhere else.
    #
    alpha = 2 * pi
    r1 = 1.0
    beta = numpy.cos(alpha * r1) - r1 * alpha * numpy.sin(alpha * r1)

    solution = {
        "value": (
            beta * (1.0 - sympy.cos(alpha * x[0])),
            beta * (1.0 - sympy.cos(alpha * x[0]))
            # beta * sympy.sin(alpha * x[0]),
            # beta * sympy.sin(alpha * x[0])
        ),
        "degree":
        MAX_DEGREE,
    }

    # Produce a matching right-hand side.
    phi = solution["value"]
    mu = 1.0
    sigma = 1.0
    omega = 1.0
    rhs_sympy = (
        -sympy.diff(1 / (mu * x[0]) * sympy.diff(x[0] * phi[0], x[0]), x[0]) -
        sympy.diff(1 / (mu * x[0]) * sympy.diff(x[0] * phi[0], x[1]), x[1]) -
        omega * sigma * phi[1],
        -sympy.diff(1 / (mu * x[0]) * sympy.diff(x[0] * phi[1], x[0]), x[0]) -
        sympy.diff(1 / (mu * x[0]) * sympy.diff(x[0] * phi[1], x[1]), x[1]) +
        omega * sigma * phi[0],
    )

    rhs_sympy = (sympy.simplify(rhs_sympy[0]), sympy.simplify(rhs_sympy[1]))

    # The rhs expressions contain terms like 1/x[0]. If naively evaluated, this
    # will result in NaNs, even at points where x[0] != 0. This is because,
    # by default, expressions get interpolated to polynomials.
    # See
    # <https://fenicsproject.org/qa/12796/1-x-near-boundary-nans-where-there-shouldnt-be-nans>,
    # <https://bitbucket.org/fenics-project/dolfin/issues/831/some-problems-with-quadrature-expressions>.
    # for a workaround.
    Q = FiniteElement("Quadrature",
                      triangle,
                      degree=MAX_DEGREE,
                      quad_scheme="default")
    rhs = {
        "value": (
            Expression(helpers.ccode(rhs_sympy[0]), element=Q),
            Expression(helpers.ccode(rhs_sympy[1]), element=Q),
        ),
        "degree":
        MAX_DEGREE,
    }

    # Show the solution and the right-hand side.
    show = False
    if show:
        from dolfin import plot, interactive

        n = 50
        mesh, _, _ = mesh_generator(n)
        plot(Expression(helpers.ccode(phi[0])), mesh=mesh, title="phi.real")
        plot(Expression(helpers.ccode(phi[1])), mesh=mesh, title="phi.imag")
        plot(rhs[0], mesh=mesh, title="f.real")
        plot(rhs[1], mesh=mesh, title="f.imag")
        interactive()
    return mesh_generator, solution, rhs, triangle
# Example #21
import numpy as np
from dolfin import (
    interpolate,
    Constant,
    Expression,
    FunctionSpace,
    UnitIntervalMesh,
)

from tests.utils.solves import assert_solves
from tests.utils.types import assemble_with_mixed_types

MESH = UnitIntervalMesh(32)
EXACT_EXPRESSION = "sin(DOLFIN_PI * x[0])"
EXACT = Expression(EXACT_EXPRESSION, degree=4)

DIFFUSIVITY = 1.0 / (np.pi * np.pi)
DIFFUSION_TERM = f"DOLFIN_PI * DOLFIN_PI * {DIFFUSIVITY} * {EXACT_EXPRESSION}"

CONVECTIVITY = [1.0 / np.pi]
CONVECTION_TERM = "-cos(DOLFIN_PI * x[0])"

REACTIVITY = 1.0
REACTION_TERM = EXACT_EXPRESSION


def test_1d_types():
    mesh = UnitIntervalMesh(4)
    CG = FunctionSpace(mesh, "CG", 1)
    DG = FunctionSpace(mesh, "DG", 0)
    diffusion = Expression("1. + x[0] * (1. - x[0])", degree=2)
# Example #22
def test_call(R, V, W, Q, mesh):
    u0 = Function(R)
    u1 = Function(V)
    u2 = Function(W)
    u3 = Function(Q)

    @function.expression.numba_eval
    def expr_eval1(values, x, t):
        values[:, 0] = x[:, 0] + x[:, 1] + x[:, 2]

    e1 = Expression(expr_eval1)

    @function.expression.numba_eval
    def expr_eval2(values, x, t):
        values[:, 0] = x[:, 0] + x[:, 1] + x[:, 2]
        values[:, 1] = x[:, 0] - x[:, 1] - x[:, 2]
        values[:, 2] = x[:, 0] + x[:, 1] + x[:, 2]

    e2 = Expression(expr_eval2, shape=(3, ))

    @function.expression.numba_eval
    def expr_eval3(values, x, t):
        values[:, 0] = x[:, 0] + x[:, 1] + x[:, 2]
        values[:, 1] = x[:, 0] - x[:, 1] - x[:, 2]
        values[:, 2] = x[:, 0] + x[:, 1] + x[:, 2]
        values[:, 3] = x[:, 0]
        values[:, 4] = x[:, 1]
        values[:, 5] = x[:, 2]
        values[:, 6] = -x[:, 0]
        values[:, 7] = -x[:, 1]
        values[:, 8] = -x[:, 2]

    e3 = Expression(expr_eval3, shape=(3, 3))

    u0.vector().set(1.0)
    u1.interpolate(e1)
    u2.interpolate(e2)
    u3.interpolate(e3)

    p0 = ((Vertex(mesh, 0).point() + Vertex(mesh, 1).point()) / 2.0)
    x0 = (mesh.geometry.x(0) + mesh.geometry.x(1)) / 2.0

    tree = cpp.geometry.BoundingBoxTree(mesh, mesh.geometry.dim)

    assert np.allclose(u0(x0, tree), u0(x0, tree))
    assert np.allclose(u0(x0, tree), u0(p0, tree))
    assert np.allclose(u1(x0, tree), u1(x0, tree))
    assert np.allclose(u1(x0, tree), u1(p0, tree))
    assert np.allclose(u2(x0, tree)[0], u1(p0, tree))

    assert np.allclose(u2(x0, tree), u2(p0, tree))
    assert np.allclose(u3(x0, tree)[:3], u2(x0, tree), rtol=1e-15, atol=1e-15)

    p0_list = [p for p in p0]
    x0_list = [x for x in x0]
    assert np.allclose(u0(x0_list, tree), u0(x0_list, tree))
    assert np.allclose(u0(x0_list, tree), u0(p0_list, tree))

    with pytest.raises(ValueError):
        u0([0, 0, 0, 0], tree)
    with pytest.raises(ValueError):
        u0([0, 0], tree)
# Example #23
from LagrangianParticles import LagrangianParticles
from dolfin import VectorFunctionSpace, interpolate, UnitSquareMesh, Expression, info
import numpy as np

dt = 0.01

# Initial
mesh = UnitSquareMesh(20, 20)
x = np.linspace(0.25, 0.75, 1000)
y = 0.5*np.ones_like(x)
x, y = np.r_[x, y], np.r_[y, x]
particle_positions = np.c_[x, y]

# At one dt one rigid body rotation around dt
particle_positions_dt = particle_positions + dt*np.c_[-(y-0.5), (x-0.5)]

V = VectorFunctionSpace(mesh, 'CG', 1)
lp = LagrangianParticles(V)
lp.add_particles(particle_positions, properties_d={'dt position': particle_positions_dt})

# Time travel
u = interpolate(Expression(('-(x[1]-0.5)', '(x[0]-0.5)'), degree=1), V)
lp.step(u, dt=dt)

e = [np.linalg.norm(p.position-p.properties['dt position']) < 1E-15 for p in lp]
info('Has %d particles' % len(e))
assert all(e)
# Example #24
def test_assign(V, W):
    for V0, V1, vector_space in [(V, W, False), (W, V, True)]:
        u = Function(V0)
        u0 = Function(V0)
        u1 = Function(V0)
        u2 = Function(V0)
        u3 = Function(V1)

        u.vector()[:] = 1.0
        u0.vector()[:] = 2.0
        u1.vector()[:] = 3.0
        u2.vector()[:] = 4.0
        u3.vector()[:] = 5.0

        uu = Function(V0)
        uu.assign(2 * u)
        assert uu.vector().get_local().sum() == u0.vector().get_local().sum()

        uu = Function(V1)
        uu.assign(3 * u)
        assert uu.vector().get_local().sum() == u1.vector().get_local().sum()

        # Test complex assignment
        expr = 3 * u - 4 * u1 - 0.1 * 4 * u * 4 + u2 + 3 * u0 / 3. / 0.5
        expr_scalar = 3 - 4 * 3 - 0.1 * 4 * 4 + 4. + 3 * 2. / 3. / 0.5
        uu.assign(expr)
        assert (round(
            uu.vector().get_local().sum() -
            float(expr_scalar * uu.vector().size()), 7) == 0)

        # Test expression scaling
        expr = 3 * expr
        expr_scalar *= 3
        uu.assign(expr)
        assert (round(
            uu.vector().get_local().sum() -
            float(expr_scalar * uu.vector().size()), 7) == 0)

        # Test expression scaling
        expr = expr / 4.5
        expr_scalar /= 4.5
        uu.assign(expr)
        assert (round(
            uu.vector().get_local().sum() -
            float(expr_scalar * uu.vector().size()), 7) == 0)

        # Test self assignment
        expr = 3 * u - 5.0 * u2 + u1 - 5 * u
        expr_scalar = 3 - 5 * 4. + 3. - 5
        u.assign(expr)
        assert (round(
            u.vector().get_local().sum() -
            float(expr_scalar * u.vector().size()), 7) == 0)

        # Test zero assignment
        u.assign(-u2 / 2 + 2 * u1 - u1 / 0.5 + u2 * 0.5)
        assert round(u.vector().get_local().sum() - 0.0, 7) == 0

        # Test erroneous assignments
        uu = Function(V1)

        @function.expression.numba_eval
        def expr_eval(values, x, t):
            values[:, 0] = 1.0

        f = Expression(expr_eval)

        with pytest.raises(RuntimeError):
            uu.assign(1.0)
        with pytest.raises(RuntimeError):
            uu.assign(4 * f)

        if not vector_space:
            with pytest.raises(RuntimeError):
                uu.assign(u * u0)
            with pytest.raises(RuntimeError):
                uu.assign(4 / u0)
            with pytest.raises(RuntimeError):
                uu.assign(4 * u * u1)
Example #25
    def __init__(self):

        GMSH_EPS = 1.0e-15

        # https://fenicsproject.org/qa/12891/initialize-mesh-from-vertices-connectivities-at-once
        points, cells, point_data, cell_data, _ = meshes.crucible_with_coils.generate(
        )

        # Convert the cell data to 'uint' so we can pick a size_t MeshFunction
        # below as usual.
        for k0 in cell_data:
            for k1 in cell_data[k0]:
                cell_data[k0][k1] = numpy.array(cell_data[k0][k1],
                                                dtype=numpy.dtype("uint"))

        with TemporaryDirectory() as temp_dir:
            tmp_filename = os.path.join(temp_dir, "test.xml")
            meshio.write_points_cells(
                tmp_filename,
                points,
                cells,
                cell_data=cell_data,
                file_format="dolfin-xml",
            )
            self.mesh = Mesh(tmp_filename)
            self.subdomains = MeshFunction(
                "size_t", self.mesh,
                os.path.join(temp_dir, "test_gmsh:physical.xml"))

        self.subdomain_materials = {
            1: my_materials.porcelain,
            2: materials.argon,
            3: materials.gallium_arsenide_solid,
            4: materials.gallium_arsenide_liquid,
            27: materials.air,
        }

        # coils
        for k in range(5, 27):
            self.subdomain_materials[k] = my_materials.ek90

        # Define the subdomains which together form a single coil.
        self.coil_domains = [
            [5, 6, 7, 8, 9],
            [10, 11, 12, 13, 14],
            [15, 16, 17, 18, 19],
            [20, 21, 22, 23],
            [24, 25, 26],
        ]

        self.wpi = 4

        self.submesh_workpiece = SubMesh(self.mesh, self.subdomains, self.wpi)

        # http://fenicsproject.org/qa/2026/submesh-workaround-for-parallel-computation
        # submesh_parallel_bug_fixed = False
        # if submesh_parallel_bug_fixed:
        #     submesh_workpiece = SubMesh(self.mesh, self.subdomains, self.wpi)
        # else:
        #     # To get the mesh in parallel, we need to read it in from a file.
        #     # Writing out can only happen in serial mode, though. :/
        #     base = os.path.join(current_path,
        #                         '../../meshes/2d/crucible-with-coils-submesh'
        #                         )
        #     filename = base + '.xml'
        #     if not os.path.isfile(filename):
        #         warnings.warn(
        #             'Submesh file \'{}\' does not exist. Creating... '.format(
        #             filename
        #             ))
        #         if MPI.size(mpi_comm_world()) > 1:
        #             raise RuntimeError(
        #                 'Can only write submesh in serial mode.'
        #                 )
        #         submesh_workpiece = \
        #             SubMesh(self.mesh, self.subdomains, self.wpi)
        #         output_stream = File(filename)
        #         output_stream << submesh_workpiece
        #     # Read the mesh
        #     submesh_workpiece = Mesh(filename)

        coords = self.submesh_workpiece.coordinates()
        ymin = min(coords[:, 1])
        ymax = max(coords[:, 1])

        # Find the top right point.
        k = numpy.argmax(numpy.sum(coords, 1))
        topright = coords[k, :]

        # Initialize mesh function for boundary domains
        class Left(SubDomain):
            def inside(self, x, on_boundary):
                # Explicitly exclude the lowest and the highest point of the
                # symmetry axis.
                # It is necessary for the consistency of the pressure-Poisson
                # system in the Navier-Stokes solver that the velocity is
                # exactly 0 at the boundary r>0. Hence, at the corner points
                # (r=0, melt-crucible, melt-crystal) we must enforce u=0
                # already and cannot have a component in z-direction.
                return (on_boundary and x[0] < GMSH_EPS
                        and x[1] < ymax - GMSH_EPS and x[1] > ymin + GMSH_EPS)

        class Crucible(SubDomain):
            def inside(self, x, on_boundary):
                return on_boundary and (
                    (x[0] > GMSH_EPS and x[1] < ymax - GMSH_EPS) or
                    (x[0] > topright[0] - GMSH_EPS
                     and x[1] > topright[1] - GMSH_EPS) or
                    (x[0] < GMSH_EPS and x[1] < ymin + GMSH_EPS))

        # At the top right part (boundary melt--gas), slip is allowed, so only
        # n.u=0 is enforced. Very weirdly, the PPE is consistent if and only if
        # the end points of UpperRight are in UpperRight. This contrasts with
        # Left(), where the end points must NOT belong to Left(). Judging from
        # the experiments, these settings do the right thing.
        # TODO try to better understand the PPE system/dolfin's boundary
        # settings
        class Upper(SubDomain):
            def inside(self, x, on_boundary):
                return on_boundary and x[1] > ymax - GMSH_EPS

        class UpperRight(SubDomain):
            def inside(self, x, on_boundary):
                return (on_boundary and x[1] > ymax - GMSH_EPS
                        and x[0] > 0.038 - GMSH_EPS)

        # The crystal boundary is taken to reach up to 0.038 where the
        # Dirichlet boundary data is about the melting point of the crystal,
        # 1511K. This setting gives pretty acceptable results when there is no
        # convection except the one induced by buoyancy. If there is any more
        # stirring going on, though, the end point of the crystal with its
        # fixed temperature of 1511K might be the hottest point globally. This
        # looks rather unphysical.
        # TODO check out alternatives
        class UpperLeft(SubDomain):
            def inside(self, x, on_boundary):
                return (on_boundary and x[1] > ymax - GMSH_EPS
                        and x[0] < 0.038 + GMSH_EPS)

        left = Left()
        crucible = Crucible()
        upper_left = UpperLeft()
        upper_right = UpperRight()

        self.wp_boundaries = MeshFunction(
            "size_t",
            self.submesh_workpiece,
            self.submesh_workpiece.topology().dim() - 1,
        )
        self.wp_boundaries.set_all(0)
        left.mark(self.wp_boundaries, 1)
        crucible.mark(self.wp_boundaries, 2)
        upper_right.mark(self.wp_boundaries, 3)
        upper_left.mark(self.wp_boundaries, 4)

        if DEBUG:
            from dolfin import plot, interactive

            plot(self.wp_boundaries, title="Boundaries")
            interactive()

        submesh_boundary_indices = {
            "left": 1,
            "crucible": 2,
            "upper right": 3,
            "upper left": 4,
        }

        # Boundary conditions for the velocity.
        #
        # [1] Incompressible flow and the finite element method; volume two;
        #     Isothermal Laminar Flow;
        #     P.M. Gresho, R.L. Sani;
        #
        # For the choice of function space, [1] says:
        #     "In 2D, the triangular elements P_2^+P_1 and P_2^+P_{-1} are very
        #      good [...]. [...] If you wish to avoid bubble functions on
        #      triangular elements, P_2P_1 is not bad, and P_2(P_1+P_0) is even
        #      better [...]."
        #
        # It turns out that adding the bubble space significantly hampers the
        # convergence of the Stokes solver and also considerably increases the
        # time it takes to construct the Jacobian matrix of the Navier--Stokes
        # problem if no optimization is applied.
        V_element = FiniteElement("CG", self.submesh_workpiece.ufl_cell(), 2)
        with_bubbles = False
        if with_bubbles:
            V_element += FiniteElement("B", self.submesh_workpiece.ufl_cell(),
                                       2)
        self.W_element = MixedElement(3 * [V_element])
        self.W = FunctionSpace(self.submesh_workpiece, self.W_element)

        rot0 = Expression(("0.0", "0.0", "-2*pi*x[0] * 5.0/60.0"), degree=1)
        # rot0 = (0.0, 0.0, 0.0)
        rot1 = Expression(("0.0", "0.0", "2*pi*x[0] * 5.0/60.0"), degree=1)
        self.u_bcs = [
            DirichletBC(self.W, rot0, crucible),
            DirichletBC(self.W.sub(0), 0.0, left),
            DirichletBC(self.W.sub(2), 0.0, left),
            # Make sure that u[2] is 0 at r=0.
            DirichletBC(self.W, rot1, upper_left),
            DirichletBC(self.W.sub(1), 0.0, upper_right),
        ]
        self.p_bcs = []

        self.P_element = FiniteElement("CG", self.submesh_workpiece.ufl_cell(),
                                       1)
        self.P = FunctionSpace(self.submesh_workpiece, self.P_element)

        self.Q_element = FiniteElement("CG", self.submesh_workpiece.ufl_cell(),
                                       2)
        self.Q = FunctionSpace(self.submesh_workpiece, self.Q_element)

        # Dirichlet.
        # This is a bit of a tough call since the boundary conditions need to
        # be read from a Tecplot file here.
        filename = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                "data/crucible-boundary.dat")
        data = tecplot_reader.read(filename)
        RZ = numpy.c_[data["ZONE T"]["node data"]["r"],
                      data["ZONE T"]["node data"]["z"]]
        T_vals = data["ZONE T"]["node data"]["temp. [K]"]

        class TecplotDirichletBC(Expression):
            def eval(self, value, x):
                # Find the edge on which x sits; if no edge is found, fall
                # back to a dummy value below.
                edge_found = False
                for edge in data["ZONE T"]["element data"]:
                    # Given a point X and an edge X0--X1,
                    #
                    #     (1 - theta) X0 + theta X1,
                    #
                    # the distance is minimized at
                    #
                    #    theta* = argmin_theta ||(1-theta) X0 + theta X1 - X||^2
                    #           = <X1 - X0, X - X0> / ||X1 - X0||^2.
                    #
                    # If the distance is 0 and 0<=theta<=1, we found the edge.
                    #
                    # Note that edges are 1-based in Tecplot.
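                    # Quick numeric illustration (made-up values, not from the
                    # data file): X0=(0, 0), X1=(2, 0), x=(1, 0.1) gives
                    # theta=0.5 and diff=(0, -0.1), so dot(diff, diff)=1e-2
                    # exceeds the 1e-10 tolerance and the edge is rejected.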
                    X0 = RZ[edge[0] - 1]
                    X1 = RZ[edge[1] - 1]
                    theta = numpy.dot(X1 - X0, x - X0) / numpy.dot(
                        X1 - X0, X1 - X0)
                    diff = (1.0 - theta) * X0 + theta * X1 - x
                    if (numpy.dot(diff, diff) < 1.0e-10 and 0.0 <= theta
                            and theta <= 1.0):
                        # Linear interpolation of the temperature value.
                        value[0] = (1.0 - theta) * T_vals[
                            edge[0] - 1] + theta * T_vals[edge[1] - 1]
                        edge_found = True
                        break
                # This class is supposed to be used for Dirichlet boundary
                # conditions. For some reason, FEniCS also evaluates
                # DirichletBC objects at coordinates which do not sit on the
                # boundary, see
                # <http://fenicsproject.org/qa/1033/dirichletbc-expressions-evaluated-away-from-the-boundary>.
                # The assigned values have no meaning there, so falling back
                # to a dummy value[0] = 0.0 below is okay.
                #
                # from matplotlib import pyplot as pp
                # pp.plot(x[0], x[1], 'xg')
                if not edge_found:
                    value[0] = 0.0
                    if False:
                        warnings.warn(
                            "Coordinate ({:e}, {:e}) doesn't sit on edge.".
                            format(x[0], x[1]))
                    # pp.plot(RZ[:, 0], RZ[:, 1], '.k')
                    # pp.plot(x[0], x[1], 'xr')
                    # pp.show()
                    # raise RuntimeError('Input coordinate '
                    #                    '{} is not on boundary.'.format(x))
                return

        tecplot_dbc = TecplotDirichletBC(degree=5)
        self.theta_bcs_d = [DirichletBC(self.Q, tecplot_dbc, upper_left)]
        self.theta_bcs_d_strict = [
            DirichletBC(self.Q, tecplot_dbc, upper_right),
            DirichletBC(self.Q, tecplot_dbc, crucible),
            DirichletBC(self.Q, tecplot_dbc, upper_left),
        ]

        # Neumann
        dTdr_vals = data["ZONE T"]["node data"]["dTempdx [K/m]"]
        dTdz_vals = data["ZONE T"]["node data"]["dTempdz [K/m]"]

        class TecplotNeumannBC(Expression):
            def eval(self, value, x):
                # Same problem as above: This expression is not only evaluated
                # at boundaries.
                for edge in data["ZONE T"]["element data"]:
                    X0 = RZ[edge[0] - 1]
                    X1 = RZ[edge[1] - 1]
                    theta = numpy.dot(X1 - X0, x - X0) / numpy.dot(
                        X1 - X0, X1 - X0)
                    dist = numpy.linalg.norm((1 - theta) * X0 + theta * X1 - x)
                    if dist < 1.0e-5 and 0.0 <= theta and theta <= 1.0:
                        value[0] = (1 - theta) * dTdr_vals[
                            edge[0] - 1] + theta * dTdr_vals[edge[1] - 1]
                        value[1] = (1 - theta) * dTdz_vals[
                            edge[0] - 1] + theta * dTdz_vals[edge[1] - 1]
                        break
                return

            def value_shape(self):
                return (2, )

        tecplot_nbc = TecplotNeumannBC(degree=5)
        n = FacetNormal(self.Q.mesh())
        self.theta_bcs_n = {
            submesh_boundary_indices["upper right"]: dot(n, tecplot_nbc),
            submesh_boundary_indices["crucible"]: dot(n, tecplot_nbc),
        }
        self.theta_bcs_r = {}

        # It seems that the boundary conditions from above are inconsistent in
        # that solving with Dirichlet overall and mixed Dirichlet-Neumann give
        # different results; the two sets of values cannot both correspond to
        # the same solution.
        # From looking at the solutions, the pure Dirichlet setting appears
        # correct, so extract the Neumann values directly from that solution.

        # Pick fixed coefficients roughly at the temperature that we expect.
        # This could be made less magic by having the coefficients depend on
        # theta and solving the quasilinear equation.
        temp_estimate = 1550.0

        # Get material parameters
        wp_material = self.subdomain_materials[self.wpi]
        if isinstance(wp_material.specific_heat_capacity, float):
            cp = wp_material.specific_heat_capacity
        else:
            cp = wp_material.specific_heat_capacity(temp_estimate)
        if isinstance(wp_material.density, float):
            rho = wp_material.density
        else:
            rho = wp_material.density(temp_estimate)
        if isinstance(wp_material.thermal_conductivity, float):
            k = wp_material.thermal_conductivity
        else:
            k = wp_material.thermal_conductivity(temp_estimate)

        reference_problem = cyl_heat.Heat(
            self.Q,
            convection=None,
            kappa=k,
            rho=rho,
            cp=cp,
            source=Constant(0.0),
            dirichlet_bcs=self.theta_bcs_d_strict,
        )
        theta_reference = reference_problem.solve_stationary()
        theta_reference.rename("theta", "temperature (Dirichlet)")

        # Create equivalent boundary conditions from theta_ref. This
        # makes sure that the potentially expensive Expression evaluation in
        # theta_bcs_* is replaced by something reasonably cheap.
        self.theta_bcs_d = [
            DirichletBC(bc.function_space(), theta_reference,
                        bc.domain_args[0]) for bc in self.theta_bcs_d
        ]
        # Adapt Neumann conditions.
        n = FacetNormal(self.Q.mesh())
        self.theta_bcs_n = {
            k: dot(n, grad(theta_reference))
            # k: Constant(1000.0)
            for k in self.theta_bcs_n
        }

        if DEBUG:
            # Solve the heat equation with the mixed Dirichlet-Neumann
            # boundary conditions and compare it to the Dirichlet-only
            # solution.
            theta_new = Function(self.Q,
                                 name="temperature (Neumann + Dirichlet)")
            from dolfin import Measure

            ds_workpiece = Measure("ds", subdomain_data=self.wp_boundaries)

            heat = cyl_heat.Heat(
                self.Q,
                convection=None,
                kappa=k,
                rho=rho,
                cp=cp,
                source=Constant(0.0),
                dirichlet_bcs=self.theta_bcs_d,
                neumann_bcs=self.theta_bcs_n,
                robin_bcs=self.theta_bcs_r,
                my_ds=ds_workpiece,
            )
            theta_new = heat.solve_stationary()
            theta_new.rename("theta", "temperature (Neumann + Dirichlet)")

            from dolfin import plot, interactive, errornorm

            print("||theta_new - theta_ref|| = {:e}".format(
                errornorm(theta_new, theta_reference)))
            plot(theta_reference)
            plot(theta_new)
            plot(theta_reference - theta_new, title="theta_ref - theta_new")
            interactive()

        self.background_temp = 1400.0

        # self.omega = 2 * pi * 10.0e3
        self.omega = 2 * pi * 300.0

        return
def ParametrizedExpression(truth_problem,
                           parametrized_expression_code=None,
                           *args,
                           **kwargs):
    if parametrized_expression_code is None:
        return None

    assert "mu" in kwargs
    mu = kwargs["mu"]
    assert mu is not None
    assert isinstance(mu, tuple)
    P = len(mu)
    for p in range(P):
        assert isinstance(parametrized_expression_code, (tuple, str))
        if isinstance(parametrized_expression_code, tuple):
            if isinstance(parametrized_expression_code[0], tuple):
                matrix_after_replacements = list()
                for row in parametrized_expression_code:
                    assert isinstance(row, tuple)
                    new_row = list()
                    for item in row:
                        assert isinstance(item, str)
                        new_row.append(
                            item.replace("mu[" + str(p) + "]", "mu_" + str(p)))
                    new_row = tuple(new_row)
                    matrix_after_replacements.append(new_row)
                parametrized_expression_code = tuple(matrix_after_replacements)
            else:
                vector_after_replacements = list()
                for item in parametrized_expression_code:
                    assert isinstance(item, str)
                    vector_after_replacements.append(
                        item.replace("mu[" + str(p) + "]", "mu_" + str(p)))
                parametrized_expression_code = tuple(vector_after_replacements)
        elif isinstance(parametrized_expression_code, str):
            parametrized_expression_code = parametrized_expression_code.replace(
                "mu[" + str(p) + "]", "mu_" + str(p))
        else:
            raise TypeError(
                "Invalid expression type in ParametrizedExpression")

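    # Net effect of the loop above, with hypothetical values for illustration:
    # for mu = (2.0, 0.5) and parametrized_expression_code = "mu[0]*x[0] + mu[1]",
    # the code string becomes "mu_0*x[0] + mu_1"; the values 2.0 and 0.5 are
    # later passed to Expression as the keyword arguments mu_0 and mu_1 (see
    # mu_dict below).
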
    # Detect mesh
    if "domain" in kwargs:
        mesh = kwargs["domain"]
    else:
        mesh = truth_problem.V.mesh()

    # Prepare a dictionary of mu
    mu_dict = dict()
    for (p, mu_p) in enumerate(mu):
        assert isinstance(mu_p, (Expression, Number))
        if isinstance(mu_p, Number):
            mu_dict["mu_" + str(p)] = mu_p
        elif isinstance(mu_p, Expression):
            assert is_parametrized_constant(mu_p)
            mu_dict["mu_" + str(p)] = parametrized_constant_to_float(
                mu_p, point=mesh.coordinates()[0])
    del kwargs["mu"]
    kwargs.update(mu_dict)

    # Initialize expression
    expression = Expression(parametrized_expression_code, *args, **kwargs)
    expression._mu = mu  # to avoid repeated assignments
    expression.problem = truth_problem

    # Store mesh
    expression._mesh = mesh

    # Cache all problem -> expression relations
    first_parametrized_expression_for_truth_problem = (
        truth_problem not in _truth_problem_to_parametrized_expressions)
    if first_parametrized_expression_for_truth_problem:
        _truth_problem_to_parametrized_expressions[truth_problem] = list()
    _truth_problem_to_parametrized_expressions[truth_problem].append(
        expression)

    # Keep mu in sync
    if first_parametrized_expression_for_truth_problem:

        def generate_overridden_set_mu(standard_set_mu):
            def overridden_set_mu(self, mu):
                standard_set_mu(mu)
                for expression_ in _truth_problem_to_parametrized_expressions[
                        self]:
                    if expression_._mu is not mu:
                        expression_._set_mu(mu)

            return overridden_set_mu

        if (
                "set_mu" in _original_setters
                and truth_problem in _original_setters["set_mu"]
        ):  # truth_problem.set_mu was already patched by the decorator @sync_setters
            standard_set_mu = _original_setters["set_mu"][truth_problem]
            overridden_set_mu = generate_overridden_set_mu(standard_set_mu)
            _original_setters["set_mu"][truth_problem] = types.MethodType(
                overridden_set_mu, truth_problem)
        else:
            standard_set_mu = truth_problem.set_mu
            overridden_set_mu = generate_overridden_set_mu(standard_set_mu)
            PatchInstanceMethod(truth_problem, "set_mu",
                                overridden_set_mu).patch()

    def expression_set_mu(self, mu):
        assert isinstance(mu, tuple)
        assert len(mu) >= len(self._mu)
        mu = mu[:len(self._mu)]
        for (p, mu_p) in enumerate(mu):
            assert isinstance(mu_p, (Expression, Number))
            if isinstance(mu_p, Number):
                setattr(self, "mu_" + str(p), mu_p)
            elif isinstance(mu_p, Expression):
                assert is_parametrized_constant(mu_p)
                setattr(
                    self, "mu_" + str(p),
                    parametrized_constant_to_float(
                        mu_p, point=mesh.coordinates()[0]))
        self._mu = mu

    AttachInstanceMethod(expression, "_set_mu", expression_set_mu).attach()
    # Note that this override is different from the one that we use in decorated problems,
    # since (1) we do not want to define a new child class, (2) we have to execute some preprocessing
    # on the data, (3) it is a one-way propagation rather than a sync.
    # For these reasons, the decorator @sync_setters is not used here; instead, part of its code is duplicated.

    # Possibly also keep time in sync
    if hasattr(truth_problem, "set_time"):
        if first_parametrized_expression_for_truth_problem:

            def generate_overridden_set_time(standard_set_time):
                def overridden_set_time(self, t):
                    standard_set_time(t)
                    for expression_ in _truth_problem_to_parametrized_expressions[
                            self]:
                        if hasattr(expression_, "t"):
                            if expression_.t is not t:
                                assert isinstance(expression_.t, Number)
                                expression_.t = t

                return overridden_set_time

            if (
                    "set_time" in _original_setters
                    and truth_problem in _original_setters["set_time"]
            ):  # truth_problem.set_time was already patched by the decorator @sync_setters
                standard_set_time = _original_setters["set_time"][
                    truth_problem]
                overridden_set_time = generate_overridden_set_time(
                    standard_set_time)
                _original_setters["set_time"][
                    truth_problem] = types.MethodType(overridden_set_time,
                                                      truth_problem)
            else:
                standard_set_time = truth_problem.set_time
                overridden_set_time = generate_overridden_set_time(
                    standard_set_time)
                PatchInstanceMethod(truth_problem, "set_time",
                                    overridden_set_time).patch()

    return expression
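
# Minimal usage sketch of ParametrizedExpression (the problem object is
# hypothetical; the rbnics-specific setup is omitted):
#
#     theta = ParametrizedExpression(problem, "mu[0] * sin(mu[1] * x[0])",
#                                    mu=(1.0, 3.14), degree=2)
#
# After problem.set_mu((2.0, 1.57)), the attributes theta.mu_0 and theta.mu_1
# are updated automatically through the patched set_mu / _set_mu above.
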
def test_unsteady_stokes():
    nx, ny = 15, 15
    k = 1
    nu = Constant(1.0e-0)
    dt = Constant(2.5e-2)
    num_steps = 20
    theta0 = 1.0  # theta value used for the first time step
    theta1 = 0.5  # theta value used from the second step onwards
    theta = Constant(theta0)

    mesh = UnitSquareMesh(nx, ny)

    # The 'unsteady' version of the benchmark in the 2012 paper by Labeur & Wells
    u_exact = Expression(
        (
            "sin(t) * x[0]*x[0]*(1.0 - x[0])*(1.0 - x[0])*(2.0*x[1] \
                           -6.0*x[1]*x[1] + 4.0*x[1]*x[1]*x[1])",
            "-sin(t)* x[1]*x[1]*(1.0 - x[1])*(1.0 - x[1])*(2.0*x[0] \
                           - 6.0*x[0]*x[0] + 4.0*x[0]*x[0]*x[0])",
        ),
        t=0,
        degree=7,
        domain=mesh,
    )
    p_exact = Expression("sin(t) * x[0]*(1.0 - x[0])",
                         t=0,
                         degree=7,
                         domain=mesh)
    du_exact = Expression(
        (
            "cos(t) * x[0]*x[0]*(1.0 - x[0])*(1.0 - x[0])*(2.0*x[1] \
                            - 6.0*x[1]*x[1] + 4.0*x[1]*x[1]*x[1])",
            "-cos(t)* x[1]*x[1]*(1.0 - x[1])*(1.0 - x[1])*(2.0*x[0] \
                            -6.0*x[0]*x[0] + 4.0*x[0]*x[0]*x[0])",
        ),
        t=0,
        degree=7,
        domain=mesh,
    )

    ux_exact = Expression(
        (
            "x[0]*x[0]*(1.0 - x[0])*(1.0 - x[0])*(2.0*x[1] \
                            - 6.0*x[1]*x[1] + 4.0*x[1]*x[1]*x[1])",
            "-x[1]*x[1]*(1.0 - x[1])*(1.0 - x[1])*(2.0*x[0] \
                            - 6.0*x[0]*x[0] + 4.0*x[0]*x[0]*x[0])",
        ),
        degree=7,
        domain=mesh,
    )

    px_exact = Expression("x[0]*(1.0 - x[0])", degree=7, domain=mesh)

    sin_ext = Expression("sin(t)", t=0, degree=7, domain=mesh)

    f = du_exact + sin_ext * div(px_exact * Identity(2) -
                                 2 * sym(grad(ux_exact)))

    Vhigh = VectorFunctionSpace(mesh, "DG", 7)
    Phigh = FunctionSpace(mesh, "DG", 7)

    # New syntax:
    V = VectorElement("DG", mesh.ufl_cell(), k)
    Q = FiniteElement("DG", mesh.ufl_cell(), k - 1)
    Vbar = VectorElement("DGT", mesh.ufl_cell(), k)
    Qbar = FiniteElement("DGT", mesh.ufl_cell(), k)

    mixedL = FunctionSpace(mesh, MixedElement([V, Q]))
    mixedG = FunctionSpace(mesh, MixedElement([Vbar, Qbar]))
    V2 = FunctionSpace(mesh, V)

    Uh = Function(mixedL)
    Uhbar = Function(mixedG)
    U0 = Function(mixedL)
    Uhbar0 = Function(mixedG)
    u0, p0 = split(U0)
    ubar0, pbar0 = split(Uhbar0)
    ustar = Function(V2)

    # Then the boundary conditions
    bc0 = DirichletBC(mixedG.sub(0), Constant((0, 0)), Gamma)
    bc1 = DirichletBC(mixedG.sub(1), Constant(0), Corner, "pointwise")
    bcs = [bc0, bc1]

    alpha = Constant(6 * k * k)
    forms_stokes = FormsStokes(mesh, mixedL, mixedG,
                               alpha).forms_unsteady(ustar, dt, nu, f)
    ssc = StokesStaticCondensation(
        mesh,
        forms_stokes["A_S"],
        forms_stokes["G_S"],
        forms_stokes["G_ST"],
        forms_stokes["B_S"],
        forms_stokes["Q_S"],
        forms_stokes["S_S"],
    )

    t = 0.0
    for step in range(1, num_steps + 1):
        t += float(dt)
        if comm.Get_rank() == 0:
            print("Step " + str(step) + " Time " + str(t))

        # Set time level in exact solution
        u_exact.t = t
        p_exact.t = t

        du_exact.t = t - (1 - float(theta)) * float(dt)
        sin_ext.t = t - (1 - float(theta)) * float(dt)

        ssc.assemble_global_lhs()
        ssc.assemble_global_rhs()
        for bc in bcs:
            ssc.apply_boundary(bc)

        ssc.solve_problem(Uhbar, Uh, "none", "default")
        assign(U0, Uh)
        assign(ustar, U0.sub(0))
        assign(Uhbar0, Uhbar)
        if step == 1:
            theta.assign(theta1)

        udiv_e = sqrt(assemble(div(Uh.sub(0)) * div(Uh.sub(0)) * dx))

    u_ex_h = interpolate(u_exact, Vhigh)
    p_ex_h = interpolate(p_exact, Phigh)

    u_error = sqrt(assemble(dot(Uh.sub(0) - u_ex_h, Uh.sub(0) - u_ex_h) * dx))
    p_error = sqrt(assemble(dot(Uh.sub(1) - p_ex_h, Uh.sub(1) - p_ex_h) * dx))

    assert udiv_e < 1e-12
    assert u_error < 1.5e-4
    assert p_error < 1e-2
def resume_env(plot=False,  # To plot results (Field, controls, lift, drag, rec area) during training
               dump_vtu=100,  # If not False, create vtu files of area, velocity, pressure, every 'dump_vtu' steps
               dump_debug=100,  # If not False, output step info of ep,step,rec_area,L,D,jets Q* to saved_models/debug.csv, every 'dump_debug' steps
               dump_CL=100,  # If not False, output step info of ep,step,rec_area,L,D,jets Q* to command line, every 'dump_CL' steps
               remesh=False,
               random_start=False,
               single_run=False):

    # ---------------------------------------------------------------------------------
    # the configuration version number 1

    simulation_duration = 50.0  # Duration of the simulation in seconds
    dt = 0.004

    root = 'mesh/turek_2d'  # Root of geometry file path
    if(not os.path.exists('mesh')):
        os.mkdir('mesh')

    geometry_params = {'output': '.'.join([root, 'geo']),  # mesh/turek_2d.geo // relative output path of geometry file according to geo params
                    'template': '../geometry_2d.template_geo',  # relative path of geometry file template
                    'clscale': 1,  # mesh size scaling ratio (all mesh characteristic lengths of geometry file scaled by this factor)
                    'remesh': remesh,  # remesh toggle (from resume_env args)
                    'jets_toggle': 1,  # toggle Jets --> 0 : No jets, 1: Yes jets
                    'jet_width': 0.1,  # Jet Width
                    'height_cylinder': 1,  # Cylinder Height
                    'ar': 1.0,  # Cylinder Aspect Ratio
                    'cylinder_y_shift': 0,  # Cylinder Center Shift from Centreline, Positive UP
                    'x_upstream': 20,  # Domain Upstream Length (from left-most rect point)
                    'x_downstream': 26,  # Domain Downstream Length (from right-most rect point)
                    'height_domain': 25,  # Domain Height
                    'mesh_size_cylinder': 0.05,  # Mesh Size on Cylinder Walls
                    'mesh_size_jets': 0.01,  # Mesh size on jet boundaries
                    'mesh_size_medium': 0.3,  # Medium mesh size (at boundary where coarsening starts)
                    'mesh_size_coarse': 1,  # Coarse mesh Size Close to Domain boundaries outside wake
                    'coarse_y_distance_top_bot': 4,  # y-distance from center where mesh coarsening starts
                    'coarse_x_distance_left_from_LE': 2.5}  # x-distance from upstream face where mesh coarsening starts

    profile = Expression(('1', '0'), degree=2)  # Inflow profile (defined as FEniCS expression)

    flow_params = {'mu': 1E-2,  # Dynamic viscosity. This in turn defines the Reynolds number: Re = U * D / mu
                  'rho': 1,  # Density
                  'inflow_profile': profile}  # flow_params['inflow_profile'] stores a reference to the profile function

    solver_params = {'dt': dt}

    # Define probes positions
    probe_distribution = {'distribution_type': 'rabault241',
                          'probes_at_jets': False,  # Whether to use probes at jets or not (for distributions other than 'rabault151')
                          'n_base': 8}  # Number of probes at cylinder base if 'base' distribution is used

    list_position_probes = probe_positions(probe_distribution, geometry_params)

    output_params = {'locations': list_position_probes,  # List of (x,y) np arrays with probe positions
                     'probe_type': 'pressure'  # Set quantity measured by probes (pressure/velocity)
                     }

    optimization_params = {"num_steps_in_pressure_history": 1,  # Number of steps that constitute an environment state (state shape = this * len(locations))
                        "min_value_jet_MFR": -0.1,  # Set min and max Q* for weak actuation
                        "max_value_jet_MFR": 0.1,
                        "smooth_control": 0.1,  # parameter alpha to smooth out control
                        "zero_net_Qs": True,  # True for Q1 + Q2 = 0
                        "random_start": random_start}

    inspection_params = {"plot": plot,
                        "dump_vtu": dump_vtu,
                        "dump_debug": dump_debug,
                        "dump_CL": dump_CL,
                        "range_pressure_plot": [-2.0, 1],   # ylim for pressure dynamic plot
                        "range_drag_plot": [-0.175, -0.13],  # ylim for drag dynamic plot
                        "range_lift_plot": [-0.2, +0.2],  # ylim for lift dynamic plot
                        "line_drag": -0.7282,  # Mean drag without control
                        "line_lift": 0,   # Mean lift without control
                        "show_all_at_reset": False,
                        "single_run":single_run
                        }

    reward_function = 'drag_plain_lift'

    verbose = 0  # For detailed output (see Env2DCylinder)

    number_steps_execution = int((simulation_duration/dt)/nb_actuations)  # Duration in timesteps of action interval (Number of numerical timesteps over which NN action is kept constant, control being interpolated)

    # ---------------------------------------------------------------------------------
    # do the initialization

    # If remesh = True, we simulate with no control until a well-developed unsteady wake is obtained. That state is
    # saved and used as the starting point for each subsequent learning episode.

    # If so, set the value of n-iter (no. iterations to calculate converged initial state)
    if(remesh):
        n_iter = int(200.0 / dt)
        if (os.path.exists('mesh')):
            shutil.rmtree('mesh')  # If previous mesh directory exists, we delete it
        os.mkdir('mesh')  # Create new empty mesh directory
        print("Make converge initial state for {} iterations".format(n_iter))
    else:
        n_iter = None


    # Processing the name of the simulation (to be used in outputs)
    simu_name = 'Simu'


    if geometry_params["ar"] != 1:
        next_param = 'AR' + str(geometry_params["ar"])
        simu_name = '_'.join([simu_name, next_param])  # e.g. if ar = 2.0 --> simu_name += '_AR2.0'
    if optimization_params["max_value_jet_MFR"] != 0.01:
        next_param = 'maxF' + str(optimization_params["max_value_jet_MFR"])[2:]
        simu_name = '_'.join([simu_name, next_param])  # e.g. if max_MFR = 0.09 --> simu_name += '_maxF09'
    if nb_actuations != 80:
        next_param = 'NbAct' + str(nb_actuations)
        simu_name = '_'.join([simu_name, next_param])  # e.g. if nb_actuations = 100 --> simu_name += '_NbAct100'

    next_param = 'drag'
    if reward_function == 'recirculation_area':
        next_param = 'area'
    if reward_function == 'max_recirculation_area':
        next_param = 'max_area'
    elif reward_function == 'drag':
        next_param = 'last_drag'
    elif reward_function == 'max_plain_drag':
        next_param = 'max_plain_drag'
    elif reward_function == 'drag_plain_lift':
        next_param = 'lift'
    elif reward_function == 'drag_avg_abs_lift':
        next_param = 'avgAbsLift'
    simu_name = '_'.join([simu_name, next_param])

    # Pass parameters to the Environment class
    env_2d_cylinder = Env2DCylinder(path_root=root,
                                    geometry_params=geometry_params,
                                    flow_params=flow_params,
                                    solver_params=solver_params,
                                    output_params=output_params,
                                    optimization_params=optimization_params,
                                    inspection_params=inspection_params,
                                    n_iter_make_ready=n_iter,
                                    verbose=verbose,
                                    reward_function=reward_function,
                                    number_steps_execution=number_steps_execution,
                                    simu_name = simu_name)

    return(env_2d_cylinder)  # resume_env() returns instance of Environment object
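
# Typical use (sketch; the argument values here are arbitrary):
#
#     env = resume_env(plot=False, dump_CL=100, single_run=True)
#
# The returned Env2DCylinder instance is then driven by the training loop,
# or by a single evaluation run when single_run=True.
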
mesh = UnitSquareMesh(200, 200)
# mesh = create_dolfin_mesh(*meshzoo.triangle(1500, corners=[[0, 0], [1, 0], [0, 1]]))


V = FunctionSpace(mesh, "CG", 1)

u = TrialFunction(V)
v = TestFunction(V)

n = FacetNormal(mesh)
A = assemble(dot(grad(u), grad(v)) * dx - dot(n, grad(u)) * v * ds)
M = assemble(u * v * dx)

f = Expression("sin(pi * x[0]) * sin(pi * x[1])", element=V.ufl_element())
x = project(f, V)

Ax = A * x.vector()
Minv_Ax = Function(V).vector()
solve(M, Minv_Ax, Ax)
val = Ax.inner(Minv_Ax)

print(val)


# Exact value
x = sympy.Symbol("x")
y = sympy.Symbol("y")
f = sympy.sin(sympy.pi * x) * sympy.sin(sympy.pi * y)
f2 = sympy.diff(f, x, x) + sympy.diff(f, y, y)
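
# Presumably the truncated tail computed the exact reference value: val above
# approximates the quadratic form (Ax)^T M^{-1} (Ax) ~ integral of (Delta f)^2
# over the unit square, which for f = sin(pi x) sin(pi y) equals pi^4 ~ 97.409.
# A minimal completion under that assumption:
exact = sympy.integrate(f2**2, (x, 0, 1), (y, 0, 1))
print(float(exact))  # pi**4, to compare against val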
Example #30
def test_interpolation_jit_rank1(W):
    f = Expression(("1.0", "1.0", "1.0"), degree=0)
    w = interpolate(f, W)
    x = w.vector()
    assert abs(x.get_local()).max() == 1
    assert abs(x.get_local()).min() == 1