    def variational_forms(self, dt: df.Constant) -> Tuple[Any, Any]:
        """Create the variational forms corresponding to the given
        discretization of the given system of equations.

        *Arguments*
          dt (:py:class:`ufl.Expr` or float)
            The time step

        *Returns*
          (lhs, rhs) (:py:class:`tuple` of :py:class:`ufl.Form`)

        """
        # Extract theta parameter and conductivities
        theta = self._parameters.theta
        Mi = self._intracellular_conductivity
        Me = self._extracellular_conductivity

        # Define variational formulation
        if self._parameters.linear_solver_type == "direct":
            v, u, multiplier = df.TrialFunctions(self._VUR)
            v_test, u_test, multiplier_test = df.TestFunctions(self._VUR)
        else:
            v, u = df.TrialFunctions(self._VUR)
            v_test, u_test = df.TestFunctions(self._VUR)

        Dt_v = (v - self._v_prev) / dt
        Dt_v *= self._chi_cm  # Chi is the surface-to-volume ratio; Cm is the membrane capacitance
        v_mid = theta * v + (1.0 - theta) * self._v_prev

        # Set up measures and the rhs contribution from the stimulus
        dOmega = df.Measure("dx",
                            domain=self._mesh,
                            subdomain_data=self._cell_function)
        dGamma = df.Measure("ds",
                            domain=self._mesh,
                            subdomain_data=self._interface_function)

        # Loop over all domains
        G = Dt_v * v_test * dOmega()
        for key in self._cell_tags - self._restrict_tags:
            G += df.inner(Mi[key] * df.grad(v_mid),
                          df.grad(v_test)) * dOmega(key)
            G += df.inner(Mi[key] * df.grad(v_mid),
                          df.grad(u_test)) * dOmega(key)

        for key in self._cell_tags:
            G += df.inner(Mi[key] * df.grad(u), df.grad(v_test)) * dOmega(key)
            G += df.inner((Mi[key] + Me[key]) * df.grad(u),
                          df.grad(u_test)) * dOmega(key)
            # If using a Lagrange multiplier
            if self._parameters.linear_solver_type == "direct":
                G += (multiplier_test * u + multiplier * u_test) * dOmega(key)

        for key in set(self._interface_tags):
            # Default to 0 if not defined for tag
            G += self._neumann_bc.get(key,
                                      df.Constant(0)) * u_test * dGamma(key)

        a, L = df.system(G)
        return a, L
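# A minimal usage sketch (an assumption, not part of the original class): the (a, L)
# pair returned above can be wrapped in a LinearVariationalProblem and re-solved at
# every time step, as the `step` method further below does. `solver_obj` is a
# hypothetical instance of the class this method belongs to; the end time is illustrative.
import dolfin as df

dt = df.Constant(0.1)
a, L = solver_obj.variational_forms(dt)
vur = df.Function(solver_obj._VUR)
problem = df.LinearVariationalProblem(a, L, vur)
solver = df.LinearVariationalSolver(problem)
t = 0.0
while t < 1.0:
    solver.solve()
    t += float(dt)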
Example #2
def computeVelocityField(mesh):
    Xh = dl.VectorFunctionSpace(mesh, 'Lagrange', 2)
    Wh = dl.FunctionSpace(mesh, 'Lagrange', 1)
    XW = dl.MixedFunctionSpace([Xh, Wh])

    Re = 1e2

    g = dl.Expression(('0.0', '(x[0] < 1e-14) - (x[0] > 1 - 1e-14)'))
    bc1 = dl.DirichletBC(XW.sub(0), g, v_boundary)
    bc2 = dl.DirichletBC(XW.sub(1), dl.Constant(0), q_boundary, 'pointwise')
    bcs = [bc1, bc2]

    vq = dl.Function(XW)
    (v, q) = dl.split(vq)
    (v_test, q_test) = dl.TestFunctions(XW)

    def strain(v):
        return dl.sym(dl.nabla_grad(v))

    F = ((2. / Re) * dl.inner(strain(v), strain(v_test)) +
         dl.inner(dl.nabla_grad(v) * v, v_test) - (q * dl.div(v_test)) +
         (dl.div(v) * q_test)) * dl.dx

    dl.solve(F == 0,
             vq,
             bcs,
             solver_parameters={
                 "newton_solver": {
                     "relative_tolerance": 1e-4,
                     "maximum_iterations": 100
                 }
             })

    return v
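# Note (assumption): the snippet above references `v_boundary` and `q_boundary`
# without defining them. A minimal sketch of the assumed markers, mirroring the
# definitions used in the similar example further below: velocity Dirichlet data on
# the whole boundary, pressure pinned pointwise at the origin.
def v_boundary(x, on_boundary):
    return on_boundary

def q_boundary(x, on_boundary):
    return x[0] < dl.DOLFIN_EPS and x[1] < dl.DOLFIN_EPS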
Example #3
    def do_one_L_problem(self, i_vk, C_VK_data=0.1):
        """Solve the L equation for one (K,V) position using the "mixed" methodology,
        so that we get on-grid solutions of both `f` and `df/dL`. This
        requires that we use a 2nd-degree element to solve for the `df/dL`
        vector, called `sigma` here.

        """
        W = d.FunctionSpace(self.l_mesh, self.c12.l_element * self.c11.l_element)

        D = d.Function(self.c12.l_space) # note: W.sub(0) does not work
        C_VK = d.Function(self.c11.l_space)

        sigma, u = d.TrialFunctions(W) # u is my 'f', sigma is df/dL
        tau, v = d.TestFunctions(W)
        soln = d.Function(W)

        bc = [d.DirichletBC(W.sub(1), d.Constant(0), direct_boundary)]

        a = (u * tau.dx(0) + d.dot(sigma, tau) + D * sigma * v.dx(0)) * d.dx
        L = C_VK * v * d.dx
        equation = (a == L)

        # typical D value: 1e-23
        ddata = np.array(self.c12.D_LL[:,i_vk])
        #print('D:', ddata.min(), ddata.max(), ddata.mean(), np.sqrt((ddata**2).mean()))
        D.vector()[:] = ddata

        C_VK.vector()[:] = C_VK_data
        C_VK.vector()[self.i_lmax] += self.c11.source_term[:,i_vk]

        d.solve(equation, soln, bc)

        sigma, u = soln.split(deepcopy=True)
        return self.c11.l_sort(), self.c11.l_sort(u), self.c12.l_sort(), self.c12.l_sort(sigma)
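# A minimal sketch (an assumption, not from the original class) of the element pairing
# the docstring above describes: a higher-degree element for the derivative `sigma`
# mixed with the base element for `f`. The mesh size and element families are illustrative.
import dolfin as d

l_mesh = d.UnitIntervalMesh(64)
el_f = d.FiniteElement("CG", l_mesh.ufl_cell(), 1)       # plays the role of c11.l_element
el_sigma = d.FiniteElement("CG", l_mesh.ufl_cell(), 2)   # plays the role of c12.l_element
W = d.FunctionSpace(l_mesh, el_sigma * el_f)
sigma, u = d.TrialFunctions(W)
tau, v = d.TestFunctions(W)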
Example #4
    def initial_guess(self):
        r"""
        Obtains an initial guess for the Newton solver.

        This is done by solving the equation of motion for :math:`\lambda=0`, which forms a linear system.
        All other parameters are unchanged.

        """

        # cast params as constant functions so that, if they are set to 0,
        # FEniCS still understands what it is integrating
        m, M, Mp = Constant(self.fields.m), Constant(self.fields.M), Constant(
            self.fields.Mp)
        alpha = Constant(self.fields.alpha)
        Mn, Mf1, Mf2 = Constant(self.Mn), Constant(self.Mf1), Constant(
            self.Mf2)

        # get the boundary conditions
        Dirichlet_bc = self.get_Dirichlet_bc()

        # create a vector (phi,h) with the two trial functions for the fields
        u = d.TrialFunction(self.V)
        # ... and split it into phi and h
        phi, h, y, z = d.split(u)

        # define test functions over the function space
        v1, v2, v3, v4 = d.TestFunctions(self.V)

        # r^2
        r2 = Expression('pow(x[0],2)', degree=self.fem.func_degree)

        # define bilinear form
        # Eq.1
        a1 = y * v1 * r2 * dx - ( m / Mn )**2 * phi * v1 * r2 * dx \
             - alpha * ( Mf2/Mf1 ) * z * v1 * r2 * dx

        # Eq.2
        a2 = z * v2 * r2 * dx - ( M / Mn )**2 * h * v2 * r2 * dx \
             - alpha * ( Mf1/Mf2 ) * y * v2 * r2 * dx

        a3 = -inner(grad(phi), grad(v3)) * r2 * dx - y * v3 * r2 * dx

        a4 = -inner(grad(h), grad(v4)) * r2 * dx - z * v4 * r2 * dx

        # both equations
        a = a1 + a2 + a3 + a4

        # define linear form
        L = self.source.rho / Mp * Mn / Mf1 * v1 * r2 * dx

        # define a vector with the solution
        sol = d.Function(self.V)

        # solve linearised system
        pde = d.LinearVariationalProblem(a, L, sol, Dirichlet_bc)
        solver = d.LinearVariationalSolver(pde)
        solver.solve()

        return sol
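# A minimal sketch (an assumption, not from the original solver) of how the linear
# initial guess above could seed the Newton iteration: copy it into the nonlinear
# unknown before the nonlinear solve. `solver`, `F_nonlinear` and `bcs` are
# hypothetical names standing in for the surrounding class's members.
u = d.Function(solver.V)
u.assign(solver.initial_guess())
d.solve(F_nonlinear == 0, u, bcs)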
    def create(self):
        self.metadata = {
            "quadrature_degree": self.deg_q,
            "quadrature_scheme": "default",
        }
        self.dxm = df.dx(metadata=self.metadata,
                         subdomain_data=self.mesh_function)

        # solution field
        Ed = df.VectorElement("CG", self.mesh.ufl_cell(), degree=self.deg_d)
        Ee = df.FiniteElement("CG", self.mesh.ufl_cell(), degree=self.deg_d)

        self.V = df.FunctionSpace(self.mesh, Ed * Ee)
        self.Vd, self.Ve = self.V.split()

        self.dd, self.de = df.TrialFunctions(self.V)
        self.d_, self.e_ = df.TestFunctions(self.V)

        self.u = df.Function(self.V, name="d-e mixed space")
        self.d, self.e = df.split(self.u)

        # generic quadrature function spaces
        VQF, VQV, VQT = c.helper.spaces(self.mesh, self.deg_q,
                                        c.q_dim(self.constraint))

        # quadrature functions
        Q = c.Q
        # inputs to the model
        self.q_in = OrderedDict()
        self.q_in[Q.EPS] = df.Function(VQV, name="current strains")
        self.q_in[Q.E] = df.Function(
            VQF, name="current nonlocal equivalent strains")

        self.q_in_calc = {}
        self.q_in_calc[Q.EPS] = c.helper.LocalProjector(
            self.eps(self.d), VQV, self.dxm)
        self.q_in_calc[Q.E] = c.helper.LocalProjector(self.e, VQF, self.dxm)

        # outputs of the model
        self.q = {}
        self.q[Q.SIGMA] = df.Function(VQV, name="current stresses")
        self.q[Q.DSIGMA_DEPS] = df.Function(VQT, name="stress-strain tangent")
        self.q[Q.DSIGMA_DE] = df.Function(
            VQV, name="stress-nonlocal-strain tangent")
        self.q[Q.EEQ] = df.Function(VQF,
                                    name="current (local) equivalent strain")
        self.q[Q.DEEQ] = df.Function(VQV,
                                     name="equivalent-strain-strain tangent")

        self.q_history = {
            Q.KAPPA: df.Function(VQF, name="current history variable kappa")
        }

        self.n = len(self.q[Q.SIGMA].vector().get_local()) // c.q_dim(
            self.constraint)
        self.nq = self.n // self.mesh.num_cells()
        self.ip_flags = None
        if self.mesh_function is not None:
            self.ip_flags = np.repeat(self.mesh_function.array(), self.nq)
    def __init__(self, mesh, Vh_STATE, x0):
        """
        Constructor.
        INPUTS:
        
        - mesh: the mesh
        - Vh_STATE: the finite element space for the state variable
        - x0: location at which we want to compute the jet-thickness
        """

        Vh_help = dl.FunctionSpace(mesh, "CG", 1)
        xfun = dl.interpolate(dl.Expression("x[0]", degree=1), Vh_help)
        x_coord = xfun.vector().gather_on_zero()

        mpi_comm = mesh.mpi_comm()
        rank = dl.MPI.rank(mpi_comm)
        nproc = dl.MPI.size(mpi_comm)

        # round x0 so that it is aligned with the mesh
        if nproc > 1:
            from mpi4py import MPI
            comm = MPI.COMM_WORLD
            if rank == 0:
                idx = (np.abs(x_coord - x0)).argmin()
                self.x0 = x_coord[idx]
            else:
                self.x0 = None

            self.x0 = comm.bcast(self.x0, root=0)
        else:
            idx = (np.abs(x_coord - x0)).argmin()
            self.x0 = x_coord[idx]

        line_segment = dl.AutoSubDomain(lambda x: dl.near(x[0], self.x0))
        markers_f = dl.FacetFunction("size_t", mesh)
        markers_f.set_all(0)
        line_segment.mark(markers_f, 1)
        dS = dl.dS[markers_f]

        x_test = dl.TestFunctions(Vh_STATE)
        u_test = x_test[0]

        e1 = dl.Constant(("1.", "0."))

        self.int_u = dl.assemble(dl.avg(dl.dot(u_test, e1)) * dS(1))
        #self.u_cl = dl.assemble( dl.dot(u_test,e1)*dP(1) )

        self.u_cl = dl.Function(Vh_STATE).vector()
        ps = dl.PointSource(Vh_STATE.sub(0).sub(0), dl.Point(self.x0, 0.), 1.)
        ps.apply(self.u_cl)

        scaling = self.u_cl.sum()
        if np.abs(scaling - 1.) > 1e-6:
            print(scaling)
            raise ValueError()

        self.state = dl.Function(Vh_STATE).vector()
        self.help = dl.Function(Vh_STATE).vector()
Example #7
    def weak_residual_form(self, sol):
        r"""
        Computes the residual with respect to the weak form of the equations:

        .. math:: F = F_1 + F_2 + F_3
        
        with
        
        .. math:: F_1(\hat{\pi},\hat{W},\hat{Y}) & = - \int\hat{\nabla}\hat{\pi}\hat{\nabla}v_1 \hat{r}^2 d\hat{r}
                  - \int \hat{Y} v_1 \hat{r}^2 d\hat{r} \\

                  F_2(\hat{\pi},\hat{W},\hat{Y}) & = \int \hat{W} v_2 \hat{r}^2 d\hat{r} 
                  - \int \hat{Y}^n v_2 \hat{r}^2 d\hat{r} \\

                  F_3(\hat{\pi},\hat{W},\hat{Y}) & = \int \hat{Y} v_3 \hat{r}^2 d\hat{r} 
                  - \int \left( \frac{m}{M_n} \right)^2 \hat{\pi} v_3 \hat{r}^2 d\hat{r} + 

                  & + \epsilon \left( \frac{M_n}{\Lambda} \right)^{3n-1}
                  \left(\frac{M_{f1}}{M_n}\right)^{n-1} \int\hat{\nabla} \hat{W} \hat{\nabla} v_3 \hat{r}^2 d\hat{r} 
                  - \int \frac{\hat{\rho}}{M_p}\frac{M_n}{M_{f1}} v_3 \hat{r}^2 d\hat{r}

        The weak residual is employed within :func:`solver.Solver.solve` to check convergence -
        also see :func:`solver.Solver.compute_errors`.

        *Parameters*
            sol
                the solution with respect to which the weak residual is computed.


        """

        # cast params as constant functions so that, if they are set to 0,
        # FEniCS still understands what is being integrated
        m, Lambda, Mp = Constant(self.fields.m), Constant(
            self.fields.Lambda), Constant(self.fields.Mp)
        n = self.fields.n
        epsilon = Constant(self.fields.epsilon)
        Mn, Mf1 = Constant(self.Mn), Constant(self.Mf1)

        # test functions
        v1, v2, v3 = d.TestFunctions(self.V)

        # split solution into pi, w, y
        pi, w, y = d.split(sol)

        # r^2
        r2 = Expression('pow(x[0],2)', degree=self.fem.func_degree)

        # define the weak residual form
        F1 = -inner(grad(pi), grad(v1)) * r2 * dx - y * v1 * r2 * dx
        F2 = w * v2 * r2 * dx - y**n * v2 * r2 * dx
        F3 = y * v3 * r2 * dx - (m/Mn)**2 * pi * v3 * r2 * dx + \
             epsilon * ( Mn / Lambda )**(3*n-1) * ( Mf1 / Mn )**(n-1) * inner( grad(w), grad(v3) ) * r2 * dx \
             - self.source.rho / Mp * Mn / Mf1 * v3 * r2 * dx

        F = F1 + F2 + F3

        return F
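# A minimal sketch (an assumption, not from the original module) of the convergence
# check the docstring above mentions: assemble the weak residual at the current
# solution and compare its norm with a tolerance. `solver`, `sol` and `tol` are
# hypothetical names.
F = solver.weak_residual_form(sol)
res_vec = d.assemble(F)               # rank-1 form: assembles to a vector
converged = res_vec.norm('l2') < tol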
Example #8
    def setup(self, DG3, m, Ms, unit_length=1.0):
        self.DG3 = DG3
        self.m = m
        self.Ms = Ms
        self.unit_length = unit_length

        mesh = DG3.mesh()
        self.mesh = mesh

        DG = df.FunctionSpace(mesh, "DG", 0)
        BDM = df.FunctionSpace(mesh, "BDM", 1)

        # deal with three components simultaneously; each represents a vector
        W1 = df.MixedFunctionSpace([BDM, BDM, BDM])
        (sigma0, sigma1, sigma2) = df.TrialFunctions(W1)
        (tau0, tau1, tau2) = df.TestFunctions(W1)

        W2 = df.MixedFunctionSpace([DG, DG, DG])
        (u0, u1, u2) = df.TrialFunctions(W2)
        (v0, v1, v2) = df.TestFunctions(W2)

        # what we need is A x = K1 m
        a0 = (df.dot(sigma0, tau0) + df.dot(sigma1, tau1) +
              df.dot(sigma2, tau2)) * df.dx
        self.A = df.assemble(a0)

        a1 = -(df.div(tau0) * u0 + df.div(tau1) * u1 +
               df.div(tau2) * u2) * df.dx
        self.K1 = df.assemble(a1)

        def boundary(x, on_boundary):
            return on_boundary

        # actually, we need to apply the Neumann boundary conditions.
        # we need a tensor here
        zero = df.Constant((0, 0, 0, 0, 0, 0, 0, 0, 0))
        self.bc = df.DirichletBC(W1, zero, boundary)
        self.bc.apply(self.A)

        a2 = (df.div(sigma0) * v0 + df.div(sigma1) * v1 +
              df.div(sigma2) * v2) * df.dx
        self.K2 = df.assemble(a2)
        self.L = df.assemble((v0 + v1 + v2) * df.dx).array()

        self.mu0 = mu0
        self.exchange_factor = 2.0 * self.C / (self.mu0 * Ms *
                                               self.unit_length**2)

        self.coeff = self.exchange_factor / self.L

        # b = K m
        self.b = df.PETScVector()

        # the vector in BDM space
        self.sigma_v = df.PETScVector(self.K2.size(1))

        # to store the exchange fields
        self.H = df.PETScVector()
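    # A minimal sketch (an assumption, not part of the original class) of the solve that
    # the comments above outline ("A x = K1 m", "b = K m"): build the right-hand side
    # from the magnetisation, solve for sigma in the BDM space, and scale to obtain the
    # exchange field. The method name and the componentwise scaling are hypothetical.
    def compute_field(self):
        b = self.K1 * self.m.vector()       # b = K1 m
        self.bc.apply(b)                    # assumed: same boundary treatment as for A
        df.solve(self.A, self.sigma_v, b)   # A sigma = b
        h = self.K2 * self.sigma_v          # back to the DG space
        return self.coeff * h.get_local()   # exchange field, componentwise scaling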
Example #9
    def do_VK_problems(self, C_L_data, dest_f_11=None, dest_dfdV_11=None, dest_dfdK_11=None):
        if dest_f_11 is None:
            dest_f_11 = np.empty(self.c11.cube_shape)
        else:
            assert dest_f_11.shape == self.c11.cube_shape

        if dest_dfdV_11 is None:
            dest_dfdV_11 = np.empty(self.c11.cube_shape)
        else:
            assert dest_dfdV_11.shape == self.c11.cube_shape

        if dest_dfdK_11 is None:
            dest_dfdK_11 = np.empty(self.c11.cube_shape)
        else:
            assert dest_dfdK_11.shape == self.c11.cube_shape

        W = d.FunctionSpace(self.vk_mesh, self.c21.vk_vector_element * self.c11.vk_scalar_element)

        # We can treat the tensor data array as having shape (N, 2, 2), where
        # N is the number of elements of the equivalent scalar gridding of the
        # mesh. array[:,0,0] is element [0,0] of the tensor. array[:,0,1] is
        # the upper right element, etc.

        D = d.Function(self.c21.vk_tensor_space)
        dbuf = np.empty(D.vector().size()).reshape((-1, 2, 2))

        C_L = d.Function(self.c11.vk_scalar_space)

        sigma, u = d.TrialFunctions(W) # u is my 'f'
        tau, v = d.TestFunctions(W)

        a = (u * d.div(tau) + d.dot(sigma, tau) + d.inner(D * sigma, d.grad(v))) * d.dx
        L = C_L * v * d.dx
        equation = (a == L)
        soln = d.Function(W)

        bc = d.DirichletBC(W.sub(1), d.Constant(0), direct_boundary)

        for i_l in range(self.c21.l_coords.size):
            dbuf[:,0,0] = self.c21.D_VV[i_l]
            dbuf[:,1,0] = self.c21.D_VK[i_l]
            dbuf[:,0,1] = self.c21.D_VK[i_l]
            dbuf[:,1,1] = self.c21.D_KK[i_l]
            D.vector()[:] = dbuf.reshape((-1,))

            if i_l == self.i_lmax:
                C_L.vector()[:] = C_L_data[i_l] + self.c11.source_term
            else:
                C_L.vector()[:] = C_L_data[i_l]

            d.solve(equation, soln, bc)
            s_sigma, s_u = soln.split(deepcopy=True)
            dest_f_11[i_l] = s_u.vector().array()
            s_sigma = s_sigma.vector().array().reshape((-1, 2))
            dest_dfdV_11[i_l] = self.c21.vk_downsample(s_sigma[:,0])
            dest_dfdK_11[i_l] = self.c21.vk_downsample(s_sigma[:,1])

        return dest_f_11, dest_dfdV_11, dest_dfdK_11
Example #10
    def do_L_problems(self, C_VK_data, dest_f_11=None, dest_dfdL_11=None):
        """Solve the L equations using the "mixed" methodology, so that we get on-grid
        solutions of both `f` and `df/dL`. This requires that we use a
        2nd-degree element to solve for the `df/dL` vector, called `sigma`
        here.

        """
        if dest_f_11 is None:
            dest_f_11 = np.empty(self.c11.cube_shape)
        else:
            assert dest_f_11.shape == self.c11.cube_shape

        if dest_dfdL_11 is None:
            dest_dfdL_11 = np.empty(self.c11.cube_shape)
        else:
            assert dest_dfdL_11.shape == self.c11.cube_shape

        W = d.FunctionSpace(self.l_mesh, self.c12.l_element * self.c11.l_element)

        D = d.Function(self.c12.l_space)
        C_VK = d.Function(self.c11.l_space)

        sigma, u = d.TrialFunctions(W) # u is my 'f'
        tau, v = d.TestFunctions(W)
        soln = d.Function(W)

        bc = [d.DirichletBC(W.sub(1), d.Constant(0), direct_boundary)]

        a = (u * tau.dx(0) + d.dot(sigma, tau) + D * sigma * v.dx(0)) * d.dx
        L = C_VK * v * d.dx
        equation = (a == L)

        # assignment via `x.vector()[:]` wants contiguous arrays, so we reuse
        # preallocated buffers; not strictly required, but it saves on
        # allocations.

        buf11 = np.empty(self.c11.l_coords.shape)
        buf12 = np.empty(self.c12.l_coords.shape)

        for i_vk in range(self.c12.logv_coords.size):
            buf12[:] = self.c12.D_LL[:,i_vk]
            D.vector()[:] = buf12

            buf11[:] = C_VK_data[:,i_vk]
            buf11[self.i_lmax] += self.c11.source_term
            C_VK.vector()[:] = buf11

            d.solve(equation, soln, bc)
            s_sigma, s_u = soln.split(deepcopy=True)
            dest_f_11[:,i_vk] = s_u.vector().array()
            dest_dfdL_11[:,i_vk] = self.c12.l_downsample(s_sigma)

        return dest_f_11, dest_dfdL_11
Example #11
    def computeVelocityField(self):
        """
        The steady-state Navier-Stokes equation for velocity v:
        -1/Re laplace v + nabla q + v dot nabla v = 0  in Omega
        nabla dot v = 0                                in Omega
        v = g                                          on partial Omega
        """
        Xh = dl.VectorFunctionSpace(self.mesh, 'Lagrange', self.eldeg)
        Wh = dl.FunctionSpace(self.mesh, 'Lagrange', 1)

        mixed_element = dl.MixedElement([Xh.ufl_element(), Wh.ufl_element()])
        XW = dl.FunctionSpace(self.mesh, mixed_element)

        Re = dl.Constant(self.Re)

        def v_boundary(x, on_boundary):
            return on_boundary

        def q_boundary(x, on_boundary):
            return x[0] < dl.DOLFIN_EPS and x[1] < dl.DOLFIN_EPS

        g = dl.Expression(('0.0', '(x[0] < 1e-14) - (x[0] > 1 - 1e-14)'),
                          element=Xh.ufl_element())
        bc1 = dl.DirichletBC(XW.sub(0), g, v_boundary)
        bc2 = dl.DirichletBC(XW.sub(1), dl.Constant(0), q_boundary,
                             'pointwise')
        bcs = [bc1, bc2]

        vq = dl.Function(XW)
        (v, q) = ufl.split(vq)
        (v_test, q_test) = dl.TestFunctions(XW)

        def strain(v):
            return ufl.sym(ufl.grad(v))

        F = ((2. / Re) * ufl.inner(strain(v), strain(v_test)) +
             ufl.inner(ufl.nabla_grad(v) * v, v_test) - (q * ufl.div(v_test)) +
             (ufl.div(v) * q_test)) * ufl.dx

        dl.solve(F == 0,
                 vq,
                 bcs,
                 solver_parameters={
                     "newton_solver": {
                         "relative_tolerance": 1e-4,
                         "maximum_iterations": 100,
                         "linear_solver": "default"
                     }
                 })

        return v
Example #12
    def set_function_space(self):
        """set_function_space."""
        self.V = df.VectorFunctionSpace(self.mesh, 'P', 1, dim=self.number_of_moment)
        # Set test function(s)
        v_list = df.TestFunctions(self.V)
        # Convert to UFL form
        self.v = df.as_vector(v_list)
        # Set trial function(s)
        if self.problem_type == 'nonlinear':
            u_list = df.Function(self.V)
        elif self.problem_type == 'linear':
            u_list = df.TrialFunctions(self.V)
        # Convert to UFL form
        self.u = df.as_vector(u_list)
Example #13
    def __init__(self, Vh_STATE, Vhs, weights, geo, bcs0, datafile, variance_u,
                 variance_g):
        if hasattr(geo, "dx"):
            self.dx = geo.dx(geo.PHYSICAL)
        else:
            self.dx = dl.dx

        self.ds = geo.ds(geo.AXIS)

        x, y, U, V, uu, vv, ww, uv, k = np.loadtxt(datafile,
                                                   skiprows=2,
                                                   unpack=True)
        u_fun_data = VelocityDNS(x=x,
                                 y=y,
                                 U=U,
                                 V=V,
                                 symmetrize=True,
                                 coflow=0.)

        u_data = dl.interpolate(u_fun_data, Vhs[0])

        if Vh_STATE.num_sub_spaces() == 3:
            u_trial, p_trial, g_trial = dl.TrialFunctions(Vh_STATE)
            u_test, p_test, g_test = dl.TestFunctions(Vh_STATE)
        else:
            raise InputError()

        Wform = dl.Constant(1./variance_u)*dl.inner(u_trial, u_test)*self.dx +\
                dl.Constant(1./variance_g)*g_trial*g_test*self.ds

        self.W = dl.assemble(Wform)
        dummy = dl.Vector()
        self.W.init_vector(dummy, 0)
        [bc.zero(self.W) for bc in bcs0]
        Wt = Transpose(self.W)
        [bc.zero(Wt) for bc in bcs0]
        self.W = Transpose(Wt)

        xfun = dl.Function(Vh_STATE)
        assigner = dl.FunctionAssigner(Vh_STATE, Vhs)
        assigner.assign(xfun, [
            u_data,
            dl.Function(Vhs[1]),
            dl.interpolate(dl.Constant(1.), Vhs[2])
        ])

        self.d = xfun.vector()

        self.w = (weights * 0.5)
Example #14
    def __init__(self, Vh_STATE, Vhs, bcs0, datafile, dx=dl.dx):
        self.dx = dx
        x, y, U, V, uu, vv, ww, uv, k = np.loadtxt(datafile,
                                                   skiprows=2,
                                                   unpack=True)
        u_fun_mean = VelocityDNS(x=x,
                                 y=y,
                                 U=U,
                                 V=V,
                                 symmetrize=True,
                                 coflow=0.)
        u_fun_data = VelocityDNS(x=x,
                                 y=y,
                                 U=U,
                                 V=V,
                                 symmetrize=False,
                                 coflow=0.)
        k_fun_mean = KDNS(x=x, y=y, k=k, symmetrize=True)
        k_fun_data = KDNS(x=x, y=y, k=k, symmetrize=False)

        u_data = dl.interpolate(u_fun_data, Vhs[0])
        k_data = dl.interpolate(k_fun_data, Vhs[2])

        noise_var_u = dl.assemble(
            dl.inner(u_data - u_fun_mean, u_data - u_fun_mean) * self.dx)
        noise_var_k = dl.assemble(
            dl.inner(k_data - k_fun_mean, k_data - k_fun_mean) * self.dx)

        u_trial, p_trial, k_trial, e_trial = dl.TrialFunctions(Vh_STATE)
        u_test, p_test, k_test, e_test = dl.TestFunctions(Vh_STATE)

        Wform = dl.Constant(1./noise_var_u)*dl.inner(u_trial, u_test)*self.dx + \
                dl.Constant(1./noise_var_k)*dl.inner(k_trial, k_test)*self.dx

        self.W = dl.assemble(Wform)
        dummy = dl.Vector()
        self.W.init_vector(dummy, 0)
        [bc.zero(self.W) for bc in bcs0]
        Wt = Transpose(self.W)
        [bc.zero(Wt) for bc in bcs0]
        self.W = Transpose(Wt)

        xfun = dl.Function(Vh_STATE)
        assigner = dl.FunctionAssigner(Vh_STATE, Vhs)
        assigner.assign(
            xfun,
            [u_data, dl.Function(Vhs[1]), k_data,
             dl.Function(Vhs[3])])
        self.d = xfun.vector()
Example #15
def ode_test_form(request):
    Model = eval(request.param)
    model = Model()
    mesh = df.UnitSquareMesh(10, 10)
    V = df.FunctionSpace(mesh, "CG", 1)
    S = state_space(mesh, model.num_states())
    Mx = df.MixedElement((V.ufl_element(), S.ufl_element()))
    VS = df.FunctionSpace(mesh, Mx)
    vs = df.Function(VS)
    vs.assign(df.project(model.initial_conditions(), VS))
    (v, s) = df.split(vs)
    (w, r) = df.TestFunctions(VS)
    rhs = df.inner(model.F(v, s), r) + df.inner(- model.I(v, s), w)
    form = rhs*df.dP
    return form
Example #16
def create_forms(W, rho, nu, g_a, boundary_markers, gamma=0.0):
    v, p = df.TrialFunctions(W)
    v_t, p_t = df.TestFunctions(W)

    a = (
        2.0 * nu * df.inner(df.sym(df.grad(v)), df.grad(v_t)) -
        p * df.div(v_t) - df.div(v) * p_t
        #- nu * df.div(v) * p_t
    ) * df.dx

    L = rho * df.inner(df.Constant((0.0, -g_a)), v_t) * df.dx

    # Grad-div stabilization
    a += df.Constant(gamma) * df.div(v) * df.div(v_t) * df.dx

    return a, L
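# A minimal usage sketch (an assumption, not from the original module): build a
# Taylor-Hood space, assemble the Stokes forms above and solve. `mesh` and `bcs`
# are hypothetical; the parameter values are illustrative only.
P2 = df.VectorElement("CG", mesh.ufl_cell(), 2)
P1 = df.FiniteElement("CG", mesh.ufl_cell(), 1)
W = df.FunctionSpace(mesh, P2 * P1)
a, L = create_forms(W, rho=1.0e3, nu=1.0e-3, g_a=9.81,
                    boundary_markers=None, gamma=0.1)
w = df.Function(W)
df.solve(a == L, w, bcs)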
Example #17
    def do_one_VK_problem(self, i_l, C_L_data=0.1):
        """Solve the VK equations for one L position using the "mixed" methodology,
        so that we get on-grid solutions for `f` and `grad_VK(f)`.

        """
        W = d.FunctionSpace(self.vk_mesh, self.c21.vk_vector_element * self.c11.vk_scalar_element)

        # We can treat the tensor data array as having shape (N, 2, 2), where
        # N is the number of elements of the equivalent scalar gridding of the
        # mesh. array[:,0,0] is element [0,0] of the tensor. array[:,0,1] is
        # the upper right element, etc.

        D = d.Function(self.c21.vk_tensor_space)
        dbuf = np.empty(D.vector().size()).reshape((-1, 2, 2))

        C_L = d.Function(self.c11.vk_scalar_space)

        sigma, u = d.TrialFunctions(W) # u is my 'f'
        tau, v = d.TestFunctions(W)

        a = (u * d.div(tau) + d.dot(sigma, tau) + d.inner(D * sigma, d.grad(v))) * d.dx
        L = C_L * v * d.dx
        equation = (a == L)
        soln = d.Function(W)

        bc = d.DirichletBC(W.sub(1), d.Constant(0), direct_boundary)

        dbuf[:,0,0] = self.c21.D_VV[i_l]
        dbuf[:,1,0] = self.c21.D_VK[i_l]
        dbuf[:,0,1] = self.c21.D_VK[i_l]
        dbuf[:,1,1] = self.c21.D_KK[i_l]
        D.vector()[:] = dbuf.reshape((-1,))

        if i_l == self.i_lmax:
            C_L.vector()[:] = C_L_data + self.c11.source_term
        else:
            C_L.vector()[:] = C_L_data

        d.solve(equation, soln, bc)

        ssigma, su = soln.split(deepcopy=True)
        u = self.c11.vk_to_rect(su.vector().array())
        sigma = ssigma.vector().array().reshape((-1, 2))
        dudv = self.c21.vk_to_rect(sigma[:,0])
        dudk = self.c21.vk_to_rect(sigma[:,1])
        return u, dudv, dudk
Example #18
def create_forms(W, rho, nu, F, g_a, p_h, boundary_markers):
    v, p = df.TrialFunctions(W)
    v_t, p_t = df.TestFunctions(W)

    a = (
        2.0 * nu * df.inner(df.sym(df.grad(v)), df.grad(v_t)) -
        p * df.div(v_t) - df.div(v) * p_t
        #- nu * df.div(v) * p_t
    ) * df.dx

    L = rho * df.inner(df.Constant((0.0, -g_a)), v_t) * df.dx

    n = df.FacetNormal(W.mesh())
    ds = df.Measure("ds", subdomain_data=boundary_markers)
    L += df.inner(df.Constant((F, 0.0)), v_t) * ds(3)  # driving force
    L -= p_h * df.inner(n, v_t) * (ds(2) + ds(4))  # hydrostatic balance

    return a, L
    def set_forms(self, unknown, geom_ord=[0]):
        """
        Set up weak forms of elliptic PDE.
        """
        if any(s >= 0 for s in geom_ord):
            ## forms for forward equation ##
            # 4. Define variational problem
            # functions
            if not hasattr(self, 'states_fwd'):
                self.states_fwd = df.Function(self.W)
            # u, l = df.split(self.states_fwd)
            u, l = df.TrialFunctions(self.W)
            v, m = df.TestFunctions(self.W)
            f = self._source_term(degree=2)
            # variational forms
            if 'true' in str(type(unknown)):
                unknown = df.interpolate(unknown, self.V)
            self.F = df.exp(unknown) * df.inner(
                df.grad(u), df.grad(v)) * df.dx + (
                    u * m + v *
                    l) * self.ds - f * v * df.dx + self.nugg * l * m * df.dx
#             self.dFdstates = df.derivative(self.F, self.states_fwd) # Jacobian
#             self.a = unknown*df.inner(df.grad(u), df.grad(v))*df.dx + (u*m + v*l)*self.ds + self.nugg*l*m*df.dx
#             self.L = f*v*df.dx
        if any(s >= 1 for s in geom_ord):
            ## forms for adjoint equation ##
            # Set up the objective functional J
            #             u,_,_ = df.split(self.states_fwd)
            #             J_form = obj.form(u)
            # Compute adjoint of forward operator
            F2 = df.action(self.F, self.states_fwd)
            self.dFdstates = df.derivative(
                F2, self.states_fwd)  # linearized forward operator
            args = ufl.algorithms.extract_arguments(
                self.dFdstates)  # arguments for bookkeeping
            self.adj_dFdstates = df.adjoint(
                self.dFdstates, reordered_arguments=args
            )  # adjoint linearized forward operator
            #             self.dJdstates = df.derivative(J_form, self.states_fwd, df.TestFunction(self.W)) # derivative of functional with respect to solution
            #             self.dirac_1 = obj.ptsrc(u,1) # dirac_1 cannot be initialized here because it involves evaluation
            ## forms for gradient ##
            self.dFdunknown = df.derivative(F2, unknown)
            self.adj_dFdunknown = df.adjoint(self.dFdunknown)
    def solve_system(self, rhs, factor, u0, t):
        """
        Dolfin's linear solver for (M-factor*A)u = rhs

        Args:
            rhs (dtype_f): right-hand side for the nonlinear system
            factor (float): abbrev. for the node-to-node stepsize (or any other factor required)
            u0 (dtype_u): initial guess for the iterative solver (not used here so far)
            t (float): current time

        Returns:
            dtype_u: solution as mesh
        """

        sol = self.dtype_u(self.V)

        self.w.assign(sol.values)

        # fixme: is this really necessary to do each time?
        q1, q2 = df.TestFunctions(self.V)
        w1, w2 = df.split(self.w)
        r1, r2 = df.split(rhs.values)
        F1 = w1 * q1 * df.dx - factor * self.F1 - r1 * q1 * df.dx
        F2 = w2 * q2 * df.dx - factor * self.F2 - r2 * q2 * df.dx
        F = F1 + F2
        du = df.TrialFunction(self.V)
        J = df.derivative(F, self.w, du)

        problem = df.NonlinearVariationalProblem(F, self.w, [], J)
        solver = df.NonlinearVariationalSolver(problem)

        prm = solver.parameters
        prm['newton_solver']['absolute_tolerance'] = 1E-09
        prm['newton_solver']['relative_tolerance'] = 1E-08
        prm['newton_solver']['maximum_iterations'] = 100
        prm['newton_solver']['relaxation_parameter'] = 1.0

        solver.solve()

        sol.values.assign(self.w)

        return sol
Example #21
    def buildFunctionSpace(self):
        #-----------------------------------------------------------------------------------
        """ Built the product function space """

        ## Deprecated code from earlier version of Fenics
        #self.VReal = df.FunctionSpace(self.mesh,'CG',self.meshOpt['polynomialOrder'])
        #self.VImag = df.FunctionSpace(self.mesh,'CG',self.meshOpt['polynomialOrder'])
        #self.V = df.MixedFunctionSpace([self.VReal,self.VImag])

        elem = df.FiniteElement('CG', self.mesh.ufl_cell(),
                                self.meshOpt['polynomialOrder'])
        self.VReal = df.FunctionSpace(self.mesh, elem)
        self.VImag = df.FunctionSpace(self.mesh, elem)
        self.V = df.FunctionSpace(self.mesh, elem * elem)
        self.ur, self.ui = df.TrialFunctions(self.V)
        self.wr, self.wi = df.TestFunctions(self.V)

        self._buildFunctionSpaceCompleted = True

        return
Example #22
    def define_momentum_equation(self):
        """
        Setup the momentum equation weak form
        """
        sim = self.simulation
        Vuvw = sim.data['uvw_star'].function_space()
        tests = dolfin.TestFunctions(Vuvw)
        trials = dolfin.TrialFunctions(Vuvw)

        # Split into components
        v = dolfin.as_vector(tests[:])
        u = dolfin.as_vector(trials[:])

        # The pressure is explicit p* and q is zero (on a domain, to avoid warnings)
        p = sim.data['p']

        class MyZero(Zero):
            def ufl_domains(self):
                return p.ufl_domains()

        q = MyZero()

        lm_trial = lm_test = None

        # Define the momentum equation weak form
        eq = define_dg_equations(
            u,
            v,
            p,
            q,
            lm_trial,
            lm_test,
            self.simulation,
            include_hydrostatic_pressure=self.include_hydrostatic_pressure,
            incompressibility_flux_type='central',  # Only used with q
            use_grad_q_form=False,  # Only used with q
            use_grad_p_form=self.use_grad_p_form,
            use_stress_divergence_form=self.use_stress_divergence_form,
        )
        self.form_lhs, self.form_rhs = dolfin.system(eq)
Example #23
R = parameters["R"]
Rz = parameters["Rz"]
res = parameters["res"]
dt = TimeStepSelector(parameters["dt"])
tau = df.Constant(parameters["tau"])
h = df.Constant(parameters["h"])
M = df.Constant(parameters["M"])

geo_map = EllipsoidMap(R, R, Rz)
geo_map.initialize(res, restart_folder=parameters["restart_folder"])

W = geo_map.mixed_space(4)

# Define trial and test functions
du = df.TrialFunction(W)
chi, xi, eta, etahat = df.TestFunctions(W)

# Define functions
u = df.TrialFunction(W)
u_ = df.Function(W, name="u_")  # current solution
u_1 = df.Function(W, name="u_1")  # solution from previous converged step

# Split mixed functions
psi,  mu, nu, nuhat = df.split(u)
psi_, mu_, nu_, nuhat_ = df.split(u_)
psi_1, mu_1, nu_1, nuhat_1 = df.split(u_1)

# Create initial conditions
if parameters["restart_folder"] is None:
    init_mode = parameters["init_mode"]
    if init_mode == "random":
Example #24
    def step(self, t0: float, t1: float) -> None:
        """Solve on the given time interval (t0, t1).

        Arguments:
            t0 (float)
                The start of the time interval
            t1 (float)
                The end of the time interval

        *Invariants*
            Assuming that v\_ is in the correct state for t0, gives
            self.vur in correct state at t1.
        """
        timer = df.Timer("PDE step")

        # Extract theta and conductivities
        theta = self._parameters["theta"]
        Mi = self._M_i
        Me = self._M_e

        # Extract interval and thus time-step
        kn = df.Constant(t1 - t0)

        # Define variational formulation
        if self._parameters["linear_solver_type"] == "direct":
            v, u, l = df.TrialFunctions(self.VUR)
            w, q, lamda = df.TestFunctions(self.VUR)
        else:
            v, u = df.TrialFunctions(self.VUR)
            w, q = df.TestFunctions(self.VUR)

        # Get physical parameters
        chi = self._parameters["Chi"]
        capacitance = self._parameters["Cm"]

        Dt_v = (v - self.v_) / kn
        Dt_v *= chi * capacitance
        v_mid = theta * v + (1.0 - theta) * self.v_

        # Set time
        t = t0 + theta * (t1 - t0)
        self.time.assign(t)

        # Define spatial integration domains:
        dz = df.Measure("dx",
                        domain=self._mesh,
                        subdomain_data=self._cell_domains)
        db = df.Measure("ds",
                        domain=self._mesh,
                        subdomain_data=self._facet_domains)

        # Get domain labels
        cell_tags = map(int, set(
            self._cell_domains.array()))  # np.int64 does not work
        facet_tags = map(int, set(self._facet_domains.array()))

        # Loop over all domain labels
        G = Dt_v * w * dz()
        for key in cell_tags:
            G += df.inner(Mi[key] * df.grad(v_mid), df.grad(w)) * dz(key)
            G += df.inner(Mi[key] * df.grad(u), df.grad(w)) * dz(key)
            G += df.inner(Mi[key] * df.grad(v_mid), df.grad(q)) * dz(key)
            G += df.inner(
                (Mi[key] + Me[key]) * df.grad(u), df.grad(q)) * dz(key)

            if self._I_s is None:
                G -= chi * df.Constant(0) * w * dz(key)
            else:
                # _is = self._I_s.get(key, df.Constant(0))
                # G -= chi*_is*w*dz(key)
                G -= chi * self._I_s[key] * w * dz(key)

            # If using a Lagrange multiplier
            if self._parameters["linear_solver_type"] == "direct":
                G += (lamda * u + l * q) * dz(key)

            # Add applied current as source in elliptic equation if applicable
            if self._I_a:
                G -= chi * self._I_a[key] * q * dz(key)

        if self._ect_current is not None:
            for key in facet_tags:
                # Default to 0 if not defined for that facet tag
                # TODO: Should I include `chi` here? I do not think so
                G += self._ect_current.get(key, df.Constant(0)) * q * db(key)

        # Define variational problem
        a, L = df.system(G)
        pde = df.LinearVariationalProblem(a, L, self.vur, bcs=self._bcs)

        # Set up the solver
        solver = df.LinearVariationalSolver(pde)
        solver.solve()
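# A minimal usage sketch (an assumption, not part of the original class): advance the
# solution over [0, T] by calling `step` on consecutive sub-intervals. `solver_obj`,
# `T` and `N` are hypothetical names.
T, N = 1.0, 100
dt = T / N
for i in range(N):
    solver_obj.step(i * dt, (i + 1) * dt)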
def Cost(xp):
    comm = nMPI.COMM_WORLD
    mpi_rank = comm.Get_rank()

    x1, x2 = xp #The two variables (length and feed offset)

    rs = 8.0  # radiation boundary radius
    l = x1  # Patch length
    w = 4.5  # Patch width
    s1 = x2 * x1 / 2.0  # Feed offset
    h = 1.0   # Patch height
    t = 0.05   # Metal thickness
    lc = 1.0  # Coax length
    rc = 0.25 # Coax shield radius
    cc = 0.107 # Coax center conductor radius (50 ohm, air dielectric)
    eps = 1.0e-4
    tol = 1.0e-6
    eta = 377.0 # vacuum intrinsic wave impedance
    eps_c = 1.0 # dielectric permittivity

    k0 = 2.45 * 2.0 * np.pi / 30.0 # Free-space wavenumber at 2.45 GHz (lengths in cm)
    ls = 0.025 #Mesh density parameters for GMSH
    lm = 0.8
    lw = 0.06
    lp = 0.3

    # Run GMSH only on one MPI processor (process 0).
    # We use the GMSH Python interface to generate the geometry and mesh objects
    if mpi_rank == 0:
        print("x[0] = {0:<f}, x[1] = {1:<f} ".format(xp[0], xp[1]))
        print("length = {0:<f}, width = {1:<f}, feed offset = {2:<f}".format(l, w, s1))
        gmsh.initialize()
        gmsh.option.setNumber('General.Terminal', 1)
        gmsh.model.add("SimplePatchOpt")
# Radiation sphere
        gmsh.model.occ.addSphere(0.0, 0.0, 0.0, rs, 1)
        gmsh.model.occ.addBox(0.0, -rs, 0.0, rs, 2*rs, rs, 2)
        gmsh.model.occ.intersect([(3,1)],[(3,2)], 3, removeObject=True, removeTool=True)
# Patch
        gmsh.model.occ.addBox(0.0, -l/2, h, w/2, l, t, 4)
# coax center
        gmsh.model.occ.addCylinder(0.0, s1, -lc, 0.0, 0.0, lc+h, cc, 5, 2.0*np.pi)

# coax shield
        gmsh.model.occ.addCylinder(0.0, s1, -lc, 0.0, 0.0, lc, rc, 7)
        gmsh.model.occ.addBox(0.0, s1-rc, -lc, rc, 2.0*rc, lc, 8)
        gmsh.model.occ.intersect([(3,7)], [(3,8)], 9, removeObject=True, removeTool=True)
        gmsh.model.occ.fuse([(3,3)], [(3,9)], 10, removeObject=True, removeTool=True)
# cutout internal boundaries
        gmsh.model.occ.cut([(3,10)], [(3,4),(3,5)], 11, removeObject=True, removeTool=True)

        gmsh.option.setNumber('Mesh.MeshSizeMin', ls)
        gmsh.option.setNumber('Mesh.MeshSizeMax', lm)
        gmsh.option.setNumber('Mesh.Algorithm', 6)
        gmsh.option.setNumber('Mesh.Algorithm3D', 1)
        gmsh.option.setNumber('Mesh.MshFileVersion', 4.1)
        gmsh.option.setNumber('Mesh.Format', 1)
        gmsh.option.setNumber('Mesh.MinimumCirclePoints', 36)
        gmsh.option.setNumber('Mesh.CharacteristicLengthFromCurvature', 1)

        gmsh.model.occ.synchronize()

        pts = gmsh.model.getEntities(0)
        gmsh.model.mesh.setSize(pts, lm) #Set background mesh density
        pts = gmsh.model.getEntitiesInBoundingBox(-eps, -l/2-eps, h-eps, w/2+eps, l/2+eps, h+t+eps)
        gmsh.model.mesh.setSize(pts, ls)

        pts = gmsh.model.getEntitiesInBoundingBox(-eps, s1-rc-eps, -lc-eps, rc+eps, s1+rc+eps, h+eps)
        gmsh.model.mesh.setSize(pts, lw)
        pts = gmsh.model.getEntitiesInBoundingBox(-eps, -rc-eps, -eps, rc+eps, rc+eps, eps)
        gmsh.model.mesh.setSize(pts, lw)

# Embed points to reduce mesh density on patch faces
        fce1 = gmsh.model.getEntitiesInBoundingBox(-eps, -l/2-eps, h+t-eps, w/2+eps, l/2+eps, h+t+eps, 2)
        gmsh.model.occ.synchronize()
        gmsh.model.geo.addPoint(w/4, -l/4, h+t, lp, 1000)
        gmsh.model.geo.addPoint(w/4, 0.0, h+t, lp, 1001)
        gmsh.model.geo.addPoint(w/4, l/4, h+t, lp, 1002)
        gmsh.model.geo.synchronize()
        gmsh.model.occ.synchronize()
        print(fce1)
        fce2 = gmsh.model.getEntitiesInBoundingBox(-eps, -l/2-eps, h-eps, w/2+eps, l/2+eps, h+eps, 2)
        gmsh.model.geo.addPoint(w/4, -9*l/32, h, lp, 1003)
        gmsh.model.geo.addPoint(w/4, 0.0, h, lp, 1004)
        gmsh.model.geo.addPoint(w/4, 9*l/32, h, lp, 1005)
        gmsh.model.geo.synchronize()
        for tt in fce1:
           gmsh.model.mesh.embed(0, [1000, 1001, 1002], 2, tt[1])
        for tt in fce2:
           gmsh.model.mesh.embed(0, [1003, 1004, 1005], 2, tt[1])
        print(fce2)
        gmsh.model.occ.remove(fce1)
        gmsh.model.occ.remove(fce2)
        gmsh.model.occ.synchronize()
        gmsh.model.addPhysicalGroup(3, [11], 1)
        gmsh.model.setPhysicalName(3, 1, "Air")
        gmsh.model.mesh.optimize("Relocate3D", niter=5)
        gmsh.model.mesh.generate(3)
        gmsh.write("SimplePatch.msh")
        gmsh.finalize()
# Mesh generation is finished. We now use meshio to translate the GMSH mesh to an
# XDMF file for import into the FEniCS FE solver
        msh = meshio.read("SimplePatch.msh")
        for cell in msh.cells:
            if  cell.type == "tetra":
                tetra_cells = cell.data

        for key in msh.cell_data_dict["gmsh:physical"].keys():
            if key == "tetra":
                tetra_data = msh.cell_data_dict["gmsh:physical"][key]

        tetra_mesh = meshio.Mesh(points=msh.points, cells={"tetra": tetra_cells},
                           cell_data={"VolumeRegions":[tetra_data]})

        meshio.write("mesh.xdmf", tetra_mesh)
# Here we import the mesh into Fenics
    mesh = dolfin.Mesh()
    with dolfin.XDMFFile("mesh.xdmf") as infile:
        infile.read(mesh)
    mvc = dolfin.MeshValueCollection("size_t", mesh, 3)
    with dolfin.XDMFFile("mesh.xdmf") as infile:
        infile.read(mvc, "VolumeRegions")
    cf = dolfin.cpp.mesh.MeshFunctionSizet(mesh, mvc)
# The boundary classes for the FE solver
    class PEC(dolfin.SubDomain):
        def inside(self, x, on_boundary):
            return on_boundary

    class InputBC(dolfin.SubDomain):
        def inside(self, x, on_boundary):
            return on_boundary and dolfin.near(x[2], -lc, tol)

    class OutputBC(dolfin.SubDomain):
        def inside(self, x, on_boundary):
            rr = np.sqrt(x[0]*x[0]+x[1]*x[1]+x[2]*x[2])
            return on_boundary and dolfin.near(rr, 8.0, 1.0e-1)

    class PMC(dolfin.SubDomain):
        def inside(self, x, on_boundary):
            return on_boundary and dolfin.near(x[0], 0.0, tol)


# Volume domains
    dolfin.File("VolSubDomains.pvd").write(cf)
    dolfin.File("Mesh.pvd").write(mesh)
# Mark boundaries
    sub_domains = dolfin.MeshFunction("size_t", mesh, mesh.topology().dim() - 1)
    sub_domains.set_all(4)
    pec = PEC()
    pec.mark(sub_domains, 0)
    in_port = InputBC()
    in_port.mark(sub_domains, 1)
    out_port = OutputBC()
    out_port.mark(sub_domains, 2)
    pmc = PMC()
    pmc.mark(sub_domains, 3)
    dolfin.File("BoxSubDomains.pvd").write(sub_domains)
# Set up function spaces
    cell = dolfin.tetrahedron
    ele_type = dolfin.FiniteElement('N1curl', cell, 2, variant="integral") # H(curl) element for EM
    V2 = dolfin.FunctionSpace(mesh, ele_type * ele_type)
    V = dolfin.FunctionSpace(mesh, ele_type)
    (u_r, u_i) = dolfin.TrialFunctions(V2)
    (v_r, v_i) = dolfin.TestFunctions(V2)
    dolfin.info(mesh)
#surface integral definitions from boundaries
    ds = dolfin.Measure('ds', domain = mesh, subdomain_data = sub_domains)
#volume regions
    dx_air = dolfin.Measure('dx', domain = mesh, subdomain_data = cf, subdomain_id = 1)
    dx_subst = dolfin.Measure('dx', domain = mesh, subdomain_data = cf, subdomain_id = 2)
# with source and sink terms
    u0 = dolfin.Constant((0.0, 0.0, 0.0)) #PEC definition
# The incident field sources (E and H-fields)
    h_src = dolfin.Expression(('-(x[1] - s) / (2.0 * pi * (pow(x[0], 2.0) + pow(x[1] - s,2.0)))', 'x[0] / (2.0 * pi *(pow(x[0],2.0) + pow(x[1] - s,2.0)))', '0.0'), degree = 2,  s = s1)
    e_src = dolfin.Expression(('x[0] / (2.0 * pi * (pow(x[0], 2.0) + pow(x[1] - s,2.0)))', 'x[1] / (2.0 * pi *(pow(x[0],2.0) + pow(x[1] - s,2.0)))', '0.0'), degree = 2, s = s1)
    Rrad = dolfin.Expression(('sqrt(x[0] * x[0] + x[1] * x[1] + x[2] * x[2])'), degree = 2)
#Boundary condition dictionary
    boundary_conditions = {0: {'PEC' : u0},
                       1: {'InputBC': (h_src)},
                       2: {'OutputBC': Rrad}}

    n = dolfin.FacetNormal(mesh)

#Build PEC boundary conditions for real and imaginary parts
    bcs = []
    for i in boundary_conditions:
        if 'PEC' in boundary_conditions[i]:
            bc = dolfin.DirichletBC(V2.sub(0), boundary_conditions[i]['PEC'], sub_domains, i)
            bcs.append(bc)
            bc = dolfin.DirichletBC(V2.sub(1), boundary_conditions[i]['PEC'], sub_domains, i)
            bcs.append(bc)

# Build input BC source term and loading term
    integral_source = []
    integrals_load =[]
    for i in boundary_conditions:
        if 'InputBC' in boundary_conditions[i]:
            r = boundary_conditions[i]['InputBC']
            bb1 = 2.0 * (k0 * eta) * dolfin.inner(v_i, dolfin.cross(n, r)) * ds(i) #Factor of two from field equivalence principle
            integral_source.append(bb1)
            bb2 = dolfin.inner(dolfin.cross(n, v_i), dolfin.cross(n, u_r)) * k0 * np.sqrt(eps_c) * ds(i)
            integrals_load.append(bb2)
            bb2 = dolfin.inner(-dolfin.cross(n, v_r), dolfin.cross(n, u_i)) * k0 * np.sqrt(eps_c) * ds(i)
            integrals_load.append(bb2)

    for i in boundary_conditions:
        if 'OutputBC' in boundary_conditions[i]:
           r = boundary_conditions[i]['OutputBC']
           bb2 = (dolfin.inner(dolfin.cross(n, v_i), dolfin.cross(n, u_r)) * k0 + 1.0 * dolfin.inner(dolfin.cross(n, v_i), dolfin.cross(n, u_i)) / r)* ds(i)
           integrals_load.append(bb2)
           bb2 = (dolfin.inner(-dolfin.cross(n, v_r), dolfin.cross(n, u_i)) * k0 + 1.0 * dolfin.inner(dolfin.cross(n, v_r), dolfin.cross(n, u_r)) / r)* ds(i)
           integrals_load.append(bb2)
# for PMC, do nothing. Natural BC.

    a = (dolfin.inner(dolfin.curl(v_r), dolfin.curl(u_r)) + dolfin.inner(dolfin.curl(v_i), dolfin.curl(u_i)) - eps_c * k0 * k0 * (dolfin.inner(v_r, u_r) + dolfin.inner(v_i, u_i))) * dx_subst + (dolfin.inner(dolfin.curl(v_r), dolfin.curl(u_r)) + dolfin.inner(dolfin.curl(v_i), dolfin.curl(u_i)) - k0 * k0 * (dolfin.inner(v_r, u_r) + dolfin.inner(v_i, u_i))) * dx_air + sum(integrals_load)
    L = sum(integral_source)

    u1 = dolfin.Function(V2)
    vdim = u1.vector().size()
    print("Solution vector size =", vdim)

    dolfin.solve(a == L, u1, bcs, solver_parameters = {'linear_solver' : 'mumps'}) 

#Here we write files of the field solution for inspection
    u1_r, u1_i = u1.split(True)
    fp = dolfin.File("EField_r.pvd")
    fp << u1_r
    fp = dolfin.File("EField_i.pvd")
    fp << u1_i
# Compute power relationships and reflection coefficient
    H = dolfin.interpolate(h_src, V) # Get input field
    P =  dolfin.assemble((-dolfin.dot(u1_r,dolfin.cross(dolfin.curl(u1_i),n))+dolfin.dot(u1_i,dolfin.cross(dolfin.curl(u1_r),n))) * ds(2))
    P_refl = dolfin.assemble((-dolfin.dot(u1_i,dolfin.cross(dolfin.curl(u1_r), n)) + dolfin.dot(u1_r, dolfin.cross(dolfin.curl(u1_i), n))) * ds(1))
    P_inc = dolfin.assemble((dolfin.dot(H, H) * eta / (2.0 * np.sqrt(eps_c))) * ds(1))
    print("Integrated power on port 2:", P/(2.0 * k0 * eta))
    print("Incident power at port 1:", P_inc)
    print("Integrated reflected power on port 1:", P_inc - P_refl / (2.0 * k0 * eta))
#Reflection coefficient is returned as cost function
    rho_old = (P_inc - P_refl / (2.0 * k0 * eta)) / P_inc #Fraction of incident power reflected as objective function
    return rho_old
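# A minimal usage sketch (an assumption, not from the original script): the returned
# reflection-coefficient fraction can drive a derivative-free optimizer over the two
# design variables (patch length, relative feed offset). Starting point and options
# are illustrative only.
from scipy.optimize import minimize
res = minimize(Cost, x0=[4.0, 0.3], method="Nelder-Mead",
               options={"xatol": 1e-3, "fatol": 1e-4})
print(res.x, res.fun)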
Example #26
4. 2. Add states
'''
# Define mixed function space-split into temperature and displacement FS
d = mesh.geometry().dim()
cell = mesh.ufl_cell()
displacement_fe = df.VectorElement("CG", cell, 1)
temperature_fe = df.FiniteElement("CG", cell, 1)

mixed_fs = df.FunctionSpace(mesh,
                            df.MixedElement([displacement_fe, temperature_fe]))
mixed_fs.sub(1).dofmap().dofs()
mixed_function = df.Function(mixed_fs)
displacements_function, temperature_function = df.split(mixed_function)
# displacements_function,temperature_function = mixed_function.split()

v, T_hat = df.TestFunctions(mixed_fs)

residual_form = get_residual_form(displacements_function, v, density_function,
                                  temperature_function, T_hat, KAPPA, K, ALPHA)

residual_form -=  (df.dot(f_r, v) * dss(10) + df.dot(f_t, v) * dss(14)  + \
                    q*T_hat*dss(5) + q_half*T_hat*dss(6) + q_quart*T_hat*dss(7))
print("get residual_form-------")
# print('ssssssss',df.assemble(T_hat*df.dx).get_local())
pde_problem.add_state('mixed_states', mixed_function, residual_form, 'density')
'''
4. 3. Add outputs
'''

# Add output-avg_density to the PDE problem:
volume = df.assemble(df.Constant(1.) * df.dx(domain=mesh))
Example #27
    else:
        field = subproblem[0]["name"]
        fields.append(field)
        field_to_subspace[field] = spaces[name]
        field_to_subproblem[field] = (name, -1)

# Create initial folders for storing results
newfolder, tstepfiles = create_initial_folders(folder, restart_folder, fields,
                                               tstep, parameters)

# Create overarching test and trial functions
test_functions = dict()
trial_functions = dict()
for name, subproblem in subproblems.items():
    if len(subproblem) > 1:
        test_functions[name] = df.TestFunctions(spaces[name])
        trial_functions[name] = df.TrialFunctions(spaces[name])
    else:
        test_functions[name] = df.TestFunction(spaces[name])
        trial_functions[name] = df.TrialFunction(spaces[name])

# Create work dictionaries for all subproblems
w_ = dict((subproblem, df.Function(space, name=subproblem))
          for subproblem, space in spaces.items())
w_1 = dict((subproblem, df.Function(space, name=subproblem + "_1"))
           for subproblem, space in spaces.items())
w_tmp = dict((subproblem, df.Function(space, name=subproblem + "_tmp"))
             for subproblem, space in spaces.items())

# Shortcuts to the fields
x_ = dict()
Example #28
    def __init__(self, fenics_2d_rve, **kwargs):
        """[summary]

        Parameters
        ----------
        object : [type]
            [description]
        fenics_2d_rve : [type]
            [description]
        element : tuple or dict
            Type and degree of element for displacement FunctionSpace
            Ex: ('CG', 2) or {'family': 'Lagrange', 'degree': 2}
        solver : dict
            Choose the type of the solver, its method and the preconditioner.
            An up-to-date list of the available solvers and preconditioners
            can be obtained with dolfin.list_linear_solver_methods() and
            dolfin.list_krylov_solver_preconditioners().

        """
        self.rve = fenics_2d_rve
        self.topo_dim = topo_dim = fenics_2d_rve.dim
        try:
            bottom_left_corner = fenics_2d_rve.bottom_left_corner
        except AttributeError:
            logger.warning(
                "For the definition of the periodicity boundary conditions,"
                "the bottom left corner of the RVE is assumed to be on (0.,0.)"
            )
            bottom_left_corner = np.zeros(shape=(topo_dim, ))
        self.pbc = periodicity.PeriodicDomain.pbc_dual_base(
            fenics_2d_rve.gen_vect, "XY", bottom_left_corner, topo_dim)

        solver = kwargs.pop("solver", {})
        # {'type': solver_type, 'method': solver_method, 'preconditioner': preconditioner}
        s_type = solver.pop("type", None)
        s_method = solver.pop("method", SOLVER_METHOD)
        s_precond = solver.pop("preconditioner", None)
        if s_type is None:
            if s_method in DOLFIN_KRYLOV_METHODS.keys():
                s_type = "Krylov"
            elif s_method in DOLFIN_LU_METHODS.keys():
                s_type = "LU"
            else:
                raise RuntimeError("The indicated solver method is unknown.")
        self._solver = dict(type=s_type, method=s_method)
        if s_precond:
            self._solver["preconditioner"] = s_precond

        element = kwargs.pop("element", ("Lagrange", 2))
        if isinstance(element, dict):
            element = (element["family"], element["degree"])
        self._element = element

        # * Function spaces
        cell = self.rve.mesh.ufl_cell()
        self.scalar_FE = fe.FiniteElement(element[0], cell, element[1])
        self.displ_FE = fe.VectorElement(element[0], cell, element[1])
        strain_deg = element[1] - 1 if element[1] >= 1 else 0
        strain_dim = int(topo_dim * (topo_dim + 1) / 2)
        self.strain_FE = fe.VectorElement("DG",
                                          cell,
                                          strain_deg,
                                          dim=strain_dim)
        # Scalar function space
        self.X = fe.FunctionSpace(self.rve.mesh,
                                  self.scalar_FE,
                                  constrained_domain=self.pbc)
        # 3D function space: strains in Voigt notation
        self.W = fe.FunctionSpace(self.rve.mesh, self.strain_FE)
        # 2D function space for the displacement fields
        # TODO: reuse the Ve element defined for the mixed function space, e.g. V = FunctionSpace(mesh, Ve)
        self.V = fe.VectorFunctionSpace(self.rve.mesh,
                                        element[0],
                                        element[1],
                                        constrained_domain=self.pbc)

        # * Mixed function space for the solve:
        # * 2D for the displacement fields + a scalar Lagrange multiplier

        # "R" : Real element with one global degree of freedom
        self.real_FE = fe.VectorElement("R", cell, 0)
        self.M = fe.FunctionSpace(
            self.rve.mesh,
            fe.MixedElement([self.displ_FE, self.real_FE]),
            constrained_domain=self.pbc,
        )

        # Define variational problem
        self.v, self.lamb_ = fe.TestFunctions(self.M)
        self.u, self.lamb = fe.TrialFunctions(self.M)
        self.w = fe.Function(self.M)

        # bilinear form
        self.a = (
            fe.inner(sigma(self.rve.C_per, epsilon(self.u)), epsilon(self.v)) *
            fe.dx + fe.dot(self.lamb_, self.u) * fe.dx +
            fe.dot(self.lamb, self.v) * fe.dx)
        self.K = fe.assemble(self.a)
        if self._solver["type"] == "Krylov":
            self.solver = fe.KrylovSolver(self.K, self._solver["method"])
        elif self._solver["type"] == "LU":
            self.solver = fe.LUSolver(self.K, self._solver["method"])
            self.solver.parameters["symmetric"] = True
        try:
            self.solver.parameters.preconditioner = self._solver[
                "preconditioner"]
        except KeyError:
            pass
        # fe.info(self.solver.parameters, True)

        self.localization = dict()
        # dictionary of localization field objects,
        # will be filled up when calling auxiliary problems (lazy evaluation)
        self.ConstitutiveTensors = dict()
def test_assembly_solve_taylor_hood(mesh):
    """Assemble Stokes problem with Taylor-Hood elements and solve."""
    P2 = dolfin.VectorFunctionSpace(mesh, ("Lagrange", 2))
    P1 = dolfin.FunctionSpace(mesh, ("Lagrange", 1))

    def boundary0(x, only_boundary):
        """Define boundary x = 0"""
        return x[:, 0] < 10 * numpy.finfo(float).eps

    def boundary1(x, only_boundary):
        """Define boundary x = 1"""
        return x[:, 0] > (1.0 - 10 * numpy.finfo(float).eps)

    u0 = dolfin.Function(P2)
    u0.vector().set(1.0)
    u0.vector().ghostUpdate(addv=PETSc.InsertMode.INSERT,
                            mode=PETSc.ScatterMode.FORWARD)
    bc0 = dolfin.DirichletBC(P2, u0, boundary0)
    bc1 = dolfin.DirichletBC(P2, u0, boundary1)

    u, p = dolfin.TrialFunction(P2), dolfin.TrialFunction(P1)
    v, q = dolfin.TestFunction(P2), dolfin.TestFunction(P1)

    a00 = inner(ufl.grad(u), ufl.grad(v)) * dx
    a01 = ufl.inner(p, ufl.div(v)) * dx
    a10 = ufl.inner(ufl.div(u), q) * dx
    a11 = None

    p00 = a00
    p01, p10 = None, None
    p11 = inner(p, q) * dx

    # FIXME
    # We need zero function for the 'zero' part of L
    p_zero = dolfin.Function(P1)
    f = dolfin.Function(P2)
    L0 = ufl.inner(f, v) * dx
    L1 = ufl.inner(p_zero, q) * dx

    # -- Blocked and nested

    A0 = dolfin.fem.assemble_matrix_nest([[a00, a01], [a10, a11]], [bc0, bc1])
    A0norm = nest_matrix_norm(A0)
    P0 = dolfin.fem.assemble_matrix_nest([[p00, p01], [p10, p11]], [bc0, bc1])
    P0norm = nest_matrix_norm(P0)
    b0 = dolfin.fem.assemble_vector_nest([L0, L1], [[a00, a01], [a10, a11]],
                                         [bc0, bc1])
    b0norm = b0.norm()

    ksp = PETSc.KSP()
    ksp.create(mesh.mpi_comm())
    ksp.setOperators(A0, P0)
    nested_IS = P0.getNestISs()
    ksp.setType("minres")
    pc = ksp.getPC()
    pc.setType("fieldsplit")
    pc.setFieldSplitIS(["u", nested_IS[0][0]], ["p", nested_IS[1][1]])
    ksp_u, ksp_p = pc.getFieldSplitSubKSP()
    ksp_u.setType("preonly")
    ksp_u.getPC().setType('lu')
    ksp_u.getPC().setFactorSolverType('mumps')
    ksp_p.setType("preonly")

    def monitor(ksp, its, rnorm):
        # print("Num it, rnorm:", its, rnorm)
        pass

    ksp.setTolerances(rtol=1.0e-8, max_it=50)
    ksp.setMonitor(monitor)
    ksp.setFromOptions()
    x0 = b0.copy()
    ksp.solve(b0, x0)
    assert ksp.getConvergedReason() > 0

    # -- Blocked and monolithic

    A1 = dolfin.fem.assemble_matrix_block([[a00, a01], [a10, a11]], [bc0, bc1])
    assert A1.norm() == pytest.approx(A0norm, 1.0e-12)
    P1 = dolfin.fem.assemble_matrix_block([[p00, p01], [p10, p11]], [bc0, bc1])
    assert P1.norm() == pytest.approx(P0norm, 1.0e-12)
    b1 = dolfin.fem.assemble_vector_block([L0, L1], [[a00, a01], [a10, a11]],
                                          [bc0, bc1])
    assert b1.norm() == pytest.approx(b0norm, 1.0e-12)

    ksp = PETSc.KSP()
    ksp.create(mesh.mpi_comm())
    ksp.setOperators(A1, P1)
    ksp.setType("minres")
    pc = ksp.getPC()
    pc.setType('lu')
    pc.setFactorSolverType('mumps')
    ksp.setTolerances(rtol=1.0e-8, max_it=50)
    ksp.setFromOptions()
    x1 = A1.createVecRight()
    ksp.solve(b1, x1)
    assert ksp.getConvergedReason() > 0
    assert x1.norm() == pytest.approx(x0.norm(), 1e-8)

    # -- Monolithic

    P2 = ufl.VectorElement("Lagrange", mesh.ufl_cell(), 2)
    P1 = ufl.FiniteElement("Lagrange", mesh.ufl_cell(), 1)
    TH = P2 * P1
    W = dolfin.FunctionSpace(mesh, TH)
    (u, p) = dolfin.TrialFunctions(W)
    (v, q) = dolfin.TestFunctions(W)
    a00 = ufl.inner(ufl.grad(u), ufl.grad(v)) * dx
    a01 = ufl.inner(p, ufl.div(v)) * dx
    a10 = ufl.inner(ufl.div(u), q) * dx
    a = a00 + a01 + a10

    p00 = ufl.inner(ufl.grad(u), ufl.grad(v)) * dx
    p11 = ufl.inner(p, q) * dx
    p_form = p00 + p11

    f = dolfin.Function(W.sub(0).collapse())
    p_zero = dolfin.Function(W.sub(1).collapse())
    L0 = inner(f, v) * dx
    L1 = inner(p_zero, q) * dx
    L = L0 + L1

    bc0 = dolfin.DirichletBC(W.sub(0), u0, boundary0)
    bc1 = dolfin.DirichletBC(W.sub(0), u0, boundary1)

    A2 = dolfin.fem.assemble_matrix(a, [bc0, bc1])
    A2.assemble()
    assert A2.norm() == pytest.approx(A0norm, 1.0e-12)
    P2 = dolfin.fem.assemble_matrix(p_form, [bc0, bc1])
    P2.assemble()
    assert P2.norm() == pytest.approx(P0norm, 1.0e-12)

    b2 = dolfin.fem.assemble_vector(L)
    dolfin.fem.apply_lifting(b2, [a], [[bc0, bc1]])
    b2.ghostUpdate(addv=PETSc.InsertMode.ADD, mode=PETSc.ScatterMode.REVERSE)
    dolfin.fem.set_bc(b2, [bc0, bc1])
    b2norm = b2.norm()
    assert b2norm == pytest.approx(b0norm, 1.0e-12)

    ksp = PETSc.KSP()
    ksp.create(mesh.mpi_comm())
    ksp.setOperators(A2, P2)
    ksp.setType("minres")
    pc = ksp.getPC()
    pc.setType('lu')
    pc.setFactorSolverType('mumps')

    def monitor(ksp, its, rnorm):
        # print("Num it, rnorm:", its, rnorm)
        pass

    ksp.setTolerances(rtol=1.0e-8, max_it=50)
    ksp.setMonitor(monitor)
    ksp.setFromOptions()
    x2 = A2.createVecRight()
    ksp.solve(b2, x2)
    assert ksp.getConvergedReason() > 0
    assert x0.norm() == pytest.approx(x2.norm(), 1e-8)
Beispiel #30
    def variational_forms(self, kn: df.Constant) -> tp.Tuple[tp.Any, tp.Any]:
        """Create the variational forms corresponding to the given
        discretization of the given system of equations.

        *Arguments*
          kn (:py:class:`ufl.Expr` or float)
            The time step

        *Returns*
          (lhs, rhs) (:py:class:`tuple` of :py:class:`ufl.Form`)

        """
        # Extract theta parameter and conductivities
        theta = self._parameters["theta"]
        Mi = self._M_i
        Me = self._M_e

        # Define variational formulation
        if self._parameters["linear_solver_type"] == "direct":
            v, u, l = df.TrialFunctions(self.VUR)
            w, q, lamda = df.TestFunctions(self.VUR)
        else:
            v, u = df.TrialFunctions(self.VUR)
            w, q = df.TestFunctions(self.VUR)

        # Get physical parameters
        chi = self._parameters["Chi"]
        capacitance = self._parameters["Cm"]

        Dt_v = (v - self.v_) / kn
        Dt_v *= chi * capacitance
        v_mid = theta * v + (1.0 - theta) * self.v_
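        # chi * Cm scales the time derivative (chi: membrane surface-to-volume
        # ratio, Cm: membrane capacitance per unit area); v_mid implements the
        # theta rule, with theta = 1 giving backward Euler and theta = 0.5
        # giving Crank-Nicolson.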

        # Set-up measure and rhs from stimulus
        dz = df.Measure("dx",
                        domain=self._mesh,
                        subdomain_data=self._cell_domains)
        db = df.Measure("ds",
                        domain=self._mesh,
                        subdomain_data=self._facet_domains)

        # Get domain tags
        cell_tags = map(int, set(
            self._cell_domains.array()))  # np.int64 does not work
        facet_tags = map(int, set(self._facet_domains.array()))

        # Loop over all domains
        G = Dt_v * w * dz()
        for key in cell_tags:
            G += df.inner(Mi[key] * df.grad(v_mid), df.grad(w)) * dz(key)
            G += df.inner(Mi[key] * df.grad(u), df.grad(w)) * dz(key)
            G += df.inner(Mi[key] * df.grad(v_mid), df.grad(q)) * dz(key)
            G += df.inner(
                (Mi[key] + Me[key]) * df.grad(u), df.grad(q)) * dz(key)

            if self._I_s is None:
                G -= chi * df.Constant(0) * w * dz(key)
            else:
                _is = self._I_s.get(key, df.Constant(0))
                G -= chi * _is * w * dz(key)

            # If Lagrangian multiplier
            if self._parameters["linear_solver_type"] == "direct":
                G += (lamda * u + l * q) * dz(key)
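                # The real-valued multiplier fixes the otherwise undetermined
                # constant in the extracellular potential u (zero-mean
                # constraint), needed when the full system is assembled for a
                # direct solve.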

            if self._I_a:
                G -= chi * self._I_a[key] * q * dz(key)

        for key in facet_tags:
            if self._ect_current is not None:
                # Default to 0 if not defined for this tag. It is unclear
                # whether `chi` should also be applied here.
                G += self._ect_current.get(key, df.Constant(0)) * q * db(key)

        a, L = df.system(G)
        return a, L
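
# --- Illustration (not part of the original example) -------------------------
# A minimal sketch, assuming a solver object that exposes `variational_forms`,
# `VUR` and `v_` as in the method above, of how the returned (a, L) pair is
# typically consumed in legacy DOLFIN: the matrix is assembled once, while the
# right-hand side is reassembled and solved at every time step. The function
# name and the loop structure are illustrative assumptions only.
def _illustrative_time_loop(solver, dt, num_steps):
    a, L = solver.variational_forms(df.Constant(dt))
    A = df.assemble(a)              # time-independent system matrix
    vur = df.Function(solver.VUR)   # mixed unknown (v, u[, multiplier])
    for _ in range(num_steps):
        b = df.assemble(L)          # L depends on the previous solution v_
        df.solve(A, vur.vector(), b)
        # ... update the previous-solution function `solver.v_` here before
        #     the next step (e.g. with a FunctionAssigner) ...
    return vur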