Code example #1
    def build_schur(self,
                    Atilde,
                    K,
                    list_split_mixed_ops,
                    list_split_trace_ops,
                    nested=False):
        """The Schur complement in the operators of the trace solve contains
        the inverse on a mixed system.  Users may want this inverse to be treated
        with another schur complement.

        Let the mixed matrix Atilde be called A here,
        then the "nested" options rewrites with a Schur decomposition
        as the following.

        .. code-block:: text

                A.inv = [[I, -A00.inv * A01]    *   [[A00.inv, 0    ]   *   [[I,             0]
                        [0,  I             ]]       [0,        S.inv]]      [-A10* A00.inv,  I]]
                        --------------------        -----------------       -------------------
                        block1                      block2                  block3

        with the (inner) Schur complement S = A11 - A10 * A00.inv * A01
        """

        if nested:
            A00, A01, A10, A11 = list_split_mixed_ops
            K0, K1 = list_split_trace_ops
            broken_residual = self.broken_residual.split()
            split_broken_res = [
                AssembledVector(broken_residual[self.vidx]),
                AssembledVector(broken_residual[self.pidx])
            ]

            # inner schur complement
            S = (A11 - A10 * A00.inv * A01)
            # K * block1
            K_Ainv_block1 = [K0, -K0 * A00.inv * A01 + K1]
            # K * block1 * block2
            K_Ainv_block2 = [
                K_Ainv_block1[0] * A00.inv, K_Ainv_block1[1] * S.inv
            ]
            # K * block1 * block2 * block3
            K_Ainv_block3 = [
                K_Ainv_block2[0] - K_Ainv_block2[1] * A10 * A00.inv,
                K_Ainv_block2[1]
            ]
            # K * block1 * block2 * block3 * broken residual
            schur_rhs = (K_Ainv_block3[0] * split_broken_res[0] +
                         K_Ainv_block3[1] * split_broken_res[1])
            # K * block1 * block2 * block3 * K.T
            schur_comp = K_Ainv_block3[0] * K0.T + K_Ainv_block3[1] * K1.T
        else:
            schur_rhs = K * Atilde.inv * AssembledVector(self.broken_residual)
            schur_comp = K * Atilde.inv * K.T
        return schur_rhs, schur_comp
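
The factorization in the docstring above can be sanity-checked with dense blocks. A minimal NumPy sketch (NumPy, the random test blocks and the diagonal shifts are illustration-only assumptions, not part of the Firedrake code) confirming that block1 * block2 * block3 reproduces A.inv:

import numpy as np

rng = np.random.default_rng(0)
n0, n1 = 3, 2
# Diagonal shifts keep the random test blocks comfortably invertible.
A00 = rng.standard_normal((n0, n0)) + 5 * np.eye(n0)
A01 = rng.standard_normal((n0, n1))
A10 = rng.standard_normal((n1, n0))
A11 = rng.standard_normal((n1, n1)) + 5 * np.eye(n1)

A = np.block([[A00, A01], [A10, A11]])
A00inv = np.linalg.inv(A00)
S = A11 - A10 @ A00inv @ A01                      # inner Schur complement

I0, I1 = np.eye(n0), np.eye(n1)
Z01, Z10 = np.zeros((n0, n1)), np.zeros((n1, n0))
block1 = np.block([[I0, -A00inv @ A01], [Z10, I1]])
block2 = np.block([[A00inv, Z01], [Z10, np.linalg.inv(S)]])
block3 = np.block([[I0, Z01], [-A10 @ A00inv, I1]])

# The three factors reproduce the inverse of the full mixed block matrix.
assert np.allclose(block1 @ block2 @ block3, np.linalg.inv(A))

The K_Ainv_block* lists in the snippet apply this factorization to K = [K0, K1] from the left, so Atilde.inv is never formed as a single operator.
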
Code example #2
    def _reconstruction_calls(self):
        """This generates the reconstruction calls for the unknowns using the
        Lagrange multipliers.
        """
        from firedrake import assemble

        # We always eliminate the velocity block first
        id0, id1 = (self.vidx, self.pidx)

        # TODO: When PyOP2 is able to write into mixed dats,
        # the reconstruction expressions can simplify into
        # one clean expression.

        # reuse work from trace operator build
        A, B, C, _ = self.schur_builder.list_split_mixed_ops
        K_0, K_1 = self.schur_builder.list_split_trace_ops
        Ahat = self.schur_builder.A00_inv_hat
        S = self.schur_builder.inner_S

        # Split functions and reconstruct each bit separately
        split_residual = self.broken_residual.split()
        split_sol = self.broken_solution.split()
        g = AssembledVector(split_residual[id0])
        f = AssembledVector(split_residual[id1])
        sigma = split_sol[id0]
        u = split_sol[id1]
        lambdar = AssembledVector(self.trace_solution)

        R = K_1.T - C * Ahat * K_0.T
        rhs = f - C * Ahat * g - R * lambdar
        if self.schur_builder.schur_approx or self.schur_builder.jacobi_S:
            Shat = self.schur_builder.inner_S_approx_inv_hat
            if self.schur_builder.preonly_S:
                S = Shat
            else:
                S = Shat * S
                rhs = Shat * rhs

        u_rec = S.solve(rhs, decomposition="PartialPivLU")
        self._sub_unknown = functools.partial(
            assemble,
            u_rec,
            tensor=u,
            form_compiler_parameters=self.ctx.fc_params,
            assembly_type="residual")

        sigma_rec = A.solve(g - B * AssembledVector(u) - K_0.T * lambdar,
                            decomposition="PartialPivLU")
        self._elim_unknown = functools.partial(
            assemble,
            sigma_rec,
            tensor=sigma,
            form_compiler_parameters=self.ctx.fc_params,
            assembly_type="residual")
Code example #3
    def _reconstruction_calls(self, list_split_mixed_ops,
                              list_split_trace_ops):
        """This generates the reconstruction calls for the unknowns using the
        Lagrange multipliers.

        :arg list_split_mixed_ops: a list of the four split operators that make
                                   up the broken mixed operator from the
                                   original problem.
        :arg list_split_trace_ops: a list of the two split operators that make
                                   up the trace contribution in the hybridized
                                   mixed system.
        """
        from firedrake import assemble

        # We always eliminate the velocity block first
        id0, id1 = (self.vidx, self.pidx)

        # TODO: When PyOP2 is able to write into mixed dats,
        # the reconstruction expressions can simplify into
        # one clean expression.
        A, B, C, D = list_split_mixed_ops
        K_0, K_1 = list_split_trace_ops

        # Split functions and reconstruct each bit separately
        split_residual = self.broken_residual.split()
        split_sol = self.broken_solution.split()
        g = AssembledVector(split_residual[id0])
        f = AssembledVector(split_residual[id1])
        sigma = split_sol[id0]
        u = split_sol[id1]
        lambdar = AssembledVector(self.trace_solution)

        M = D - C * A.inv * B
        R = K_1.T - C * A.inv * K_0.T
        u_rec = M.solve(f - C * A.inv * g - R * lambdar,
                        decomposition="PartialPivLU")
        self._sub_unknown = functools.partial(
            assemble,
            u_rec,
            tensor=u,
            form_compiler_parameters=self.ctx.fc_params,
            assembly_type="residual")

        sigma_rec = A.solve(g - B * AssembledVector(u) - K_0.T * lambdar,
                            decomposition="PartialPivLU")
        self._elim_unknown = functools.partial(
            assemble,
            sigma_rec,
            tensor=sigma,
            form_compiler_parameters=self.ctx.fc_params,
            assembly_type="residual")
Code example #4
    def build_schur(self, rhs):
        """The Schur complement in the operators of the trace solve contains
        the inverse on a mixed system.  Users may want this inverse to be treated
        with another Schur complement.

        Let the mixed matrix Atilde be called A here.
        Then, if a nested schur complement is requested, the inverse of Atilde
        is rewritten with help of a a Schur decomposition as follows.

        .. code-block:: text

                A.inv = [[I, -A00.inv * A01]    *   [[A00.inv, 0    ]   *   [[I,             0]
                        [0,  I             ]]       [0,        S.inv]]      [-A10* A00.inv,  I]]
                        --------------------        -----------------       -------------------
                        block1                      block2                  block3

        with the (inner) Schur complement S = A11 - A10 * A00.inv * A01
        """

        if self.nested:
            _, A01, A10, _ = self.list_split_mixed_ops
            K0, K1 = self.list_split_trace_ops
            broken_residual = rhs.split()
            R = [
                AssembledVector(broken_residual[self.vidx]),
                AssembledVector(broken_residual[self.pidx])
            ]
            # K * block1
            K_Ainv_block1 = [K0, -K0 * self.A00_inv_hat * A01 + K1]
            # K * block1 * block2
            K_Ainv_block2 = [
                K_Ainv_block1[0] * self.A00_inv_hat,
                K_Ainv_block1[1] * self.inner_S_inv_hat
            ]
            # K * block1 * block2 * block3
            K_Ainv_block3 = [
                K_Ainv_block2[0] - K_Ainv_block2[1] * A10 * self.A00_inv_hat,
                K_Ainv_block2[1]
            ]
            # K * block1 * block2 * block3 * broken residual
            schur_rhs = (K_Ainv_block3[0] * R[0] + K_Ainv_block3[1] * R[1])
            # K * block1 * block2 * block3 * K.T
            schur_comp = K_Ainv_block3[0] * K0.T + K_Ainv_block3[1] * K1.T
        else:
            schur_rhs = self.K * self.Atilde.inv * AssembledVector(rhs)
            schur_comp = self.K * self.Atilde.inv * self.K.T
        return schur_rhs, schur_comp
Code example #5
File: preconditioners.py  Project: lrshan22/firedrake
    def _reconstruction_calls(self, split_mixed_op, split_trace_op):
        """This generates the reconstruction calls for the unknowns using the
        Lagrange multipliers.

        :arg split_mixed_op: a ``dict`` of split forms that make up the broken
                             mixed operator from the original problem.
        :arg split_trace_op: a ``dict`` of split forms that make up the trace
                             contribution in the hybridized mixed system.
        """
        from firedrake.assemble import create_assembly_callable

        # We always eliminate the velocity block first
        id0, id1 = (self.vidx, self.pidx)

        # TODO: When PyOP2 is able to write into mixed dats,
        # the reconstruction expressions can simplify into
        # one clean expression.
        A = Tensor(split_mixed_op[(id0, id0)])
        B = Tensor(split_mixed_op[(id0, id1)])
        C = Tensor(split_mixed_op[(id1, id0)])
        D = Tensor(split_mixed_op[(id1, id1)])
        K_0 = Tensor(split_trace_op[(0, id0)])
        K_1 = Tensor(split_trace_op[(0, id1)])

        # Split functions and reconstruct each bit separately
        split_residual = self.broken_residual.split()
        split_sol = self.broken_solution.split()
        g = AssembledVector(split_residual[id0])
        f = AssembledVector(split_residual[id1])
        sigma = split_sol[id0]
        u = split_sol[id1]
        lambdar = AssembledVector(self.trace_solution)

        M = D - C * A.inv * B
        R = K_1.T - C * A.inv * K_0.T
        u_rec = M.inv * (f - C * A.inv * g - R * lambdar)
        self._sub_unknown = create_assembly_callable(u_rec,
                                                     tensor=u,
                                                     form_compiler_parameters=self.cxt.fc_params)

        sigma_rec = A.inv * (g - B * AssembledVector(u) - K_0.T * lambdar)
        self._elim_unknown = create_assembly_callable(sigma_rec,
                                                      tensor=sigma,
                                                      form_compiler_parameters=self.cxt.fc_params)
Code example #6
File: preconditioners.py  Project: lrshan22/firedrake
def create_schur_nullspace(P, forward, V, V_d, TraceSpace, comm):
    """Gets the nullspace vectors corresponding to the Schur complement
    system for the multipliers.

    :arg P: The mixed operator from the ImplicitMatrixContext.
    :arg forward: A Slate expression denoting the forward elimination
                  operator.
    :arg V: The original "unbroken" space.
    :arg V_d: The broken space.
    :arg TraceSpace: The space of approximate traces.
    :arg comm: The communicator over which the nullspace is created.

    Returns: A nullspace (if there is one) for the Schur-complement system.
    """
    from firedrake import assemble, Function, project, AssembledVector

    nullspace = P.getNullSpace()
    if nullspace.handle == 0:
        # No nullspace
        return None

    vecs = nullspace.getVecs()
    tmp = Function(V)
    tmp_b = Function(V_d)
    tnsp_tmp = Function(TraceSpace)
    forward_action = forward * AssembledVector(tmp_b)
    new_vecs = []
    for v in vecs:
        with tmp.dat.vec_wo as t:
            v.copy(t)

        project(tmp, tmp_b)
        assemble(forward_action, tensor=tnsp_tmp)
        with tnsp_tmp.dat.vec_ro as v:
            new_vecs.append(v.copy())

    # Normalize
    for v in new_vecs:
        v.normalize()
    schur_nullspace = PETSc.NullSpace().create(vectors=new_vecs, comm=comm)

    return schur_nullspace
Code example #7
    def initialize(self, pc):
        """Set up the problem context. Take the original
        mixed problem and reformulate the problem as a
        hybridized mixed system.

        A KSP is created for the Lagrange multiplier system.
        """
        from firedrake import (FunctionSpace, Function, Constant,
                               TrialFunction, TrialFunctions, TestFunction,
                               DirichletBC)
        from firedrake.assemble import (allocate_matrix,
                                        create_assembly_callable)
        from firedrake.formmanipulation import split_form
        from ufl.algorithms.replace import replace

        # Extract the problem context
        prefix = pc.getOptionsPrefix() + "hybridization_"
        _, P = pc.getOperators()
        self.ctx = P.getPythonContext()

        if not isinstance(self.ctx, ImplicitMatrixContext):
            raise ValueError(
                "The python context must be an ImplicitMatrixContext")

        test, trial = self.ctx.a.arguments()

        V = test.function_space()
        mesh = V.mesh()

        if len(V) != 2:
            raise ValueError("Expecting two function spaces.")

        if all(Vi.ufl_element().value_shape() for Vi in V):
            raise ValueError("Expecting an H(div) x L2 pair of spaces.")

        # Automagically determine which spaces are vector and scalar
        for i, Vi in enumerate(V):
            if Vi.ufl_element().sobolev_space().name == "HDiv":
                self.vidx = i
            else:
                assert Vi.ufl_element().sobolev_space().name == "L2"
                self.pidx = i

        # Create the space of approximate traces.
        W = V[self.vidx]
        if W.ufl_element().family() == "Brezzi-Douglas-Marini":
            tdegree = W.ufl_element().degree()

        else:
            try:
                # If we have a tensor product element
                h_deg, v_deg = W.ufl_element().degree()
                tdegree = (h_deg - 1, v_deg - 1)

            except TypeError:
                tdegree = W.ufl_element().degree() - 1

        TraceSpace = FunctionSpace(mesh, "HDiv Trace", tdegree)

        # Break the function spaces and define fully discontinuous spaces
        broken_elements = ufl.MixedElement(
            [ufl.BrokenElement(Vi.ufl_element()) for Vi in V])
        V_d = FunctionSpace(mesh, broken_elements)

        # Set up the functions for the original, hybridized
        # and schur complement systems
        self.broken_solution = Function(V_d)
        self.broken_residual = Function(V_d)
        self.trace_solution = Function(TraceSpace)
        self.unbroken_solution = Function(V)
        self.unbroken_residual = Function(V)

        shapes = (V[self.vidx].finat_element.space_dimension(),
                  np.prod(V[self.vidx].shape))
        domain = "{[i,j]: 0 <= i < %d and 0 <= j < %d}" % shapes
        instructions = """
        for i, j
            w[i,j] = w[i,j] + 1
        end
        """
        self.weight = Function(V[self.vidx])
        par_loop((domain, instructions),
                 ufl.dx, {"w": (self.weight, INC)},
                 is_loopy_kernel=True)

        instructions = """
        for i, j
            vec_out[i,j] = vec_out[i,j] + vec_in[i,j]/w[i,j]
        end
        """
        self.average_kernel = (domain, instructions)

        # Create the symbolic Schur-reduction:
        # Original mixed operator replaced with "broken"
        # arguments
        arg_map = {test: TestFunction(V_d), trial: TrialFunction(V_d)}
        Atilde = Tensor(replace(self.ctx.a, arg_map))
        gammar = TestFunction(TraceSpace)
        n = ufl.FacetNormal(mesh)
        sigma = TrialFunctions(V_d)[self.vidx]

        if mesh.cell_set._extruded:
            Kform = (gammar('+') * ufl.jump(sigma, n=n) * ufl.dS_h +
                     gammar('+') * ufl.jump(sigma, n=n) * ufl.dS_v)
        else:
            Kform = (gammar('+') * ufl.jump(sigma, n=n) * ufl.dS)

        # Here we deal with boundaries. If there are Neumann
        # conditions (which should be enforced strongly for
        # H(div)xL^2) then we need to add jump terms on the exterior
        # facets. If there are Dirichlet conditions (which should be
        # enforced weakly) then we need to zero out the trace
        # variables there as they are not active (otherwise the hybrid
        # problem is not well-posed).

        # If boundary conditions are contained in the ImplicitMatrixContext:
        if self.ctx.row_bcs:
            # Find all the subdomains with neumann BCS
            # These are Dirichlet BCs on the vidx space
            neumann_subdomains = set()
            for bc in self.ctx.row_bcs:
                if bc.function_space().index == self.pidx:
                    raise NotImplementedError(
                        "Dirichlet conditions for scalar variable not supported. Use a weak bc"
                    )
                if bc.function_space().index != self.vidx:
                    raise NotImplementedError(
                        "Dirichlet bc set on unsupported space.")
                # append the set of sub domains
                subdom = bc.sub_domain
                if isinstance(subdom, str):
                    neumann_subdomains |= set([subdom])
                else:
                    neumann_subdomains |= set(
                        as_tuple(subdom, numbers.Integral))

            # separate out the top and bottom bcs
            extruded_neumann_subdomains = neumann_subdomains & {
                "top", "bottom"
            }
            neumann_subdomains = neumann_subdomains - extruded_neumann_subdomains

            integrand = gammar * ufl.dot(sigma, n)
            measures = []
            trace_subdomains = []
            if mesh.cell_set._extruded:
                ds = ufl.ds_v
                for subdomain in sorted(extruded_neumann_subdomains):
                    measures.append({
                        "top": ufl.ds_t,
                        "bottom": ufl.ds_b
                    }[subdomain])
                trace_subdomains.extend(
                    sorted({"top", "bottom"} - extruded_neumann_subdomains))
            else:
                ds = ufl.ds
            if "on_boundary" in neumann_subdomains:
                measures.append(ds)
            else:
                measures.extend((ds(sd) for sd in sorted(neumann_subdomains)))
                markers = [int(x) for x in mesh.exterior_facets.unique_markers]
                dirichlet_subdomains = set(markers) - neumann_subdomains
                trace_subdomains.extend(sorted(dirichlet_subdomains))

            for measure in measures:
                Kform += integrand * measure

            trace_bcs = [
                DirichletBC(TraceSpace, Constant(0.0), subdomain)
                for subdomain in trace_subdomains
            ]

        else:
            # No bcs were provided, we assume weak Dirichlet conditions.
            # We zero out the contribution of the trace variables on
            # the exterior boundary. Extruded cells will have both
            # horizontal and vertical facets
            trace_subdomains = ["on_boundary"]
            if mesh.cell_set._extruded:
                trace_subdomains.extend(["bottom", "top"])
            trace_bcs = [
                DirichletBC(TraceSpace, Constant(0.0), subdomain)
                for subdomain in trace_subdomains
            ]

        # Make a SLATE tensor from Kform
        K = Tensor(Kform)

        # Assemble the Schur complement operator and right-hand side
        self.schur_rhs = Function(TraceSpace)
        self._assemble_Srhs = create_assembly_callable(
            K * Atilde.inv * AssembledVector(self.broken_residual),
            tensor=self.schur_rhs,
            form_compiler_parameters=self.ctx.fc_params)

        mat_type = PETSc.Options().getString(prefix + "mat_type", "aij")

        schur_comp = K * Atilde.inv * K.T
        self.S = allocate_matrix(schur_comp,
                                 bcs=trace_bcs,
                                 form_compiler_parameters=self.ctx.fc_params,
                                 mat_type=mat_type,
                                 options_prefix=prefix)
        self._assemble_S = create_assembly_callable(
            schur_comp,
            tensor=self.S,
            bcs=trace_bcs,
            form_compiler_parameters=self.ctx.fc_params,
            mat_type=mat_type)

        with timed_region("HybridOperatorAssembly"):
            self._assemble_S()

        Smat = self.S.petscmat

        nullspace = self.ctx.appctx.get("trace_nullspace", None)
        if nullspace is not None:
            nsp = nullspace(TraceSpace)
            Smat.setNullSpace(nsp.nullspace(comm=pc.comm))

        # Set up the KSP for the system of Lagrange multipliers
        trace_ksp = PETSc.KSP().create(comm=pc.comm)
        trace_ksp.setOptionsPrefix(prefix)
        trace_ksp.setOperators(Smat)
        trace_ksp.setUp()
        trace_ksp.setFromOptions()
        self.trace_ksp = trace_ksp

        split_mixed_op = dict(split_form(Atilde.form))
        split_trace_op = dict(split_form(K.form))

        # Generate reconstruction calls
        self._reconstruction_calls(split_mixed_op, split_trace_op)
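
This initialization is driven entirely through PETSc options on the outer solve; judging by the "hybridization_" options prefix, the class is Firedrake's HybridizationPC. A hedged usage sketch (the mixed Poisson setup, mesh size and the inner "hybridization" solver options are illustrative assumptions, not mandated by the code above):

from firedrake import *

mesh = UnitSquareMesh(8, 8)
RT = FunctionSpace(mesh, "RT", 1)      # H(div) velocity space
DG = FunctionSpace(mesh, "DG", 0)      # L2 pressure space
W = RT * DG

sigma, u = TrialFunctions(W)
tau, v = TestFunctions(W)
x, y = SpatialCoordinate(mesh)
f = Function(DG).interpolate(sin(pi * x) * sin(pi * y))

a = (dot(sigma, tau) + div(tau) * u + div(sigma) * v) * dx
L = -f * v * dx

w = Function(W)
# "mat_type": "matfree" provides the ImplicitMatrixContext this PC expects;
# everything under "hybridization" configures the trace (multiplier) solve.
params = {"mat_type": "matfree",
          "ksp_type": "preonly",
          "pc_type": "python",
          "pc_python_type": "firedrake.HybridizationPC",
          "hybridization": {"ksp_type": "cg",
                            "pc_type": "gamg"}}
solve(a == L, w, solver_parameters=params)
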
Code example #8
    def initialize(self, pc):
        """Set up the problem context. Take the original
        mixed problem and reformulate the problem as a
        hybridized mixed system.

        A KSP is created for the Lagrange multiplier system.
        """
        from firedrake import (FunctionSpace, Function, Constant,
                               TrialFunction, TrialFunctions, TestFunction,
                               DirichletBC, assemble)
        from firedrake.assemble import (allocate_matrix,
                                        create_assembly_callable)
        from firedrake.formmanipulation import split_form
        from ufl.algorithms.replace import replace

        # Extract the problem context
        prefix = pc.getOptionsPrefix() + "hybridization_"
        _, P = pc.getOperators()
        self.cxt = P.getPythonContext()

        if not isinstance(self.cxt, ImplicitMatrixContext):
            raise ValueError("The python context must be an ImplicitMatrixContext")

        test, trial = self.cxt.a.arguments()

        V = test.function_space()
        mesh = V.mesh()

        if len(V) != 2:
            raise ValueError("Expecting two function spaces.")

        if all(Vi.ufl_element().value_shape() for Vi in V):
            raise ValueError("Expecting an H(div) x L2 pair of spaces.")

        # Automagically determine which spaces are vector and scalar
        for i, Vi in enumerate(V):
            if Vi.ufl_element().sobolev_space().name == "HDiv":
                self.vidx = i
            else:
                assert Vi.ufl_element().sobolev_space().name == "L2"
                self.pidx = i

        # Create the space of approximate traces.
        W = V[self.vidx]
        if W.ufl_element().family() == "Brezzi-Douglas-Marini":
            tdegree = W.ufl_element().degree()

        else:
            try:
                # If we have a tensor product element
                h_deg, v_deg = W.ufl_element().degree()
                tdegree = (h_deg - 1, v_deg - 1)

            except TypeError:
                tdegree = W.ufl_element().degree() - 1

        TraceSpace = FunctionSpace(mesh, "HDiv Trace", tdegree)

        # Break the function spaces and define fully discontinuous spaces
        broken_elements = ufl.MixedElement([ufl.BrokenElement(Vi.ufl_element()) for Vi in V])
        V_d = FunctionSpace(mesh, broken_elements)

        # Set up the functions for the original, hybridized
        # and schur complement systems
        self.broken_solution = Function(V_d)
        self.broken_residual = Function(V_d)
        self.trace_solution = Function(TraceSpace)
        self.unbroken_solution = Function(V)
        self.unbroken_residual = Function(V)

        # Set up the KSP for the hdiv residual projection
        hdiv_mass_ksp = PETSc.KSP().create(comm=pc.comm)
        hdiv_mass_ksp.setOptionsPrefix(prefix + "hdiv_residual_")

        # HDiv mass operator
        p = TrialFunction(V[self.vidx])
        q = TestFunction(V[self.vidx])
        mass = ufl.dot(p, q)*ufl.dx
        # TODO: Bcs?
        M = assemble(mass, bcs=None, form_compiler_parameters=self.cxt.fc_params)
        M.force_evaluation()
        Mmat = M.petscmat

        hdiv_mass_ksp.setOperators(Mmat)
        hdiv_mass_ksp.setUp()
        hdiv_mass_ksp.setFromOptions()
        self.hdiv_mass_ksp = hdiv_mass_ksp

        # Storing the result of A.inv * r, where A is the HDiv
        # mass matrix and r is the HDiv residual
        self._primal_r = Function(V[self.vidx])

        tau = TestFunction(V_d[self.vidx])
        self._assemble_broken_r = create_assembly_callable(
            ufl.dot(self._primal_r, tau)*ufl.dx,
            tensor=self.broken_residual.split()[self.vidx],
            form_compiler_parameters=self.cxt.fc_params)

        # Create the symbolic Schur-reduction:
        # Original mixed operator replaced with "broken"
        # arguments
        arg_map = {test: TestFunction(V_d),
                   trial: TrialFunction(V_d)}
        Atilde = Tensor(replace(self.cxt.a, arg_map))
        gammar = TestFunction(TraceSpace)
        n = ufl.FacetNormal(mesh)
        sigma = TrialFunctions(V_d)[self.vidx]

        if mesh.cell_set._extruded:
            Kform = (gammar('+') * ufl.dot(sigma, n) * ufl.dS_h +
                     gammar('+') * ufl.dot(sigma, n) * ufl.dS_v)
        else:
            Kform = (gammar('+') * ufl.dot(sigma, n) * ufl.dS)

        # Here we deal with boundaries. If there are Neumann
        # conditions (which should be enforced strongly for
        # H(div)xL^2) then we need to add jump terms on the exterior
        # facets. If there are Dirichlet conditions (which should be
        # enforced weakly) then we need to zero out the trace
        # variables there as they are not active (otherwise the hybrid
        # problem is not well-posed).

        # If boundary conditions are contained in the ImplicitMatrixContext:
        if self.cxt.row_bcs:
            # Find all the subdomains with neumann BCS
            # These are Dirichlet BCs on the vidx space
            neumann_subdomains = set()
            for bc in self.cxt.row_bcs:
                if bc.function_space().index == self.pidx:
                    raise NotImplementedError("Dirichlet conditions for scalar variable not supported. Use a weak bc")
                if bc.function_space().index != self.vidx:
                    raise NotImplementedError("Dirichlet bc set on unsupported space.")
                # append the set of sub domains
                subdom = bc.sub_domain
                if isinstance(subdom, str):
                    neumann_subdomains |= set([subdom])
                else:
                    neumann_subdomains |= set(as_tuple(subdom, int))

            # separate out the top and bottom bcs
            extruded_neumann_subdomains = neumann_subdomains & {"top", "bottom"}
            neumann_subdomains = neumann_subdomains.difference(extruded_neumann_subdomains)

            integrand = gammar * ufl.dot(sigma, n)
            measures = []
            trace_subdomains = []
            if mesh.cell_set._extruded:
                ds = ufl.ds_v
                for subdomain in extruded_neumann_subdomains:
                    measures.append({"top": ufl.ds_t, "bottom": ufl.ds_b}[subdomain])
                trace_subdomains.extend(sorted({"top", "bottom"} - extruded_neumann_subdomains))
            else:
                ds = ufl.ds
            if "on_boundary" in neumann_subdomains:
                measures.append(ds)
            else:
                measures.append(ds(tuple(neumann_subdomains)))
                dirichlet_subdomains = set(mesh.exterior_facets.unique_markers) - neumann_subdomains
                trace_subdomains.append(sorted(dirichlet_subdomains))

            for measure in measures:
                Kform += integrand*measure

            trace_bcs = [DirichletBC(TraceSpace, Constant(0.0), subdomain) for subdomain in trace_subdomains]

        else:
            # No bcs were provided, we assume weak Dirichlet conditions.
            # We zero out the contribution of the trace variables on
            # the exterior boundary. Extruded cells will have both
            # horizontal and vertical facets
            trace_subdomains = ["on_boundary"]
            if mesh.cell_set._extruded:
                trace_subdomains.extend(["bottom", "top"])
            trace_bcs = [DirichletBC(TraceSpace, Constant(0.0), subdomain) for subdomain in trace_subdomains]

        # Make a SLATE tensor from Kform
        K = Tensor(Kform)

        # Assemble the Schur complement operator and right-hand side
        self.schur_rhs = Function(TraceSpace)
        self._assemble_Srhs = create_assembly_callable(
            K * Atilde.inv * AssembledVector(self.broken_residual),
            tensor=self.schur_rhs,
            form_compiler_parameters=self.cxt.fc_params)

        schur_comp = K * Atilde.inv * K.T
        self.S = allocate_matrix(schur_comp, bcs=trace_bcs,
                                 form_compiler_parameters=self.cxt.fc_params)
        self._assemble_S = create_assembly_callable(schur_comp,
                                                    tensor=self.S,
                                                    bcs=trace_bcs,
                                                    form_compiler_parameters=self.cxt.fc_params)

        self._assemble_S()
        self.S.force_evaluation()
        Smat = self.S.petscmat

        # Nullspace for the multiplier problem
        nullspace = create_schur_nullspace(P, -K * Atilde,
                                           V, V_d, TraceSpace,
                                           pc.comm)
        if nullspace:
            Smat.setNullSpace(nullspace)

        # Set up the KSP for the system of Lagrange multipliers
        trace_ksp = PETSc.KSP().create(comm=pc.comm)
        trace_ksp.setOptionsPrefix(prefix)
        trace_ksp.setOperators(Smat)
        trace_ksp.setUp()
        trace_ksp.setFromOptions()
        self.trace_ksp = trace_ksp

        split_mixed_op = dict(split_form(Atilde.form))
        split_trace_op = dict(split_form(K.form))

        # Generate reconstruction calls
        self._reconstruction_calls(split_mixed_op, split_trace_op)

        # NOTE: The projection stage *might* be replaced by a Fortin
        # operator. We may want to allow the user to specify if they
        # wish to use a Fortin operator over a projection, or vice-versa.
        # In a future add-on, we can add a switch which chooses either
        # the Fortin reconstruction or the usual KSP projection.

        # Set up the projection KSP
        hdiv_projection_ksp = PETSc.KSP().create(comm=pc.comm)
        hdiv_projection_ksp.setOptionsPrefix(prefix + 'hdiv_projection_')

        # Reuse the mass operator from the hdiv_mass_ksp
        hdiv_projection_ksp.setOperators(Mmat)

        # Construct the RHS for the projection stage
        self._projection_rhs = Function(V[self.vidx])
        self._assemble_projection_rhs = create_assembly_callable(
            ufl.dot(self.broken_solution.split()[self.vidx], q)*ufl.dx,
            tensor=self._projection_rhs,
            form_compiler_parameters=self.cxt.fc_params)

        # Finalize ksp setup
        hdiv_projection_ksp.setUp()
        hdiv_projection_ksp.setFromOptions()
        self.hdiv_projection_ksp = hdiv_projection_ksp
Code example #9
    def _slate_expressions(self):
        """Returns all the relevant Slate expressions
        for the static condensation and local recovery
        procedures.
        """
        # This operator has the form:
        # | A  B  C |
        # | D  E  F |
        # | G  H  J |
        # NOTE: It is often the case that D = B.T,
        # G = C.T, H = F.T, and J = 0, but we're not making
        # that assumption here.
        _O = Tensor(self.cxt.a)
        O = _O.blocks

        # Extract sub-block:
        # | A B |
        # | D E |
        # which has block row indices (0, 1) and block
        # column indices (0, 1) as well.
        M = O[:2, :2]

        # Extract sub-block:
        # | C |
        # | F |
        # which has block row indices (0, 1) and block
        # column indices (2,)
        K = O[:2, 2]

        # Extract sub-block:
        # | G H |
        # which has block row indices (2,) and block column
        # indices (0, 1)
        L = O[2, :2]

        # And the final block J has block row-column
        # indices (2, 2)
        J = O[2, 2]

        # Schur complement for traces
        S = J - L * M.inv * K

        # Create mixed function for residual computation.
        # This projects the non-trace residual bits into
        # the trace space:
        # -L * M.inv * | v1 v2 |^T
        _R = AssembledVector(self.residual)
        R = _R.blocks
        v1v2 = R[:2]
        v3 = R[2]
        r_lambda = v3 - L * M.inv * v1v2

        # Reconstruction expressions
        q_h, u_h, lambda_h = self.solution.split()

        # Local tensors needed for reconstruction
        A = O[0, 0]
        B = O[0, 1]
        C = O[0, 2]
        D = O[1, 0]
        E = O[1, 1]
        F = O[1, 2]
        Se = E - D * A.inv * B
        Sf = F - D * A.inv * C

        v1, v2, v3 = self.residual.split()

        # Solve locally using Cholesky factorizations
        # (Se and A are symmetric positive definite)
        u_h_expr = Se.solve(AssembledVector(v2) -
                            D * A.inv * AssembledVector(v1) -
                            Sf * AssembledVector(lambda_h),
                            decomposition="LLT")

        q_h_expr = A.solve(AssembledVector(v1) - B * AssembledVector(u_h) -
                           C * AssembledVector(lambda_h),
                           decomposition="LLT")

        return (S, r_lambda, u_h_expr, q_h_expr)
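
The condensation onto the trace block plus the local recovery reproduce the monolithic solve. A minimal NumPy sketch of that round trip (dense random blocks as stand-ins for the Slate tensors; the recovery is collapsed into a single block solve rather than the Se/A splitting used above):

import numpy as np

rng = np.random.default_rng(2)
n = (3, 3, 2)                                # sizes of the q, u and trace blocks
N = sum(n)
O = rng.standard_normal((N, N)) + 5 * np.eye(N)
r = rng.standard_normal(N)
i1 = n[0] + n[1]                             # offset of the trace block

M = O[:i1, :i1]                              # | A B ; D E |
K = O[:i1, i1:]                              # | C ; F |
L = O[i1:, :i1]                              # | G H |
J = O[i1:, i1:]                              # J
v12, v3 = r[:i1], r[i1:]

# Condense onto the trace unknowns and solve the small system for lambda.
Minv = np.linalg.inv(M)
S = J - L @ Minv @ K
r_lambda = v3 - L @ Minv @ v12
lam = np.linalg.solve(S, r_lambda)

# Local recovery of (q, u) by back-substitution.
qu = Minv @ (v12 - K @ lam)

# Condensation plus recovery matches the monolithic solve.
assert np.allclose(np.concatenate([qu, lam]), np.linalg.solve(O, r))
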
Code example #10
    def initialize(self, pc):
        """Set up the problem context. Take the original
        H1-problem and partition the spaces/functions
        into 'interior' and 'facet' parts.

        A KSP is created for the reduced system after
        static condensation is applied.
        """
        from firedrake import (FunctionSpace, Function, TrialFunction,
                               TestFunction)
        from firedrake.assemble import (allocate_matrix,
                                        create_assembly_callable)
        from ufl.algorithms.replace import replace

        # Extract python context
        prefix = pc.getOptionsPrefix() + "static_condensation_"
        _, P = pc.getOperators()
        self.cxt = P.getPythonContext()

        if not isinstance(self.cxt, ImplicitMatrixContext):
            raise ValueError("Context must be an ImplicitMatrixContext")

        test, trial = self.cxt.a.arguments()
        V = test.function_space()
        mesh = V.mesh()

        if len(V) > 1:
            raise ValueError("Cannot use this PC for mixed problems.")

        if V.ufl_element().sobolev_space().name != "H1":
            raise ValueError("Expecting an H1-conforming element.")

        if not V.ufl_element().cell().is_simplex():
            raise NotImplementedError("Only simplex meshes are implemented.")

        top_dim = V.finat_element._element.ref_el.get_dimension()
        if not V.finat_element.entity_dofs()[top_dim][0]:
            raise RuntimeError("There are no interior dofs to eliminate.")

        # We decompose the space into an interior part and facet part
        interior_element = V.ufl_element()["interior"]
        facet_element = V.ufl_element()["facet"]
        V_int = FunctionSpace(mesh, interior_element)
        V_facet = FunctionSpace(mesh, facet_element)

        # Get transfer kernel for moving data
        self._transfer_kernel = get_transfer_kernels({
            'h1-space': V,
            'interior-space': V_int,
            'facet-space': V_facet
        })

        # Set up functions for the H1 functions and the interior/trace parts
        self.trace_solution = Function(V_facet)
        self.interior_solution = Function(V_int)
        self.h1_solution = Function(V)
        self.h1_residual = Function(V)
        self.interior_residual = Function(V_int)
        self.trace_residual = Function(V_facet)

        # TODO: Handle strong bcs in Slate
        if self.cxt.row_bcs:
            raise NotImplementedError("Strong bcs not implemented yet")

        self.bcs = None

        A00 = Tensor(
            replace(self.cxt.a, {
                test: TestFunction(V_int),
                trial: TrialFunction(V_int)
            }))
        A01 = Tensor(
            replace(self.cxt.a, {
                test: TestFunction(V_int),
                trial: TrialFunction(V_facet)
            }))
        A10 = Tensor(
            replace(self.cxt.a, {
                test: TestFunction(V_facet),
                trial: TrialFunction(V_int)
            }))
        A11 = Tensor(
            replace(self.cxt.a, {
                test: TestFunction(V_facet),
                trial: TrialFunction(V_facet)
            }))

        # Schur complement operator
        S = A11 - A10 * A00.inv * A01
        self.S = allocate_matrix(S,
                                 bcs=self.bcs,
                                 form_compiler_parameters=self.cxt.fc_params)
        self._assemble_S = create_assembly_callable(
            S,
            tensor=self.S,
            bcs=self.bcs,
            form_compiler_parameters=self.cxt.fc_params)

        self._assemble_S()
        Smat = self.S.petscmat

        # Nullspace for the reduced system
        nullspace = create_sc_nullspace(P, V, V_facet, pc.comm)

        if nullspace:
            Smat.setNullSpace(nullspace)

        # Set up KSP for the reduced problem
        sc_ksp = PETSc.KSP().create(comm=pc.comm)
        sc_ksp.setOptionsPrefix(prefix)
        sc_ksp.setOperators(Smat)
        sc_ksp.setUp()
        sc_ksp.setFromOptions()
        self.sc_ksp = sc_ksp

        # Set up rhs for the reduced problem
        F0 = AssembledVector(self.interior_residual)
        self.sc_rhs = Function(V_facet)
        self.sc_rhs_thunk = Function(V_facet)
        self._assemble_sc_rhs_thunk = create_assembly_callable(
            -A10 * A00.inv * F0,
            tensor=self.sc_rhs_thunk,
            form_compiler_parameters=self.cxt.fc_params)

        # Reconstruction calls
        u_facet = AssembledVector(self.trace_solution)
        self._assemble_interior_u = create_assembly_callable(
            A00.inv * (F0 - A01 * u_facet),
            tensor=self.interior_solution,
            form_compiler_parameters=self.cxt.fc_params)