Example no. 1
    def __init__(self, space):
        """
        Initialise limiter

        :param space : FunctionSpace instance
        """

        self.P1DG = space
        self.P1CG = FunctionSpace(self.P1DG.mesh(), 'CG',
                                  1)  # for min/max limits
        self.P0 = FunctionSpace(self.P1DG.mesh(), 'DG', 0)  # for centroids

        # Storage containers for cell means, max and mins
        self.centroids = Function(self.P0)
        self.max_field = Function(self.P1CG)
        self.min_field = Function(self.P1CG)

        self.centroid_solver = self._construct_centroid_solver()

        # Update min and max loop
        self._min_max_loop = """
for(int i = 0; i < maxq.dofs; i++) {
    maxq[i][0] = fmax(maxq[i][0],q[0][0]);
    minq[i][0] = fmin(minq[i][0],q[0][0]);
}
                             """
        # Perform limiting loop
        self._limit_kernel = """
Example no. 2
    def __init__(self, equation):
        """
        Initialise limiter

        :param equation: the equation, as we need the broken space attached to it
        """

        self.Vt = equation.space
        # check this is the right space, only currently working for 2D extruded mesh
        if self.Vt.extruded and self.Vt.mesh().topological_dimension() == 2:
            # check that horizontal degree is 1 and vertical degree is 2
            if self.Vt.ufl_element().degree()[0] != 1 or \
               self.Vt.ufl_element().degree()[1] != 2:
                raise ValueError('This is not the right limiter for this space.')
            # check that continuity of the spaces is correct
            # this will fail if the space does not use broken elements
            if self.Vt.ufl_element()._element.sobolev_space()[0].name != 'L2' or \
               self.Vt.ufl_element()._element.sobolev_space()[1].name != 'H1':
                raise ValueError('This is not the right limiter for this space.')
        else:
            logger.warning('This limiter may not work for the space you are using.')

        self.Q1DG = FunctionSpace(self.Vt.mesh(), 'DG', 1)  # space with only vertex DOFs
        self.vertex_limiter = VertexBasedLimiter(self.Q1DG)
        self.theta_hat = Function(self.Q1DG)  # theta function with only vertex DOFs
        self.w = Function(self.Vt)
        self.result = Function(self.Vt)
        par_loop(_weight_kernel, dx, {"weight": (self.w, INC)})
Example no. 3
    def __init__(self, space):
        """
        Initialise limiter

        :param space : FunctionSpace instance
        """

        self.P1DG = space
        self.P1CG = FunctionSpace(self.P1DG.mesh(), 'CG', 1)  # for min/max limits
        self.P0 = FunctionSpace(self.P1DG.mesh(), 'DG', 0)  # for centroids

        # Storage containers for cell means, max and mins
        self.centroids = Function(self.P0)
        self.max_field = Function(self.P1CG)
        self.min_field = Function(self.P1CG)

        self.centroid_solver = self._construct_centroid_solver()

        # Update min and max loop
        self._min_max_loop = """
for(int i = 0; i < maxq.dofs; i++) {
    maxq[i][0] = fmax(maxq[i][0],q[0][0]);
    minq[i][0] = fmin(minq[i][0],q[0][0]);
}
                             """
        # Perform limiting loop
        self._limit_kernel = """
Example no. 4
def test_ForcingCovariance_init(fs):
    "test init method of ForcingCovariance"

    # note: tests only handle case of a single process
    # need to think about testing more complicated cases

    # simple example in 1D

    sigma = np.log(1.)
    l = np.log(0.1)

    fc = ForcingCovariance(fs, sigma, l)

    n = Function(fs).vector().size()
    n_local = Function(fs).vector().local_size()

    M = PETSc.Mat().create()
    M.setSizes(((n_local, -1), (n_local, -1)))
    M.setFromOptions()
    M.setUp()
    start, end = M.getOwnershipRange()

    assert fc.nx == n
    assert fc.nx_local == n_local
    assert fc.function_space == fs
    assert_allclose(fc.sigma, sigma)
    assert_allclose(fc.l, l)
    assert_allclose(fc.cutoff, 1.e-3)
    assert_allclose(fc.regularization, 1.e-8)
    assert fc.local_startind == start
    assert fc.local_endind == end
    assert not fc.is_assembled
Example no. 5
    def predict_mean(self, coords, scale_mean=True):
        r"""
        Compute the predictive mean

        This method computes the predictive mean of data values at unmeasured locations. It returns
        the vector of predicted sensor values on the root process as numpy array. It requires only a
        small overhead above the computational work of finding the posterior mean (i.e. you get
        the mean value at new sensor locations for "free" once you have solved the posterior).

        The optional ``scale_mean`` argument determines if the solution is to be re-scaled
        by the model discrepancy scaling factor. This value is by default ``True``.
        To re-scale to match the FEM solution, pass ``scale_mean=False``.

        :param coords: Spatial coordinates at which the mean will be predicted. Must be a
                       2D Numpy array (or a 1D array, which will assume the second axis has length
                       1)
        :type coords: ndarray
        :param scale_mean: Boolean indicating if the mean should be scaled by the model
                           discrepancy scaling factor. Optional, default is ``True``
        :type scale_mean: bool
        :returns: FEM prediction at specified sensor locations as a numpy array on the root process.
                  All other processes will have a numpy array of length 0.
        :rtype: ndarray
        """

        try:
            scale_mean = bool(scale_mean)
        except (TypeError, ValueError):
            raise TypeError("scale_mean argument must be boolean-like")

        coords = np.array(coords, dtype=np.float64)
        if coords.ndim == 1:
            coords = np.reshape(coords, (-1, 1))
        assert coords.ndim == 2, "coords must be a 1d or 2d array"
        assert coords.shape[1] == self.data.get_n_dim(), \
            "axis 1 of coords must be the same length as the FEM dimension"

        if self.Cu is None:
            self.solve_prior()

        if self.params is None:
            raise ValueError("must set parameter values to make predictions")

        rho = np.exp(self.params[0])

        if scale_mean:
            scalefact = rho
        else:
            scalefact = 1.

        x = Function(self.G.function_space)

        self.solve_posterior(x)

        im = InterpolationMatrix(self.G.function_space, coords)

        mu = scalefact * im.interp_mesh_to_data(x.vector())

        im.destroy()

        return mu
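
A minimal usage sketch for the method above, under the assumption that a stat-fem model object (here called `model`, a hypothetical name) exposing this `predict_mean` has already been constructed; the sensor coordinates are arbitrary illustrative values.

from firedrake import COMM_WORLD
import numpy as np

# hypothetical coordinates of three unmeasured sensor locations for a 1D problem
new_coords = np.array([[0.15], [0.45], [0.85]])

# `model` is assumed to already exist and expose predict_mean as defined above
mu_scaled = model.predict_mean(new_coords)                   # scaled by rho = exp(params[0])
mu_fem = model.predict_mean(new_coords, scale_mean=False)    # unscaled, matches the FEM solution

# only the root process receives the predictions; other ranks get length-0 arrays
if COMM_WORLD.rank == 0:
    print(mu_scaled, mu_fem)
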
Example no. 6
def test_InterpolationMatrix(fs, coords):
    "test InterpolationMatrix with multiple processes"

    n_proc = COMM_WORLD.size

    nd = len(coords)

    vec = Function(fs).vector()

    im = InterpolationMatrix(fs, coords)

    if COMM_WORLD.rank == 0:
        gathered_sizes = (nd, nd)
    else:
        gathered_sizes = (0, 0)

    assert im.n_data == nd
    assert im.n_data_local == nd // n_proc
    assert im.n_mesh == vec.size()
    assert im.n_mesh_local == vec.local_size()
    assert_allclose(im.coords, coords)
    assert im.function_space == fs
    assert im.meshspace_vector.size() == vec.size()
    assert im.meshspace_vector.local_size() == vec.local_size()
    assert im.dataspace_distrib.getSizes() == (nd // n_proc, nd)
    assert im.dataspace_gathered.getSizes() == gathered_sizes
    assert im.interp.getSizes() == ((vec.local_size(), vec.size()),
                                    (nd // n_proc, nd))
    assert not im.is_assembled
Example no. 7
def solve_forcing_covariance(G, ls, rhs):
    """
    Solve the forcing covariance part of the stat FEM

    This function performs the basic solve needed to determine the
    prior covariance matrix in the stat-fem method. Two solves of 
    the FEM are required, in addition to a sparse matrix multiplication.
    The arguments provide the sparse Forcing Covariance matrix,
    the Firedrake Linear Solver object representing the FEM, and
    the RHS to be solved for this particular iteration.

    Note that this solve function temporarily turns off the BCs for
    the stiffness matrix. This is because Dirichlet BCs will enforce
    strong boundary conditions on the FEM solves, which is not desired
    here.

    :param G: Forcing covariance matrix to be used in the solve.
    :type G: ForcingCovariance
    :param ls: Firedrake Linear Solver to be used in the solve.
    :type ls: Firedrake LinearSolver
    :param rhs: RHS vector to be used in the solve
    :type rhs: Firedrake Vector
    :returns: Solution to :math:`A^{-1}GA^{-1}b` where :math:`A` is the FEM
              stiffness matrix and :math:`b` is the RHS vector.
    :rtype: Firedrake Vector
    """

    if not isinstance(G, ForcingCovariance):
        raise TypeError("G must be a ForcingCovariance object")
    if not isinstance(ls, LinearSolver):
        raise TypeError("ls must be a firedrake LinearSolver")
    if not isinstance(rhs, Vector):
        raise TypeError("rhs must be a firedrake vector")

    # turn off BC application temporarily

    bcs = ls.A.bcs
    ls.A.bcs = None
    
    rhs_working = rhs.copy()
    x = Function(G.function_space).vector()
    ls.solve(x, rhs_working)
    G.mult(x, rhs_working)
    ls.solve(x, rhs_working)

    # turn BCs back on

    ls.A.bcs = bcs
    
    return x.copy()
Example no. 8
def test_solve_forcing_covariance(comm, fs, A, b, fc, A_numpy, cov):
    "test solve_forcing_covariance"

    rhs = Function(fs).vector()
    rhs.set_local(np.ones(fc.get_nx_local()))

    ls = LinearSolver(A)

    result = solve_forcing_covariance(fc, ls, rhs)

    result_actual = result.gather()

    result_expected = np.linalg.solve(A_numpy, np.ones(nx + 1))
    result_expected = np.dot(cov, result_expected)
    result_expected = np.linalg.solve(A_numpy, result_expected)

    assert_allclose(result_expected, result_actual, atol=1.e-10)
Example no. 9
    def __init__(self, space):
        """
        Initialise limiter

        :param space : FunctionSpace instance
        """

        self.P1DG = space
        self.P1CG = FunctionSpace(self.P1DG.mesh(), 'CG', 1)  # for min/max limits
        self.P0 = FunctionSpace(self.P1DG.mesh(), 'DG', 0)  # for centroids

        # Storage containers for cell means, max and mins
        self.centroids = Function(self.P0)
        self.centroids_rhs = Function(self.P0)
        self.max_field = Function(self.P1CG)
        self.min_field = Function(self.P1CG)

        self.centroid_solver = self._construct_centroid_solver()

        # Update min and max loop
        domain = "{[i]: 0 <= i < maxq.dofs}"
        instructions = """
        for i
            maxq[i] = fmax(maxq[i], q[0])
            minq[i] = fmin(minq[i], q[0])
        end
        """
        self._min_max_loop = (domain, instructions)

        # Perform limiting loop
        domain = "{[i, ii]: 0 <= i < q.dofs and 0 <= ii < q.dofs}"
        instructions = """
        <float64> alpha = 1
        <float64> qavg = qbar[0, 0]
        for i
            <float64> _alpha1 = fmin(alpha, fmin(1, (qmax[i] - qavg)/(q[i] - qavg)))
            <float64> _alpha2 = fmin(alpha, fmin(1, (qavg - qmin[i])/(qavg - q[i])))
            alpha = if(q[i] > qavg, _alpha1, if(q[i] < qavg, _alpha2, alpha))
        end
        for ii
            q[ii] = qavg + alpha * (q[ii] - qavg)
        end
        """
        self._limit_kernel = (domain, instructions)
Example no. 10
    def solve_prior(self):
        r"""
        Solve base (prior) FEM plus covariance interpolated to the data locations

        This method solves the prior FEM and covariance interpolated to the sensor locations.
        It does not require setting parameter values, as the model discrepancy does not
        influence these results. The covariance is cached as it is expensive to compute
        and is re-used in all other solves.

        In addition to caching the results, the method returns the solution as numpy arrays
        on the root process (rank 0).

        Note that unlike the solve done in the meshspace, this uses a return value rather than a
        Firedrake/PETSc style interface to place the solution in a pre-allocated ``Function``.
        This is because each process has a different array size, so would require correctly
        pre-allocating arrays of different lengths on each process.

        :returns: FEM prior mean and covariance (as a tuple of numpy arrays) on the root process.
                  Non-root processes return numpy arrays of shape ``(0,)`` (mean) and ``(0, 0)``
                  (covariance).
        :rtype: tuple of ndarrays
        """

        # form interpolated prior covariance across all ensemble processes

        self.Cu = interp_covariance_to_data(self.im, self.G, self.solver,
                                            self.im, self.ensemble_comm)

        # solve base FEM (prior mean) and interpolate to data space on root

        self.x = Function(self.G.function_space)

        if self.ensemble_comm.rank == 0:
            self.solver.solve(self.x, self.b)
            self.mu = self.im.interp_mesh_to_data(self.x.vector())
        else:
            self.mu = np.zeros(0)

        return self.mu, self.Cu
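
A short sketch of the calling convention described above, again assuming an already-constructed stat-fem model object (the name `model` is hypothetical).

# `model` is assumed to expose solve_prior as defined above
mu, Cu = model.solve_prior()   # root gets the arrays; other ranks get shapes (0,) and (0, 0)

# the interpolated covariance is cached on the object, so subsequent solves
# (posterior, predictions) reuse it rather than recomputing it
assert model.Cu is Cu
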
Example no. 11
def test_ForcingCovariance_mult(fs, fc, cov):
    "test the multiplication method of ForcingCovariance"

    fc.assemble()

    x = Function(fs).vector()
    x.set_local(np.ones(x.local_size()))

    y = Function(fs).vector()

    fc.mult(x, y)

    ygathered = y.gather()

    assert_allclose(ygathered, np.dot(cov, np.ones(nx + 1)))
Example no. 12
    def __init__(self, space):
        """
        Initialise limiter

        :param space : FunctionSpace instance
        """

        if utils.complex_mode:
            raise ValueError(
                "We haven't decided what limiting complex valued fields means. Please get in touch if you have need."
            )

        self.P1DG = space
        self.P1CG = FunctionSpace(self.P1DG.mesh(), 'CG',
                                  1)  # for min/max limits
        self.P0 = FunctionSpace(self.P1DG.mesh(), 'DG', 0)  # for centroids

        # Storage containers for cell means, max and mins
        self.centroids = Function(self.P0)
        self.centroids_rhs = Function(self.P0)
        self.max_field = Function(self.P1CG)
        self.min_field = Function(self.P1CG)

        self.centroid_solver = self._construct_centroid_solver()

        # Update min and max loop
        domain = "{[i]: 0 <= i < maxq.dofs}"
        instructions = """
        for i
            maxq[i] = fmax(maxq[i], q[0])
            minq[i] = fmin(minq[i], q[0])
        end
        """
        self._min_max_loop = (domain, instructions)

        # Perform limiting loop
        domain = "{[i, ii]: 0 <= i < q.dofs and 0 <= ii < q.dofs}"
        instructions = """
        <float64> alpha = 1
        <float64> qavg = qbar[0, 0]
        for i
            <float64> _alpha1 = fmin(alpha, fmin(1, (qmax[i] - qavg)/(q[i] - qavg)))
            <float64> _alpha2 = fmin(alpha, fmin(1, (qavg - qmin[i])/(qavg - q[i])))
            alpha = _alpha1 if q[i] > qavg else (_alpha2 if q[i] < qavg else alpha)
        end
        for ii
            q[ii] = qavg + alpha * (q[ii] - qavg)
        end
        """
        self._limit_kernel = (domain, instructions)
Example no. 13
    def new_snes_ctx(pc, op, bcs, mat_type, fcp=None):
        """ Create a new SNES contex for nested preconditioning
        """
        from firedrake.variational_solver import NonlinearVariationalProblem
        from firedrake.function import Function
        from firedrake.ufl_expr import action
        from firedrake.dmhooks import get_appctx
        from firedrake.solving_utils import _SNESContext

        dm = pc.getDM()
        old_appctx = get_appctx(dm).appctx
        u = Function(op.arguments()[-1].function_space())
        F = action(op, u)
        nprob = NonlinearVariationalProblem(F,
                                            u,
                                            bcs=bcs,
                                            J=op,
                                            form_compiler_parameters=fcp)
        nctx = _SNESContext(nprob, mat_type, mat_type, old_appctx)
        return nctx
Example no. 14
 def func(self, *args, **kwargs):
     if len(args) > 0 and isinstance(
             args[0], FunctionSpace):
         # Extract size from V
         if 'size' in kwargs.keys():
             raise TypeError(
                 "Cannot specify 'size' when generating a random function from 'V'"
             )
         V = args[0]
         f = Function(V)
         args = args[1:]
         with f.dat.vec_wo as v:
             kwargs['size'] = (v.local_size, )
             v.array[:] = getattr(self, c_a)(*args,
                                             **kwargs)
         return f
     else:
         # forward to the original implementation
         return getattr(super(_Wrapper, self),
                        c_a)(*args, **kwargs)
Example no. 15
def test_solve_forcing_covariance_parallel(my_ensemble, comm, fs, A, b, fc,
                                           A_numpy, cov):
    "test that solve_forcing_covariance can be called independently from an ensemble process"

    if my_ensemble.ensemble_comm.rank == 0:

        rhs = Function(fs).vector()
        rhs.set_local(np.ones(fc.get_nx_local()))

        ls = LinearSolver(A)

        result = solve_forcing_covariance(fc, ls, rhs)

        result_actual = result.gather()

        result_expected = np.linalg.solve(A_numpy, np.ones(nx + 1))
        result_expected = np.dot(cov, result_expected)
        result_expected = np.linalg.solve(A_numpy, result_expected)

        assert_allclose(result_expected, result_actual, atol=1.e-10)

    elif my_ensemble.ensemble_comm.rank == 1:

        rhs = Function(fs).vector()
        rhs.set_local(0.5 * np.ones(fc.get_nx_local()))

        ls = LinearSolver(A)

        result = solve_forcing_covariance(fc, ls, rhs)

        result_actual = result.gather()

        result_expected = np.linalg.solve(A_numpy, 0.5 * np.ones(nx + 1))
        result_expected = np.dot(cov, result_expected)
        result_expected = np.linalg.solve(A_numpy, result_expected)

        assert_allclose(result_expected, result_actual, atol=1.e-10)
Example no. 16
    def __init__(self, spaces, name=None):
        """
        :param spaces: a list (or tuple) of :class:`FunctionSpace`\s

        The function space may be created as ::

            V = MixedFunctionSpace(spaces)

        ``spaces`` may consist of multiple occurrences of the same space: ::

            P1  = FunctionSpace(mesh, "CG", 1)
            P2v = VectorFunctionSpace(mesh, "Lagrange", 2)

            ME  = MixedFunctionSpace([P2v, P1, P1, P1])
        """

        if self._initialized:
            return
        self._spaces = [
            IndexedFunctionSpace(s, i, self)
            for i, s in enumerate(flatten(spaces))
        ]
        self._mesh = self._spaces[0].mesh()
        self._ufl_element = ufl.MixedElement(
            *[fs.ufl_element() for fs in self._spaces])
        self.name = name or '_'.join(str(s.name) for s in self._spaces)
        self.rank = 1
        self._index = None
        self._initialized = True
        dm = PETSc.DMShell().create()
        from firedrake.function import Function
        with Function(self).dat.vec_ro as v:
            dm.setGlobalVector(v.duplicate())
        dm.setAttr('__fs__', weakref.ref(self))
        dm.setCreateFieldDecomposition(self.create_field_decomp)
        dm.setCreateSubDM(self.create_subdm)
        self._dm = dm
        self._ises = self.dof_dset.field_ises
        self._subspaces = []
Example no. 17
    def __init__(self, space):
        """
        Initialise limiter

        :param space : FunctionSpace instance
        """

        self.P1DG = space
        self.P1CG = FunctionSpace(self.P1DG.mesh(), 'CG',
                                  1)  # for min/max limits
        self.P0 = FunctionSpace(self.P1DG.mesh(), 'DG', 0)  # for centroids

        # Storage containers for cell means, max and mins
        self.centroids = Function(self.P0)
        self.centroids_rhs = Function(self.P0)
        self.max_field = Function(self.P1CG)
        self.min_field = Function(self.P1CG)

        self.centroid_solver = self._construct_centroid_solver()

        # Update min and max loop
        domain = "{[i]: 0 <= i < maxq.dofs}"
        instructions = """
        for i
            maxq[i] = fmax(maxq[i], q[0])
            minq[i] = fmin(minq[i], q[0])
        end
        """
        self._min_max_loop = (domain, instructions)

        # Perform limiting loop
        domain = "{[i, ii]: 0 <= i < q.dofs and 0 <= ii < q.dofs}"
        instructions = """
        <float64> alpha = 1
        <float64> qavg = qbar[0, 0]
        for i
            <float64> _alpha1 = fmin(alpha, fmin(1, (qmax[i] - qavg)/(q[i] - qavg)))
            <float64> _alpha2 = fmin(alpha, fmin(1, (qavg - qmin[i])/(qavg - q[i])))
            alpha = if(q[i] > qavg, _alpha1, if(q[i] < qavg, _alpha2, alpha))
        end
        for ii
            q[ii] = qavg + alpha * (q[ii] - qavg)
        end
        """
        self._limit_kernel = (domain, instructions)
Example no. 18
def test_InterpolationMatrix_interp_mesh_to_data(fs, coords, meshcoords):
    "test method to interpolate from distributed mesh to data gathered at root"

    # simple 1D test

    nd = len(coords)

    im = InterpolationMatrix(fs, coords)
    im.assemble()

    input_ordered = np.array([3., 2., 7., 4., 0., 0., 2., 1., 1., 1., 5.])

    f = Function(fs).vector()

    meshcoords_ordered = np.linspace(0., 1., 11)

    with f.dat.vec as vec:
        imin, imax = vec.getOwnershipRange()
        for i in range(imin, imax):
            vec.setValue(
                i,
                input_ordered[np.where(meshcoords_ordered == meshcoords[i])])

    if COMM_WORLD.rank == 0:
        expected = np.array([1., 0., 5.5, 3.25])
    else:
        expected = np.zeros(0)

    out = im.interp_mesh_to_data(f)

    assert_allclose(out, expected, atol=1.e-10)

    # failure due to bad input sizes

    mesh2 = UnitIntervalMesh(12)
    V2 = FunctionSpace(mesh2, "CG", 1)

    f2 = Function(V2).vector()
    f2.set_local(np.ones(f2.local_size()))

    with pytest.raises(AssertionError):
        im.interp_mesh_to_data(f2)

    im.destroy()
Example no. 19
class ThetaLimiter(object):
    """
    A vertex based limiter for fields in the DG1xCG2 space,
    i.e. temperature variables. This acts like the vertex-based
    limiter implemented in Firedrake, but in addition corrects
    the central nodes to prevent new maxima or minima forming.
    """

    def __init__(self, equation):
        """
        Initialise limiter

        :param equation: the equation, as we need the broken space attached to it
        """

        self.Vt = equation.space
        # check this is the right space, only currently working for 2D extruded mesh
        if self.Vt.extruded and self.Vt.mesh().topological_dimension() == 2:
            # check that horizontal degree is 1 and vertical degree is 2
            if self.Vt.ufl_element().degree()[0] != 1 or \
               self.Vt.ufl_element().degree()[1] != 2:
                raise ValueError('This is not the right limiter for this space.')
            # check that continuity of the spaces is correct
            # this will fail if the space does not use broken elements
            if self.Vt.ufl_element()._element.sobolev_space()[0].name != 'L2' or \
               self.Vt.ufl_element()._element.sobolev_space()[1].name != 'H1':
                raise ValueError('This is not the right limiter for this space.')
        else:
            logger.warning('This limiter may not work for the space you are using.')

        self.Q1DG = FunctionSpace(self.Vt.mesh(), 'DG', 1)  # space with only vertex DOFs
        self.vertex_limiter = VertexBasedLimiter(self.Q1DG)
        self.theta_hat = Function(self.Q1DG)  # theta function with only vertex DOFs
        self.w = Function(self.Vt)
        self.result = Function(self.Vt)
        par_loop(_weight_kernel, dx, {"weight": (self.w, INC)})

    def copy_vertex_values(self, field):
        """
        Copies the vertex values from temperature space to
        Q1DG space which only has vertices.
        """
        par_loop(_copy_into_Q1DG_loop, dx,
                 {"theta": (field, READ),
                  "theta_hat": (self.theta_hat, RW)})

    def copy_vertex_values_back(self, field):
        """
        Copies the vertex values back from the Q1DG space to
        the original temperature space.
        """
        par_loop(_copy_from_Q1DG_loop, dx,
                 {"theta": (field, RW),
                  "theta_hat": (self.theta_hat, READ)})

    def check_midpoint_values(self, field):
        """
        Checks the midpoint field values are less than the maximum
        and more than the minimum values. Amends them to the average
        if they are not.
        """
        par_loop(_check_midpoint_values_loop, dx,
                 {"theta": (field, RW)})

    def remap_to_embedded_space(self, field):
        """
        Remap from DG space to embedded DG space.
        """

        self.result.assign(0.)
        par_loop(_average_kernel, dx, {"vrec": (self.result, INC),
                                       "v_b": (field, READ),
                                       "weight": (self.w, READ)})
        field.assign(self.result)

    def apply(self, field):
        """
        The application of the limiter to the theta-space field.
        """
        assert field.function_space() == self.Vt, \
            "Given field does not belong to this object's function space"

        self.copy_vertex_values(field)
        self.vertex_limiter.apply(self.theta_hat)
        self.copy_vertex_values_back(field)
        self.check_midpoint_values(field)
        self.remap_to_embedded_space(field)
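
A brief usage sketch for the class above; `equation` is assumed to be an object whose `space` attribute is the DG1 (horizontal) x CG2 (vertical) temperature space, and `theta` a Function in that space (both names are illustrative, not defined here).

# `equation.space` is assumed to be the broken Vt temperature space checked for in __init__
limiter = ThetaLimiter(equation)

# limit theta in place, e.g. after each transport step
limiter.apply(theta)
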
Example no. 20
def test_ForcingCovariance_get_nx_local(fs, fc):
    "test the get_nx_local method of ForcingCovariance"

    n_local = Function(fs).vector().local_size()

    assert fc.get_nx_local() == n_local
Example no. 21
class VertexBasedLimiter(Limiter):
    """
    A vertex based limiter for P1DG fields.

    This limiter implements the vertex-based limiting scheme described in
    Dmitri Kuzmin, "A vertex-based hierarchical slope limiter for p-adaptive
    discontinuous Galerkin methods". J. Comp. Appl. Maths (2010)
    http://dx.doi.org/10.1016/j.cam.2009.05.028
    """

    def __init__(self, space):
        """
        Initialise limiter

        :param space : FunctionSpace instance
        """

        self.P1DG = space
        self.P1CG = FunctionSpace(self.P1DG.mesh(), 'CG', 1)  # for min/max limits
        self.P0 = FunctionSpace(self.P1DG.mesh(), 'DG', 0)  # for centroids

        # Storage containers for cell means, max and mins
        self.centroids = Function(self.P0)
        self.centroids_rhs = Function(self.P0)
        self.max_field = Function(self.P1CG)
        self.min_field = Function(self.P1CG)

        self.centroid_solver = self._construct_centroid_solver()

        # Update min and max loop
        domain = "{[i]: 0 <= i < maxq.dofs}"
        instructions = """
        for i
            maxq[i] = fmax(maxq[i], q[0])
            minq[i] = fmin(minq[i], q[0])
        end
        """
        self._min_max_loop = (domain, instructions)

        # Perform limiting loop
        domain = "{[i, ii]: 0 <= i < q.dofs and 0 <= ii < q.dofs}"
        instructions = """
        <float64> alpha = 1
        <float64> qavg = qbar[0, 0]
        for i
            <float64> _alpha1 = fmin(alpha, fmin(1, (qmax[i] - qavg)/(q[i] - qavg)))
            <float64> _alpha2 = fmin(alpha, fmin(1, (qavg - qmin[i])/(qavg - q[i])))
            alpha = if(q[i] > qavg, _alpha1, if(q[i] < qavg, _alpha2, alpha))
        end
        for ii
            q[ii] = qavg + alpha * (q[ii] - qavg)
        end
        """
        self._limit_kernel = (domain, instructions)

    def _construct_centroid_solver(self):
        """
        Constructs a linear problem for computing the centroids

        :return: LinearSolver instance
        """
        u = TrialFunction(self.P0)
        v = TestFunction(self.P0)
        a = assemble(u * v * dx)
        return LinearSolver(a, solver_parameters={'ksp_type': 'preonly',
                                                  'pc_type': 'bjacobi',
                                                  'sub_pc_type': 'ilu'})

    def _update_centroids(self, field):
        """
        Update centroid values
        """
        assemble(TestFunction(self.P0) * field * dx, tensor=self.centroids_rhs)
        self.centroid_solver.solve(self.centroids, self.centroids_rhs)

    def compute_bounds(self, field):
        """
        Only computes min and max bounds of neighbouring cells
        """
        self._update_centroids(field)
        self.max_field.assign(-1.0e10)  # small number
        self.min_field.assign(1.0e10)  # big number

        par_loop(self._min_max_loop,
                 dx,
                 {"maxq": (self.max_field, MAX),
                  "minq": (self.min_field, MIN),
                  "q": (self.centroids, READ)},
                 is_loopy_kernel=True)

    def apply_limiter(self, field):
        """
        Only applies limiting loop on the given field
        """
        par_loop(self._limit_kernel, dx,
                 {"qbar": (self.centroids, READ),
                  "q": (field, RW),
                  "qmax": (self.max_field, READ),
                  "qmin": (self.min_field, READ)},
                 is_loopy_kernel=True)

    def apply(self, field):
        """
        Re-computes centroids and applies limiter to given field
        """
        assert field.function_space() == self.P1DG, \
            "Given field does not belong to this object's function space"

        self.compute_bounds(field)
        self.apply_limiter(field)
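
A self-contained usage sketch for the limiter class above, assuming a standard Firedrake installation; the discontinuous initial field is arbitrary and only serves to illustrate the call sequence.

from firedrake import (UnitSquareMesh, FunctionSpace, Function,
                       SpatialCoordinate, conditional)

mesh = UnitSquareMesh(16, 16)
P1DG = FunctionSpace(mesh, "DG", 1)

# an arbitrary field with a sharp jump, the kind that develops over/undershoots
x, y = SpatialCoordinate(mesh)
q = Function(P1DG).interpolate(conditional(x < 0.5, 1.0, 0.0))

limiter = VertexBasedLimiter(P1DG)
limiter.apply(q)   # recomputes centroids and bounds, then limits q in place
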
Example no. 22
    def from_meshmode(self, mm_field, out=None):
        r"""
        Transport meshmode field from :attr:`discr` into an
        appropriate firedrake function space.

        If *out* is *None*, values at any firedrake
        nodes associated to NO meshmode nodes are zeroed out.
        If *out* is supplied, values at nodes associated to NO meshmode nodes
        are not modified.

        :arg mm_field: Either

            * A :class:`~meshmode.dof_array.DOFArray` representing
              a field of shape *tuple()* on :attr:`discr`
            * A :class:`numpy.ndarray` of dtype "object" with
              entries of class :class:`~meshmode.dof_array.DOFArray`
              representing a field of shape *mm_field.shape*
              on :attr:`discr`

            See :class:`~meshmode.dof_array.DOFArray` for further requirements.
            The :attr:`group_nr` entry of each
            :class:`~meshmode.dof_array.DOFArray`
            must be of shape *(nelements, nunit_dofs)* and
            the *element_dtype* must match that used for
            :class:`firedrake.function.Function`\ s

        :arg out: If *None* then ignored, otherwise a
            :class:`firedrake.function.Function`
            of the right function space for the transported data
            to be stored in. The shape of its function space must
            match the shape of *mm_field*

        :return: a :class:`firedrake.function.Function` holding the transported
            data (*out*, if *out* was not *None*)
        """
        # All firedrake functions are the same dtype
        dtype = self.firedrake_fspace().mesh().coordinates.dat.data.dtype
        self._validate_field(mm_field, "mm_field", dtype=dtype)

        # get the shape of mm_field
        from meshmode.dof_array import DOFArray
        if not isinstance(mm_field, DOFArray):
            fspace_shape = mm_field.shape
        else:
            fspace_shape = ()

        # make sure out is a firedrake function in an appropriate
        # function space
        if out is not None:
            self._validate_function(out, "out", fspace_shape, dtype)
        else:
            from firedrake.function import Function
            # Translate shape so that we don't always get a TensorFunctionSpace,
            # but instead get FunctionSpace or VectorFunctionSpace when
            # reasonable
            shape = fspace_shape
            if shape == ():
                shape = None
            elif len(shape) == 1:
                shape = shape[0]
            # make a function filled with zeros
            out = Function(self.firedrake_fspace(shape))
            out.dat.data[:] = 0.0

        out_data = out.dat.data
        # Handle firedrake dropping dimensions
        if len(out.dat.data.shape) != 1 + len(fspace_shape):
            shape = (out.dat.data.shape[0],) + fspace_shape
            out_data = out_data.reshape(shape)

        def resample_and_reorder(fd_data, dof_array):
            # pull data into numpy
            dof_np = dof_array.array_context.to_numpy(dof_array[self.group_nr])
            # resample the data and store it in firedrake ordering
            fd_data[self.mm2fd_node_mapping] = \
                np.einsum("ij,kj->ik", dof_np, self._resampling_mat_mm2fd)

        # If scalar, just reorder and resample out
        if fspace_shape == ():
            resample_and_reorder(out_data, mm_field)
        else:
            # otherwise, have to grab each dofarray and the corresponding
            # data from *function_data*
            for multi_index in np.ndindex(fspace_shape):
                # have to be careful to take view and not copy
                index = (np.s_[:],) + multi_index
                fd_data = out_data[index]
                dof_array = mm_field[multi_index]
                resample_and_reorder(fd_data, dof_array)

        return out
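
A hedged round-trip sketch for the method above, assuming the connection-builder API of recent meshmode versions (`build_connection_from_firedrake` and a PyOpenCL array context); exact import locations and signatures may differ between versions.

import pyopencl as cl
from meshmode.array_context import PyOpenCLArrayContext
from meshmode.interop.firedrake import build_connection_from_firedrake
from firedrake import (UnitSquareMesh, FunctionSpace, Function,
                       SpatialCoordinate, sin, pi)

cl_ctx = cl.create_some_context()
queue = cl.CommandQueue(cl_ctx)
actx = PyOpenCLArrayContext(queue)

mesh = UnitSquareMesh(8, 8)
V = FunctionSpace(mesh, "DG", 2)   # interop expects a discontinuous Lagrange space

# connection between the firedrake space and a meshmode discretization
conn = build_connection_from_firedrake(actx, V)

x, y = SpatialCoordinate(mesh)
f = Function(V).interpolate(sin(pi * x) * sin(pi * y))

mm_field = conn.from_firedrake(f, actx=actx)   # DOFArray on conn.discr
f_back = conn.from_meshmode(mm_field)          # back to a firedrake Function via the method above
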
Example no. 23
def test_ForcingCovariance_mult_parallel(my_ensemble, fs, fc, cov):
    "test that the multiplication method of ForcingCovariance can be called independently in an ensemble"

    fc.assemble()

    if my_ensemble.ensemble_comm.rank == 0:

        x = Function(fs).vector()
        x.set_local(np.ones(x.local_size()))

        y = Function(fs).vector()

        fc.mult(x, y)

        ygathered = y.gather()

        assert_allclose(ygathered, np.dot(cov, np.ones(nx + 1)))

    elif my_ensemble.ensemble_comm.rank == 1:

        x = Function(fs).vector()
        x.set_local(0.5 * np.ones(x.local_size()))

        y = Function(fs).vector()

        fc.mult(x, y)

        ygathered = y.gather()

        assert_allclose(ygathered, np.dot(cov, 0.5 * np.ones(nx + 1)))
Example no. 24
class SCPC(SCBase):

    needs_python_pmat = True

    """A Slate-based python preconditioner implementation of
    static condensation for problems with up to three fields.
    """

    @timed_function("SCPCInit")
    def initialize(self, pc):
        """Set up the problem context. This takes the incoming
        three-field system and constructs the static
        condensation operators using Slate expressions.

        A KSP is created for the reduced system. The eliminated
        variables are recovered via back-substitution.
        """

        from firedrake.assemble import (allocate_matrix,
                                        create_assembly_callable)
        from firedrake.bcs import DirichletBC
        from firedrake.function import Function
        from firedrake.functionspace import FunctionSpace
        from firedrake.interpolation import interpolate

        prefix = pc.getOptionsPrefix() + "condensed_field_"
        _, P = pc.getOperators()
        self.cxt = P.getPythonContext()
        if not isinstance(self.cxt, ImplicitMatrixContext):
            raise ValueError("Context must be an ImplicitMatrixContext")

        self.bilinear_form = self.cxt.a

        # Retrieve the mixed function space
        W = self.bilinear_form.arguments()[0].function_space()
        if len(W) > 3:
            raise NotImplementedError("Only supports up to three function spaces.")

        elim_fields = PETSc.Options().getString(pc.getOptionsPrefix()
                                                + "pc_sc_eliminate_fields",
                                                None)
        if elim_fields:
            elim_fields = [int(i) for i in elim_fields.split(',')]
        else:
            # By default, we condense down to the last field in the
            # mixed space.
            elim_fields = [i for i in range(0, len(W) - 1)]

        condensed_fields = list(set(range(len(W))) - set(elim_fields))
        if len(condensed_fields) != 1:
            raise NotImplementedError("Cannot condense to more than one field")

        c_field, = condensed_fields

        # Need to duplicate a space which is NOT
        # associated with a subspace of a mixed space.
        Vc = FunctionSpace(W.mesh(), W[c_field].ufl_element())
        bcs = []
        cxt_bcs = self.cxt.row_bcs
        for bc in cxt_bcs:
            if bc.function_space().index != c_field:
                raise NotImplementedError("Strong BC set on unsupported space")
            if isinstance(bc.function_arg, Function):
                bc_arg = interpolate(bc.function_arg, Vc)
            else:
                # Constants don't need to be interpolated
                bc_arg = bc.function_arg
            bcs.append(DirichletBC(Vc, bc_arg, bc.sub_domain))

        mat_type = PETSc.Options().getString(prefix + "mat_type", "aij")

        self.c_field = c_field
        self.condensed_rhs = Function(Vc)
        self.residual = Function(W)
        self.solution = Function(W)

        # Get expressions for the condensed linear system
        A = Tensor(self.bilinear_form)
        reduced_sys = self.condensed_system(A, self.residual, elim_fields)
        S_expr = reduced_sys.lhs
        r_expr = reduced_sys.rhs

        # Construct the condensed right-hand side
        self._assemble_Srhs = create_assembly_callable(
            r_expr,
            tensor=self.condensed_rhs,
            form_compiler_parameters=self.cxt.fc_params)

        # Allocate and set the condensed operator
        self.S = allocate_matrix(S_expr,
                                 bcs=bcs,
                                 form_compiler_parameters=self.cxt.fc_params,
                                 mat_type=mat_type)
        self._assemble_S = create_assembly_callable(
            S_expr,
            tensor=self.S,
            bcs=bcs,
            form_compiler_parameters=self.cxt.fc_params,
            mat_type=mat_type)

        self._assemble_S()
        self.S.force_evaluation()
        Smat = self.S.petscmat

        # Get nullspace for the condensed operator (if any).
        # This is provided as a user-specified callback which
        # returns the basis for the nullspace.
        nullspace = self.cxt.appctx.get("condensed_field_nullspace", None)
        if nullspace is not None:
            nsp = nullspace(Vc)
            Smat.setNullSpace(nsp.nullspace(comm=pc.comm))

        # Set up ksp for the condensed problem
        c_ksp = PETSc.KSP().create(comm=pc.comm)
        c_ksp.incrementTabLevel(1, parent=pc)
        c_ksp.setOptionsPrefix(prefix)
        c_ksp.setOperators(Smat)
        c_ksp.setUp()
        c_ksp.setFromOptions()
        self.condensed_ksp = c_ksp

        # Set up local solvers for backwards substitution
        self.local_solvers = self.local_solver_calls(A, self.residual,
                                                     self.solution,
                                                     elim_fields)

    def condensed_system(self, A, rhs, elim_fields):
        """Forms the condensed linear system by eliminating
        specified unknowns.

        :arg A: A Slate Tensor containing the mixed bilinear form.
        :arg rhs: A firedrake function for the right-hand side.
        :arg elim_fields: An iterable of field indices to eliminate.
        """

        from firedrake.slate.static_condensation.la_utils import condense_and_forward_eliminate

        return condense_and_forward_eliminate(A, rhs, elim_fields)

    def local_solver_calls(self, A, rhs, x, elim_fields):
        """Provides solver callbacks for inverting local operators
        and reconstructing eliminated fields.

        :arg A: A Slate Tensor containing the mixed bilinear form.
        :arg rhs: A firedrake function for the right-hand side.
        :arg x: A firedrake function for the solution.
        :arg elim_fields: An iterable of eliminated field indices
                          to recover.
        """

        from firedrake.slate.static_condensation.la_utils import backward_solve
        from firedrake.assemble import create_assembly_callable

        fields = x.split()
        systems = backward_solve(A, rhs, x, reconstruct_fields=elim_fields)

        local_solvers = []
        for local_system in systems:
            Ae = local_system.lhs
            be = local_system.rhs
            i, = local_system.field_idx
            local_solve = Ae.solve(be, decomposition="PartialPivLU")
            solve_call = create_assembly_callable(
                local_solve,
                tensor=fields[i],
                form_compiler_parameters=self.cxt.fc_params)
            local_solvers.append(solve_call)

        return local_solvers

    @timed_function("SCPCUpdate")
    def update(self, pc):
        """Update by assembling into the KSP operator. No
        need to reconstruct symbolic objects.
        """

        self._assemble_S()
        self.S.force_evaluation()

    def forward_elimination(self, pc, x):
        """Perform the forward elimination of fields and
        provide the reduced right-hand side for the condensed
        system.

        :arg pc: a Preconditioner instance.
        :arg x: a PETSc vector containing the incoming right-hand side.
        """

        with self.residual.dat.vec_wo as v:
            x.copy(v)

        # Now assemble residual for the reduced problem
        self._assemble_Srhs()

    def sc_solve(self, pc):
        """Solve the condensed linear system for the
        condensed field.

        :arg pc: a Preconditioner instance.
        """

        with self.condensed_rhs.dat.vec_ro as rhs:
            if self.condensed_ksp.getInitialGuessNonzero():
                acc = self.solution.split()[self.c_field].dat.vec
            else:
                acc = self.solution.split()[self.c_field].dat.vec_wo
            with acc as sol:
                self.condensed_ksp.solve(rhs, sol)

    def backward_substitution(self, pc, y):
        """Perform the backwards recovery of eliminated fields.

        :arg pc: a Preconditioner instance.
        :arg y: a PETSc vector for placing the resulting fields.
        """

        # Recover eliminated unknowns
        for local_solver_call in self.local_solvers:
            local_solver_call()

        with self.solution.dat.vec_ro as w:
            w.copy(y)

    def view(self, pc, viewer=None):
        """Viewer calls for the various configurable objects in this PC."""

        viewer.printfASCII("Static condensation preconditioner\n")
        viewer.printfASCII("KSP to solve the reduced system:\n")
        self.condensed_ksp.view(viewer=viewer)
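
For context, the options read in initialize above (pc_sc_eliminate_fields on the PC's own prefix, plus the condensed_field_ inner prefix) correspond to a solver-parameters dictionary along these lines; a sketch assuming the class is exposed as firedrake.SCPC and the outer operator is matrix-free, with the inner solver choices purely illustrative.

solver_parameters = {
    "mat_type": "matfree",
    "ksp_type": "preonly",
    "pc_type": "python",
    "pc_python_type": "firedrake.SCPC",
    # eliminate fields 0 and 1, condensing onto the remaining field
    "pc_sc_eliminate_fields": "0, 1",
    # options for the condensed system (prefix 'condensed_field_')
    "condensed_field_ksp_type": "cg",
    "condensed_field_pc_type": "gamg",
    "condensed_field_mat_type": "aij",
}
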
Example no. 25
    def initialize(self, pc):
        """Set up the problem context. This takes the incoming
        three-field system and constructs the static
        condensation operators using Slate expressions.

        A KSP is created for the reduced system. The eliminated
        variables are recovered via back-substitution.
        """

        from firedrake.assemble import (allocate_matrix,
                                        create_assembly_callable)
        from firedrake.bcs import DirichletBC
        from firedrake.function import Function
        from firedrake.functionspace import FunctionSpace
        from firedrake.interpolation import interpolate

        prefix = pc.getOptionsPrefix() + "condensed_field_"
        _, P = pc.getOperators()
        self.cxt = P.getPythonContext()
        if not isinstance(self.cxt, ImplicitMatrixContext):
            raise ValueError("Context must be an ImplicitMatrixContext")

        self.bilinear_form = self.cxt.a

        # Retrieve the mixed function space
        W = self.bilinear_form.arguments()[0].function_space()
        if len(W) > 3:
            raise NotImplementedError("Only supports up to three function spaces.")

        elim_fields = PETSc.Options().getString(pc.getOptionsPrefix()
                                                + "pc_sc_eliminate_fields",
                                                None)
        if elim_fields:
            elim_fields = [int(i) for i in elim_fields.split(',')]
        else:
            # By default, we condense down to the last field in the
            # mixed space.
            elim_fields = [i for i in range(0, len(W) - 1)]

        condensed_fields = list(set(range(len(W))) - set(elim_fields))
        if len(condensed_fields) != 1:
            raise NotImplementedError("Cannot condense to more than one field")

        c_field, = condensed_fields

        # Need to duplicate a space which is NOT
        # associated with a subspace of a mixed space.
        Vc = FunctionSpace(W.mesh(), W[c_field].ufl_element())
        bcs = []
        cxt_bcs = self.cxt.row_bcs
        for bc in cxt_bcs:
            if bc.function_space().index != c_field:
                raise NotImplementedError("Strong BC set on unsupported space")
            if isinstance(bc.function_arg, Function):
                bc_arg = interpolate(bc.function_arg, Vc)
            else:
                # Constants don't need to be interpolated
                bc_arg = bc.function_arg
            bcs.append(DirichletBC(Vc, bc_arg, bc.sub_domain))

        mat_type = PETSc.Options().getString(prefix + "mat_type", "aij")

        self.c_field = c_field
        self.condensed_rhs = Function(Vc)
        self.residual = Function(W)
        self.solution = Function(W)

        # Get expressions for the condensed linear system
        A = Tensor(self.bilinear_form)
        reduced_sys = self.condensed_system(A, self.residual, elim_fields)
        S_expr = reduced_sys.lhs
        r_expr = reduced_sys.rhs

        # Construct the condensed right-hand side
        self._assemble_Srhs = create_assembly_callable(
            r_expr,
            tensor=self.condensed_rhs,
            form_compiler_parameters=self.cxt.fc_params)

        # Allocate and set the condensed operator
        self.S = allocate_matrix(S_expr,
                                 bcs=bcs,
                                 form_compiler_parameters=self.cxt.fc_params,
                                 mat_type=mat_type)
        self._assemble_S = create_assembly_callable(
            S_expr,
            tensor=self.S,
            bcs=bcs,
            form_compiler_parameters=self.cxt.fc_params,
            mat_type=mat_type)

        self._assemble_S()
        self.S.force_evaluation()
        Smat = self.S.petscmat

        # Get nullspace for the condensed operator (if any).
        # This is provided as a user-specified callback which
        # returns the basis for the nullspace.
        nullspace = self.cxt.appctx.get("condensed_field_nullspace", None)
        if nullspace is not None:
            nsp = nullspace(Vc)
            Smat.setNullSpace(nsp.nullspace(comm=pc.comm))

        # Set up ksp for the condensed problem
        c_ksp = PETSc.KSP().create(comm=pc.comm)
        c_ksp.incrementTabLevel(1, parent=pc)
        c_ksp.setOptionsPrefix(prefix)
        c_ksp.setOperators(Smat)
        c_ksp.setUp()
        c_ksp.setFromOptions()
        self.condensed_ksp = c_ksp

        # Set up local solvers for backwards substitution
        self.local_solvers = self.local_solver_calls(A, self.residual,
                                                     self.solution,
                                                     elim_fields)
Example no. 26
class HybridSCPC(PCBase):
    """A Slate-based python preconditioner implementation of
    static condensation for three-field hybridized problems. This
    applies to the mixed-hybrid methods, such as the RT-H and BDM-H
    methods, as well as hybridized-DG discretizations like the LDG-H
    method.
    """

    needs_python_pmat = True

    @timed_function("HybridSCInit")
    def initialize(self, pc):
        """Set up the problem context. This takes the incoming
        three-field hybridized system and constructs the static
        condensation operators using Slate expressions.

        A KSP is created for the reduced system for the Lagrange
        multipliers. The scalar and flux fields are reconstructed
        locally.
        """
        from firedrake.assemble import (allocate_matrix,
                                        create_assembly_callable)
        from firedrake.bcs import DirichletBC
        from firedrake.function import Function
        from firedrake.functionspace import FunctionSpace
        from firedrake.interpolation import interpolate

        prefix = pc.getOptionsPrefix() + "hybrid_sc_"
        _, P = pc.getOperators()
        self.cxt = P.getPythonContext()
        if not isinstance(self.cxt, ImplicitMatrixContext):
            raise ValueError("Context must be an ImplicitMatrixContext")

        # Retrieve the mixed function space, which is expected to
        # be of the form: W = (DG_k)^n \times DG_k \times DG_trace
        W = self.cxt.a.arguments()[0].function_space()
        if len(W) != 3:
            raise RuntimeError("Expecting three function spaces.")

        # Assert a specific ordering of the spaces
        # TODO: Clean this up
        assert W[2].ufl_element().family() == "HDiv Trace"

        # Extract trace space
        T = W[2]

        # Need to duplicate a trace space which is NOT
        # associated with a subspace of a mixed space.
        Tr = FunctionSpace(T.mesh(), T.ufl_element())
        bcs = []
        cxt_bcs = self.cxt.row_bcs
        for bc in cxt_bcs:
            assert bc.function_space() == T, (
                "BCs should be imposing vanishing conditions on traces")
            if isinstance(bc.function_arg, Function):
                bc_arg = interpolate(bc.function_arg, Tr)
            else:
                # Constants don't need to be interpolated
                bc_arg = bc.function_arg
            bcs.append(DirichletBC(Tr, bc_arg, bc.sub_domain))

        mat_type = PETSc.Options().getString(prefix + "mat_type", "aij")

        self.r_lambda = Function(T)
        self.residual = Function(W)
        self.solution = Function(W)

        # Perform symbolics only once
        S_expr, r_lambda_expr, u_h_expr, q_h_expr = self._slate_expressions

        self.S = allocate_matrix(S_expr,
                                 bcs=bcs,
                                 form_compiler_parameters=self.cxt.fc_params,
                                 mat_type=mat_type)
        self._assemble_S = create_assembly_callable(
            S_expr,
            tensor=self.S,
            bcs=bcs,
            form_compiler_parameters=self.cxt.fc_params,
            mat_type=mat_type)

        self._assemble_S()
        Smat = self.S.petscmat

        # Set up ksp for the trace problem
        trace_ksp = PETSc.KSP().create(comm=pc.comm)
        trace_ksp.incrementTabLevel(1, parent=pc)
        trace_ksp.setOptionsPrefix(prefix)
        trace_ksp.setOperators(Smat)
        trace_ksp.setUp()
        trace_ksp.setFromOptions()
        self.trace_ksp = trace_ksp

        self._assemble_Srhs = create_assembly_callable(
            r_lambda_expr,
            tensor=self.r_lambda,
            form_compiler_parameters=self.cxt.fc_params)

        q_h, u_h, lambda_h = self.solution.split()

        # Assemble u_h using lambda_h
        self._assemble_u = create_assembly_callable(
            u_h_expr, tensor=u_h, form_compiler_parameters=self.cxt.fc_params)

        # Recover q_h using both u_h and lambda_h
        self._assemble_q = create_assembly_callable(
            q_h_expr, tensor=q_h, form_compiler_parameters=self.cxt.fc_params)

    @cached_property
    def _slate_expressions(self):
        """Returns all the relevant Slate expressions
        for the static condensation and local recovery
        procedures.
        """
        # This operator has the form:
        # | A  B  C |
        # | D  E  F |
        # | G  H  J |
        # NOTE: It is often the case that D = B.T,
        # G = C.T, H = F.T, and J = 0, but we're not making
        # that assumption here.
        _O = Tensor(self.cxt.a)
        O = _O.blocks

        # Extract sub-block:
        # | A B |
        # | D E |
        # which has block row indices (0, 1) and block
        # column indices (0, 1) as well.
        M = O[:2, :2]

        # Extract sub-block:
        # | C |
        # | F |
        # which has block row indices (0, 1) and block
        # column indices (2,)
        K = O[:2, 2]

        # Extract sub-block:
        # | G H |
        # which has block row indices (2,) and block column
        # indices (0, 1)
        L = O[2, :2]

        # And the final block J has block row-column
        # indices (2, 2)
        J = O[2, 2]

        # Schur complement for traces
        S = J - L * M.inv * K

        # Create mixed function for residual computation.
        # This projects the non-trace residual bits into
        # the trace space:
        # -L * M.inv * | v1 v2 |^T
        _R = AssembledVector(self.residual)
        R = _R.blocks
        v1v2 = R[:2]
        v3 = R[2]
        r_lambda = v3 - L * M.inv * v1v2

        # Reconstruction expressions
        q_h, u_h, lambda_h = self.solution.split()

        # Local tensors needed for reconstruction
        A = O[0, 0]
        B = O[0, 1]
        C = O[0, 2]
        D = O[1, 0]
        E = O[1, 1]
        F = O[1, 2]
        Se = E - D * A.inv * B
        Sf = F - D * A.inv * C

        v1, v2, v3 = self.residual.split()

        # Solve locally using Cholesky factorizations
        # (Se and A are symmetric positive definite)
        u_h_expr = Se.solve(AssembledVector(v2) -
                            D * A.inv * AssembledVector(v1) -
                            Sf * AssembledVector(lambda_h),
                            decomposition="LLT")

        q_h_expr = A.solve(AssembledVector(v1) - B * AssembledVector(u_h) -
                           C * AssembledVector(lambda_h),
                           decomposition="LLT")

        return (S, r_lambda, u_h_expr, q_h_expr)

    @timed_function("HybridSCUpdate")
    def update(self, pc):
        """Update by assembling into the KSP operator. No
        need to reconstruct symbolic objects.
        """
        self._assemble_S()

    def apply(self, pc, x, y):
        """Solve the reduced system for the Lagrange multipliers.
        The system is assembled using operators constructed from
        the Slate expressions in the initialize method of this PC.
        Recovery of the scalar and flux fields are assembled cell-wise
        from Slate expressions describing the local problem.
        """
        with self.residual.dat.vec_wo as v:
            x.copy(v)

        with timed_region("HybridSCRHS"):
            # Now assemble residual for the reduced problem
            self._assemble_Srhs()

        with timed_region("HybridSCSolve"):
            # Solve the system for the Lagrange multipliers
            with self.r_lambda.dat.vec_ro as b:
                if self.trace_ksp.getInitialGuessNonzero():
                    acc = self.solution.split()[2].dat.vec
                else:
                    acc = self.solution.split()[2].dat.vec_wo
                with acc as x_trace:
                    self.trace_ksp.solve(b, x_trace)

        with timed_region("HybridSCReconstruct"):
            # Recover u_h and q_h
            self._assemble_u()
            self._assemble_q()

        with self.solution.dat.vec_ro as w:
            w.copy(y)

    def applyTranspose(self, pc, x, y):
        """Apply the transpose of the preconditioner."""
        raise NotImplementedError("Transpose application is not implemented.")

    def view(self, pc, viewer=None):
        viewer.printfASCII("Hybridized trace preconditioner\n")
        viewer.printfASCII("KSP to solve trace system:\n")
        self.trace_ksp.view(viewer=viewer)
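
Similarly, a hedged sketch of solver parameters that would drive the hybridized preconditioner above; the import path 'mypackage.HybridSCPC' is a placeholder for wherever this class is defined, and the trace-solver options under the 'hybrid_sc_' prefix are illustrative only.

solver_parameters = {
    "mat_type": "matfree",
    "ksp_type": "preonly",
    "pc_type": "python",
    "pc_python_type": "mypackage.HybridSCPC",   # placeholder path for this class
    # options for the trace (Lagrange multiplier) system, prefix 'hybrid_sc_'
    "hybrid_sc_ksp_type": "cg",
    "hybrid_sc_pc_type": "gamg",
    "hybrid_sc_mat_type": "aij",
}
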
Example no. 27
    def __init__(self,
                 function_space,
                 sigma,
                 l,
                 cutoff=1.e-3,
                 regularization=1.e-8,
                 cov=sqexp):
        """
        Create new forcing covariance

        Creates a new ForcingCovariance object from a function space, parameters, and
        covariance function. Required parameters are the function space and sigma and
        correlation length parameters needed to compute the covariance matrix.

        Note that this just initializes the object, and does not compute the matrix
        entries or assemble the final PETSc matrix. This is done using the ``assemble``
        method, though if you attempt to use an unassembled matrix, assembly will be
        done automatically. However, the domain decomposition is done here to determine
        the number of DOFs handled by each process.
        """

        # need to investigate parallelization here, load balancing likely to be uneven
        # if we just use the local ownership from the distributed matrix
        # since each row has an uneven amount of work
        # know that we have reduced bandwidth (though unclear if this translates to a low
        # bandwidth of the assembled covariance matrix)

        if not isinstance(function_space, WithGeometry):
            raise TypeError(
                "bad input type for function_space: must be a FunctionSpace")

        self.function_space = function_space

        self.comm = function_space.comm

        # extract mesh and process local information

        self.nx = Function(self.function_space).vector().size()
        self.nx_local = Function(self.function_space).vector().local_size()

        # set parameters and covariance

        assert regularization >= 0., "regularization parameter must be non-negative"

        self.sigma = sigma
        self.l = l
        self.cutoff = cutoff
        self.regularization = regularization
        self.cov = cov

        # get local ownership information of distributed matrix

        vtemp = PETSc.Vec().create(comm=self.comm)
        vtemp.setSizes((self.nx_local, -1))
        vtemp.setFromOptions()
        vtemp.setUp()

        self.local_startind, self.local_endind = vtemp.getOwnershipRange()

        vtemp.destroy()

        self.is_assembled = False

        self.G = None
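
A minimal usage sketch for the constructor above, assuming Firedrake is available and that
``ForcingCovariance`` is importable from the surrounding module; the mesh resolution and the
sigma/l values are purely illustrative.

from firedrake import UnitSquareMesh, FunctionSpace

mesh = UnitSquareMesh(8, 8)
V = FunctionSpace(mesh, "CG", 1)

sigma = 1.0   # covariance scale (illustrative)
l = 0.1       # correlation length (illustrative)

fc = ForcingCovariance(V, sigma, l)   # only sets up the object and the DOF ownership split
fc.assemble()                         # computes the entries and assembles the PETSc matrix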
Exemplo n.º 28
0
    def initialize(self, pc):
        """Set up the problem context. This takes the incoming
        three-field hybridized system and constructs the static
        condensation operators using Slate expressions.

        A KSP is created for the reduced system for the Lagrange
        multipliers. The scalar and flux fields are reconstructed
        locally.
        """
        from firedrake.assemble import (allocate_matrix,
                                        create_assembly_callable)
        from firedrake.bcs import DirichletBC
        from firedrake.function import Function
        from firedrake.functionspace import FunctionSpace
        from firedrake.interpolation import interpolate

        prefix = pc.getOptionsPrefix() + "hybrid_sc_"
        _, P = pc.getOperators()
        self.cxt = P.getPythonContext()
        if not isinstance(self.cxt, ImplicitMatrixContext):
            raise ValueError("Context must be an ImplicitMatrixContext")

        # Retrieve the mixed function space, which is expected to
        # be of the form: W = (DG_k)^n \times DG_k \times DG_trace
        W = self.cxt.a.arguments()[0].function_space()
        if len(W) != 3:
            raise RuntimeError("Expecting three function spaces.")

        # Assert a specific ordering of the spaces
        # TODO: Clean this up
        assert W[2].ufl_element().family() == "HDiv Trace"

        # Extract trace space
        T = W[2]

        # Need to duplicate a trace space which is NOT
        # associated with a subspace of a mixed space.
        Tr = FunctionSpace(T.mesh(), T.ufl_element())
        bcs = []
        cxt_bcs = self.cxt.row_bcs
        for bc in cxt_bcs:
            assert bc.function_space() == T, (
                "BCs should be imposing vanishing conditions on traces")
            if isinstance(bc.function_arg, Function):
                bc_arg = interpolate(bc.function_arg, Tr)
            else:
                # Constants don't need to be interpolated
                bc_arg = bc.function_arg
            bcs.append(DirichletBC(Tr, bc_arg, bc.sub_domain))

        mat_type = PETSc.Options().getString(prefix + "mat_type", "aij")

        self.r_lambda = Function(T)
        self.residual = Function(W)
        self.solution = Function(W)

        # Perform symbolics only once
        S_expr, r_lambda_expr, u_h_expr, q_h_expr = self._slate_expressions

        self.S = allocate_matrix(S_expr,
                                 bcs=bcs,
                                 form_compiler_parameters=self.cxt.fc_params,
                                 mat_type=mat_type)
        self._assemble_S = create_assembly_callable(
            S_expr,
            tensor=self.S,
            bcs=bcs,
            form_compiler_parameters=self.cxt.fc_params,
            mat_type=mat_type)

        self._assemble_S()
        Smat = self.S.petscmat

        # Set up ksp for the trace problem
        trace_ksp = PETSc.KSP().create(comm=pc.comm)
        trace_ksp.incrementTabLevel(1, parent=pc)
        trace_ksp.setOptionsPrefix(prefix)
        trace_ksp.setOperators(Smat)
        trace_ksp.setUp()
        trace_ksp.setFromOptions()
        self.trace_ksp = trace_ksp

        self._assemble_Srhs = create_assembly_callable(
            r_lambda_expr,
            tensor=self.r_lambda,
            form_compiler_parameters=self.cxt.fc_params)

        q_h, u_h, lambda_h = self.solution.split()

        # Assemble u_h using lambda_h
        self._assemble_u = create_assembly_callable(
            u_h_expr, tensor=u_h, form_compiler_parameters=self.cxt.fc_params)

        # Recover q_h using both u_h and lambda_h
        self._assemble_q = create_assembly_callable(
            q_h_expr, tensor=q_h, form_compiler_parameters=self.cxt.fc_params)
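
The trace KSP built above is configured through PETSc options under the "hybrid_sc_" prefix
(the code also reads "hybrid_sc_mat_type" explicitly). The dictionary below is only a hedged
sketch of how such options might be passed from Firedrake solver parameters; the
"pc_python_type" path is a placeholder, since the preconditioner class name is not shown in
this snippet, and the inner ksp/pc choices are illustrative.

solver_parameters = {
    "mat_type": "matfree",          # the outer operator stays unassembled (ImplicitMatrixContext)
    "ksp_type": "preonly",
    "pc_type": "python",
    # "pc_python_type": "<path.to.the.hybrid.SC.preconditioner>",   # placeholder
    "hybrid_sc_ksp_type": "cg",     # Krylov method for the trace system
    "hybrid_sc_pc_type": "gamg",    # preconditioner for the assembled Schur complement
    "hybrid_sc_mat_type": "aij",    # read via PETSc.Options() in initialize()
}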
Exemplo n.º 29
0
    def __init__(self, mesh, element, name=None, dim=1, rank=0):
        """
        :param mesh: :class:`Mesh` to build this space on
        :param element: :class:`ufl.FiniteElementBase` to build this space from
        :param name: user-defined name for this space
        :param dim: vector space dimension of a :class:`.VectorFunctionSpace`
        :param rank: rank of the space, not the value rank
        """

        self._ufl_element = element

        # Compute the FIAT version of the UFL element above
        self.fiat_element = fiat_utils.fiat_from_ufl_element(element)

        if isinstance(mesh, mesh_t.ExtrudedMesh):
            # Set up some extrusion-specific things
            # The bottom layer maps will come from element_dof_list
            # dof_count is the total number of dofs in the extruded mesh

            # Get the flattened version of the FIAT element
            self.flattened_element = fiat_utils.FlattenedElement(
                self.fiat_element)

            # Compute the number of DoFs per dimension on top/bottom and sides
            entity_dofs = self.fiat_element.entity_dofs()
            top_dim = mesh._plex.getDimension()
            self._xtr_hdofs = [
                len(entity_dofs[(d, 0)][0]) for d in range(top_dim + 1)
            ]
            self._xtr_vdofs = [
                len(entity_dofs[(d, 1)][0]) for d in range(top_dim + 1)
            ]

            # Compute the dofs per column
            self.dofs_per_column = eutils.compute_extruded_dofs(
                self.fiat_element, self.flattened_element.entity_dofs(),
                mesh._layers)

            # Compute the offset for the extrusion process
            self.offset = eutils.compute_offset(
                self.fiat_element.entity_dofs(),
                self.flattened_element.entity_dofs(),
                self.fiat_element.space_dimension())

            # Compute the top and bottom masks to identify boundary dofs
            #
            # Sorting the keys of the closure entity dofs, the whole cell
            # comes last [-1], before that the horizontal facet [-2], before
            # that vertical facets [-3]. We need the horizontal facets here.
            closure_dofs = self.fiat_element.entity_closure_dofs()
            b_mask = closure_dofs[sorted(closure_dofs.keys())[-2]][0]
            t_mask = closure_dofs[sorted(closure_dofs.keys())[-2]][1]
            self.bt_masks = (b_mask, t_mask)  # conversion to tuple

            self.extruded = True

            self._dofs_per_entity = self.dofs_per_column
        else:
            # If the mesh is not extruded, set the extrusion-specific things to None/False, etc.
            self.offset = None
            self.bt_masks = None
            self.dofs_per_column = np.zeros(1, np.int32)
            self.extruded = False

            entity_dofs = self.fiat_element.entity_dofs()
            self._dofs_per_entity = [
                len(entity[0]) for d, entity in entity_dofs.items()
            ]

        self.name = name
        self._dim = dim
        self._mesh = mesh
        self._index = None

        dm = PETSc.DMShell().create()
        dm.setAttr('__fs__', weakref.ref(self))
        dm.setPointSF(mesh._plex.getPointSF())
        # Create the PetscSection mapping topological entities to DoFs
        sec = mesh._plex.createSection([1],
                                       self._dofs_per_entity,
                                       perm=mesh._plex_renumbering)
        dm.setDefaultSection(sec)
        self._global_numbering = sec
        self._dm = dm
        self._ises = None
        self._halo = halo.Halo(dm)

        # Compute entity class offsets
        self.dof_classes = [0, 0, 0, 0]
        for d in range(mesh._plex.getDimension() + 1):
            ndofs = self._dofs_per_entity[d]
            for i in range(4):
                self.dof_classes[i] += ndofs * mesh._entity_classes[d, i]

        # Tell the DM about the layout of the global vector
        from firedrake.function import Function
        with Function(self).dat.vec_ro as v:
            self._dm.setGlobalVector(v.duplicate())

        self._node_count = self._global_numbering.getStorageSize()

        self.cell_node_list = mesh.create_cell_node_list(
            self._global_numbering, self.fiat_element)

        if mesh._plex.getStratumSize("interior_facets", 1) > 0:
            self.interior_facet_node_list = \
                dmplex.get_facet_nodes(mesh.interior_facets.facet_cell,
                                       self.cell_node_list)
        else:
            self.interior_facet_node_list = np.array([], dtype=np.int32)

        if mesh._plex.getStratumSize("exterior_facets", 1) > 0:
            self.exterior_facet_node_list = \
                dmplex.get_facet_nodes(mesh.exterior_facets.facet_cell,
                                       self.cell_node_list)
        else:
            self.exterior_facet_node_list = np.array([], dtype=np.int32)

        # Note: this is the function space rank. The value rank may be different.
        self.rank = rank

        # Empty map caches. This is a sui generis cache
        # implementation because of the need to support boundary
        # conditions.
        self._cell_node_map_cache = {}
        self._exterior_facet_map_cache = {}
        self._interior_facet_map_cache = {}
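
As a concrete illustration of the entity-DoF bookkeeping above, consider a scalar P1 element
on a triangle: one DoF per vertex and none on edges or in the cell interior, so the
per-dimension counts come out as [1, 0, 0]. The dictionary below is written out by hand in
the same ``{dim: {entity: [dof indices]}}`` layout that the FIAT element returns.

entity_dofs = {
    0: {0: [0], 1: [1], 2: [2]},   # one DoF on each of the three vertices
    1: {0: [], 1: [], 2: []},      # no DoFs on the edges
    2: {0: []},                    # no DoFs in the cell interior
}
dofs_per_entity = [len(entity[0]) for d, entity in sorted(entity_dofs.items())]
assert dofs_per_entity == [1, 0, 0]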
Exemplo n.º 30
0
class VertexBasedLimiter(Limiter):
    """
    A vertex based limiter for P1DG fields.

    This limiter implements the vertex-based limiting scheme described in
    Dmitri Kuzmin, "A vertex-based hierarchical slope limiter for p-adaptive
    discontinuous Galerkin methods". J. Comp. Appl. Maths (2010)
    http://dx.doi.org/10.1016/j.cam.2009.05.028
    """
    def __init__(self, space):
        """
        Initialise limiter

        :param space : FunctionSpace instance
        """

        if utils.complex_mode:
            raise ValueError(
                "We haven't decided what limiting complex valued fields means. Please get in touch if you have need."
            )

        self.P1DG = space
        self.P1CG = FunctionSpace(self.P1DG.mesh(), 'CG',
                                  1)  # for min/max limits
        self.P0 = FunctionSpace(self.P1DG.mesh(), 'DG', 0)  # for centroids

        # Storage containers for cell means, max and mins
        self.centroids = Function(self.P0)
        self.centroids_rhs = Function(self.P0)
        self.max_field = Function(self.P1CG)
        self.min_field = Function(self.P1CG)

        self.centroid_solver = self._construct_centroid_solver()

        # Update min and max loop
        domain = "{[i]: 0 <= i < maxq.dofs}"
        instructions = """
        for i
            maxq[i] = fmax(maxq[i], q[0])
            minq[i] = fmin(minq[i], q[0])
        end
        """
        self._min_max_loop = (domain, instructions)

        # Perform limiting loop
        domain = "{[i, ii]: 0 <= i < q.dofs and 0 <= ii < q.dofs}"
        instructions = """
        <float64> alpha = 1
        <float64> qavg = qbar[0, 0]
        for i
            <float64> _alpha1 = fmin(alpha, fmin(1, (qmax[i] - qavg)/(q[i] - qavg)))
            <float64> _alpha2 = fmin(alpha, fmin(1, (qavg - qmin[i])/(qavg - q[i])))
            alpha = _alpha1 if q[i] > qavg else (_alpha2 if q[i] < qavg else  alpha)
        end
        for ii
            q[ii] = qavg + alpha * (q[ii] - qavg)
        end
        """
        self._limit_kernel = (domain, instructions)

    def _construct_centroid_solver(self):
        """
        Constructs a linear problem for computing the centroids

        :return: LinearSolver instance
        """
        u = TrialFunction(self.P0)
        v = TestFunction(self.P0)
        a = assemble(inner(u, v) * dx)
        return LinearSolver(a,
                            solver_parameters={
                                'ksp_type': 'preonly',
                                'pc_type': 'bjacobi',
                                'sub_pc_type': 'ilu'
                            })

    def _update_centroids(self, field):
        """
        Update centroid values
        """
        assemble(inner(field, TestFunction(self.P0)) * dx,
                 tensor=self.centroids_rhs)
        self.centroid_solver.solve(self.centroids, self.centroids_rhs)

    def compute_bounds(self, field):
        """
        Only computes min and max bounds of neighbouring cells
        """
        self._update_centroids(field)
        self.max_field.assign(-1.0e10)  # small number
        self.min_field.assign(1.0e10)  # big number

        par_loop(self._min_max_loop,
                 dx, {
                     "maxq": (self.max_field, MAX),
                     "minq": (self.min_field, MIN),
                     "q": (self.centroids, READ)
                 },
                 is_loopy_kernel=True)

    def apply_limiter(self, field):
        """
        Only applies limiting loop on the given field
        """
        par_loop(self._limit_kernel,
                 dx, {
                     "qbar": (self.centroids, READ),
                     "q": (field, RW),
                     "qmax": (self.max_field, READ),
                     "qmin": (self.min_field, READ)
                 },
                 is_loopy_kernel=True)

    def apply(self, field):
        """
        Re-computes centroids and applies limiter to given field
        """
        assert field.function_space() == self.P1DG, \
            'Given field does not belong to this object\'s function space'

        self.compute_bounds(field)
        self.apply_limiter(field)
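
A short usage sketch for the limiter above, assuming Firedrake is importable and that
``VertexBasedLimiter`` is in scope; the mesh and the field being limited are illustrative.

from firedrake import UnitSquareMesh, FunctionSpace, Function, SpatialCoordinate

mesh = UnitSquareMesh(16, 16)
P1DG = FunctionSpace(mesh, "DG", 1)

x, y = SpatialCoordinate(mesh)
q = Function(P1DG)
q.interpolate(x * y)          # field to be limited (illustrative)

limiter = VertexBasedLimiter(P1DG)
limiter.apply(q)              # recomputes centroids and bounds, then limits q in place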
Exemplo n.º 31
0
class LinearSolver(object):
    r"""
    Class encapsulating all solves on the same FEM model

    This class forms the base of all Stat FEM computations for a linear problem. It requires
    the base FEM problem, the forcing covariance (represented by a ``ForcingCovariance`` object),
    the sensor locations, data, and uncertainties (represented by an ``ObsData`` object),
    priors on the model discrepancy hyperparameters (optional), and an ensemble MPI communicator
    for parallelizing the covariance solves (optional).

    Once these are set, the prior solves can be done and cached, which is generally the most
    computationally expensive part of the modeling. The class also contains methods for
    performing parameter estimation (a ``logposterior`` method that computes the negative log
    posterior, or the negative marginal log-likelihood if no priors are specified, together
    with its associated derivatives), and
    prediction of sensor values and uncertainties at unmeasured locations.

    :ivar solver: the base Firedrake FEM solver
    :type solver: Firedrake LinearSolver
    :ivar b: the FEM RHS vector
    :type b: Firedrake Vector or Function
    :ivar G: Forcing Covariance sparse matrix
    :type G: ForcingCovariance
    :ivar data: Sensor locations, observed values, and uncertainties
    :type data: ObsData
    :ivar priors: list of prior distributions on hyperparameters or all ``None`` if uninformative
                  priors are assumed
    :type priors: list
    :ivar ensemble_comm: Firedrake Ensemble communicator for parallelizing covariance solves
    :type ensemble_comm: MPI Communicator
    :ivar params: Current set of parameters (a numpy array of length 3) representing the
                  data/model scaling factor :math:`{\rho}`, model discrepancy covariance,
                  and model discrepancy correlation length. All parameters are on a logarithmic
                  scale to enforce positivity.
    :type params: ndarray
    :ivar im: interpolation matrix used to interpolate between FEM mesh and sensor data
    :type im: InterpolationMatrix
    :ivar x: Prior FEM solution on distributed FEM mesh
    :type x: Firedrake Function
    :ivar mu: Prior FEM solution interpolated to sensor locations on root process (other processes
              have arrays of length 0)
    :type mu: ndarray
    :ivar Cu: Prior FEM covariance interpolated to sensor locations on root process (other processes
              have arrays of shape ``(0, 0)``)
    :type Cu: ndarray
    :ivar current_logpost: Current value of the negative log-posterior (or log likelihood if prior
                           is uninformative)
    :type current_logpost: float
    """
    def __init__(self,
                 A,
                 b,
                 G,
                 data,
                 *,
                 priors=[None, None, None],
                 ensemble_comm=COMM_SELF,
                 P=None,
                 solver_parameters=None,
                 nullspace=None,
                 transpose_nullspace=None,
                 near_nullspace=None,
                 options_prefix=None):
        r"""
        Create a new object encapsulating all solves on the same FEM model

        Initialize a new object for a given FEM problem to perform the Stat FEM solves.

        This class forms the base of all Stat FEM computations for a linear problem. It requires
        the base FEM problem, the forcing covariance (represented by a ``ForcingCovariance`` object),
        the sensor locations, data, and uncertainties (represented by an ``ObsData`` object),
        priors on the model discrepancy hyperparameters (optional), and an ensemble MPI communicator
        for parallelizing the covariance solves (optional).

        :param A: the FEM stiffness matrix
        :type A: Firedrake Matrix
        :param b: the FEM RHS vector
        :type b: Firedrake Vector or Function
        :param G: Forcing Covariance sparse matrix
        :type G: ForcingCovariance
        :param data: Sensor locations, observed values, and uncertainties
        :type data: ObsData
        :param priors: list of prior distributions on hyperparameters or all ``None`` if uninformative
                       priors are assumed (optional)
        :type priors: list
        :param ensemble_comm: Firedrake Ensemble communicator for parallelizing covariance solves (optional)
        :type ensemble_comm: MPI Communicator
        :param P: an optional `MatrixBase` to construct any
                  preconditioner from; if none is supplied ``A`` is
                  used to construct the preconditioner.
        :type P: MatrixBase or other derived Matrix type
        :param solver_parameters: (optional) dict of solver parameters
        :type solver_parameters: dict
        :param nullspace: an optional `VectorSpaceBasis` (or
                          `MixedVectorSpaceBasis`) spanning the null space
                          of the operator.
        :type nullspace: VectorSpaceBasis or MixedVectorSpaceBasis
        :param transpose_nullspace: as for the nullspace, but used to
                                    make the right hand side consistent.
        :type transpose_nullspace: VectorSpaceBasis or MixedVectorSpaceBasis
        :param near_nullspace: as for the nullspace, but used to set
                              the near nullspace.
        :type near_nullspace: VectorSpaceBasis or MixedVectorSpaceBasis
        :param options_prefix: an optional prefix used to distinguish
                               PETSc options.  If not provided a unique
                               prefix will be created.  Use this option
                               if you want to pass options to the solver
                               from the command line in addition to
                               through the ``solver_parameters`` dict.
        :type options_prefix: str
        :returns: new ``LinearSolver`` instance
        :rtype: LinearSolver
        """

        if not isinstance(A, MatrixBase):
            raise TypeError("A must be a firedrake matrix")
        if not isinstance(b, (Function, Vector)):
            raise TypeError("b must be a firedrake function or vector")
        if not isinstance(G, ForcingCovariance):
            raise TypeError("G must be a forcing covariance")
        if not isinstance(data, ObsData):
            raise TypeError("data must be an ObsData type")
        if not isinstance(priors, list):
            raise TypeError("priors must be a list of prior objects or None")
        if not len(priors) == 3:
            raise ValueError(
                "priors must be a list of prior objects or None of length 3")
        for p in priors:
            if not p is None:
                raise TypeError(
                    "priors must be a list of prior objects or None")
        if not isinstance(ensemble_comm, type(COMM_WORLD)):
            raise TypeError(
                "ensemble_comm must be an MPI communicator created with a firedrake Ensemble"
            )

        self.solver = fdLS(A,
                           P=P,
                           solver_parameters=solver_parameters,
                           nullspace=nullspace,
                           transpose_nullspace=transpose_nullspace,
                           near_nullspace=near_nullspace,
                           options_prefix=options_prefix)
        self.b = b
        self.G = G
        self.data = data
        self.ensemble_comm = ensemble_comm
        self.priors = list(priors)
        self.params = None

        self.im = InterpolationMatrix(G.function_space, data.get_coords())

        self.x = None
        self.mu = None
        self.Cu = None
        self.current_logpost = None

    def __del__(self):
        r"""
        Delete the LinearSolver object

        When deleting a LinearSolver, one needs to deallocate the memory for the interpolation
        matrix. No inputs or return values.
        """

        self.im.destroy()

    def set_params(self, params):
        r"""
        Sets parameter values

        Checks and sets new values of the hyperparameters. New parameters must be a numpy
        array of length 3. First parameter is the data/model scaling factor :math:`{\rho}`,
        second parameter is the model discrepancy covariance, and the third parameter is
        the model discrepancy correlation length. All parameters are assumed to be on a
        logarithmic scale to enforce positivity.

        :param params: New set of parameters (must be a numpy array of length 3)
        :type params: ndarray
        :returns: None
        """

        params = np.array(params, dtype=np.float64)
        assert params.shape == (
            3, ), "bad shape for model discrepancy parameters"

        self.params = params

    def solve_prior(self):
        r"""
        Solve base (prior) FEM plus covariance interpolated to the data locations

        This method solves the prior FEM and covariance interpolated to the sensor locations.
        It does not require setting parameter values, as the model discrepancy does not
        influence these results. The covariance is cached as it is expensive to compute
        and is re-used in all other solves.

        In addition to caching the results, the method returns solution as numpy arrays
        on the root process (rank 0).

        Note that unlike the solve done in the meshspace, this uses a return value rather than a
        Firedrake/PETSc style interface to place the solution in a pre-allocated ``Function``.
        This is because each process has a different array size, so would require correctly
        pre-allocating arrays of different lengths on each process.

        :returns: FEM prior mean and covariance (as a tuple of numpy arrays) on the root process.
                  Non-root processes return numpy arrays of shape ``(0,)`` (mean) and ``(0, 0)``
                  (covariance).
        :rtype: tuple of ndarrays
        """

        # form interpolated prior covariance across all ensemble processes

        self.Cu = interp_covariance_to_data(self.im, self.G, self.solver,
                                            self.im, self.ensemble_comm)

        # solve base FEM (prior mean) and interpolate to data space on root

        self.x = Function(self.G.function_space)

        if self.ensemble_comm.rank == 0:
            self.solver.solve(self.x, self.b)
            self.mu = self.im.interp_mesh_to_data(self.x.vector())
        else:
            self.mu = np.zeros(0)

        return self.mu, self.Cu

    def solve_posterior(self, x, scale_mean=False):
        r"""
        Solve FEM posterior in mesh space

        Solve for the FEM posterior conditioned on the data on the FEM mesh. The solution
        is stored in the preallocated Firedrake ``Function``.

        Note that if an ensemble communicator was used to parallelize the covariance solves,
        the solution is only stored in the root of the ensemble communicator. The Firedrake
        ``Function`` on the other processes will not be modified.

        The optional ``scale_mean`` argument determines if the solution is to be re-scaled
        by the model discrepancy scaling factor. This value is by default ``False``.
        To re-scale to match the data, pass ``scale_mean=True``.

        :param x: Firedrake ``Function`` for holding the solution. This is modified in place
                  by the method.
        :type x: Firedrake Function
        :param scale_mean: Boolean indicating if the mean should be scaled by the model
                           discrepancy scaling factor. Optional, default is ``False``
        :type scale_mean: bool
        :returns: None
        """

        if not isinstance(bool(scale_mean), bool):
            raise TypeError("scale_mean argument must be boolean-like")

        # create interpolation matrix if not cached

        if self.Cu is None or self.x is None:
            self.solve_prior()

        if self.params is None:
            raise ValueError("must set parameter values to solve posterior")

        rho = np.exp(self.params[0])

        if scale_mean:
            scalefact = rho
        else:
            scalefact = 1.

        # remaining solves are just done on ensemble root

        if self.ensemble_comm.rank == 0:

            if self.G.comm.rank == 0:
                Ks = self.data.calc_K_plus_sigma(self.params[1:])
                try:
                    LK = cho_factor(Ks)
                except LinAlgError:
                    raise LinAlgError(
                        "Error attempting to compute the Cholesky factorization "
                        + "of the model discrepancy")
                tmp_dataspace_1 = cho_solve(LK, self.data.get_data())
            else:
                tmp_dataspace_1 = np.zeros(0)

            # interpolate to dataspace

            tmp_meshspace_1 = self.im.interp_data_to_mesh(tmp_dataspace_1)

            # solve forcing covariance and interpolate to dataspace

            tmp_meshspace_2 = solve_forcing_covariance(
                self.G, self.solver,
                tmp_meshspace_1)._scale(rho) + self.x.vector()

            tmp_dataspace_1 = self.im.interp_mesh_to_data(tmp_meshspace_2)

            if self.G.comm.rank == 0:
                try:
                    L = cho_factor(Ks + rho**2 * self.Cu)
                except LinAlgError:
                    raise LinAlgError(
                        "Error attempting to compute the Cholesky factorization "
                        + "of the model discrepancy plus forcing covariance")
                tmp_dataspace_2 = cho_solve(L, tmp_dataspace_1)
            else:
                tmp_dataspace_2 = np.zeros(0)

            tmp_meshspace_1 = self.im.interp_data_to_mesh(tmp_dataspace_2)

            tmp_meshspace_1 = solve_forcing_covariance(
                self.G, self.solver, tmp_meshspace_1)._scale(rho**2)

            x.assign(
                (tmp_meshspace_2 - tmp_meshspace_1)._scale(scalefact).function)

    def solve_posterior_covariance(self, scale_mean=False):
        r"""
        Solve posterior FEM and covariance interpolated to the data locations

        This method solves the posterior FEM and covariance interpolated to the sensor
        locations. The method returns solution as numpy arrays on the root process (rank 0).

        Note that unlike the solve done in the meshspace, this uses a return value rather than a
        Firedrake/PETSc style interface to place the solution in a pre-allocated ``Function``.
        This is because each process has a different array size, so would require correctly
        pre-allocating arrays of different lengths on each process.

        The optional ``scale_mean`` argument determines if the solution is to be re-scaled
        by the model discrepancy scaling factor. This value is by default ``False``.
        To re-scale to match the data, pass ``scale_mean=True``.

        :returns: FEM posterior mean and covariance (as a tuple of numpy arrays) on the root process.
                  Non-root processes return numpy arrays of shape ``(0,)`` (mean) and ``(0, 0)``
                  (covariance).
        :param scale_mean: Boolean indicating if the mean should be scaled by the model
                           discrepancy scaling factor. Optional, default is ``False``
        :type scale_mean: bool
        :rtype: tuple of ndarrays
        """

        if not isinstance(bool(scale_mean), bool):
            raise TypeError("scale_mean argument must be boolean-like")

        # create interpolation matrix if not cached

        if self.mu is None or self.Cu is None:
            self.solve_prior()

        if self.params is None:
            raise ValueError("must set parameter values to solve posterior")

        rho = np.exp(self.params[0])

        if scale_mean:
            scalefact = rho
        else:
            scalefact = 1.

        if self.ensemble_comm.rank == 0 and self.G.comm.rank == 0:
            try:
                Ks = self.data.calc_K_plus_sigma(self.params[1:])
                LK = cho_factor(Ks)
                LC = cho_factor(Ks + rho**2 * self.Cu)
            except LinAlgError:
                raise LinAlgError(
                    "Cholesky factorization of one of the covariance matrices failed"
                )

            # compute posterior mean

            muy = rho * np.dot(self.Cu, cho_solve(
                LK, self.data.get_data())) + self.mu
            muy_tmp = rho**2 * np.dot(self.Cu, cho_solve(LC, muy))
            muy = muy - muy_tmp

            # compute posterior covariance

            Cuy = self.Cu - rho**2 * np.dot(self.Cu, cho_solve(LC, self.Cu))
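            # In matrix form, with K_s the model discrepancy plus observational covariance
            # (calc_K_plus_sigma) and C_u the prior covariance at the sensors, the block
            # above computes
            #   m_post = (I - rho^2 C_u (K_s + rho^2 C_u)^{-1}) (rho C_u K_s^{-1} y + m_prior)
            #   C_post = C_u - rho^2 C_u (K_s + rho^2 C_u)^{-1} C_u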

        else:
            muy = np.zeros(0)
            Cuy = np.zeros((0, 0))

        return scalefact * muy, Cuy

    def solve_prior_generating(self):
        r"""
        Solve for the prior of the generating process

        This method solves for the prior of the generating process before looking at the data.
        The main computational cost is solving for the prior of the covariance, so if this is
        cached from a previous solve this is a simple calculation.

        :returns: FEM prior mean and covariance of the true generating process (as a tuple of
                  numpy arrays) on the root process. Non-root processes return numpy arrays of
                  shape ``(0,)`` (mean) and ``(0, 0)`` (covariance).
        :rtype: tuple of ndarrays
        """

        # create interpolation matrix if not cached

        if self.mu is None or self.Cu is None:
            self.solve_prior()

        if self.params is None:
            raise ValueError(
                "must set parameter values to solve prior of generating process"
            )

        rho = np.exp(self.params[0])

        if self.G.comm.rank == 0 and self.ensemble_comm.rank == 0:
            m_eta = rho * self.mu
            C_eta = rho**2 * self.Cu + self.data.calc_K(self.params[1:])
        else:
            m_eta = np.zeros(0)
            C_eta = np.zeros((0, 0))

        return m_eta, C_eta

    def solve_posterior_generating(self):
        r"""
        Solve for the posterior of the generating process

        This method solves for the posterior of the generating process after conditioning on the data.
        The main computational cost is solving for the prior of the covariance, so if this is
        cached from a previous solve this is a simple calculation.

        :returns: FEM posterior mean and covariance of the true generating process (as a tuple of
                  numpy arrays) on the root process. Non-root processes return numpy arrays of
                  shape ``(0,)`` (mean) and ``(0, 0)`` (covariance).
        :rtype: tuple of ndarrays
        """

        # create interpolation matrix if not cached

        m_eta, C_eta = self.solve_prior_generating()

        if self.ensemble_comm.rank == 0 and self.G.comm.rank == 0:
            try:
                L = cho_factor(self.data.get_unc()**2 *
                               np.eye(self.data.get_n_obs()) + C_eta)
            except LinAlgError:
                raise LinAlgError(
                    "Cholesky factorization of the covariance matrix failed")

            C_etay = cho_solve(L, self.data.get_unc()**2 * C_eta)

            m_etay = cho_solve(
                L,
                np.dot(C_eta, self.data.get_data()) +
                self.data.get_unc()**2 * m_eta)
        else:
            m_etay = np.zeros(0)
            C_etay = np.zeros((0, 0))

        return m_etay, C_etay

    def predict_mean(self, coords, scale_mean=True):
        r"""
        Compute the predictive mean

        This method computes the predictive mean of data values at unmeasured locations. It returns
        the vector of predicted sensor values on the root process as a numpy array. It requires only a
        small overhead above the computational work of finding the posterior mean (i.e. you get
        the mean value at new sensor locations for "free" once you have solved the posterior).

        The optional ``scale_mean`` argument determines if the solution is to be re-scaled
        by the model discrepancy scaling factor. This value is by default ``True``.
        To re-scale to match the FEM solution, pass ``scale_mean=False``.

        :param coords: Spatial coordinates at which the mean will be predicted. Must be a
                       2D Numpy array (or a 1D array, which will assume the second axis has length
                       1)
        :type coords: ndarray
        :param scale_mean: Boolean indicating if the mean should be scaled by the model
                           discrepancy scaling factor. Optional, default is ``True``
        :type scale_mean: bool
        :returns: FEM prediction at specified sensor locations as a numpy array on the root process.
                  All other processes will have a numpy array of length 0.
        :rtype: ndarray
        """

        if not isinstance(bool(scale_mean), bool):
            raise TypeError("scale_mean argument must be boolean-like")

        coords = np.array(coords, dtype=np.float64)
        if coords.ndim == 1:
            coords = np.reshape(coords, (-1, 1))
        assert coords.ndim == 2, "coords must be a 1d or 2d array"
        assert coords.shape[1] == self.data.get_n_dim(
        ), "axis 1 of coords must be the same length as the FEM dimension"

        if self.Cu is None:
            self.solve_prior()

        if self.params is None:
            raise ValueError("must set parameter values to make predictions")

        rho = np.exp(self.params[0])

        if scale_mean:
            scalefact = rho
        else:
            scalefact = 1.

        x = Function(self.G.function_space)

        self.solve_posterior(x)

        im = InterpolationMatrix(self.G.function_space, coords)

        mu = scalefact * im.interp_mesh_to_data(x.vector())

        im.destroy()

        return mu

    def predict_covariance(self, coords, unc):
        r"""
        Compute the predictive covariance

        This method computes the predictive covariance of data values at unmeasured locations.
        It returns the array of predicted sensor value covariances on the root process as a numpy
        array. Unlike the mean, the predictive covariance requires doing two additional sets of
        covariance solves: one on the new sensor locations (to get the baseline covariance),
        and one set of solves that interpolates between the predictive points and the original
        sensor locations. This can be thought of as doing the covariance solves at the new points
        to get a baseline uncertainty, and then the cross-solves determine if any of the sensor
        data is close enough to the predictive points to reduce this uncertainty.

        :param coords: Spatial coordinates at which the covariance will be predicted. Must be a
                       2D Numpy array (or a 1D array, which will assume the second axis has length
                       1)
        :type coords: ndarray
        :param unc: Uncertainty for unmeasured sensor locations (i.e. the statistical error one would
                    expect if these measurements were made). Can be a single non-negative float,
                    or an array of non-negative floats with the same length as the first axis of
                    ``coords``.
        :type unc: float or ndarray
        :returns: FEM predictive covariance at specified sensor locations as a numpy array on the
                  root process. All other processes will have a numpy array of shape ``(0, 0)``.
        :rtype: ndarray
        """

        coords = np.array(coords, dtype=np.float64)
        if coords.ndim == 1:
            coords = np.reshape(coords, (-1, 1))
        assert coords.ndim == 2, "coords must be a 1d or 2d array"
        assert coords.shape[1] == self.data.get_n_dim(
        ), "axis 1 of coords must be the same length as the FEM dimension"

        if self.Cu is None:
            self.solve_prior()

        if self.params is None:
            raise ValueError("must set parameter values to make predictions")

        rho = np.exp(self.params[0])

        im_coords = InterpolationMatrix(self.G.function_space, coords)

        if coords.shape[0] > self.data.get_n_obs():
            Cucd = interp_covariance_to_data(im_coords, self.G, self.solver,
                                             self.im, self.ensemble_comm)
        else:
            Cucd = interp_covariance_to_data(self.im, self.G, self.solver,
                                             im_coords, self.ensemble_comm).T
        Cucc = interp_covariance_to_data(im_coords, self.G, self.solver,
                                         im_coords, self.ensemble_comm)

        if self.ensemble_comm.rank == 0 and self.G.comm.rank == 0:
            try:
                Ks = self.data.calc_K_plus_sigma(self.params[1:])
                LC = cho_factor(Ks + rho**2 * self.Cu)
            except LinAlgError:
                raise LinAlgError(
                    "Cholesky factorization of one of the covariance matrices failed"
                )

            # compute predictive covariance

            Cuy = Cucc - rho**2 * np.dot(Cucd, cho_solve(LC, Cucd.T))

            Cuy = ObsData(coords, np.zeros(coords.shape[0]),
                          unc).calc_K_plus_sigma(
                              self.params[1:]) + rho**2 * Cuy

        else:
            Cuy = np.zeros((0, 0))

        im_coords.destroy()

        return Cuy

    def logposterior(self, params):
        r"""
        Compute the negative log posterior for a particular set of parameters

        Computes the negative log posterior (negative marginal log-likelihood minus any
        prior log-probabilities). This is computed on the root process and then broadcast
        to all processes.

        The main computational expense is computing the prior mean and covariance, which only
        needs to be done once and can be cached. This also requires computing the Cholesky
        decomposition of the covariance plus model discrepancy.

        New parameters must be a numpy array of length 3. First parameter is the data/model
        scaling factor :math:`{\rho}`, second parameter is the model discrepancy covariance,
        and the third parameter is the model discrepancy correlation length. All parameters
        are assumed to be on a logarithmic scale to enforce positivity.

        :param params: New set of parameters (must be a numpy array of length 3)
        :type params: ndarray
        :returns: negative log posterior
        :rtype: float
        """

        self.set_params(params)
        rho = np.exp(self.params[0])

        if self.Cu is None or self.mu is None:
            self.solve_prior()

        # compute log-likelihood on root process and broadcast

        if COMM_WORLD.rank == 0:
            KCu = rho**2 * self.Cu + self.data.calc_K_plus_sigma(
                self.params[1:])
            try:
                L = cho_factor(KCu)
            except LinAlgError:
                raise LinAlgError(
                    "Error attempting to factorize the covariance matrix " +
                    "in model_loglikelihood")
            invKCudata = cho_solve(L, self.data.get_data() - rho * self.mu)
            log_posterior = 0.5 * (
                self.data.get_n_obs() * np.log(2. * np.pi) +
                2. * np.sum(np.log(np.diag(L[0]))) +
                np.dot(self.data.get_data() - rho * self.mu, invKCudata))
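            # i.e. the negative log marginal likelihood
            #   0.5 * [ n*log(2*pi) + log det(rho^2 C_u + K_s)
            #           + (y - rho*mu)^T (rho^2 C_u + K_s)^{-1} (y - rho*mu) ]
            # where 2*sum(log(diag(L[0]))) supplies the log-determinant from the Cholesky
            # factor; any prior log-probabilities are then subtracted below.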
            for i in range(3):
                if not self.priors[i] is None:
                    log_posterior -= self.priors[i].logp(self.params[i])
        else:
            log_posterior = None

        log_posterior = COMM_WORLD.bcast(log_posterior, root=0)

        assert not log_posterior is None, "error in broadcasting the log likelihood"

        COMM_WORLD.barrier()

        return log_posterior

    def logpost_deriv(self, params):
        r"""
        Compute the gradient of the negative log posterior for a particular set of parameters

        Computes the gradient of the negative log posterior (negative marginal log-likelihood
        minus any prior log-probabilities). This is computed on the root process and then broadcast
        to all processes.

        The main computational expense is computing the prior mean and covariance, which only
        needs to be done once and can be cached. This also requires computing the Cholesky
        decomposition of the covariance plus model discrepancy.

        New parameters must be a numpy array of length 3. First parameter is the data/model
        scaling factor :math:`{\rho}`, second parameter is the model discrepancy covariance,
        and the third parameter is the model discrepancy correlation length. All parameters
        are assumed to be on a logarithmic scale to enforce positivity.

        The returned log posterior gradient is a numpy array of length 3, with each component
        corresponding to the derivative of each of the input parameters.

        :param params: New set of parameters (must be a numpy array of length 3)
        :type params: ndarray
        :returns: gradient of the negative log posterior
        :rtype: ndarray
        """

        self.set_params(params)
        rho = np.exp(self.params[0])

        if self.Cu is None or self.mu is None:
            self.solve_prior()

        # compute log-likelihood on root process

        if COMM_WORLD.rank == 0:
            KCu = rho**2 * self.Cu + self.data.calc_K_plus_sigma(params[1:])
            try:
                L = cho_factor(KCu)
            except LinAlgError:
                raise LinAlgError(
                    "Error attempting to factorize the covariance matrix " +
                    "in model_loglikelihood")
            invKCudata = cho_solve(L, self.data.get_data() - rho * self.mu)

            K_deriv = self.data.calc_K_deriv(self.params[1:])

            deriv = np.zeros(3)

            deriv[0] = (
                -rho * np.dot(self.mu, invKCudata) -
                rho**2 * np.linalg.multi_dot([invKCudata, self.Cu, invKCudata])
                + rho**2 * np.trace(cho_solve(L, self.Cu)))
            for i in range(0, 2):
                deriv[i + 1] = -0.5 * (
                    np.linalg.multi_dot([invKCudata, K_deriv[i], invKCudata]) -
                    np.trace(cho_solve(L, K_deriv[i])))

            for i in range(3):
                if not self.priors[i] is None:
                    deriv[i] -= self.priors[i].dlogpdtheta(self.params[i])
        else:
            deriv = None

        deriv = COMM_WORLD.bcast(deriv, root=0)

        assert not deriv is None, "error in broadcasting the log likelihood derivative"

        COMM_WORLD.barrier()

        return deriv
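
A hedged end-to-end usage sketch for the class above. It assumes a Firedrake stiffness matrix
``A``, RHS ``b``, a ``ForcingCovariance`` ``G`` and an ``ObsData`` ``data`` have already been
built (see the constructor's type checks), and the parameter values are illustrative; recall
that all three hyperparameters live on a log scale.

import numpy as np
from firedrake import Function

ls = LinearSolver(A, b, G, data)      # A, b, G, data assumed to exist already

ls.set_params(np.zeros(3))            # log(rho), log(covariance), log(correlation length)

mu, Cu = ls.solve_prior()             # prior mean/covariance at the sensors (root process only)
neg_logpost = ls.logposterior(np.zeros(3))

x = Function(G.function_space)
ls.solve_posterior(x)                 # posterior mean on the FEM mesh, written into x
muy, Cuy = ls.solve_posterior_covariance()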
Exemplo n.º 32
0
class VertexBasedLimiter(Limiter):
    """
    A vertex based limiter for P1DG fields.

    This limiter implements the vertex-based limiting scheme described in
    Dmitri Kuzmin, "A vertex-based hierarchical slope limiter for p-adaptive
    discontinuous Galerkin methods". J. Comp. Appl. Maths (2010)
    http://dx.doi.org/10.1016/j.cam.2009.05.028
    """
    def __init__(self, space):
        """
        Initialise limiter

        :param space : FunctionSpace instance
        """

        self.P1DG = space
        self.P1CG = FunctionSpace(self.P1DG.mesh(), 'CG',
                                  1)  # for min/max limits
        self.P0 = FunctionSpace(self.P1DG.mesh(), 'DG', 0)  # for centroids

        # Storage containers for cell means, max and mins
        self.centroids = Function(self.P0)
        self.centroids_rhs = Function(self.P0)
        self.max_field = Function(self.P1CG)
        self.min_field = Function(self.P1CG)

        self.centroid_solver = self._construct_centroid_solver()

        # Update min and max loop
        self._min_max_loop = """
for(int i = 0; i < maxq.dofs; i++) {
    maxq[i][0] = fmax(maxq[i][0],q[0][0]);
    minq[i][0] = fmin(minq[i][0],q[0][0]);
}
                             """
        # Perform limiting loop
        self._limit_kernel = """
double alpha = 1.0;
double qavg = qbar[0][0];
for (int i=0; i < q.dofs; i++) {
    if (q[i][0] > qavg)
        alpha = fmin(alpha, fmin(1, (qmax[i][0] - qavg)/(q[i][0] - qavg)));
    else if (q[i][0] < qavg)
        alpha = fmin(alpha, fmin(1, (qavg - qmin[i][0])/(qavg - q[i][0])));
}
for (int i=0; i<q.dofs; i++) {
    q[i][0] = qavg + alpha*(q[i][0] - qavg);
}
                             """

    def _construct_centroid_solver(self):
        """
        Constructs a linear problem for computing the centroids

        :return: LinearSolver instance
        """
        u = TrialFunction(self.P0)
        v = TestFunction(self.P0)
        a = assemble(u * v * dx)
        return LinearSolver(a,
                            solver_parameters={
                                'ksp_type': 'preonly',
                                'pc_type': 'bjacobi',
                                'sub_pc_type': 'ilu'
                            })

    def _update_centroids(self, field):
        """
        Update centroid values
        """
        assemble(TestFunction(self.P0) * field * dx, tensor=self.centroids_rhs)
        self.centroid_solver.solve(self.centroids, self.centroids_rhs)

    def compute_bounds(self, field):
        """
        Only computes min and max bounds of neighbouring cells
        """
        self._update_centroids(field)
        self.max_field.assign(-1.0e10)  # small number
        self.min_field.assign(1.0e10)  # big number

        par_loop(
            self._min_max_loop, dx, {
                "maxq": (self.max_field, MAX),
                "minq": (self.min_field, MIN),
                "q": (self.centroids, READ)
            })

    def apply_limiter(self, field):
        """
        Only applies limiting loop on the given field
        """
        par_loop(
            self._limit_kernel, dx, {
                "qbar": (self.centroids, READ),
                "q": (field, RW),
                "qmax": (self.max_field, READ),
                "qmin": (self.min_field, READ)
            })

    def apply(self, field):
        """
        Re-computes centroids and applies limiter to given field
        """
        assert field.function_space() == self.P1DG, \
            'Given field does not belong to this object\'s function space'

        self.compute_bounds(field)
        self.apply_limiter(field)
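
For a single cell in isolation, the limiting kernel above reduces to the following scalar
computation (plain Python/numpy with made-up values): find the largest alpha in [0, 1] that
keeps every vertex value within its [qmin, qmax] bounds once the deviation from the cell mean
is scaled by alpha, then rescale.

import numpy as np

q = np.array([1.3, 0.2, 0.9])       # vertex values of one P1DG cell (illustrative)
qbar = q.mean()                     # cell mean; for P1 on a simplex this is the vertex average
qmax = np.array([1.1, 0.8, 1.0])    # per-vertex max bounds from neighbouring centroids
qmin = np.array([0.4, 0.1, 0.6])    # per-vertex min bounds

alpha = 1.0
for qi, hi, lo in zip(q, qmax, qmin):
    if qi > qbar:
        alpha = min(alpha, min(1.0, (hi - qbar) / (qi - qbar)))
    elif qi < qbar:
        alpha = min(alpha, min(1.0, (qbar - lo) / (qbar - qi)))

q_limited = qbar + alpha * (q - qbar)   # here alpha is about 0.6, giving roughly [1.1, 0.44, 0.86]
assert np.all(q_limited <= qmax + 1e-12) and np.all(q_limited >= qmin - 1e-12)
assert np.isclose(q_limited.mean(), qbar)   # the cell mean is preserved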
Exemplo n.º 33
0
    def initialize(self, pc):
        """Set up the problem context. This takes the incoming
        three-field system and constructs the static
        condensation operators using Slate expressions.

        A KSP is created for the reduced system. The eliminated
        variables are recovered via back-substitution.
        """

        from firedrake.assemble import (allocate_matrix,
                                        create_assembly_callable)
        from firedrake.bcs import DirichletBC
        from firedrake.function import Function
        from firedrake.functionspace import FunctionSpace
        from firedrake.interpolation import interpolate

        prefix = pc.getOptionsPrefix() + "condensed_field_"
        A, P = pc.getOperators()
        self.cxt = A.getPythonContext()
        if not isinstance(self.cxt, ImplicitMatrixContext):
            raise ValueError("Context must be an ImplicitMatrixContext")

        self.bilinear_form = self.cxt.a

        # Retrieve the mixed function space
        W = self.bilinear_form.arguments()[0].function_space()
        if len(W) > 3:
            raise NotImplementedError("Only supports up to three function spaces.")

        elim_fields = PETSc.Options().getString(pc.getOptionsPrefix()
                                                + "pc_sc_eliminate_fields",
                                                None)
        if elim_fields:
            elim_fields = [int(i) for i in elim_fields.split(',')]
        else:
            # By default, we condense down to the last field in the
            # mixed space.
            elim_fields = [i for i in range(0, len(W) - 1)]

        condensed_fields = list(set(range(len(W))) - set(elim_fields))
        if len(condensed_fields) != 1:
            raise NotImplementedError("Cannot condense to more than one field")

        c_field, = condensed_fields

        # Need to duplicate a space which is NOT
        # associated with a subspace of a mixed space.
        Vc = FunctionSpace(W.mesh(), W[c_field].ufl_element())
        bcs = []
        cxt_bcs = self.cxt.row_bcs
        for bc in cxt_bcs:
            if bc.function_space().index != c_field:
                raise NotImplementedError("Strong BC set on unsupported space")
            if isinstance(bc.function_arg, Function):
                bc_arg = interpolate(bc.function_arg, Vc)
            else:
                # Constants don't need to be interpolated
                bc_arg = bc.function_arg
            bcs.append(DirichletBC(Vc, bc_arg, bc.sub_domain))

        mat_type = PETSc.Options().getString(prefix + "mat_type", "aij")

        self.c_field = c_field
        self.condensed_rhs = Function(Vc)
        self.residual = Function(W)
        self.solution = Function(W)

        # Get expressions for the condensed linear system
        A_tensor = Tensor(self.bilinear_form)
        reduced_sys = self.condensed_system(A_tensor, self.residual, elim_fields)
        S_expr = reduced_sys.lhs
        r_expr = reduced_sys.rhs

        # Construct the condensed right-hand side
        self._assemble_Srhs = create_assembly_callable(
            r_expr,
            tensor=self.condensed_rhs,
            form_compiler_parameters=self.cxt.fc_params)

        # Allocate and set the condensed operator
        self.S = allocate_matrix(S_expr,
                                 bcs=bcs,
                                 form_compiler_parameters=self.cxt.fc_params,
                                 mat_type=mat_type,
                                 options_prefix=prefix,
                                 appctx=self.get_appctx(pc))

        self._assemble_S = create_assembly_callable(
            S_expr,
            tensor=self.S,
            bcs=bcs,
            form_compiler_parameters=self.cxt.fc_params,
            mat_type=mat_type)

        self._assemble_S()
        Smat = self.S.petscmat

        # If a different matrix is used for preconditioning,
        # assemble this as well
        if A != P:
            self.cxt_pc = P.getPythonContext()
            P_tensor = Tensor(self.cxt_pc.a)
            P_reduced_sys = self.condensed_system(P_tensor,
                                                  self.residual,
                                                  elim_fields)
            S_pc_expr = P_reduced_sys.lhs
            self.S_pc_expr = S_pc_expr

            # Allocate and set the condensed operator
            self.S_pc = allocate_matrix(S_pc_expr,
                                        bcs=bcs,
                                        form_compiler_parameters=self.cxt.fc_params,
                                        mat_type=mat_type,
                                        options_prefix=prefix,
                                        appctx=self.get_appctx(pc))

            self._assemble_S_pc = create_assembly_callable(
                S_pc_expr,
                tensor=self.S_pc,
                bcs=bcs,
                form_compiler_parameters=self.cxt.fc_params,
                mat_type=mat_type)

            self._assemble_S_pc()
            Smat_pc = self.S_pc.petscmat

        else:
            self.S_pc_expr = S_expr
            Smat_pc = Smat

        # Get nullspace for the condensed operator (if any).
        # This is provided as a user-specified callback which
        # returns the basis for the nullspace.
        nullspace = self.cxt.appctx.get("condensed_field_nullspace", None)
        if nullspace is not None:
            nsp = nullspace(Vc)
            Smat.setNullSpace(nsp.nullspace(comm=pc.comm))

        # Create a SNESContext for the DM associated with the trace problem
        self._ctx_ref = self.new_snes_ctx(pc,
                                          S_expr,
                                          bcs,
                                          mat_type,
                                          self.cxt.fc_params,
                                          options_prefix=prefix)

        # Push new context onto the dm associated with the condensed problem
        c_dm = Vc.dm

        # Set up ksp for the condensed problem
        c_ksp = PETSc.KSP().create(comm=pc.comm)
        c_ksp.incrementTabLevel(1, parent=pc)

        # Set the dm for the condensed solver
        c_ksp.setDM(c_dm)
        c_ksp.setDMActive(False)
        c_ksp.setOptionsPrefix(prefix)
        c_ksp.setOperators(A=Smat, P=Smat_pc)
        self.condensed_ksp = c_ksp

        with dmhooks.add_hooks(c_dm, self,
                               appctx=self._ctx_ref,
                               save=False):
            c_ksp.setFromOptions()

        # Set up local solvers for backwards substitution
        self.local_solvers = self.local_solver_calls(A_tensor,
                                                     self.residual,
                                                     self.solution,
                                                     elim_fields)
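
A hedged sketch of the PETSc options that drive the initialize() method above: the fields to
eliminate are chosen with "pc_sc_eliminate_fields" and the condensed solver is configured
under the "condensed_field_" prefix, both of which are read explicitly in the code. The
"pc_python_type" path assumes the SCPC class shown in the next example is importable as
``firedrake.SCPC``; the inner ksp/pc choices are illustrative.

solver_parameters = {
    "mat_type": "matfree",                   # keep the full operator unassembled
    "ksp_type": "preonly",
    "pc_type": "python",
    "pc_python_type": "firedrake.SCPC",      # assumed import path
    "pc_sc_eliminate_fields": "0,1",         # condense onto the remaining field
    "condensed_field_ksp_type": "cg",
    "condensed_field_pc_type": "gamg",
    "condensed_field_mat_type": "aij",       # read via PETSc.Options() in initialize()
}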
Exemplo n.º 34
0
class SCPC(SCBase):
    """A Slate-based Python preconditioner implementing static
    condensation for problems with up to three fields.
    """

    needs_python_pmat = True

    @timed_function("SCPCInit")
    def initialize(self, pc):
        """Set up the problem context. This takes the incoming
        three-field system and constructs the static
        condensation operators using Slate expressions.

        A KSP is created for the reduced system. The eliminated
        variables are recovered via back-substitution.
        """

        from firedrake.assemble import (allocate_matrix,
                                        create_assembly_callable)
        from firedrake.bcs import DirichletBC
        from firedrake.function import Function
        from firedrake.functionspace import FunctionSpace
        from firedrake.interpolation import interpolate

        prefix = pc.getOptionsPrefix() + "condensed_field_"
        A, P = pc.getOperators()
        self.cxt = A.getPythonContext()
        if not isinstance(self.cxt, ImplicitMatrixContext):
            raise ValueError("Context must be an ImplicitMatrixContext")

        self.bilinear_form = self.cxt.a

        # Retrieve the mixed function space
        W = self.bilinear_form.arguments()[0].function_space()
        if len(W) > 3:
            raise NotImplementedError("Only supports up to three function spaces.")

        elim_fields = PETSc.Options().getString(pc.getOptionsPrefix()
                                                + "pc_sc_eliminate_fields",
                                                None)
        if elim_fields:
            elim_fields = [int(i) for i in elim_fields.split(',')]
        else:
            # By default, we condense down to the last field in the
            # mixed space.
            elim_fields = [i for i in range(0, len(W) - 1)]
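            # For example, with a three-field space W this default gives
            # elim_fields == [0, 1], condensing onto the final field.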

        condensed_fields = list(set(range(len(W))) - set(elim_fields))
        if len(condensed_fields) != 1:
            raise NotImplementedError("Cannot condense to more than one field")

        c_field, = condensed_fields

        # Duplicate the condensed field's space as a stand-alone
        # FunctionSpace, NOT as a subspace of the mixed space.
        Vc = FunctionSpace(W.mesh(), W[c_field].ufl_element())
        bcs = []
        cxt_bcs = self.cxt.row_bcs
        for bc in cxt_bcs:
            if bc.function_space().index != c_field:
                raise NotImplementedError("Strong BC set on unsupported space")
            if isinstance(bc.function_arg, Function):
                bc_arg = interpolate(bc.function_arg, Vc)
            else:
                # Constants don't need to be interpolated
                bc_arg = bc.function_arg
            bcs.append(DirichletBC(Vc, bc_arg, bc.sub_domain))

        mat_type = PETSc.Options().getString(prefix + "mat_type", "aij")

        self.c_field = c_field
        self.condensed_rhs = Function(Vc)
        self.residual = Function(W)
        self.solution = Function(W)

        # Get expressions for the condensed linear system
        A_tensor = Tensor(self.bilinear_form)
        reduced_sys = self.condensed_system(A_tensor, self.residual, elim_fields)
        S_expr = reduced_sys.lhs
        r_expr = reduced_sys.rhs
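        # Rough shape of these expressions (a sketch for a two-field system,
        # eliminating field 0 and condensing onto field 1): with blocks
        # A = [[A00, A01], [A10, A11]] and residual R = (R0, R1),
        #     S_expr ~ A11 - A10 * A00.inv * A01
        #     r_expr ~ R1 - A10 * A00.inv * R0
        # i.e. the Schur complement produced by forward elimination.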

        # Construct the condensed right-hand side
        self._assemble_Srhs = create_assembly_callable(
            r_expr,
            tensor=self.condensed_rhs,
            form_compiler_parameters=self.cxt.fc_params)

        # Allocate and set the condensed operator
        self.S = allocate_matrix(S_expr,
                                 bcs=bcs,
                                 form_compiler_parameters=self.cxt.fc_params,
                                 mat_type=mat_type,
                                 options_prefix=prefix,
                                 appctx=self.get_appctx(pc))

        self._assemble_S = create_assembly_callable(
            S_expr,
            tensor=self.S,
            bcs=bcs,
            form_compiler_parameters=self.cxt.fc_params,
            mat_type=mat_type)

        self._assemble_S()
        Smat = self.S.petscmat

        # If a different matrix is used for preconditioning,
        # assemble this as well
        if A != P:
            self.cxt_pc = P.getPythonContext()
            P_tensor = Tensor(self.cxt_pc.a)
            P_reduced_sys = self.condensed_system(P_tensor,
                                                  self.residual,
                                                  elim_fields)
            S_pc_expr = P_reduced_sys.lhs
            self.S_pc_expr = S_pc_expr

            # Allocate and set the condensed preconditioning operator
            self.S_pc = allocate_matrix(S_pc_expr,
                                        bcs=bcs,
                                        form_compiler_parameters=self.cxt.fc_params,
                                        mat_type=mat_type,
                                        options_prefix=prefix,
                                        appctx=self.get_appctx(pc))

            self._assemble_S_pc = create_assembly_callable(
                S_pc_expr,
                tensor=self.S_pc,
                bcs=bcs,
                form_compiler_parameters=self.cxt.fc_params,
                mat_type=mat_type)

            self._assemble_S_pc()
            Smat_pc = self.S_pc.petscmat

        else:
            self.S_pc_expr = S_expr
            Smat_pc = Smat

        # Get nullspace for the condensed operator (if any).
        # This is provided as a user-specified callback which
        # returns the basis for the nullspace.
        nullspace = self.cxt.appctx.get("condensed_field_nullspace", None)
        if nullspace is not None:
            nsp = nullspace(Vc)
            Smat.setNullSpace(nsp.nullspace(comm=pc.comm))
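        # The callback is assumed to return an object with a
        # nullspace(comm=...) method, e.g. a firedrake VectorSpaceBasis
        # built on the condensed space Vc.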

        # Create a SNESContext for the DM associated with the condensed problem
        self._ctx_ref = self.new_snes_ctx(pc,
                                          S_expr,
                                          bcs,
                                          mat_type,
                                          self.cxt.fc_params,
                                          options_prefix=prefix)

        # Get the DM associated with the condensed problem; the new context
        # is pushed onto it when the condensed solver is configured and applied
        c_dm = Vc.dm

        # Set up ksp for the condensed problem
        c_ksp = PETSc.KSP().create(comm=pc.comm)
        c_ksp.incrementTabLevel(1, parent=pc)

        # Set the dm for the condensed solver
        c_ksp.setDM(c_dm)
        c_ksp.setDMActive(False)
        c_ksp.setOptionsPrefix(prefix)
        c_ksp.setOperators(A=Smat, P=Smat_pc)
        self.condensed_ksp = c_ksp

        with dmhooks.add_hooks(c_dm, self,
                               appctx=self._ctx_ref,
                               save=False):
            c_ksp.setFromOptions()

        # Set up local solvers for backwards substitution
        self.local_solvers = self.local_solver_calls(A_tensor,
                                                     self.residual,
                                                     self.solution,
                                                     elim_fields)

    def condensed_system(self, A, rhs, elim_fields):
        """Forms the condensed linear system by eliminating
        specified unknowns.

        :arg A: A Slate Tensor containing the mixed bilinear form.
        :arg rhs: A firedrake function for the right-hand side.
        :arg elim_fields: An iterable of field indices to eliminate.
        """

        from firedrake.slate.static_condensation.la_utils import condense_and_forward_eliminate

        return condense_and_forward_eliminate(A, rhs, elim_fields)

    def local_solver_calls(self, A, rhs, x, elim_fields):
        """Provides solver callbacks for inverting local operators
        and reconstructing eliminated fields.

        :arg A: A Slate Tensor containing the mixed bilinear form.
        :arg rhs: A firedrake function for the right-hand side.
        :arg x: A firedrake function for the solution.
        :arg elim_fields: An iterable of eliminated field indices
                          to recover.
        """

        from firedrake.slate.static_condensation.la_utils import backward_solve
        from firedrake.assemble import create_assembly_callable

        fields = x.split()
        systems = backward_solve(A, rhs, x, reconstruct_fields=elim_fields)

        local_solvers = []
        for local_system in systems:
            Ae = local_system.lhs
            be = local_system.rhs
            i, = local_system.field_idx
            local_solve = Ae.solve(be, decomposition="PartialPivLU")
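            # "PartialPivLU" presumably selects the Eigen partial-pivoting LU
            # factorisation used by the Slate backend for these element-wise
            # local solves.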
            solve_call = create_assembly_callable(
                local_solve,
                tensor=fields[i],
                form_compiler_parameters=self.cxt.fc_params)
            local_solvers.append(solve_call)

        return local_solvers

    @timed_function("SCPCUpdate")
    def update(self, pc):
        """Update by assembling into the KSP operator. No
        need to reconstruct symbolic objects.
        """

        self._assemble_S()

        # Only reassemble if a preconditioning operator
        # is provided for the condensed system
        if hasattr(self, "S_pc"):
            self._assemble_S_pc()

    def forward_elimination(self, pc, x):
        """Perform the forward elimination of fields and
        provide the reduced right-hand side for the condensed
        system.

        :arg pc: a Preconditioner instance.
        :arg x: a PETSc vector containing the incoming right-hand side.
        """

        with self.residual.dat.vec_wo as v:
            x.copy(v)

        # Now assemble residual for the reduced problem
        self._assemble_Srhs()
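        # r_expr was built from self.residual, so copying x into it first
        # makes the assembled condensed_rhs correspond to the incoming
        # right-hand side.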

    def sc_solve(self, pc):
        """Solve the condensed linear system for the
        condensed field.

        :arg pc: a Preconditioner instance.
        """

        dm = self.condensed_ksp.getDM()

        with dmhooks.add_hooks(dm, self, appctx=self._ctx_ref):

            with self.condensed_rhs.dat.vec_ro as rhs:
                if self.condensed_ksp.getInitialGuessNonzero():
                    acc = self.solution.split()[self.c_field].dat.vec
                else:
                    acc = self.solution.split()[self.c_field].dat.vec_wo
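                # A nonzero initial guess means the KSP reads the current
                # values, so read-write access (vec) is needed; otherwise
                # write-only access (vec_wo) suffices.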
                with acc as sol:
                    self.condensed_ksp.solve(rhs, sol)

    def backward_substitution(self, pc, y):
        """Perform the backwards recovery of eliminated fields.

        :arg pc: a Preconditioner instance.
        :arg y: a PETSc vector for placing the resulting fields.
        """

        # Recover eliminated unknowns
        for local_solver_call in self.local_solvers:
            local_solver_call()
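        # Each callback assembles its local reconstruction directly into the
        # matching component of self.solution (see local_solver_calls above).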

        with self.solution.dat.vec_ro as w:
            w.copy(y)

    def view(self, pc, viewer=None):
        """Viewer calls for the various configurable objects in this PC."""

        super(SCPC, self).view(pc, viewer)
        if hasattr(self, "condensed_ksp"):
            viewer.printfASCII("Solving linear system using static condensation.\n")
            self.condensed_ksp.view(viewer=viewer)
            viewer.printfASCII("Locally reconstructing unknowns.\n")
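A minimal usage sketch for selecting this preconditioner through PETSc options, assuming a two-field mixed problem already set up in Firedrake; the eliminated field index and the inner solver choices (cg/gamg) are illustrative, while the option names mirror the prefixes read in initialize (pc_sc_eliminate_fields and condensed_field_).

# Hedged sketch: run the outer solve matrix-free with SCPC, eliminating
# field 0 and solving the condensed system for the remaining field with
# CG preconditioned by algebraic multigrid.
sketch_parameters = {
    "mat_type": "matfree",
    "ksp_type": "preonly",
    "pc_type": "python",
    "pc_python_type": "firedrake.SCPC",
    "pc_sc_eliminate_fields": "0",
    "condensed_field": {"ksp_type": "cg",
                        "pc_type": "gamg"},
}
# Typical use (illustrative problem names):
# solve(a == L, w, bcs=bcs, solver_parameters=sketch_parameters)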