Example #1
    def multTranspose(self, mat, x, y):
        # Apply the transpose of the scalar matrix to each interleaved
        # component of the blocked vector x, writing the result into y.
        sizes = self.mat.getSizes()
        for i in range(self.dimension):
            start = i
            stride = self.dimension

            # strided views of the i-th component of the input and output
            xa = x.array_r[start::stride]
            ya = y.array_r[start::stride]
            xi = PETSc.Vec().createWithArray(xa, size=sizes[0], comm=x.comm)
            yi = PETSc.Vec().createWithArray(ya, size=sizes[1], comm=y.comm)
            self.mat.multTranspose(xi, yi)
            # copy the component result back into the blocked output vector
            y.array[start::stride] = yi.array_r
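
A context like the one above is typically attached to a PETSc "python"-type shell matrix, so that a transpose product on an interleaved, blocked vector falls back to component-wise products with a scalar matrix. Below is a minimal, self-contained sketch of that pattern; the class name BlockDiagonalContext, the 3x3 identity test matrix, and the choice of two components are illustrative assumptions, not part of the original code.

from petsc4py import PETSc
import numpy as np

class BlockDiagonalContext:
    """Shell-matrix context: apply a scalar Mat to each interleaved component."""
    def __init__(self, mat, dimension):
        self.mat = mat              # scalar (per-component) PETSc matrix
        self.dimension = dimension  # number of interleaved components

    def multTranspose(self, mat, x, y):
        # same pattern as Example #1: strided slices pick out one component
        sizes = self.mat.getSizes()
        for i in range(self.dimension):
            xi = PETSc.Vec().createWithArray(x.array_r[i::self.dimension],
                                             size=sizes[0], comm=x.comm)
            yi = PETSc.Vec().createWithArray(np.zeros(sizes[1][0]),
                                             size=sizes[1], comm=y.comm)
            self.mat.multTranspose(xi, yi)
            y.array[i::self.dimension] = yi.array_r

# small scalar matrix (3x3 identity) standing in for the real operator
A_scalar = PETSc.Mat().createAIJ((3, 3), nnz=1)
for r in range(*A_scalar.getOwnershipRange()):
    A_scalar[r, r] = 1.0
A_scalar.assemble()

dim = 2
rows, cols = A_scalar.getSizes()
A_block = PETSc.Mat().createPython((tuple(dim * s for s in rows),
                                    tuple(dim * s for s in cols)),
                                   context=BlockDiagonalContext(A_scalar, dim))
A_block.setUp()

x = PETSc.Vec().createMPI((dim * rows[0], dim * rows[1]))
x.set(1.0)
y = PETSc.Vec().createMPI((dim * cols[0], dim * cols[1]))
A_block.multTranspose(x, y)   # dispatches to the context's multTranspose
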
Example #2
    def gather(self, global_indices=None):
        """Gather a :class:`Vector` to all processes

        :arg global_indices: the globally numbered indices to gather
                            (should be the same on all processes).  If
                            `None`, gather the entire :class:`Vector`."""
        if global_indices is None:
            N = self.size()
            v = PETSc.Vec().createSeq(N, comm=PETSc.COMM_SELF)
            is_ = PETSc.IS().createStride(N, 0, 1, comm=PETSc.COMM_SELF)
        else:
            global_indices = np.asarray(global_indices, dtype=np.int32)
            N = len(global_indices)
            v = PETSc.Vec().createSeq(N, comm=PETSc.COMM_SELF)
            is_ = PETSc.IS().createGeneral(global_indices, comm=PETSc.COMM_SELF)

        with self.dat.vec_ro as vec:
            # scatter the selected entries of the distributed Vec into the
            # local sequential Vec on every process
            vscat = PETSc.Scatter().create(vec, is_, v, None)
            vscat.scatterBegin(vec, v, addv=PETSc.InsertMode.INSERT_VALUES)
            vscat.scatterEnd(vec, v, addv=PETSc.InsertMode.INSERT_VALUES)
        return v.array
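
As a brief usage sketch, assuming the method above lives on a Firedrake-style Vector wrapping a distributed PETSc Vec (the mesh, element, and gathered indices below are illustrative):

import numpy as np
from firedrake import UnitSquareMesh, FunctionSpace, Function

mesh = UnitSquareMesh(8, 8)
V = FunctionSpace(mesh, "CG", 1)
f = Function(V)
f.assign(1.0)

v = f.vector()
full = v.gather()                     # every rank gets the complete array
part = v.gather(np.array([0, 3, 7]))  # same global indices on all ranks
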
Example #3
    def __init__(self, function_space, coords):
        "create and assemble interpolation matrix"

        if not isinstance(function_space, WithGeometry):
            raise TypeError(
                "bad input type for function_space: must be a FunctionSpace")

        self.coords = np.copy(coords)
        self.function_space = function_space
        self.comm = function_space.comm

        self.n_data = coords.shape[0]
        assert (coords.shape[1] == self.function_space.mesh().cell_dimension()
                ), "shape of coordinates does not match mesh dimension"

        # allocate working vectors to handle parallel matrix operations and data transfer

        # dataspace_distrib is a distributed PETSc vector in the data space

        self.dataspace_distrib = PETSc.Vec().create(comm=self.comm)
        self.dataspace_distrib.setSizes((-1, self.n_data))
        self.dataspace_distrib.setFromOptions()

        self.n_data_local = self.dataspace_distrib.getSizes()[0]

        # all data computations are done on the root process, so create a scatter
        # context to facilitate this data transfer

        self.petsc_scatter, self.dataspace_gathered = PETSc.Scatter.toZero(
            self.dataspace_distrib)

        self.meshspace_vector = Function(self.function_space).vector()

        self.n_mesh_local = self.meshspace_vector.local_size()
        self.n_mesh = self.meshspace_vector.size()

        nnz = len(self.function_space.cell_node_list[0])

        self.interp = PETSc.Mat().create(comm=self.comm)
        self.interp.setSizes(
            ((self.n_mesh_local, -1), (self.n_data_local, -1)))
        self.interp.setPreallocationNNZ(nnz)
        self.interp.setFromOptions()
        self.interp.setUp()

        self.is_assembled = False
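
A construction sketch for the class above (the name InterpolationMatrix, the omitted import, and the sensor coordinates are assumptions for illustration; only the __init__ shown above is exercised):

import numpy as np
from firedrake import UnitSquareMesh, FunctionSpace

mesh = UnitSquareMesh(16, 16)
V = FunctionSpace(mesh, "CG", 1)   # a WithGeometry, as required above

# one row per sensor location; columns must match the mesh cell dimension
coords = np.array([[0.25, 0.25],
                   [0.50, 0.75],
                   [0.90, 0.10]])

# InterpolationMatrix: assumed name of the class whose __init__ is shown above
im = InterpolationMatrix(V, coords)

# the PETSc Mat im.interp is only allocated at this point (im.is_assembled is
# False); a separate assemble step must fill and finalize its entries
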
Example #4
    def __init__(self,
                 function_space,
                 sigma,
                 l,
                 cutoff=1.e-3,
                 regularization=1.e-8,
                 cov=sqexp):
        """
        Create new forcing covariance

        Creates a new ForcingCovariance object from a function space, parameters, and
        covariance function. Required parameters are the function space and sigma and
        correlation length parameters needed to compute the covariance matrix.

        Note that this just initializes the object; it does not compute the matrix
        entries or assemble the final PETSc matrix. That is done with the ``assemble``
        method, though if you attempt to use an unassembled matrix, assembly will be
        carried out automatically. The domain decomposition, however, is done here to
        determine the number of DOFs handled by each process.
        """

        # need to investigate parallelization here: load balancing is likely to be
        # uneven if we just use the local ownership from the distributed matrix,
        # since each row has an uneven amount of work.
        # note that we have reduced bandwidth (though it is unclear if this
        # translates to a low bandwidth of the assembled covariance matrix)

        if not isinstance(function_space, WithGeometry):
            raise TypeError(
                "bad input type for function_space: must be a FunctionSpace")

        self.function_space = function_space

        self.comm = function_space.comm

        # extract mesh and process local information

        self.nx = Function(self.function_space).vector().size()
        self.nx_local = Function(self.function_space).vector().local_size()

        # set parameters and covariance

        assert regularization >= 0., "regularization parameter must be non-negative"

        self.sigma = sigma
        self.l = l
        self.cutoff = cutoff
        self.regularization = regularization
        self.cov = cov

        # get local ownership information of distributed matrix

        vtemp = PETSc.Vec().create(comm=self.comm)
        vtemp.setSizes((self.nx_local, -1))
        vtemp.setFromOptions()
        vtemp.setUp()

        self.local_startind, self.local_endind = vtemp.getOwnershipRange()

        vtemp.destroy()

        self.is_assembled = False

        self.G = None
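
A construction sketch for this class as well (the name ForcingCovariance and the parameter values are illustrative; the import of the class is omitted):

from firedrake import UnitSquareMesh, FunctionSpace

mesh = UnitSquareMesh(16, 16)
V = FunctionSpace(mesh, "CG", 1)

# ForcingCovariance: assumed name of the class whose __init__ is shown above
fc = ForcingCovariance(V, sigma=1.0, l=0.2, cutoff=1.e-3, regularization=1.e-8)

# only the domain decomposition has been done so far: fc.G is None and
# fc.is_assembled is False until the assemble method is called (or assembly is
# triggered automatically on first use, as the docstring notes)
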
Example #5
def interp_covariance_to_data(im_left, G, ls, im_right, ensemble_comm=COMM_SELF):
    """
    Solve for the interpolated covariance matrix

    Solve for the covariance matrix interpolated to the sensor data locations.
    Note that the arguments allow for two different interpolation matrices
    to be used, in the event that we wish to compute the covariance matrix for
    other locations to make predictions. Note that since the Covariance Matrix
    is symmetric, it is advantageous to put the matrix with fewer spatial
    locations on the right as it will lead to fewer FEM solves (simply
    take the transpose if the reverse order is desired)

    This function solves :math:`\Phi_l^T A^{-1}GA^{-1}\Phi_r`, returning
    it as a 2D numpy array on the root process. This requires doing two FEM
    solves for each location provided in :math:`\Phi_r` plus some sparse
    matrix multiplication. Non-root processes will return an empty 2D
    numpy array (shape ``(0,0)``).

    The solves can be done independently, so optionally a Firedrake
    Ensemble communicator can be provided for dividing up the solves.
    The solves are simply divided up among the processes, so it is up
    to the user to determine an appropriate number of processes given the
    number of sensor locations that are needed. If not provided, the
    solves will be done serially.

    :param im_left: Left side InterpolationMatrix
    :type im_left: InterpolationMatrix
    :param G: Forcing covariance matrix to be used in the solve.
    :type G: ForcingCovariance
    :param ls: Firedrake LinearSolver to be used for the FEM solution
    :type ls: Firedrake LinearSolver
    :param im_right: Right side Interpolation Matrix
    :type im_right: InterpolationMatrix
    :param ensemble_comm: MPI Communicator over which to parallelize the
                          FEM solves (optional, default is solve in series)
    :type ensemble_comm: MPI Communicator
    :returns: Covariance matrix interpolated to sensor locations. If run
              in parallel, this is returned on the root process as a 2D
              numpy array, while all other processes return an empty
              array (shape ``(0,0)``)
    :rtype: ndarray
    """

    if not isinstance(im_left, InterpolationMatrix):
        raise TypeError("first argument to interp_covariance_to_data must be an InterpolationMatrix")
    if not isinstance(ls, LinearSolver):
        raise TypeError("ls must be a firedrake LinearSolver")
    if not isinstance(G, ForcingCovariance):
        raise TypeError("G must be a ForcingCovariance class")
    if not isinstance(im_right, InterpolationMatrix):
        raise TypeError("fourth argument to interp_covariance_to_data must be an InterpolationMatrix")
    if not isinstance(ensemble_comm, type(COMM_SELF)):
        raise TypeError("ensemble_comm must be an MPI communicator created from a firedrake Ensemble")

    # use ensemble comm to split up solves across ensemble processes

    v_tmp = PETSc.Vec().create(comm=ensemble_comm)
    v_tmp.setSizes((-1, im_right.n_data))
    v_tmp.setFromOptions()

    imin, imax = v_tmp.getOwnershipRange()

    v_tmp.destroy()

    # create array for holding results
    # if root on base comm, will have data at the end of the solve/interpolation
    # otherwise, size will be zero

    if im_left.comm.rank == 0:
        n_local = im_left.n_data
    else:
        n_local = 0

    # the leading index runs over the column vectors this process owns in the
    # ensemble; there are imax - imin of them

    result_tmparray = np.zeros((imax - imin, n_local))

    for i in range(imin, imax):
        rhs = im_right.get_meshspace_column_vector(i)
        tmp = solve_forcing_covariance(G, ls, rhs)
        result_tmparray[i - imin] = im_left.interp_mesh_to_data(tmp)

    # create distributed vector for gathering results at root

    cov_distrib = PETSc.Vec().create(comm=ensemble_comm)
    cov_distrib.setSizes((n_local*(imax - imin), -1))
    cov_distrib.setFromOptions()

    cov_distrib.array = result_tmparray.flatten()

    scatterfunc, cov_gathered = PETSc.Scatter.toZero(cov_distrib)

    scatterfunc.scatter(cov_distrib, cov_gathered,
                        mode=PETSc.ScatterMode.SCATTER_FORWARD)

    out_array = np.copy(cov_gathered.array)
    cov_distrib.destroy()
    cov_gathered.destroy()

    # reshape output -- if I am root on both the main comm and ensemble comm then
    # I have the whole array. Other processes have nothing

    if im_left.comm.rank == 0 and ensemble_comm.rank == 0:
        outsize = (im_left.n_data, im_right.n_data)
    else:
        outsize = (0,0)

    return np.reshape(out_array, outsize)
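
Finally, a sketch tying the pieces together (the helper names follow the earlier examples; the bilinear form, parameters, and sensor locations are illustrative, and the assemble steps on G and the interpolation matrix are assumed to happen before or inside the solve, as Example #4's docstring suggests):

import numpy as np
from firedrake import (UnitSquareMesh, FunctionSpace, TrialFunction,
                       TestFunction, dx, inner, grad, assemble, LinearSolver)

mesh = UnitSquareMesh(16, 16)
V = FunctionSpace(mesh, "CG", 1)

# FEM operator A and its solver (a simple reaction-diffusion form as a stand-in)
u, v = TrialFunction(V), TestFunction(V)
A = assemble((inner(grad(u), grad(v)) + u * v) * dx)
ls = LinearSolver(A)

coords = np.array([[0.25, 0.25], [0.50, 0.75], [0.90, 0.10]])
G = ForcingCovariance(V, sigma=1.0, l=0.2)   # as in Example #4
im = InterpolationMatrix(V, coords)          # as in Example #3

# covariance interpolated to the three sensor locations: the root process gets
# a 3x3 array, every other process gets an empty (0, 0) array
cov_data = interp_covariance_to_data(im, G, ls, im)
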