Esempio n. 1
0
 def apply(self, U, ind=None, mu=None):
     '''Evaluate the functional given by `self.dune_vec` on vectors of `U`.

     Parameters
     ----------
     U
         A `DuneVectorArray` holding the vectors to evaluate on.
     ind
         Optional indices selecting which vectors of `U` to use; if None,
         all vectors are used.
     mu
         Parameter; only checked, not used in the computation.

     Returns
     -------
     A one-dimensional `NumpyVectorArray` with one dot-product value per
     selected vector.
     '''
     assert isinstance(U, DuneVectorArray)
     assert self.check_parameter(mu)
     if ind is None:
         selected = U._list
     else:
         selected = [U._list[i] for i in ind]
     if not selected:
         # No vectors selected -> empty result of the correct dimension.
         return NumpyVectorArray.empty(dim=1)
     values = [[self.dune_vec.dot(v._vector)] for v in selected]
     return NumpyVectorArray(values)
Esempio n. 2
0
def gram_schmidt_basis_extension(basis, U, U_ind=None, product=None, copy_basis=True, copy_U=True):
    '''Extend basis using Gram-Schmidt orthonormalization.

    Parameters
    ----------
    basis
        The basis to extend. If None, an empty basis of dimension `U.dim`
        is created first.
    U
        The new basis vectors.
    U_ind
        Indices of the new basis vectors in U.
    product
        The scalar product w.r.t. which to orthonormalize; if None, the l2-scalar
        product on the coefficient vector is used.
    copy_basis
        If copy_basis is False, the old basis is extended in-place.
    copy_U
        If copy_U is False, the new basis vectors are removed from U.

    Returns
    -------
    The new basis.

    Raises
    ------
    ExtensionError
        Gram-Schmidt orthonormalization fails. Usually this is the case when U
        is not linearily independent from the basis. However this can also happen
        due to rounding errors ...
    '''
    if basis is None:
        basis = NumpyVectorArray(np.zeros((0, U.dim)))

    # Length BEFORE appending: this is the offset of the first new vector.
    basis_length = len(basis)

    new_basis = basis.copy() if copy_basis else basis
    new_basis.append(U, o_ind=U_ind, remove_from_other=(not copy_U))
    # BUGFIX: use the pre-append length as offset. With copy_basis=False,
    # `basis` IS `new_basis`, so `len(basis)` evaluated here would already
    # include the appended vectors and they would never be orthonormalized.
    gram_schmidt(new_basis, offset=basis_length, product=product)

    # gram_schmidt removes vectors it cannot orthonormalize; if nothing
    # survived, the extension failed.
    if len(new_basis) <= basis_length:
        raise ExtensionError

    return new_basis
Esempio n. 3
0
def trivial_basis_extension(basis, U, U_ind=None, copy_basis=True, copy_U=True):
    '''Trivially extend basis by just adding the new vectors.

    The new vectors are checked for (approximate) equality with vectors
    already in the basis, but NOT for linear independence.

    Parameters
    ----------
    basis
        The basis to extend. If None, an empty basis of dimension `U.dim`
        is created first.
    U
        The new basis vectors.
    U_ind
        Indices of the new basis vectors in U.
    copy_basis
        If copy_basis is False, the old basis is extended in-place.
    copy_U
        If copy_U is False, the new basis vectors are removed from U.

    Returns
    -------
    The new basis.

    Raises
    ------
    ExtensionError
        Is raised if U is already contained in basis.
    '''
    if basis is None:
        basis = NumpyVectorArray(np.zeros((0, U.dim)))

    # Reject vectors that are (almost) equal to an existing basis vector.
    if np.any(U.almost_equal(basis, ind=U_ind)):
        raise ExtensionError

    if copy_basis:
        extended = basis.copy()
    else:
        extended = basis
    extended.append(U, o_ind=U_ind, remove_from_other=(not copy_U))

    return extended
Esempio n. 4
0
def reduce_generic_rb(discretization, RB, product=None, disable_caching=True):
    '''Generic reduced basis reductor.

    Reduces a discretization by projecting each of its `operators` onto the
    reduced basis via `rb_project_operator`.

    Parameters
    ----------
    discretization
        The discretization which is to be reduced.
    RB
        The reduced basis (i.e. an array of vectors) on which to project.
        If None, an empty basis of the operators' maximal source dimension
        is used.
    product
        Scalar product for the projection. (See
        `operators.constructions.ProjectedOperator`)
    disable_caching
        If `True`, caching of the solutions of the reduced discretization
        is disabled.

    Returns
    -------
    rd
        The reduced discretization.
    rc
        The reconstructor providing a `reconstruct(U)` method which reconstructs
        high-dimensional solutions from solutions U of the reduced discretization.
    '''

    if RB is None:
        source_dims = (op.dim_source for op in discretization.operators.itervalues())
        RB = NumpyVectorArray.empty(max(source_dims))

    projected = {}
    for name, op in discretization.operators.iteritems():
        projected[name] = rb_project_operator(op, RB, product=product)
    rd = discretization.with_projected_operators(projected)

    if disable_caching and isinstance(rd, Cachable):
        # Re-initialize the cache machinery with a no-op configuration.
        Cachable.__init__(rd, config=NO_CACHE_CONFIG)
    rd.name += '_reduced'
    rd.disable_logging = True

    return rd, GenericRBReconstructor(RB)
Esempio n. 5
0
def test_numpy_sparse_solvers(numpy_sparse_solver):
    '''Each sparse solver option must solve a diagonal system to high accuracy.'''
    operator = NumpyMatrixOperator(diags([np.arange(1., 11.)], [0]))
    rhs = NumpyVectorArray(np.ones(10))
    result = operator.apply_inverse(rhs, options=numpy_sparse_solver)
    relative_residual = (operator.apply(result) - rhs).l2_norm() / rhs.l2_norm()
    assert relative_residual[0] < 1e-8
Esempio n. 6
0
def test_numpy_dense_solvers(numpy_dense_solver):
    '''Each dense solver option must solve a diagonal system to high accuracy.'''
    operator = NumpyMatrixOperator(np.eye(10) * np.arange(1, 11))
    rhs = NumpyVectorArray(np.ones(10))
    result = operator.apply_inverse(rhs, options=numpy_dense_solver)
    relative_residual = (operator.apply(result) - rhs).l2_norm() / rhs.l2_norm()
    assert relative_residual[0] < 1e-8
Esempio n. 7
0
def test_generic_solvers(generic_solver):
    '''Each generic solver option must invert the test operator to high accuracy.'''
    operator = GenericOperator()
    rhs = NumpyVectorArray(np.ones(10))
    result = operator.apply_inverse(rhs, options=generic_solver)
    relative_residual = (operator.apply(result) - rhs).l2_norm() / rhs.l2_norm()
    assert relative_residual[0] < 1e-8
Esempio n. 8
0
 def initial_projection(U, mu):
     '''Project the initial data onto the basis `U` via cell averages.

     Evaluates the initial data at the order-2 quadrature points of the
     codim-0 entities, forms the quadrature-weighted cell averages, and
     returns the coefficients of the linear combination with `U`.

     NOTE(review): reads `p` and `grid` from the enclosing scope — assumes
     `p.initial_data` is an evaluable function and `grid` provides
     quadrature data; confirm against the surrounding definition.
     '''
     quad_points = grid.quadrature_points(0, order=2)
     values = p.initial_data.evaluate(quad_points, mu).squeeze()
     weights = grid.reference_element.quadrature(order=2)[1]
     # Quadrature-weighted average per cell, normalized by the reference
     # element volume (kept as multiplication by the reciprocal, as before).
     averages = np.sum(values * weights, axis=1) * (1. / grid.reference_element.volume)
     averages = NumpyVectorArray(averages, copy=False)
     return averages.lincomb(U).data