Example #1
def reduce_stationary_coercive(discretization, RB, error_product=None, coercivity_estimator=None,
                               disable_caching=True, extends=None):
    """Reductor for |StationaryDiscretizations| with coercive operator.

    This reductor uses :meth:`~pymor.reductors.basic.reduce_generic_rb` for the actual
    RB-projection. The only addition is an error estimator. The estimator evaluates the
    dual norm of the residual with respect to a given inner product.

    Parameters
    ----------
    discretization
        The |Discretization| which is to be reduced.
    RB
        |VectorArray| containing the reduced basis on which to project.
    error_product
        Scalar product given as an |Operator| used to calculate Riesz
        representative of the residual. If `None`, the Euclidean product is used.
    coercivity_estimator
        `None` or a |ParameterFunctional| returning a lower bound for the coercivity
        constant of the given problem.
    disable_caching
        If `True`, caching of solutions is disabled for the reduced |Discretization|.
    extends
        Set by :meth:`~pymor.algorithms.greedy.greedy` to the result of the
        last reduction in case the basis extension was `hierarchic`. Used to prevent
        re-computation of Riesz representatives already obtained from previous
        reductions.

    Returns
    -------
    rd
        The reduced |Discretization|.
    rc
        The reconstructor providing a `reconstruct(U)` method which reconstructs
        high-dimensional solutions from solutions `U` of the reduced |Discretization|.
    reduction_data
        Additional data produced by the reduction process. In this case the computed
        Riesz representatives. (Compare the `extends` parameter.)
    """

    assert extends is None or len(extends) == 3

    old_residual_data = extends[2].pop('residual') if extends else None

    rd, rc, data = reduce_generic_rb(discretization, RB, disable_caching=disable_caching, extends=extends)

    residual, residual_reconstructor, residual_data = reduce_residual(discretization.operator, discretization.rhs, RB,
                                                                      product=error_product, extends=old_residual_data)

    estimator = StationaryCoerciveEstimator(residual, residual_data.get('residual_range_dims', None),
                                            coercivity_estimator)

    rd = rd.with_(estimator=estimator)

    data.update(residual=(residual, residual_reconstructor, residual_data))

    return rd, rc, data
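
A minimal usage sketch under the API documented above; the discretization `d`, its `h1_product` and the parameter `mu` are illustrative assumptions, not taken from the original:

    # hedged sketch: reduce a coercive stationary problem and estimate the error
    rd, rc, _ = reduce_stationary_coercive(d, RB,
                                           error_product=d.h1_product,
                                           coercivity_estimator=None)
    u_red = rd.solve(mu)             # reduced solve for some parameter mu
    est = rd.estimate(u_red, mu=mu)  # residual-based a posteriori error estimate
    U = rc.reconstruct(u_red)        # lift back to the high-dimensional space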
Example #2
def reduce_with_estimator(discretization,
                          RB,
                          operator_product=None,
                          vector_product=None,
                          disable_caching=True,
                          extends=None,
                          reduced_estimator=None):
    assert operator_product is None
    rd, _, reduction_data = reduce_generic_rb(discretization, RB,
                                              vector_product, disable_caching,
                                              extends)
    rc = GenericBlockRBReconstructor(RB)
    reduced_estimator.extension_step += 1
    reduced_estimator.rc = rc
    rd = rd.with_(estimator=reduced_estimator)
    return rd, rc, reduction_data
Example #4
def ei_rb_greedy(discretization, operator_names, samples, error_norm=None, target_error=None,
                 product=None, max_extensions=None, rb_initial_data=None, ei_initial_data=None,
                 use_estimator=True, extension_algorithm=trivial_basis_extension, cache_region='memory'):
    '''PODEI Greedy extension algorithm.

    Parameters
    ----------
    discretization
        The discretization to reduce.
    operator_names
        List of names of |Operators| of `discretization` which are to be
        empirically interpolated (cf. `discretization.operators`).
    samples
        The set of parameter samples on which to perform the greedy search.
        Currently this set is fixed for the whole process.
    rb_initial_data
        Initial reduced basis with which the algorithm starts. This is fed
        into the reductor for the initial projection.
    ei_initial_data
        `None` or a dict with keys `'dofs'` and `'basis'` holding interpolation
        data from a previous run, used to initialize the collateral basis.
    use_estimator
        If True, use reduced_discretization.estimate() to estimate the errors
        on the sample set. Otherwise a detailed simulation is used to calculate
        the error.
    error_norm
        If `use_estimator == False`, use this function to calculate the norm of
        the error. [Default l2_norm]
    extension_algorithm
        The extension algorithm to use to extend the current reduced basis with
        the maximum error snapshot.
    target_error
        If not None, stop the search if the maximum error on the sample set
        drops below this value.
    max_extensions
        If not None, stop algorithm after `max_extensions` extension steps.

    Returns
    -------
    Dict with the following fields:
        'data'
            The reduced basis. (More generally, the data which needs to be
            fed into `reduced_discretization.reduce()`.)
        'reduced_discretization'
            The last reduced discretization which has been computed.
        'reconstructor'
            Reconstructor for `reduced_discretization`.
        'max_err'
            Last estimated maximum error on the sample set.
        'max_err_mu'
            The parameter that corresponds to `max_err`.
        'max_errs'
            Sequence of maximum errors during the greedy run.
        'max_err_mus'
            The parameters corresponding to `max_errs`.
    '''

    def interpolate(U, ind=None):
        coefficients = solve_triangular(interpolation_matrix, U.components(interpolation_dofs, ind=ind).T,
                                        lower=True, unit_diagonal=True).T
        # coefficients = np.linalg.solve(interpolation_matrix, U.components(interpolation_dofs, ind=ind).T).T
        return collateral_basis.lincomb(coefficients)

    tic = time.time()

    logger = getLogger('pymor.algorithms.ei_greedy.ei_greedy')
    samples = tuple(samples)
    logger.info('Started PODEI greedy search on {} samples'.format(len(samples)))
    data = rb_initial_data

    # EI initialization
    operators = [discretization.operators[operator_name] for operator_name in operator_names]
    evaluations = EvaluationProvider(discretization, operators, samples, cache_region=cache_region)

    assert isinstance(evaluations, VectorArrayInterface) or all(isinstance(ev, VectorArrayInterface) for ev in evaluations)
    if isinstance(evaluations, VectorArrayInterface):
        evaluations = (evaluations,)

    if ei_initial_data is None:
        interpolation_dofs = np.zeros((0,), dtype=np.int32)
        interpolation_matrix = np.zeros((0,0))
        collateral_basis = type(next(iter(evaluations))).empty(dim=next(iter(evaluations)).dim)
    else:
        interpolation_dofs = ei_initial_data['dofs']
        collateral_basis = ei_initial_data['basis']
        interpolation_matrix = collateral_basis.components(interpolation_dofs).T

    extensions = 0
    discard_count = 0
    crb_discard = False
    error_already_calculated = False
    Ns = [len(data)]
    Ms = [len(interpolation_dofs)]
    max_errs = []
    max_err_mus = []

    while True:
        logger.info('Reducing ...')
        
        if len(interpolation_dofs) > 0:
            rd, rc, _ = reduce_ei_rb(discretization, operator_names, data={'RB': data, 'dofs': interpolation_dofs, 'CB': collateral_basis})
        else:
            rd, rc = reduce_generic_rb(discretization, data)

        logger.info('Estimating errors ...')
        if use_estimator:
            logger.info('Error Estimator usage not yet implemented')
            break
        elif not error_already_calculated:
            max_err = -1
            for mu in samples:
                errs = error_norm(discretization.solve(mu) - rc.reconstruct(rd.solve(mu)))
                cur_max_err = np.max(errs)
                if cur_max_err > max_err:
                    max_err = cur_max_err
                    max_err_mu = mu
                    max_err_t = np.argmax(errs)      
            
        max_errs.append(max_err)
        max_err_mus.append(max_err_mu)
        logger.info('Maximum error after {} extensions: {} (mu = {}, timestep t_k: k={})'.format(extensions, max_err, max_err_mu, max_err_t))
        
        if target_error is not None and max_err <= target_error:
            logger.info('Target error reached! Stopping extension loop.')
            logger.info('Reached maximal error on snapshots of {} <= {}'.format(max_err, target_error))
            break

        # compute new interpolation dof and collateral basis vector
        ev = evaluations.data(samples.index(max_err_mu))
        if len(interpolation_dofs) > 0:
            ev_interpolated = interpolate(ev)
            ERR = ev - ev_interpolated
        else:
            ERR = ev
        new_vec = ERR.copy(ind=np.argmax(error_norm(ERR)))
     
        if new_vec.amax()[1] > 1.0e-2:      
            new_dof = new_vec.amax()[0][0]
            if new_dof in interpolation_dofs:
                logger.info('DOF {} selected twice for interpolation! Stopping extension loop.'.format(new_dof))
                break
            new_vec *= 1 / new_vec.components([new_dof])[0, 0]
            interpolation_dofs = np.hstack((interpolation_dofs, new_dof))
            collateral_basis.append(new_vec, remove_from_other=True)
            interpolation_matrix = collateral_basis.components(interpolation_dofs).T
            crb_discard = False
        else:
            logger.warn('Maximum residual DOF value is {}, skipping collateral basis extension ...'.format(new_vec.amax()[1]))
            crb_discard = True

        triangularity_error = np.max(np.abs(interpolation_matrix - np.tril(interpolation_matrix)))
        logger.info('Maximum deviation of the interpolation matrix from lower triangularity: {}'
                    .format(triangularity_error))
        
        logger.info('Extending with snapshot for mu = {}'.format(max_err_mu))
        U = discretization.solve(max_err_mu)
        try:
            data, extension_data = extension_algorithm(data, U)
        except ExtensionError:
            logger.info('Extension failed. Stopping now.')
            break
        if 'hierarchic' not in extension_data:
            logger.warn('Extension algorithm does not report if extension was hierarchic. Assuming it wasn\'t ...')
        
        rd, rc, _ = reduce_ei_rb(discretization, operator_names, data={'RB': data, 'dofs': interpolation_dofs, 'CB': collateral_basis})
        if use_estimator:
            logger.info('Error Estimator usage not yet implemented')
            break
        else:
            max_err = -1
            for mu in samples:
                errs = error_norm(discretization.solve(mu) - rc.reconstruct(rd.solve(mu)))
                cur_max_err = np.max(errs)
                if cur_max_err > max_err:
                    max_err = cur_max_err
                    max_err_mu = mu
                    max_err_t = np.argmax(errs)
        if max_errs[-1] <= max_err and not crb_discard:
            logger.info('Error Increases. Discard last RB extension')
            data.remove(ind=[len(data)-1])
            discard_count += 1
            error_already_calculated = False
        else:
            extensions += 1
            Ns.append(len(data))
            Ms.append(len(interpolation_dofs))
            discard_count = 0
            error_already_calculated = True
                    
        #Version 2
#         if max_errs[len(max_errs)-1] <= max_err:
#             if discard_count < 1:
#                 logger.info('Error Increases. Discard last RB extension')
#                 data.remove(ind=[len(data)-1])
#                 discard_count = 1
#                 error_already_calculated = False
#             else:
#                 logger.info('Error still increases. Throwing away CB but keeping RB.')
#                 extensions += 1
#                 collateral_basis.remove([len(interpolation_dofs)-1])
#                 interpolation_dofs = interpolation_dofs[:-1]
#                 interpolation_matrix = interpolation_matrix[:-1,:-1]
#                 Ns.append(len(data))
#                 Ms.append(len(interpolation_dofs))
#                 discard_count = 0
#                 error_already_calculated = False
#         else:
#             extensions += 1
#             Ns.append(len(data))
#             Ms.append(len(interpolation_dofs))
#             discard_count = 0
#             error_already_calculated = True
            
        #Version 1
#         if max_errs[len(max_errs)-1] <= max_err:
#             if discard_count < 1:
#                 logger.info('Error Increases. Discard last RB extension')
#                 data.remove(ind=[len(data)-1])
#                 discard_count = 1
#                 error_already_calculated = False
#             else:
#                 logger.info('Error still increases. Keeping RB.')
#                 extensions += 1
#                 Ns.append(len(data))
#                 Ms.append(len(interpolation_dofs))
#                 discard_count = 0
#                 error_already_calculated = True
#         else:
#             extensions += 1
#             Ns.append(len(data))
#             Ms.append(len(interpolation_dofs))
#             discard_count = 0
#             error_already_calculated = True

        logger.info('N={}, M={}'.format(len(data), len(interpolation_dofs)))
        logger.info('')
        
        if max_extensions is not None and extensions >= max_extensions:
            logger.info('Maximal number of {} extensions reached.'.format(max_extensions))
            break
    
    logger.info('Reducing once more ...')
    rd, rc, ei_discretization = reduce_ei_rb(discretization, operator_names, data={'RB': data, 'dofs': interpolation_dofs, 'CB': collateral_basis})

    ei_data = {'dofs': interpolation_dofs, 'basis': collateral_basis}
    N_M_correlation = [Ns, Ms]

    tictoc = time.time() - tic
    logger.info('PODEI Greedy search took {} seconds'.format(tictoc))
    return {'ei_discretization': ei_discretization, 'ei_data': ei_data, 
            'data': data, 'reduced_discretization': rd, 'reconstructor': rc, 'max_err': max_err,
            'max_err_mu': max_err_mu, 'max_errs': max_errs, 'max_err_mus': max_err_mus, 'extensions': extensions, 'N_M_correlation': N_M_correlation,
            'time': tictoc}
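
A hedged sketch of a possible invocation, assuming a discretization `d` with an operator named 'operator' to interpolate and pyMOR's `pod_basis_extension`; all concrete names and tolerances are illustrative, not taken from the original:

    # hypothetical call following the signature above (the estimator branch is
    # not implemented, so use_estimator must be False)
    result = ei_rb_greedy(d, ['operator'], d.parameter_space.sample_uniformly(3),
                          error_norm=lambda U: U.l2_norm(),
                          target_error=1e-3, max_extensions=20,
                          rb_initial_data=d.solution_space.empty(),
                          use_estimator=False,
                          extension_algorithm=pod_basis_extension)
    rd, rc = result['reduced_discretization'], result['reconstructor']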
Example #5
    return d


# discretize
d = discretize(50, 10000, 4)

# generate solution snapshots
snapshots = d.type_solution.empty(d.dim_solution)
for mu in d.parameter_space.sample_uniformly(2):
    snapshots.append(d.solve(mu))

# apply POD
reduced_basis = pod(snapshots, 4)[0]

# reduce the model
rd, rc, _ = reduce_generic_rb(d, reduced_basis)

# stochastic error estimation
mu_max = None
err_max = -1.
for mu in d.parameter_space.sample_randomly(10):
    U_RB = rc.reconstruct(rd.solve(mu))
    U = d.solve(mu)
    err = np.max((U_RB-U).l2_norm())
    if err > err_max:
        err_max = err
        mu_max = mu

# visualize maximum error solution
U_RB = rc.reconstruct(rd.solve(mu_max))
U = d.solve(mu_max)
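
The snippet stops after recomputing the worst-case solutions; a plausible final step, assuming the user-defined `discretize` returns a discretization with pyMOR's usual `visualize` method (an assumption, not shown in the original):

    # hypothetical continuation: show detailed solution, reduced solution and error
    d.visualize((U, U_RB, U - U_RB),
                legend=('detailed', 'reduced', 'error'),
                title='Maximum-error solution (mu = {})'.format(mu_max))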
Example #6
def reductor(discretization, rb, extends=None):
    return reduce_generic_rb(ei_discretization, rb, extends=extends)
Example #7
def reduce_stationary_affine_linear(discretization,
                                    RB,
                                    error_product=None,
                                    coercivity_estimator=None,
                                    disable_caching=True,
                                    extends=None):
    """Reductor for linear |StationaryDiscretizations| with affinely decomposed operator and rhs.

    .. note::
       The reductor :func:`~pymor.reductors.stationary.reduce_stationary_coercive` can be used
       for arbitrary coercive |StationaryDiscretizations| and offers an improved error
       estimator with better numerical stability.

    This reductor uses :meth:`~pymor.reductors.basic.reduce_generic_rb` for the actual
    RB-projection. The only addition is an error estimator. The estimator evaluates the
    norm of the residual with respect to a given inner product.

    Parameters
    ----------
    discretization
        The |Discretization| which is to be reduced.
    RB
        |VectorArray| containing the reduced basis on which to project.
    error_product
        Scalar product |Operator| used to calculate Riesz representative of the
        residual. If `None`, the Euclidean product is used.
    coercivity_estimator
        `None` or a |ParameterFunctional| returning a lower bound for the coercivity
        constant of the given problem. Note that the computed error estimate is only
        guaranteed to be an upper bound for the error when an appropriate coercivity
        estimate is specified.
    disable_caching
        If `True`, caching of solutions is disabled for the reduced |Discretization|.
    extends
        Set by :meth:`~pymor.algorithms.greedy.greedy` to the result of the
        last reduction in case the basis extension was `hierarchic`. Used to prevent
        re-computation of Riesz representatives already obtained from previous
        reductions.

    Returns
    -------
    rd
        The reduced |Discretization|.
    rc
        The reconstructor providing a `reconstruct(U)` method which reconstructs
        high-dimensional solutions from solutions `U` of the reduced |Discretization|.
    reduction_data
        Additional data produced by the reduction process. In this case the computed
        Riesz representatives. (Compare the `extends` parameter.)
    """

    # assert isinstance(discretization, StationaryDiscretization)
    assert discretization.linear
    assert isinstance(discretization.operator, LincombOperator)
    assert all(not op.parametric for op in discretization.operator.operators)
    if discretization.rhs.parametric:
        assert isinstance(discretization.rhs, LincombOperator)
        assert all(not op.parametric for op in discretization.rhs.operators)
    assert extends is None or len(extends) == 3

    d = discretization
    rd, rc, data = reduce_generic_rb(d,
                                     RB,
                                     disable_caching=disable_caching,
                                     extends=extends)
    if extends:
        old_data = extends[2]
        old_RB_size = len(extends[1].RB)
    else:
        old_RB_size = 0

    # compute data for estimator
    space = d.operator.source

    # compute the Riesz representative of (U, .)_L2 with respect to error_product
    def riesz_representative(U):
        if error_product is None:
            return U.copy()
        else:
            return error_product.apply_inverse(U)

    def append_vector(U, R, RR):
        RR.append(riesz_representative(U), remove_from_other=True)
        R.append(U, remove_from_other=True)

    # compute all components of the residual
    if RB is None:
        RB = discretization.solution_space.empty()

    if extends:
        R_R, RR_R = old_data['R_R'], old_data['RR_R']
    elif not d.rhs.parametric:
        R_R = space.empty(reserve=1)
        RR_R = space.empty(reserve=1)
        append_vector(d.rhs.as_vector(), R_R, RR_R)
    else:
        R_R = space.empty(reserve=len(d.rhs.operators))
        RR_R = space.empty(reserve=len(d.rhs.operators))
        for op in d.rhs.operators:
            append_vector(op.as_vector(), R_R, RR_R)

    if len(RB) == 0:
        R_Os = [space.empty()]
        RR_Os = [space.empty()]
    elif not d.operator.parametric:
        R_Os = [space.empty(reserve=len(RB))]
        RR_Os = [space.empty(reserve=len(RB))]
        for i in xrange(len(RB)):
            append_vector(-d.operator.apply(RB, ind=i), R_Os[0], RR_Os[0])
    else:
        R_Os = [
            space.empty(reserve=len(RB))
            for _ in xrange(len(d.operator.operators))
        ]
        RR_Os = [
            space.empty(reserve=len(RB))
            for _ in xrange(len(d.operator.operators))
        ]
        if old_RB_size > 0:
            for op, R_O, RR_O, old_R_O, old_RR_O in izip(
                    d.operator.operators, R_Os, RR_Os, old_data['R_Os'],
                    old_data['RR_Os']):
                R_O.append(old_R_O)
                RR_O.append(old_RR_O)
        for op, R_O, RR_O in izip(d.operator.operators, R_Os, RR_Os):
            for i in xrange(old_RB_size, len(RB)):
                append_vector(-op.apply(RB, [i]), R_O, RR_O)

    # compute Gram matrix of the residuals
    R_RR = RR_R.dot(R_R, pairwise=False)
    R_RO = np.hstack([RR_R.dot(R_O, pairwise=False) for R_O in R_Os])
    R_OO = np.vstack([
        np.hstack([RR_O.dot(R_O, pairwise=False) for R_O in R_Os])
        for RR_O in RR_Os
    ])

    estimator_matrix = np.empty((len(R_RR) + len(R_OO), ) * 2)
    estimator_matrix[:len(R_RR), :len(R_RR)] = R_RR
    estimator_matrix[len(R_RR):, len(R_RR):] = R_OO
    estimator_matrix[:len(R_RR), len(R_RR):] = R_RO
    estimator_matrix[len(R_RR):, :len(R_RR)] = R_RO.T

    estimator_matrix = NumpyMatrixOperator(estimator_matrix)

    estimator = StationaryAffineLinearReducedEstimator(estimator_matrix,
                                                       coercivity_estimator)
    rd = rd.with_(estimator=estimator)
    data.update(R_R=R_R, RR_R=RR_R, R_Os=R_Os, RR_Os=RR_Os)

    return rd, rc, data
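
The estimator matrix assembled above stores all inner products between Riesz representatives, so evaluating the dual norm of the residual for a concrete parameter reduces to a small quadratic form. A schematic illustration of that principle (not code from the original; `G` and `C` stand for the assembled Gram matrix and the parameter-dependent residual coefficient vector):

    import numpy as np

    def dual_norm_from_gram(G, C):
        # ||residual||_dual = sqrt(C^T G C)
        return np.sqrt(C.dot(G).dot(C))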
Example #8
def reduce_stationary_coercive(discretization, RB, error_product=None, coercivity_estimator=None,
                               disable_caching=True, extends=None):
    """Reductor for |StationaryDiscretizations| with coercive operator.

    This reductor uses :meth:`~pymor.reductors.basic.reduce_generic_rb` for the actual
    RB-projection. The only addition is an error estimator. The estimator evaluates the
    dual norm of the residual with respect to a given inner product. We use
    :func:`~pymor.reductors.residual.reduce_residual` for improved numerical stability.
    (See "A. Buhr, C. Engwer, M. Ohlberger, S. Rave, A Numerically Stable A Posteriori
    Error Estimator for Reduced Basis Approximations of Elliptic Equations,
    Proceedings of the 11th World Congress on Computational Mechanics, 2014.")

    Parameters
    ----------
    discretization
        The |Discretization| which is to be reduced.
    RB
        |VectorArray| containing the reduced basis on which to project.
    error_product
        Scalar product |Operator| used to calculate Riesz representative of the
        residual. If `None`, the Euclidean product is used.
    coercivity_estimator
        `None` or a |ParameterFunctional| returning a lower bound for the coercivity
        constant of the given problem. Note that the computed error estimate is only
        guaranteed to be an upper bound for the error when an appropriate coercivity
        estimate is specified.
    disable_caching
        If `True`, caching of solutions is disabled for the reduced |Discretization|.
    extends
        Set by :meth:`~pymor.algorithms.greedy.greedy` to the result of the
        last reduction in case the basis extension was `hierarchic`. Used to prevent
        re-computation of residual range basis vectors already obtained from previous
        reductions.

    Returns
    -------
    rd
        The reduced |Discretization|.
    rc
        The reconstructor providing a `reconstruct(U)` method which reconstructs
        high-dimensional solutions from solutions `U` of the reduced |Discretization|.
    reduction_data
        Additional data produced by the reduction process. (Compare the `extends`
        parameter.)
    """

    assert extends is None or len(extends) == 3

    old_residual_data = extends[2].pop('residual') if extends else None

    rd, rc, data = reduce_generic_rb(discretization, RB, disable_caching=disable_caching, extends=extends)

    residual, residual_reconstructor, residual_data = reduce_residual(discretization.operator, discretization.rhs, RB,
                                                                      product=error_product, extends=old_residual_data)

    estimator = StationaryCoerciveEstimator(residual, residual_data.get('residual_range_dims', None),
                                            coercivity_estimator)

    rd = rd.with_(estimator=estimator)

    data.update(residual=(residual, residual_reconstructor, residual_data))

    return rd, rc, data
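
Because `extends` is supplied by the greedy algorithm itself, this reductor is typically handed to :meth:`~pymor.algorithms.greedy.greedy` with its remaining arguments bound beforehand. A hedged sketch; the discretization `d`, its `h1_product` and the sample counts are illustrative assumptions:

    from functools import partial

    # hypothetical wiring into a greedy basis-generation loop
    reductor = partial(reduce_stationary_coercive,
                       error_product=d.h1_product,
                       coercivity_estimator=coercivity_estimator)
    greedy_data = greedy(d, reductor, d.parameter_space.sample_uniformly(10),
                         use_estimator=True, max_extensions=20)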
Example #9
def reduce_parabolic(discretization, RB, product=None, coercivity_estimator=None,
                     disable_caching=True, extends=None):
    r"""Reductor for parabolic equations.

    This reductor uses :meth:`~pymor.reductors.basic.reduce_generic_rb` for the actual
    RB-projection. The only addition is the assembly of an error estimator which
    bounds the discrete l2-in time / energy-in space error similar to [GP05]_, [HO08]_
    as follows:

    .. math::
        \left[ C_a^{-1}(\mu)\|e_N(\mu)\|^2 + \sum_{n=1}^{N} dt\|e_n(\mu)\|^2_e \right]^{1/2}
            \leq \left[ C_a^{-1}(\mu)dt \sum_{n=1}^{N}\|\mathcal{R}^n(u_n(\mu), \mu)\|^2_{e,-1}
                        + C_a^{-1}(\mu)\|e_0\|^2 \right]^{1/2}

    Here, :math:`\|\cdot\|` denotes the norm induced by the problem's mass matrix
    (e.g. the L^2-norm) and :math:`\|\cdot\|_e` is an arbitrary energy norm w.r.t.
    which the space operator :math:`A(\mu)` is coercive, and :math:`C_a(\mu)` is a
    lower bound for its coercivity constant. Finally, :math:`\mathcal{R}^n` denotes
    the implicit Euler timestepping residual for the (fixed) time step size :math:`dt`,

    .. math::
        \mathcal{R}^n(u_n(\mu), \mu) :=
            f - M \frac{u_{n}(\mu) - u_{n-1}(\mu)}{dt} - A(u_n(\mu), \mu),

    where :math:`M` denotes the mass operator and :math:`f` the source term.
    The dual residual norm is evaluated using the numerically stable projection
    from [BEOR14]_.

    .. warning::
        The reduced basis `RB` is required to be orthonormal w.r.t. the given
        energy product. If not, the projection of the initial values will be
        computed incorrectly.

    .. [GP05]   M. A. Grepl, A. T. Patera, A Posteriori Error Bounds For Reduced-Basis
                Approximations Of Parametrized Parabolic Partial Differential Equations,
                M2AN 39(1), 157-181, 2005.
    .. [HO08]   B. Haasdonk, M. Ohlberger, Reduced basis method for finite volume
                approximations of parametrized evolution equations,
                M2AN 42(2), 277-302, 2008.
    .. [BEOR14] A. Buhr, C. Engwer, M. Ohlberger, S. Rave, A Numerically Stable A
                Posteriori Error Estimator for Reduced Basis Approximations of Elliptic
                Equations, Proceedings of the 11th World Congress on Computational
                Mechanics, 2014.

    Parameters
    ----------
    discretization
        The |InstationaryDiscretization| which is to be reduced.
    RB
        |VectorArray| containing the reduced basis on which to project.
    product
        The energy inner product |Operator| w.r.t. which the reduction error is
        estimated. RB must be orthonormal w.r.t. this product!
    coercivity_estimator
        `None` or a |ParameterFunctional| returning a lower bound for the coercivity
        constant of `discretization.operator` w.r.t. `product`.
    disable_caching
        If `True`, caching of solutions is disabled for the reduced |Discretization|.
    extends
        See :meth:`~pymor.algorithms.greedy.greedy`.

    Returns
    -------
    rd
        The reduced |Discretization|.
    rc
        The reconstructor providing a `reconstruct(U)` method which reconstructs
        high-dimensional solutions from solutions `U` of the reduced |Discretization|.
    reduction_data
        Additional data produced by the reduction process. (See
        :meth:`~pymor.algorithms.greedy.greedy`.)
    """

    assert extends is None or len(extends) == 3
    assert isinstance(discretization.time_stepper, ImplicitEulerTimeStepper)

    logger = getLogger('pymor.reductors.parabolic.reduce_parabolic')

    old_residual_data = extends[2].pop('residual') if extends else None
    old_initial_residual_data = extends[2].pop('initial_residual') if extends else None

    with logger.block('RB projection ...'):
        rd, rc, data = reduce_generic_rb(discretization, RB, vector_product=product,
                                         disable_caching=disable_caching, extends=extends)

    dt = discretization.T / discretization.time_stepper.nt

    with logger.block('Assembling error estimator ...'):
        residual, residual_reconstructor, residual_data = reduce_implicit_euler_residual(
            discretization.operator, discretization.mass, dt, discretization.rhs,
            RB, product=product, extends=old_residual_data
        )

        initial_residual, initial_residual_reconstructor, initial_residual_data = reduce_residual(
            IdentityOperator(discretization.solution_space), discretization.initial_data, RB, False,
            product=discretization.l2_product, extends=old_initial_residual_data
        )

    estimator = ReduceParabolicEstimator(residual, residual_data.get('residual_range_dims', None),
                                         initial_residual, initial_residual_data.get('residual_range_dims', None),
                                         coercivity_estimator)

    rd = rd.with_(estimator=estimator)

    data.update(residual=(residual, residual_reconstructor, residual_data),
                initial_residual=(initial_residual, initial_residual_reconstructor, initial_residual_data))

    return rd, rc, data
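
Given the warning above that `RB` must be orthonormal w.r.t. the energy product, a caller would typically orthonormalize first. A hedged sketch, assuming pyMOR's `gram_schmidt` (import path as in pyMOR 0.4); `d`, `energy_product` and `coercivity_estimator` are illustrative names:

    from pymor.algorithms.gram_schmidt import gram_schmidt

    # orthonormalize the basis w.r.t. the energy product before reducing
    RB = gram_schmidt(RB, product=energy_product)
    rd, rc, data = reduce_parabolic(d, RB, product=energy_product,
                                    coercivity_estimator=coercivity_estimator)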
Example #10
def reductor(config,
             detailed_data,
             discretization,
             RB,
             vector_product=None,
             disable_caching=True,
             extends=None):
    elliptic_disc = detailed_data['elliptic_disc']
    elliptic_LRBMS_disc = detailed_data['elliptic_LRBMS_disc']
    T = config['end_time']
    nt = config['nt']
    if RB is None:
        RB = [
            elliptic_LRBMS_disc.local_operator(ss).source.empty()
            for ss in np.arange(elliptic_LRBMS_disc.num_subdomains)
        ]
    rd, rc, reduction_data = reduce_generic_rb(elliptic_LRBMS_disc, RB,
                                               vector_product, disable_caching,
                                               extends)
    rc = Reconstructor(elliptic_LRBMS_disc, RB)

    # unblock_op assembles a blocked operator into a single unblocked one: a
    # LincombOperator of BlockOperators is unblocked component-wise, while a
    # BlockOperator of NumpyMatrixOperators is flattened into one dense
    # NumpyMatrixOperator (the `sparse` flag is accepted but currently unused)
    def unblock_op(op, sparse=False):
        assert op._blocks[0][0] is not None
        if isinstance(op._blocks[0][0], LincombOperator):
            coefficients = op._blocks[0][0].coefficients
            operators = [
                None for kk in np.arange(len(op._blocks[0][0].operators))
            ]
            for kk in np.arange(len(op._blocks[0][0].operators)):
                ops = [[
                    op._blocks[ii][jj].operators[kk]
                    if op._blocks[ii][jj] is not None else None
                    for jj in np.arange(op.num_source_blocks)
                ] for ii in np.arange(op.num_range_blocks)]
                operators[kk] = unblock_op(BlockOperator(ops))
            return LincombOperator(operators=operators,
                                   coefficients=coefficients)
        else:
            assert all(
                all([
                    isinstance(block, NumpyMatrixOperator
                               ) if block is not None else True
                    for block in row
                ]) for row in op._blocks)
            if op.source.dim == 0 and op.range.dim == 0:
                return NumpyMatrixOperator(np.zeros((0, 0)))
            elif op.source.dim == 1:
                # the blocks form a column: stack them vertically
                mat = np.concatenate([
                    op._blocks[ii][0]._matrix
                    for ii in np.arange(op.num_range_blocks)
                ],
                                     axis=0)
            elif op.range.dim == 1:
                mat = np.concatenate([
                    op._blocks[0][jj]._matrix
                    for jj in np.arange(op.num_source_blocks)
                ],
                                     axis=1)
            else:
                mat = bmat([[
                    coo_matrix(op._blocks[ii][jj]._matrix)
                    if op._blocks[ii][jj] is not None else coo_matrix(
                        (op._range_dims[ii], op._source_dims[jj]))
                    for jj in np.arange(op.num_source_blocks)
                ] for ii in np.arange(op.num_range_blocks)])
                mat = mat.toarray()
            return NumpyMatrixOperator(mat)

    reduced_op = unblock_op(rd.operator, True)
    reduced_rhs = unblock_op(rd.rhs)
    estimator = ReducedAgainstWeak(
        rc, detailed_data['example'], detailed_data['wrapper'],
        detailed_data['bochner_norms']['elliptic_penalty'],
        detailed_data['space_products']['l2'], T, detailed_data['mu_min'],
        detailed_data['mu_max'], detailed_data['mu_hat'],
        detailed_data['mu_bar'], detailed_data['mu_tilde'])
    return (InstationaryDiscretization(
        T=T,
        initial_data=reduced_op.source.zeros(1),
        operator=reduced_op,
        rhs=reduced_rhs,
        mass=unblock_op(rd.products['l2']),
        time_stepper=ImplicitEulerTimeStepper(nt),
        products={
            kk: unblock_op(rd.products[kk])
            for kk in rd.products.keys()
        },
        operators={
            kk: unblock_op(rd.operators[kk])
            for kk in rd.operators.keys() if kk != 'operator'
        },
        functionals={
            kk: unblock_op(rd.functionals[kk])
            for kk in rd.functionals.keys() if kk != 'rhs'
        },
        vector_operators={
            kk: unblock_op(rd.vector_operators[kk])
            for kk in rd.vector_operators.keys()
        },
        parameter_space=rd.parameter_space,
        estimator=estimator,
        cache_region='disk',
        name='reduced discretization ({} DoFs)'.format(reduced_op.source.dim)),
            rc, reduction_data)
Example #11
    return d


# discretize
d = discretize(50, 10000, 4)

# generate solution snapshots
snapshots = d.solution_space.empty()
for mu in d.parameter_space.sample_uniformly(2):
    snapshots.append(d.solve(mu))

# apply POD
reduced_basis = pod(snapshots, 4)[0]

# reduce the model
rd, rc, _ = reduce_generic_rb(d, reduced_basis)

# stochastic error estimation
mu_max = None
err_max = -1.
for mu in d.parameter_space.sample_randomly(10):
    U_RB = rc.reconstruct(rd.solve(mu))
    U = d.solve(mu)
    err = np.max((U_RB - U).l2_norm())
    if err > err_max:
        err_max = err
        mu_max = mu

# visualize maximum error solution
U_RB = rc.reconstruct(rd.solve(mu_max))
U = d.solve(mu_max)
Example #12
def reductor(config, detailed_data, discretization, RB, vector_product=None, disable_caching=True, extends=None):
    elliptic_disc = detailed_data['elliptic_disc']
    elliptic_LRBMS_disc = detailed_data['elliptic_LRBMS_disc']
    T = config['end_time']
    nt = config['nt']
    if RB is None:
        RB = [elliptic_LRBMS_disc.local_operator(ss).source.empty() for ss in np.arange(elliptic_LRBMS_disc.num_subdomains)]
    rd, rc, reduction_data = reduce_generic_rb(elliptic_LRBMS_disc, RB, vector_product, disable_caching, extends)
    rc = Reconstructor(elliptic_LRBMS_disc, RB)

    def unblock_op(op, sparse=False):
        assert op._blocks[0][0] is not None
        if isinstance(op._blocks[0][0], LincombOperator):
            coefficients = op._blocks[0][0].coefficients
            operators = [None for kk in np.arange(len(op._blocks[0][0].operators))]
            for kk in np.arange(len(op._blocks[0][0].operators)):
                ops = [[op._blocks[ii][jj].operators[kk]
                        if op._blocks[ii][jj] is not None else None
                        for jj in np.arange(op.num_source_blocks)]
                       for ii in np.arange(op.num_range_blocks)]
                operators[kk] = unblock_op(BlockOperator(ops))
            return LincombOperator(operators=operators, coefficients=coefficients)
        else:
            assert all(all([isinstance(block, NumpyMatrixOperator) if block is not None else True
                           for block in row])
                       for row in op._blocks)
            if op.source.dim == 0 and op.range.dim == 0:
                return NumpyMatrixOperator(np.zeros((0, 0)))
            elif op.source.dim == 1:
                # the blocks form a column: stack them vertically
                mat = np.concatenate([op._blocks[ii][0]._matrix
                                      for ii in np.arange(op.num_range_blocks)],
                                     axis=0)
            elif op.range.dim == 1:
                mat = np.concatenate([op._blocks[0][jj]._matrix
                                      for jj in np.arange(op.num_source_blocks)],
                                     axis=1)
            else:
                mat = bmat([[coo_matrix(op._blocks[ii][jj]._matrix)
                             if op._blocks[ii][jj] is not None else coo_matrix((op._range_dims[ii], op._source_dims[jj]))
                             for jj in np.arange(op.num_source_blocks)]
                            for ii in np.arange(op.num_range_blocks)])
                mat = mat.toarray()
            return NumpyMatrixOperator(mat)

    reduced_op = unblock_op(rd.operator, True)
    reduced_rhs = unblock_op(rd.rhs)
    estimator = ReducedAgainstWeak(rc, detailed_data['example'], detailed_data['wrapper'],
                                   detailed_data['bochner_norms']['elliptic_penalty'], detailed_data['space_products']['l2'],
                                   T, detailed_data['mu_min'], detailed_data['mu_max'], detailed_data['mu_hat'],
                                   detailed_data['mu_bar'], detailed_data['mu_tilde'])
    return (InstationaryDiscretization(T=T,
                                       initial_data=reduced_op.source.zeros(1),
                                       operator=reduced_op,
                                       rhs=reduced_rhs,
                                       mass=unblock_op(rd.products['l2']),
                                       time_stepper=ImplicitEulerTimeStepper(nt),
                                       products={kk: unblock_op(rd.products[kk]) for kk in rd.products.keys()},
                                       operators={kk: unblock_op(rd.operators[kk])
                                                  for kk in rd.operators.keys() if kk != 'operator'},
                                       functionals={kk: unblock_op(rd.functionals[kk])
                                                    for kk in rd.functionals.keys() if kk != 'rhs'},
                                       vector_operators={kk: unblock_op(rd.vector_operators[kk])
                                                         for kk in rd.vector_operators.keys()},
                                       parameter_space=rd.parameter_space,
                                       estimator=estimator,
                                       cache_region='disk',
                                       name='reduced discretization ({} DoFs)'.format(reduced_op.source.dim)),
            rc,
            reduction_data)
Example #13
def numpy_reduce_stationary_affine_linear(discretization, RB, error_product=None, disable_caching=True):
    '''Reductor for stationary linear problems whose `operator` and `rhs` are affinely decomposed.

    We simply use reduce_generic_rb for the actual RB-projection. The only addition
    is an error estimator. The estimator evaluates the norm of the residual with
    respect to a given inner product. We do not estimate the norm or the coercivity
    constant of the operator, therefore the estimated error can be lower than the
    actual error.

    Parameters
    ----------
    discretization
        The discretization which is to be reduced.
    RB
        The reduced basis (i.e. an array of vectors) on which to project.
    error_product
        Scalar product corresponding to the norm of the error. Used to calculate
        Riesz representatives of the components of the residual. If `None`, the
        standard L2-product is used.
    disable_caching
        If `True`, caching of the solutions of the reduced discretization
        is disabled.

    Returns
    -------
    rd
        The reduced discretization.
    rc
        The reconstructor providing a `reconstruct(U)` method which reconstructs
        high-dimensional solutions from solutions U of the reduced discretization.
    '''

    assert isinstance(discretization, StationaryLinearDiscretization)
    assert isinstance(discretization.operator, LinearAffinelyDecomposedOperator)
    assert all(not op.parametric for op in discretization.operator.operators)
    assert discretization.operator.operator_affine_part is None\
        or not discretization.operator.operator_affine_part.parametric
    if discretization.rhs.parametric:
        assert isinstance(discretization.rhs, LinearAffinelyDecomposedOperator)
        assert all(not op.parametric for op in discretization.rhs.operators)
        assert discretization.rhs.operator_affine_part is None or not discretization.rhs.operator_affine_part.parametric

    d = discretization
    rd, rc = reduce_generic_rb(d, RB, product=None, disable_caching=disable_caching)

    # compute data for estimator
    space_dim = d.operator.dim_source

    # compute the Riesz representative of (U, .)_L2 with respect to error_product
    def riesz_representative(U):
        if error_product is None:
            return U
        return d.solver(error_product.assemble(), NumpyLinearOperator(U)).data.ravel()

    # compute all components of the residual
    ra = 1 if not d.rhs.parametric or d.rhs.operator_affine_part is not None else 0
    rl = 0 if not d.rhs.parametric else len(d.rhs.operators)
    oa = 1 if not d.operator.parametric or d.operator.operator_affine_part is not None else 0
    ol = 0 if not d.operator.parametric else len(d.operator.operators)

    # if RB is None: RB = np.zeros((0, d.operator.dim_source))
    if RB is None:
        RB = NumpyVectorArray(np.zeros((0, next(d.operators.itervalues()).dim_source)))
    R_R = np.empty((ra + rl, space_dim))
    R_O = np.empty(((oa + ol) * len(RB), space_dim))
    RR_R = np.empty((ra + rl, space_dim))
    RR_O = np.empty(((oa + ol) * len(RB), space_dim))

    if not d.rhs.parametric:
        R_R[0] = d.rhs.assemble()._matrix.ravel()
        RR_R[0] = riesz_representative(R_R[0])

    if d.rhs.parametric and d.rhs.operator_affine_part is not None:
        R_R[0] = d.rhs.operator_affine_part.assemble()._matrix.ravel()
        RR_R[0] = riesz_representative(R_R[0])

    if d.rhs.parametric:
        R_R[ra:] = np.array([op.assemble()._matrix.ravel() for op in d.rhs.operators])
        RR_R[ra:] = np.array(map(riesz_representative, R_R[ra:]))

    if len(RB) > 0 and not d.operator.parametric:
        R_O[0:len(RB)] = -d.operator.apply(RB).data  # residual is rhs - A u, cf. the parametric branch
        RR_O[0:len(RB)] = np.array(map(riesz_representative, R_O[0:len(RB)]))

    if len(RB) > 0 and d.operator.parametric and d.operator.operator_affine_part is not None:
        R_O[0:len(RB)] = -d.operator.operator_affine_part.apply(RB).data  # residual is rhs - A u
        RR_O[0:len(RB)] = np.array(map(riesz_representative, R_O[0:len(RB)]))

    if len(RB) > 0 and d.operator.parametric:
        for i, op in enumerate(d.operator.operators):
            A = R_O[(oa + i) * len(RB): (oa + i + 1) * len(RB)]
            A[:] = -op.apply(RB).data
            RR_O[(oa + i) * len(RB): (oa + i + 1) * len(RB)] = np.array(map(riesz_representative, A))

    # compute Gram matrix of the residuals
    R_RR = np.dot(RR_R, R_R.T)
    R_RO = np.dot(RR_R, R_O.T)
    R_OO = np.dot(RR_O, R_O.T)

    estimator_matrix = np.empty((len(R_RR) + len(R_OO),) * 2)
    estimator_matrix[:len(R_RR), :len(R_RR)] = R_RR
    estimator_matrix[len(R_RR):, len(R_RR):] = R_OO
    estimator_matrix[:len(R_RR), len(R_RR):] = R_RO
    estimator_matrix[len(R_RR):, :len(R_RR)] = R_RO.T

    rd.estimator_matrix = NumpyLinearOperator(estimator_matrix)

    # this is our estimator
    def estimate(self, U, mu=None):
        assert len(U) == 1, 'Can estimate only one solution vector'
        if not self.rhs.parametric or self.rhs.operator_affine_part is not None:
            CRA = np.ones(1)
        else:
            CRA = np.ones(0)

        if self.rhs.parametric:
            CRL = self.rhs.evaluate_coefficients(self.map_parameter(mu, 'rhs'))
        else:
            CRL = np.ones(0)

        CR = np.hstack((CRA, CRL))

        if not self.operator.parametric or self.operator.operator_affine_part is not None:
            COA = np.ones(1)
        else:
            COA = np.ones(0)

        if self.operator.parametric:
            COL = self.operator.evaluate_coefficients(self.map_parameter(mu, 'operator'))
        else:
            COL = np.ones(0)

        C = np.hstack((CR, np.dot(np.hstack((COA, COL))[..., np.newaxis], U.data).ravel()))

        return induced_norm(self.estimator_matrix)(NumpyVectorArray(C))

    rd.estimate = types.MethodType(estimate, rd)

    return rd, rc
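
The monkey-patched `estimate` method is then used like the built-in estimators; a hedged usage sketch with illustrative names (`d`, `RB`, `h1_product` and `mu` are assumptions, not from the original):

    # hypothetical call of the reductor defined above
    rd, rc = numpy_reduce_stationary_affine_linear(d, RB, error_product=h1_product)
    u_red = rd.solve(mu)
    est = rd.estimate(u_red, mu=mu)  # norm of the weighted residual components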
Example #14
def reductor(discretization,
             RB,
             vector_product=None,
             disable_caching=True,
             extends=None):
    if RB is None:
        RB = [
            stat_blocked_disc.local_operator(ss).source.empty()
            for ss in np.arange(stat_blocked_disc.num_subdomains)
        ]
    rd, rc, reduction_data = reduce_generic_rb(stat_blocked_disc, RB,
                                               vector_product, disable_caching,
                                               extends)

    def unblock_op(op):
        assert op._blocks[0][0] is not None
        if isinstance(op._blocks[0][0], LincombOperator):
            coefficients = op._blocks[0][0].coefficients
            operators = [
                None for kk in np.arange(len(op._blocks[0][0].operators))
            ]
            for kk in np.arange(len(op._blocks[0][0].operators)):
                ops = [[
                    op._blocks[ii][jj].operators[kk]
                    if op._blocks[ii][jj] is not None else None
                    for jj in np.arange(op.num_source_blocks)
                ] for ii in np.arange(op.num_range_blocks)]
                operators[kk] = unblock_op(BlockOperator(ops))
            return LincombOperator(operators=operators,
                                   coefficients=coefficients)
        else:
            assert all(
                all([
                    isinstance(block, NumpyMatrixOperator
                               ) if block is not None else True
                    for block in row
                ]) for row in op._blocks)
            if op.source.dim == 0 and op.range.dim == 0:
                return NumpyMatrixOperator(np.zeros((0, 0)))
            elif op.source.dim == 1:
                # the blocks form a column: stack them vertically
                mat = np.concatenate([
                    op._blocks[ii][0]._matrix
                    for ii in np.arange(op.num_range_blocks)
                ],
                                     axis=0)
            elif op.range.dim == 1:
                mat = np.concatenate([
                    op._blocks[0][jj]._matrix
                    for jj in np.arange(op.num_source_blocks)
                ],
                                     axis=1)
            else:
                mat = bmat([[
                    coo_matrix(op._blocks[ii][jj]._matrix)
                    if op._blocks[ii][jj] is not None else coo_matrix(
                        (op._range_dims[ii], op._source_dims[jj]))
                    for jj in np.arange(op.num_source_blocks)
                ] for ii in np.arange(op.num_range_blocks)]).toarray()
            return NumpyMatrixOperator(mat)

    reduced_op = unblock_op(rd.operator)
    reduced_rhs = unblock_op(rd.rhs)
    return (InstationaryDiscretization(
        T=config['end_time'],
        initial_data=reduced_op.source.zeros(1),
        operator=reduced_op,
        rhs=reduced_rhs,
        mass=unblock_op(rd.products['l2']),
        time_stepper=ImplicitEulerTimeStepper(config['nt']),
        products={
            kk: unblock_op(rd.products[kk])
            for kk in rd.products.keys()
        },
        operators={
            kk: unblock_op(rd.operators[kk])
            for kk in rd.operators.keys() if kk != 'operator'
        },
        functionals={
            kk: unblock_op(rd.functionals[kk])
            for kk in rd.functionals.keys() if kk != 'rhs'
        },
        vector_operators={
            kk: unblock_op(rd.vector_operators[kk])
            for kk in rd.vector_operators.keys()
        },
        parameter_space=rd.parameter_space,
        cache_region='disk',
        name='reduced non-blocked discretization'),
            Reconstructor(stat_blocked_disc, RB), reduction_data)
Example #15
def reductor(discretization, rb, extends=None):
    return reduce_generic_rb(ei_discretization, rb, extends=extends)
Example #16
def online_phase(cfg, detailed_data, offline_data):
    logger = getLogger('.OS2015_SISC__6_2.online_phase')
    logger.setLevel('INFO')

    def doerfler_marking(indicators, theta):
        assert 0.0 < theta <= 1.0
        indices = list(range(len(indicators)))
        indicators = [ii**2 for ii in indicators]
        indicators, indices = [list(x) for x in zip(*sorted(zip(indicators, indices),
                                                            key=lambda pair: pair[0],
                                                            reverse=True))]
        total = np.sum(indicators)
        sums = np.array([np.sum(indicators[:ii+1]) for ii in np.arange(len(indicators))])
        where = sums > theta*total
        if np.any(where):
            return indices[:np.argmax(where)+1]
        else:
            return indices
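    # For instance (illustrative, not part of the original code):
    # doerfler_marking([3.0, 1.0, 0.5], theta=0.9) squares the indicators to
    # [9.0, 1.0, 0.25] and returns [0, 1], the smallest index set whose squared
    # indicators exceed 90% of the total squared mass.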

    discretization = detailed_data['discretization']
    example        = detailed_data['example']
    local_products = detailed_data['local_products']
    mu_bar_dune    = detailed_data['mu_bar_dune']
    mu_hat_dune    = detailed_data['mu_hat_dune']
    norm           = detailed_data['norm']
    wrapper        = detailed_data['wrapper']

    basis     = offline_data['basis']
    basis_mus = offline_data['basis_mus']
    rd        = offline_data['rd']
    rc        = offline_data['rc']

    reduced_estimator = ReducedEstimator(discretization, example, wrapper, mu_hat_dune, mu_bar_dune,
                                         norm, cfg['estimator_compute'], cfg['estimator_return'])
    reduced_estimator.extension_step += 1
    reduced_estimator.rc = rc
    num_test_samples = cfg['num_test_samples']
    target_error = cfg['online_target_error']

    logger.info('Started online phase for {} samples'.format(num_test_samples))
    test_samples = list(discretization.parameter_space.sample_randomly(num_test_samples))

    if cfg['estimate_some_errors'] and len(test_samples) > 0:
        logger.info('Estimating discretization errors:')
        detailed_estimator = DetailedEstimator(example, wrapper, mu_hat_dune, mu_bar_dune)
        estimates = [detailed_estimator.estimate(
                         discretization.globalize_vectors(discretization.solve(mu))._list[0]._impl,
                         wrapper.dune_parameter(mu)) for mu in test_samples]
        max_error = np.amax(estimates)
        logger.info('  range: [{}, {}]'.format(np.amin(estimates), max_error))
        logger.info('  mean:   {}'.format(np.mean(estimates)))
        add_values(estimates=estimates)
        if max_error > cfg['online_target_error']:
            logger.warn('Given target error of {} is below the worst discretization error {}!'.format(
                cfg['online_target_error'], max_error))
        print('')

    failures = 0
    successes = 0
    for mu in test_samples:
        mu_dune = wrapper.dune_parameter(mu)
        mu_in_basis = mu in basis_mus
        age = np.ones(discretization.num_subdomains)
        logger.info('Solving for {} ...'.format(mu))
        U_red = rd.solve(mu)
        logger.info('Estimating (mu is {}in the basis) ...'.format('already ' if mu_in_basis else 'not '))
        error = reduced_estimator.estimate(U_red, mu, discretization)
        if error > target_error:
            if mu_in_basis:
                logger.error(('The error ({}) is larger than the target_error ({}), '
                             + 'but {} is already in the basis: aborting!').format(
                                 error, target_error, mu))
                logger.error('This usually means that the tolerances are poorly chosen!')
                failures += 1
                print('')
            else:
                try:
                    logger.info('The error ({}) is too large, starting local enrichment phase:'.format(error))
                    num_extensions = 0

                    intermediate_basis = [bb.copy() for bb in basis]
                    if cfg['local_indicators'] == 'model_reduction_error':
                        U_h = discretization.solve(mu)
                        assert len(U_h) == 1
                    while error > target_error and num_extensions < cfg['online_max_extensions']:
                        U_red_h = rc.reconstruct(U_red)
                        assert len(U_red_h) == 1
                        U_red_global = discretization.globalize_vectors(U_red_h)
                        U_red_dune = U_red_global._list[0]._impl
                        if (cfg['uniform_enrichment_factor'] > 0
                            and error/target_error > cfg['uniform_enrichment_factor']):
                            logger.info('- Enriching on all subdomains, since error/target_error = {}'.format(
                                error/target_error))
                            marked_subdomains = range(discretization.num_subdomains)
                            if 'age' in cfg['marking_strategy']:
                                age = np.ones(discretization.num_subdomains)
                        else:
                            logger.info('- Estimating local error contributions ...')
                            # compute local error indicators
                            if cfg['local_indicators'] == 'model_reduction_error':
                                difference = U_h - U_red_h
                                local_indicators = [induced_norm(local_products[ss])(difference._blocks[ss])
                                                    for ss in np.arange(discretization.num_subdomains)]
                            elif cfg['local_indicators'] == 'eta_red':
                                local_indicators = list(example.estimate_local(U_red_dune,
                                                                               'eta_OS2014_*',
                                                                               mu_hat_dune,
                                                                               mu_bar_dune,
                                                                               mu_dune))
                            else:
                                raise ConfigurationError('Unknown local_indicators given: {}'.format(
                                    cfg['local_indicators']))
                            # mark subdomains
                            if 'doerfler' in cfg['marking_strategy']:
                                marked_subdomains = set(doerfler_marking(local_indicators,
                                                                         cfg['doerfler_marking_theta']))
                            else:
                                raise ConfigurationError('Unknown marking_strategy given: {}'.format(
                                    cfg['marking_strategy']))
                            if 'neighbours' in cfg['marking_strategy']:
                                # additionally mark all neighbours of the already marked subdomains
                                for ss in list(marked_subdomains):
                                    for nn in discretization._impl.neighbouring_subdomains(ss):
                                        marked_subdomains.add(nn)
                            if 'age' in cfg['marking_strategy']:
                                only_marked = len(marked_subdomains)
                                too_old = np.where(age > cfg['marking_max_age'])[0]
                                for ss in too_old:
                                    marked_subdomains.add(ss)
                                logger.info(('  {} subdomains marked ({} bc. of age), '
                                             + 'computing local solutions ...').format(
                                                 len(marked_subdomains), len(marked_subdomains) - only_marked))
                            else:
                                logger.info('  {} subdomains marked, computing local solutions ...'.format(
                                    len(marked_subdomains)))
                            for ss in np.arange(discretization.num_subdomains):
                                if ss in marked_subdomains:
                                    age[ss] = 1
                                else:
                                    age[ss] += 1
                        # compute updated local solution
                        local_solutions = [None for ss in np.arange(discretization.num_subdomains)]
                        for subdomain in marked_subdomains:
                            local_boundary_values = cfg['local_boundary_values']
                            if not (local_boundary_values == 'dirichlet' or local_boundary_values == 'neumann'):
                                raise ConfigurationError('Unknown local_boundary_values given: {}'.format(
                                    local_boundary_values))
                            oversampled_discretization = discretization.get_oversampled_discretization(
                                    subdomain, local_boundary_values)
                            local_discretization = discretization.get_local_discretization(subdomain)
                            U_red_oversampled_dune = example.project_global_to_oversampled(U_red_dune, subdomain)
                            U_h_improved_oversampled_dune = example.solve_oversampled(
                                    subdomain, local_boundary_values, U_red_oversampled_dune, mu_dune)
                            U_h_improved_local_dune = example.project_oversampled_to_local(
                                    U_h_improved_oversampled_dune, subdomain)
                            U_h_improved_local = make_listvectorarray(wrapper[U_h_improved_local_dune])
                            local_solutions[subdomain] = U_h_improved_local
                        # extend local bases
                        logger.info('  Extending bases on {} subdomain{}...'.format(
                            len(marked_subdomains), '' if len(marked_subdomains) == 1 else 's'))
                        old_basis_size = sum([len(bb) for bb in intermediate_basis])
                        extended_bases, _ = gram_schmidt_block_basis_extension(
                                [intermediate_basis[ss] for ss in marked_subdomains],
                                [local_solutions[ss] for ss in marked_subdomains],
                                product=[local_products[ss] for ss in marked_subdomains])
                        assert len(extended_bases) == len(marked_subdomains)
                        for ii, subdomain in enumerate(marked_subdomains):
                            intermediate_basis[subdomain] = extended_bases[ii]
                        new_basis_size = sum([len(bb) for bb in intermediate_basis])
                        num_extensions += 1
                        logger.info('  Reducing ...')
                        rd, _, _ = reduce_generic_rb(discretization, intermediate_basis)
                        rc = GenericBlockRBReconstructor(intermediate_basis)
                        reduced_estimator.rc = rc
                        reduced_estimator.extension_step += 1
                        U_red = rd.solve(mu)
                        logger.info('  Estimating (total basis size: {})'.format(
                            sum(len(bb) for bb in intermediate_basis)))
                        new_error = reduced_estimator.estimate(U_red, mu, discretization)
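                        # rough convergence order: error reduction measured against basis growth (negative if the error increased)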
                        order = np.log(new_error/error)/np.log(old_basis_size/new_basis_size)
                        logger.info('              {} (relative improvement: {})'.format(new_error, order))
                        if new_error > error:
                            logger.warn('The error has increased (from {} to {}) after enrichment!'.format(error,
                                                                                                           new_error))
                        elif order < 1:
                            logger.warn(('The error has decreased only slightly '
                                         + '(from {} to {}) after enrichment!').format(error, new_error))
                        if num_extensions >= cfg['online_max_extensions'] and new_error > cfg['online_target_error']:
                            basis = intermediate_basis
                            raise EnrichmentError('Reached maximum number of {} extensions!'.format(
                                cfg['online_max_extensions']))
                        error = new_error
                    logger.info('  The error ({}) is below the target error, continuing ...'.format(error))
                    successes += 1
                    basis = intermediate_basis
                    logger.info('Basis sizes range from {} to {}.'.format(np.min([len(bb) for bb in basis]),
                                                                          np.max([len(bb) for bb in basis])))
                except EnrichmentError as ee:
                    logger.critical('Enrichment stopped because: {}'.format(ee))
                    logger.info('Basis sizes range from {} to {}.'.format(np.min([len(bb) for bb in basis]),
                                                                          np.max([len(bb) for bb in basis])))
                    logger.info('Continuing with the next parameter ...')
                    failures += 1
                print('')
        else:
            logger.info('The error ({}) is below the target error, continuing ...'.format(error))
            successes += 1
            print('')
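The loop above delegates the subdomain selection to a doerfler_marking helper. For reference, here is a minimal sketch of such a marker, assuming the usual Dörfler (bulk) criterion on squared indicators; the helper actually used by the snippet may differ in details such as tie-breaking:

import numpy as np

def doerfler_marking(indicators, theta):
    # Doerfler marking: pick the smallest set of subdomains whose squared
    # indicators sum to at least the fraction theta of the total
    indicators = np.asarray(indicators)**2
    order = np.argsort(indicators)[::-1]    # largest contributions first
    cumulative = np.cumsum(indicators[order])
    num_marked = int(np.searchsorted(cumulative, theta * cumulative[-1])) + 1
    return order[:num_marked].tolist()

With theta close to 1 this marks almost every subdomain; with a small theta only the dominant error contributors are enriched.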
Example #17
def reduce_stationary_affine_linear(discretization, RB, error_product=None, coercivity_estimator=None,
                                    disable_caching=True, extends=None):
    """Reductor for linear |StationaryDiscretizations| whose with affinely decomposed operator and rhs.

    This reductor uses :meth:`~pymor.reductors.basic.reduce_generic_rb` for the actual
    RB-projection. The only addition is an error estimator. The estimator evaluates the
    norm of the residual with respect to a given inner product.

    Parameters
    ----------
    discretization
        The |Discretization| which is to be reduced.
    RB
        |VectorArray| containing the reduced basis on which to project.
    error_product
        Scalar product given as an |Operator| used to calculate Riesz
        representative of the residual. If `None`, the Euclidean product is used.
    coercivity_estimator
        `None` or a |ParameterFunctional| returning a lower bound for the coercivity
        constant of the given problem.
    disable_caching
        If `True`, caching of solutions is disabled for the reduced |Discretization|.
    extends
        Set by :meth:`~pymor.algorithms.greedy.greedy` to the result of the
        last reduction in case the basis extension was `hierarchic`. Used to prevent
        re-computation of Riesz representatives already obtained from previous
        reductions.

    Returns
    -------
    rd
        The reduced |Discretization|.
    rc
        The reconstructor providing a `reconstruct(U)` method which reconstructs
        high-dimensional solutions from solutions `U` of the reduced |Discretization|.
    reduction_data
        Additional data produced by the reduction process. In this case the computed
        Riesz representatives. (Compare the `extends` parameter.)
    """

    #assert isinstance(discretization, StationaryDiscretization)
    assert discretization.linear
    assert isinstance(discretization.operator, LincombOperator)
    assert all(not op.parametric for op in discretization.operator.operators)
    if discretization.rhs.parametric:
        assert isinstance(discretization.rhs, LincombOperator)
        assert all(not op.parametric for op in discretization.rhs.operators)
    assert extends is None or len(extends) == 3

    d = discretization
    rd, rc, data = reduce_generic_rb(d, RB, disable_caching=disable_caching, extends=extends)
    if extends:
        old_data = extends[2]
        old_RB_size = len(extends[1].RB)
    else:
        old_RB_size = 0

    # compute data for estimator
    space = d.operator.source

    # compute the Riesz representative of (U, .)_L2 with respect to error_product
    def riesz_representative(U):
        if error_product is None:
            return U.copy()
        else:
            return error_product.apply_inverse(U)

    def append_vector(U, R, RR):
        RR.append(riesz_representative(U), remove_from_other=True)
        R.append(U, remove_from_other=True)

    # compute all components of the residual
    if RB is None:
        RB = discretization.solution_space.empty()

    if extends:
        R_R, RR_R = old_data['R_R'], old_data['RR_R']
    elif not d.rhs.parametric:
        R_R = space.empty(reserve=1)
        RR_R = space.empty(reserve=1)
        append_vector(d.rhs.as_vector(), R_R, RR_R)
    else:
        R_R = space.empty(reserve=len(d.rhs.operators))
        RR_R = space.empty(reserve=len(d.rhs.operators))
        for op in d.rhs.operators:
            append_vector(op.as_vector(), R_R, RR_R)

    if len(RB) == 0:
        R_Os = [space.empty()]
        RR_Os = [space.empty()]
    elif not d.operator.parametric:
        R_Os = [space.empty(reserve=len(RB))]
        RR_Os = [space.empty(reserve=len(RB))]
        for i in xrange(len(RB)):
            append_vector(-d.operator.apply(RB, ind=i), R_Os[0], RR_Os[0])
    else:
        R_Os = [space.empty(reserve=len(RB)) for _ in xrange(len(d.operator.operators))]
        RR_Os = [space.empty(reserve=len(RB)) for _ in xrange(len(d.operator.operators))]
        if old_RB_size > 0:
            for op, R_O, RR_O, old_R_O, old_RR_O in izip(d.operator.operators, R_Os, RR_Os,
                                                         old_data['R_Os'], old_data['RR_Os']):
                R_O.append(old_R_O)
                RR_O.append(old_RR_O)
        for op, R_O, RR_O in izip(d.operator.operators, R_Os, RR_Os):
            for i in xrange(old_RB_size, len(RB)):
                append_vector(-op.apply(RB, [i]), R_O, RR_O)

    # compute Gram matrix of the residuals
    R_RR = RR_R.dot(R_R, pairwise=False)
    R_RO = np.hstack([RR_R.dot(R_O, pairwise=False) for R_O in R_Os])
    R_OO = np.vstack([np.hstack([RR_O.dot(R_O, pairwise=False) for R_O in R_Os]) for RR_O in RR_Os])

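    # assemble the full Gram matrix in 2x2 block form [[R_RR, R_RO], [R_RO.T, R_OO]]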
    estimator_matrix = np.empty((len(R_RR) + len(R_OO),) * 2)
    estimator_matrix[:len(R_RR), :len(R_RR)] = R_RR
    estimator_matrix[len(R_RR):, len(R_RR):] = R_OO
    estimator_matrix[:len(R_RR), len(R_RR):] = R_RO
    estimator_matrix[len(R_RR):, :len(R_RR)] = R_RO.T

    estimator_matrix = NumpyMatrixOperator(estimator_matrix)

    estimator = StationaryAffineLinearReducedEstimator(estimator_matrix, coercivity_estimator)
    rd = rd.with_(estimator=estimator)
    data.update(R_R=R_R, RR_R=RR_R, R_Os=R_Os, RR_Os=RR_Os)

    return rd, rc, data
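The Gram matrix assembled above is what makes the online bound cheap: the residual's Riesz representative is a fixed linear combination of the precomputed components, so its norm reduces to a small quadratic form. A minimal sketch of the online evaluation, assuming hypothetical names (theta_f: affine coefficients of the rhs, theta_a: affine coefficients of the operator, u_red: reduced coefficient vector, alpha_mu: coercivity lower bound) and a plain ndarray in place of the NumpyMatrixOperator wrapper; the actual StationaryAffineLinearReducedEstimator may differ:

import numpy as np

def evaluate_bound(estimator_matrix, theta_f, theta_a, u_red, alpha_mu):
    # stack coefficients to match the block layout of the Gram matrix:
    # rhs components first, then one block of reduced coordinates per
    # operator component (the minus sign is already baked into R_Os)
    c = np.hstack([theta_f] + [t * u_red for t in theta_a])
    # ||residual||^2 = c^T G c; clip at zero to guard against round-off
    riesz_norm = np.sqrt(max(c.dot(estimator_matrix.dot(c)), 0.0))
    return riesz_norm / alpha_mu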
Example #18
def reduce_parabolic(discretization,
                     RB,
                     product=None,
                     coercivity_estimator=None,
                     disable_caching=True,
                     extends=None):
    r"""Reductor for parabolic equations.

    This reductor uses :meth:`~pymor.reductors.basic.reduce_generic_rb` for the actual
    RB-projection. The only addition is the assembly of an error estimator which
    bounds the discrete l2-in time / energy-in space error similar to [GP05]_, [HO08]_
    as follows:

    .. math::
        \left[ C_a^{-1}(\mu)\|e_N(\mu)\|^2 + \sum_{n=1}^{N} dt\|e_n(\mu)\|^2_e \right]^{1/2}
            \leq \left[ C_a^{-1}(\mu)dt \sum_{n=1}^{N}\|\mathcal{R}^n(u_n(\mu), \mu)\|^2_{e,-1}
                        + C_a^{-1}(\mu)\|e_0\|^2 \right]^{1/2}

    Here, :math:`\|\cdot\|` denotes the norm induced by the problem's mass matrix
    (e.g. the L^2-norm) and :math:`\|\cdot\|_e` is an arbitrary energy norm w.r.t.
    which the space operator :math:`A(\mu)` is coercive, and :math:`C_a(\mu)` is a
    lower bound for its coercivity constant. Finally, :math:`\mathcal{R}^n` denotes
    the implicit Euler timestepping residual for the (fixed) time step size :math:`dt`,

    .. math::
        \mathcal{R}^n(u_n(\mu), \mu) :=
            f - M \frac{u_{n}(\mu) - u_{n-1}(\mu)}{dt} - A(u_n(\mu), \mu),

    where :math:`M` denotes the mass operator and :math:`f` the source term.
    The dual residual norm is evaluated using the numerically stable projection
    from [BEOR14]_.

    .. warning::
        The reduced basis `RB` is required to be orthonormal w.r.t. the given
        energy product. If not, the projection of the initial values will be
        computed incorrectly.

    .. [GP05]   M. A. Grepl, A. T. Patera, A Posteriori Error Bounds For Reduced-Basis
                Approximations Of Parametrized Parabolic Partial Differential Equations,
                M2AN 39(1), 157-181, 2005.
    .. [HO08]   B. Haasdonk, M. Ohlberger, Reduced basis method for finite volume
                approximations of parametrized evolution equations,
                M2AN 42(2), 277-302, 2008.
    .. [BEOR14] A. Buhr, C. Engwer, M. Ohlberger, S. Rave, A Numerically Stable A
                Posteriori Error Estimator for Reduced Basis Approximations of Elliptic
                Equations, Proceedings of the 11th World Congress on Computational
                Mechanics, 2014.

    Parameters
    ----------
    discretization
        The |InstationaryDiscretization| which is to be reduced.
    RB
        |VectorArray| containing the reduced basis on which to project.
    product
        The energy inner product |Operator| w.r.t. which the reduction error is
        estimated. RB must be orthonormal w.r.t. this product!
    coercivity_estimator
        `None` or a |ParameterFunctional| returning a lower bound for the coercivity
        constant of `discretization.operator` w.r.t. `product`.
    disable_caching
        If `True`, caching of solutions is disabled for the reduced |Discretization|.
    extends
        See :meth:`~pymor.algorithms.greedy.greedy`.

    Returns
    -------
    rd
        The reduced |Discretization|.
    rc
        The reconstructor providing a `reconstruct(U)` method which reconstructs
        high-dimensional solutions from solutions `U` of the reduced |Discretization|.
    reduction_data
        Additional data produced by the reduction process. (See
        :meth:`~pymor.algorithms.greedy.greedy`.)
    """

    assert extends is None or len(extends) == 3
    assert isinstance(discretization.time_stepper, ImplicitEulerTimeStepper)

    logger = getLogger('pymor.reductors.parabolic.reduce_parabolic')

    old_residual_data = extends[2].pop('residual') if extends else None
    old_initial_residual_data = extends[2].pop(
        'initial_residual') if extends else None

    with logger.block('RB projection ...'):
        rd, rc, data = reduce_generic_rb(discretization,
                                         RB,
                                         vector_product=product,
                                         disable_caching=disable_caching,
                                         extends=extends)

    dt = discretization.T / discretization.time_stepper.nt

    with logger.block('Assembling error estimator ...'):
        residual, residual_reconstructor, residual_data = reduce_implicit_euler_residual(
            discretization.operator,
            discretization.mass,
            dt,
            discretization.rhs,
            RB,
            product=product,
            extends=old_residual_data)

        initial_residual, initial_residual_reconstructor, initial_residual_data = reduce_residual(
            IdentityOperator(discretization.solution_space),
            discretization.initial_data,
            RB,
            False,
            product=discretization.l2_product,
            extends=old_initial_residual_data)

    estimator = ReduceParabolicEstimator(
        residual, residual_data.get('residual_range_dims',
                                    None), initial_residual,
        initial_residual_data.get('residual_range_dims', None),
        coercivity_estimator)

    rd = rd.with_(estimator=estimator)

    data.update(residual=(residual, residual_reconstructor, residual_data),
                initial_residual=(initial_residual,
                                  initial_residual_reconstructor,
                                  initial_residual_data))

    return rd, rc, data
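A minimal usage sketch, assuming a surrounding script that provides an |InstationaryDiscretization| d, a basis RB orthonormal w.r.t. an energy product energy_product, and a parameter mu (all of these names are assumptions):

rd, rc, _ = reduce_parabolic(d, RB,
                             product=energy_product,
                             coercivity_estimator=None)
U_red = rd.solve(mu)
error = rd.estimate(U_red, mu)   # the l2-in-time / energy-in-space bound from above
U = rc.reconstruct(U_red)        # back to the high-dimensional solution space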