Code Example #1
def ei_greedy(U,
              error_norm=None,
              atol=None,
              rtol=None,
              max_interpolation_dofs=None,
              copy=True,
              pool=dummy_pool):
    """Generate data for empirical interpolation using EI-Greedy algorithm.

    Given a |VectorArray| `U`, this method generates a collateral basis and
    interpolation DOFs for empirical interpolation of the vectors contained in `U`.
    The returned objects can be used to instantiate an |EmpiricalInterpolatedOperator|
    (with `triangular=True`).

    The interpolation data is generated by a greedy search algorithm, where in each
    loop iteration the worst approximated vector in `U` is added to the collateral basis.

    Parameters
    ----------
    U
        A |VectorArray| of vectors to interpolate.
    error_norm
        Norm w.r.t. which to calculate the interpolation error. If `None`, the Euclidean norm
        is used.
    atol
        Stop the greedy search if the largest approximation error is below this threshold.
    rtol
        Stop the greedy search if the largest relative approximation error is below this threshold.
    max_interpolation_dofs
        Stop the greedy search if the number of interpolation DOFs (= dimension of the collateral
        basis) reaches this value.
    copy
        If `False`, `U` will be modified during execution of the algorithm.
    pool
        If not `None`, the |WorkerPool| to use for parallelization.

    Returns
    -------
    interpolation_dofs
        |NumPy array| of the DOFs at which the vectors are evaluated.
    collateral_basis
        |VectorArray| containing the generated collateral basis.
    data
        Dict containing the following fields:

            :errors:                Sequence of maximum approximation errors during
                                    greedy search.
            :triangularity_errors:  Sequence of maximum absolute values of interpolation
                                    matrix coefficients in the upper triangle (should
                                    be near zero).
    """

    if pool:  # dispatch to parallel implementation
        assert isinstance(U, (VectorArrayInterface, RemoteObjectInterface))
        with RemoteObjectManager() as rom:
            if isinstance(U, VectorArrayInterface):
                U = rom.manage(pool.scatter_array(U))
            return _parallel_ei_greedy(
                U,
                error_norm=error_norm,
                atol=atol,
                rtol=rtol,
                max_interpolation_dofs=max_interpolation_dofs,
                copy=copy,
                pool=pool)

    assert isinstance(U, VectorArrayInterface)

    logger = getLogger('pymor.algorithms.ei.ei_greedy')
    logger.info('Generating Interpolation Data ...')

    interpolation_dofs = np.zeros((0, ), dtype=np.int32)
    collateral_basis = U.empty()
    max_errs = []
    triangularity_errs = []

    if copy:
        U = U.copy()

    ERR = U

    errs = ERR.l2_norm() if error_norm is None else error_norm(ERR)
    max_err_ind = np.argmax(errs)
    initial_max_err = max_err = errs[max_err_ind]

    # main loop
    while True:
        if max_interpolation_dofs is not None and len(
                interpolation_dofs) >= max_interpolation_dofs:
            logger.info(
                'Maximum number of interpolation DOFs reached. Stopping extension loop.'
            )
            logger.info(
                f'Final maximum interpolation error with '
                f'{len(interpolation_dofs)} interpolation DOFs: {max_err}')
            break

        logger.info(f'Maximum interpolation error with '
                    f'{len(interpolation_dofs)} interpolation DOFs: {max_err}')

        if atol is not None and max_err <= atol:
            logger.info(
                'Absolute error tolerance reached! Stopping extension loop.')
            break

        if rtol is not None and max_err / initial_max_err <= rtol:
            logger.info(
                'Relative error tolerance reached! Stopping extension loop.')
            break

        # compute new interpolation dof and collateral basis vector
        new_vec = U[max_err_ind].copy()
        new_dof = new_vec.amax()[0][0]
        if new_dof in interpolation_dofs:
            logger.info(
                f'DOF {new_dof} selected twice for interpolation! Stopping extension loop.'
            )
            break
        new_dof_value = new_vec.dofs([new_dof])[0, 0]
        if new_dof_value == 0.:
            logger.info(
                f'DOF {new_dof} selected for interpolation has zero maximum error! Stopping extension loop.'
            )
            break
        new_vec *= 1 / new_dof_value
        interpolation_dofs = np.hstack((interpolation_dofs, new_dof))
        collateral_basis.append(new_vec)
        max_errs.append(max_err)

        # update U and ERR
        new_dof_values = U.dofs([new_dof])
        U.axpy(-new_dof_values[:, 0], new_vec)
        errs = ERR.l2_norm() if error_norm is None else error_norm(ERR)
        max_err_ind = np.argmax(errs)
        max_err = errs[max_err_ind]

    interpolation_matrix = collateral_basis.dofs(interpolation_dofs).T
    triangularity_errors = np.abs(interpolation_matrix -
                                  np.tril(interpolation_matrix))
    for d in range(1, len(interpolation_matrix) + 1):
        triangularity_errs.append(np.max(triangularity_errors[:d, :d]))

    if len(triangularity_errs) > 0:
        logger.info(
            f'Interpolation matrix is not lower triangular with maximum error of {triangularity_errs[-1]}'
        )

    data = {'errors': max_errs, 'triangularity_errors': triangularity_errs}

    return interpolation_dofs, collateral_basis, data
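
The returned data is all that is needed to carry out the interpolation itself: restricting the collateral basis to the interpolation DOFs yields the (by construction approximately lower triangular) interpolation matrix, and matching a new vector at those DOFs reduces to a triangular solve. A minimal NumPy/SciPy sketch of this step, independent of pyMOR; the function and array names are hypothetical, and the basis is assumed to be stored as columns of a dense array:

import numpy as np
from scipy.linalg import solve_triangular

def ei_interpolate(u, interpolation_dofs, basis_columns):
    # interpolation matrix: collateral basis vectors evaluated at the
    # interpolation DOFs; ei_greedy makes this (nearly) lower triangular
    M = basis_columns[interpolation_dofs, :]
    # match u at the interpolation DOFs via a triangular solve
    coeffs = solve_triangular(M, u[interpolation_dofs], lower=True)
    # expand the interpolant in the collateral basis
    return basis_columns @ coeffs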
Code Example #2
def interpolate_operators(fom,
                          operator_names,
                          parameter_sample,
                          error_norm=None,
                          product=None,
                          atol=None,
                          rtol=None,
                          max_interpolation_dofs=None,
                          pod_options={},
                          alg='ei_greedy',
                          pool=dummy_pool):
    """Empirical operator interpolation using the EI-Greedy/DEIM algorithm.

    This is a convenience method to facilitate the use of :func:`ei_greedy` or :func:`deim`.
    Given a |Model|, names of |Operators|, and a sample of |Parameters|, first
    the operators are evaluated on the solution snapshots of the model for the
    provided parameters. These evaluations are then used as input for
    :func:`ei_greedy`/:func:`deim`.  Finally the resulting interpolation data is used to
    create |EmpiricalInterpolatedOperators| and a new model with the interpolated
    operators is returned.

    Note that this implementation creates *one* common collateral basis for all specified
    operators, which might not be what you want.

    Parameters
    ----------
    fom
        The |Model| whose |Operators| will be interpolated.
    operator_names
        Names of the |Operators| of the model (i.e. attributes of `fom`). The
        corresponding |Operators| will be interpolated.
    parameter_sample
        A list of |Parameters| for which solution snapshots are calculated.
    error_norm
        See :func:`ei_greedy`.
        Has no effect if `alg == 'deim'`.
    product
        Inner product for POD computation in :func:`deim`.
        Has no effect if `alg == 'ei_greedy'`.
    atol
        See :func:`ei_greedy`.
    rtol
        See :func:`ei_greedy`.
    max_interpolation_dofs
        See :func:`ei_greedy`.
    pod_options
        Further options for :func:`~pymor.algorithms.pod.pod` algorithm.
        Has no effect if `alg == 'ei_greedy'`.
    alg
        Either `ei_greedy` or `deim`.
    pool
        If not `None`, the |WorkerPool| to use for parallelization.

    Returns
    -------
    eim
        |Model| with |Operators| given by `operator_names` replaced by
        |EmpiricalInterpolatedOperators|.
    data
        Dict containing the following fields:

            :dofs:                  |NumPy array| of the DOFs at which the |Operators| have to be evaluated.
            :basis:                 |VectorArray| containing the generated collateral basis.

        In addition, `data` contains the fields of the `data` `dict` returned by
        :func:`ei_greedy`/:func:`deim`.
    """

    assert alg in ('ei_greedy', 'deim')
    logger = getLogger('pymor.algorithms.ei.interpolate_operators')
    with RemoteObjectManager() as rom:
        operators = [
            getattr(fom, operator_name) for operator_name in operator_names
        ]
        with logger.block(
                'Computing operator evaluations on solution snapshots ...'):
            if pool:
                logger.info(
                    f'Using pool of {len(pool)} workers for parallel evaluation'
                )
                evaluations = rom.manage(pool.push(fom.solution_space.empty()))
                pool.map(_interpolate_operators_build_evaluations,
                         parameter_sample,
                         fom=fom,
                         operators=operators,
                         evaluations=evaluations)
            else:
                evaluations = operators[0].range.empty()
                for mu in parameter_sample:
                    U = fom.solve(mu)
                    for op in operators:
                        evaluations.append(op.apply(U, mu=mu))

        if alg == 'ei_greedy':
            with logger.block('Performing EI-Greedy:'):
                dofs, basis, data = ei_greedy(
                    evaluations,
                    error_norm,
                    atol=atol,
                    rtol=rtol,
                    max_interpolation_dofs=max_interpolation_dofs,
                    copy=False,
                    pool=pool)
        elif alg == 'deim':
            if pool is not dummy_pool:
                logger.warn(
                    'DEIM algorithm not parallel. Collecting operator evaluations.'
                )
                evaluations = pool.apply(_identity, x=evaluations)
                evs = evaluations[0]
                for e in evaluations[1:]:
                    evs.append(e, remove_from_other=True)
                evaluations = evs
            with logger.block('Executing DEIM algorithm:'):
                dofs, basis, data = deim(evaluations,
                                         modes=max_interpolation_dofs,
                                         atol=atol,
                                         rtol=rtol,
                                         pod_options=pod_options,
                                         product=product)
        else:
            assert False

    ei_operators = {
        name: EmpiricalInterpolatedOperator(operator,
                                            dofs,
                                            basis,
                                            triangular=(alg == 'ei_greedy'))
        for name, operator in zip(operator_names, operators)
    }
    eim = fom.with_(name=f'{fom.name}_ei', **ei_operators)

    data.update({'dofs': dofs, 'basis': basis})
    return eim, data
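
A sketch of the intended call pattern; the model `fom`, the operator name and the parameter sample are placeholders that depend on the concrete |Model| at hand:

# hypothetical setup: 'fom' is a |Model| with an operator attribute named
# 'operator', and 'parameter_sample' is a list of parameters drawn from
# its parameter space
eim, data = interpolate_operators(fom, ['operator'], parameter_sample,
                                  rtol=1e-7,
                                  max_interpolation_dofs=30,
                                  alg='deim')  # or alg='ei_greedy'
# the returned model solves with the interpolated operator
U_interp = eim.solve(parameter_sample[0])
print('collateral basis size:', len(data['basis']))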
Code Example #3
def greedy(d, reductor, samples, use_estimator=True, error_norm=None,
           atol=None, rtol=None, max_extensions=None, extension_params=None, pool=None):
    """Greedy basis generation algorithm.

    This algorithm generates a reduced basis by iteratively adding the
    worst approximated solution snapshot for a given training set to the
    reduced basis. The approximation error is computed either by directly
    comparing the reduced solution to the detailed solution or by using
    an error estimator (`use_estimator == True`). The reduction and basis
    extension steps are performed by calling the methods provided by the
    `reductor` argument.

    Parameters
    ----------
    d
        The |Discretization| to reduce.
    reductor
        Reductor for reducing the given |Discretization|. This has to be
        an object with a `reduce` method, such that `reductor.reduce()`
        yields the reduced discretization, and an `extend_basis` method,
        such that `reductor.extend_basis(U, copy_U=False, **extension_params)`
        extends the current reduced basis by the vectors contained in `U`.
        For an example see :class:`~pymor.reductors.coercive.CoerciveRBReductor`.
    samples
        The set of |Parameter| samples on which to perform the greedy search.
    use_estimator
        If `True`, use `rd.estimate()` to estimate the errors on the
        sample set. Otherwise `d.solve()` is called to compute the exact
        model reduction error.
    error_norm
        If `use_estimator == False`, use this function to calculate the
        norm of the error. If `None`, the Euclidean norm is used.
    atol
        If not `None`, stop the algorithm if the maximum (estimated) error
        on the sample set drops below this value.
    rtol
        If not `None`, stop the algorithm if the maximum (estimated)
        relative error on the sample set drops below this value.
    max_extensions
        If not `None`, stop the algorithm after `max_extensions` extension
        steps.
    extension_params
        `dict` of parameters passed to the `reductor.extend_basis` method.
    pool
        If not `None`, the |WorkerPool| to use for parallelization.

    Returns
    -------
    Dict with the following fields:

        :rd:                     The reduced |Discretization| obtained for the
                                 computed basis.
        :max_errs:               Sequence of maximum errors during the greedy run.
        :max_err_mus:            The parameters corresponding to `max_errs`.
        :extensions:             Number of performed basis extensions.
        :time:                   Total runtime of the algorithm.
    """

    logger = getLogger('pymor.algorithms.greedy.greedy')
    samples = list(samples)
    sample_count = len(samples)
    extension_params = extension_params or {}
    logger.info('Started greedy search on {} samples'.format(sample_count))
    if pool is None or pool is dummy_pool:
        pool = dummy_pool
    else:
        logger.info('Using pool of {} workers for parallel greedy search'.format(len(pool)))

    with RemoteObjectManager() as rom:
        # Push everything we need during the greedy search to the workers.
        # Distribute the training set evenly among the workers.
        if not use_estimator:
            rom.manage(pool.push(d))
            if error_norm:
                rom.manage(pool.push(error_norm))
        samples = rom.manage(pool.scatter_list(samples))

        tic = time.time()
        extensions = 0
        max_errs = []
        max_err_mus = []

        while True:
            with logger.block('Reducing ...'):
                rd = reductor.reduce()

            if sample_count == 0:
                logger.info('There is nothing else to do for empty samples.')
                return {'rd': rd,
                        'max_errs': [], 'max_err_mus': [], 'extensions': 0,
                        'time': time.time() - tic}

            with logger.block('Estimating errors ...'):
                if use_estimator:
                    errors, mus = list(zip(*pool.apply(_estimate, rd=rd, d=None, reductor=None,
                                                       samples=samples, error_norm=None)))
                else:
                    errors, mus = list(zip(*pool.apply(_estimate, rd=rd, d=d, reductor=reductor,
                                                       samples=samples, error_norm=error_norm)))
            max_err_ind = np.argmax(errors)
            max_err, max_err_mu = errors[max_err_ind], mus[max_err_ind]

            max_errs.append(max_err)
            max_err_mus.append(max_err_mu)
            logger.info('Maximum error after {} extensions: {} (mu = {})'.format(extensions, max_err, max_err_mu))

            if atol is not None and max_err <= atol:
                logger.info('Absolute error tolerance ({}) reached! Stopping extension loop.'.format(atol))
                break

            if rtol is not None and max_err / max_errs[0] <= rtol:
                logger.info('Relative error tolerance ({}) reached! Stopping extension loop.'.format(rtol))
                break

            with logger.block('Computing solution snapshot for mu = {} ...'.format(max_err_mu)):
                U = d.solve(max_err_mu)
            with logger.block('Extending basis with solution snapshot ...'):
                try:
                    reductor.extend_basis(U, copy_U=False, **extension_params)
                except ExtensionError:
                    logger.info('Extension failed. Stopping now.')
                    break
            extensions += 1

            logger.info('')

            if max_extensions is not None and extensions >= max_extensions:
                logger.info('Maximum number of {} extensions reached.'.format(max_extensions))
                with logger.block('Reducing once more ...'):
                    rd = reductor.reduce()
                break

        tictoc = time.time() - tic
        logger.info('Greedy search took {} seconds'.format(tictoc))
        return {'rd': rd,
                'max_errs': max_errs, 'max_err_mus': max_err_mus, 'extensions': extensions,
                'time': tictoc}
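
A sketch of a typical call; the discretization `d` and the training set construction are placeholders, and `CoerciveRBReductor` is the reductor class the docstring points to:

# hypothetical setup: 'd' is the |Discretization| to reduce
reductor = CoerciveRBReductor(d)
training_set = d.parameter_space.sample_uniformly(10)
result = greedy(d, reductor, training_set,
                use_estimator=True, rtol=1e-5, max_extensions=25)
rd = result['rd']  # reduced |Discretization| for the generated basis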
Code Example #4
File: ei.py Project: mahgadalla/pymor
def interpolate_operators(d, operator_names, parameter_sample, error_norm=None,
                          atol=None, rtol=None, max_interpolation_dofs=None, pool=dummy_pool):
    """Empirical operator interpolation using the EI-Greedy algorithm.

    This is a convenience method to facilitate the use of :func:`ei_greedy`. Given
    a |Discretization|, names of |Operators|, and a sample of |Parameters|, first the operators
    are evaluated on the solution snapshots of the discretization for the provided parameters.
    These evaluations are then used as input for :func:`ei_greedy`. Finally the resulting
    interpolation data is used to create |EmpiricalInterpolatedOperators| and a new
    discretization with the interpolated operators is returned.

    Note that this implementation creates *one* common collateral basis for all specified
    operators, which might not be what you want.

    Parameters
    ----------
    d
        The |Discretization| whose |Operators| will be interpolated.
    operator_names
        List of keys in the `operators` dict of the discretization. The corresponding
        |Operators| will be interpolated.
    parameter_sample
        A list of |Parameters| for which solution snapshots are calculated.
    error_norm
        See :func:`ei_greedy`.
    atol
        See :func:`ei_greedy`.
    rtol
        See :func:`ei_greedy`.
    max_interpolation_dofs
        See :func:`ei_greedy`.
    pool
        If not `None`, the |WorkerPool| to use for parallelization.

    Returns
    -------
    ei_d
        |Discretization| with |Operators| given by `operator_names` replaced by
        |EmpiricalInterpolatedOperators|.
    data
        Dict containing the following fields:

            :dofs:                  |NumPy array| of the DOFs at which the |Operators| have to be evaluated.
            :basis:                 |VectorArray| containing the generated collateral basis.
            :errors:                Sequence of maximum approximation errors during greedy search.
            :triangularity_errors:  Sequence of maximum absolute values of interpolation
                                    matrix coefficients in the upper triangle (should
                                    be near zero).
    """

    logger = getLogger('pymor.algorithms.ei.interpolate_operators')
    with RemoteObjectManager() as rom:
        operators = [d.operators[operator_name] for operator_name in operator_names]
        with logger.block('Computing operator evaluations on solution snapshots ...'):
            if pool:
                logger.info('Using pool of {} workers for parallel evaluation'.format(len(pool)))
                evaluations = rom.manage(pool.push(d.solution_space.empty()))
                pool.map(_interpolate_operators_build_evaluations, parameter_sample,
                         d=d, operators=operators, evaluations=evaluations)
            else:
                evaluations = operators[0].range.empty()
                for mu in parameter_sample:
                    U = d.solve(mu)
                    for op in operators:
                        evaluations.append(op.apply(U, mu=mu))

        with logger.block('Performing EI-Greedy:'):
            dofs, basis, data = ei_greedy(evaluations, error_norm, atol=atol, rtol=rtol,
                                          max_interpolation_dofs=max_interpolation_dofs,
                                          copy=False, pool=pool)

    ei_operators = {name: EmpiricalInterpolatedOperator(operator, dofs, basis, triangular=True)
                    for name, operator in zip(operator_names, operators)}
    operators_dict = d.operators.copy()
    operators_dict.update(ei_operators)
    ei_d = d.with_(operators=operators_dict, name='{}_ei'.format(d.name))

    data.update({'dofs': dofs, 'basis': basis})
    return ei_d, data
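
The corresponding call with this older API is analogous; `d`, the operator key and the sample are again placeholders:

# hypothetical setup: 'd' is a |Discretization| whose operators dict
# contains the key 'operator'; 'parameter_sample' was drawn from
# d.parameter_space
ei_d, data = interpolate_operators(d, ['operator'], parameter_sample,
                                   rtol=1e-7, max_interpolation_dofs=30)
U_interp = ei_d.solve(parameter_sample[0])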
Code Example #5
def adaptive_greedy(d, reductor, parameter_space=None,
                    use_estimator=True, error_norm=None,
                    target_error=None, max_extensions=None,
                    validation_mus=0, rho=1.1, gamma=0.2, theta=0.,
                    extension_params=None, visualize=False, visualize_vertex_size=80,
                    pool=dummy_pool):
    """Greedy basis generation algorithm with adaptively refined training set.

    This method extends pyMOR's default :func:`~pymor.algorithms.greedy.greedy`
    greedy basis generation algorithm by adaptive refinement of the
    parameter training set according to [HDO11]_ to prevent overfitting
    of the reduced basis to the training set. This is achieved by
    estimating the reduction error on an additional validation set of
    parameters. If the ratio between the estimated errors on the validation
    set and the training set is larger than `rho`, the training set
    is refined using standard grid refinement techniques.

    .. [HDO11] Haasdonk, B.; Dihlmann, M. & Ohlberger, M.,
               A training set and multiple bases generation approach for
               parameterized model reduction based on adaptive grids in
               parameter space,
               Math. Comput. Model. Dyn. Syst., 2011, 17, 423-442

    Parameters
    ----------
    d
        See :func:`~pymor.algorithms.greedy.greedy`.
    reductor
        See :func:`~pymor.algorithms.greedy.greedy`.
    parameter_space
        The |ParameterSpace| for which to compute the reduced model. If `None`,
        the parameter space of `d` is used.
    use_estimator
        See :func:`~pymor.algorithms.greedy.greedy`.
    error_norm
        See :func:`~pymor.algorithms.greedy.greedy`.
    target_error
        Stop the algorithm if the maximum (estimated) error on the training
        set drops below this value.
    max_extensions
        See :func:`~pymor.algorithms.greedy.greedy`.
    validation_mus
        One of the following:
          - a list of |Parameters| to use as validation set,
          - a positive number indicating the number of random parameters
            to use as validation set,
          - a non-positive number, indicating the negative number of random
            parameters to use as validation set in addition to the centers
            of the elements of the adaptive training set.
    rho
        Maximum allowed ratio between maximum estimated error on validation
        set vs. maximum estimated error on training set. If the ratio is
        larger, the training set is refined.
    gamma
        Weight of the age penalty term in the training set refinement
        indicators.
    theta
        Ratio of training set elements to select for refinement.
        (One element is always refined.)
    extension_params
        See :func:`~pymor.algorithms.greedy.greedy`.
    visualize
        If `True`, visualize the refinement indicators. (Only available
        for 2 and 3 dimensional parameter spaces.)
    visualize_vertex_size
        Size of the vertices in the visualization.
    pool
        See :func:`~pymor.algorithms.greedy.greedy`.

    Returns
    -------
    Dict with the following fields:

        :rd:                     The reduced |Discretization| obtained for the
                                 computed basis.
        :extensions:             Number of greedy iterations.
        :max_errs:               Sequence of maximum errors during the greedy run.
        :max_err_mus:            The parameters corresponding to `max_errs`.
        :max_val_errs:           Sequence of maximum errors on the validation set.
        :max_val_err_mus:        The parameters corresponding to `max_val_errs`.
        :refinements:            Number of refinements made in each extension step.
        :training_set_sizes:     The final size of the training set in each extension step.
        :time:                   Duration of the algorithm.
    """

    extension_params = extension_params or {}

    def estimate(mus):
        if use_estimator:
            errors = pool.map(_estimate, mus, rd=rd)
        else:
            errors = pool.map(_estimate, mus, rd=rd, d=d, reductor=reductor, error_norm=error_norm)
        # most error_norms will return an array of length 1 instead of a number, so we extract the numbers
        # if necessary
        return np.array([x[0] if hasattr(x, '__len__') else x for x in errors])

    logger = getLogger('pymor.algorithms.adaptivegreedy.adaptive_greedy')

    if pool is None or pool is dummy_pool:
        pool = dummy_pool
    else:
        logger.info('Using pool of {} workers for parallel greedy search'.format(len(pool)))

    with RemoteObjectManager() as rom:
        # Push everything we need during the greedy search to the workers.
        if not use_estimator:
            rom.manage(pool.push(d))
            if error_norm:
                rom.manage(pool.push(error_norm))

        tic = time.time()

        # setup training and validation sets
        parameter_space = parameter_space or d.parameter_space
        sample_set = AdaptiveSampleSet(parameter_space)
        if validation_mus <= 0:
            validation_set = sample_set.center_mus + parameter_space.sample_randomly(-validation_mus)
        else:
            validation_set = parameter_space.sample_randomly(validation_mus)
        if visualize and sample_set.dim not in (2, 3):
            raise NotImplementedError
        logger.info('Training set size: {}. Validation set size: {}'
                    .format(len(sample_set.vertex_mus), len(validation_set)))

        extensions = 0
        max_errs = []
        max_err_mus = []
        max_val_errs = []
        max_val_err_mus = []
        refinements = []
        training_set_sizes = []

        while True:  # main loop
            with logger.block('Reducing ...'):
                rd = reductor.reduce()

            current_refinements = 0
            while True:  # estimate reduction errors and refine training set until no overfitting is detected

                # estimate on training set
                with logger.block('Estimating errors ...'):
                    errors = estimate(sample_set.vertex_mus)
                max_err_ind = np.argmax(errors)
                max_err, max_err_mu = errors[max_err_ind], sample_set.vertex_mus[max_err_ind]
                logger.info('Maximum error after {} extensions: {} (mu = {})'.format(extensions, max_err, max_err_mu))

                # estimate on validation set
                val_errors = estimate(validation_set)
                max_val_err_ind = np.argmax(val_errors)
                max_val_err, max_val_err_mu = val_errors[max_val_err_ind], validation_set[max_val_err_ind]
                logger.info('Maximum validation error: {}'.format(max_val_err))
                logger.info('Validation error to training error ratio: {:.3e}'.format(max_val_err / max_err))

                if max_val_err >= max_err * rho:  # overfitting?

                    # compute element indicators for training set refinement
                    if current_refinements == 0:
                        logger.info2('Overfitting detected. Computing element indicators ...')
                    else:
                        logger.info3('Overfitting detected after refinement. Computing element indicators ...')
                    vertex_errors = np.max(errors[sample_set.vertex_ids], axis=1)
                    center_errors = estimate(sample_set.center_mus)
                    indicators_age_part = (gamma * sample_set.volumes / sample_set.total_volume
                                           * (sample_set.refinement_count - sample_set.creation_times))
                    indicators_error_part = np.max([vertex_errors, center_errors], axis=0) / max_err
                    indicators = indicators_age_part + indicators_error_part

                    # select elements
                    sorted_indicators_inds = np.argsort(indicators)[::-1]
                    refinement_elements = sorted_indicators_inds[:max(int(len(sorted_indicators_inds) * theta), 1)]
                    logger.info('Refining {} elements: {}'.format(len(refinement_elements), refinement_elements))

                    # visualization
                    if visualize:
                        from mpl_toolkits.mplot3d import Axes3D  # NOQA
                        import matplotlib.pyplot as plt
                        plt.figure()
                        plt.subplot(2, 2, 1, projection=None if sample_set.dim == 2 else '3d')
                        plt.title('estimated errors')
                        sample_set.visualize(vertex_data=errors, center_data=center_errors, new_figure=False)
                        plt.subplot(2, 2, 2, projection=None if sample_set.dim == 2 else '3d')
                        plt.title('indicators_error_part')
                        vmax = np.max([indicators_error_part, indicators_age_part, indicators])
                        data = {('volume_data' if sample_set.dim == 2 else 'center_data'): indicators_error_part}
                        sample_set.visualize(vertex_size=visualize_vertex_size, vmin=0, vmax=vmax, new_figure=False,
                                             **data)
                        plt.subplot(2, 2, 3, projection=None if sample_set.dim == 2 else '3d')
                        plt.title('indicators_age_part')
                        data = {('volume_data' if sample_set.dim == 2 else 'center_data'): indicators_age_part}
                        sample_set.visualize(vertex_size=visualize_vertex_size, vmin=0, vmax=vmax, new_figure=False,
                                             **data)
                        plt.subplot(2, 2, 4, projection=None if sample_set.dim == 2 else '3d')
                        if sample_set.dim == 2:
                            plt.title('indicators')
                            sample_set.visualize(volume_data=indicators,
                                                 center_data=np.zeros(len(refinement_elements)),
                                                 center_inds=refinement_elements,
                                                 vertex_size=visualize_vertex_size, vmin=0, vmax=vmax, new_figure=False)
                        else:
                            plt.title('selected cells')
                            sample_set.visualize(center_data=np.zeros(len(refinement_elements)),
                                                 center_inds=refinement_elements,
                                                 vertex_size=visualize_vertex_size, vmin=0, vmax=vmax, new_figure=False)
                        plt.show()

                    # refine training set
                    sample_set.refine(refinement_elements)
                    current_refinements += 1

                    # update validation set if needed
                    if validation_mus <= 0:
                        validation_set = sample_set.center_mus + parameter_space.sample_randomly(-validation_mus)

                    logger.info('New training set size: {}. New validation set size: {}'
                                .format(len(sample_set.vertex_mus), len(validation_set)))
                    logger.info('Number of refinements: {}'.format(sample_set.refinement_count))
                    logger.info('')
                else:
                    break  # no overfitting, leave the refinement loop

            max_errs.append(max_err)
            max_err_mus.append(max_err_mu)
            max_val_errs.append(max_val_err)
            max_val_err_mus.append(max_val_err_mu)
            refinements.append(current_refinements)
            training_set_sizes.append(len(sample_set.vertex_mus))

            # break if target error reached
            if target_error is not None and max_err <= target_error:
                logger.info('Reached maximal error on snapshots of {} <= {}'.format(max_err, target_error))
                break

            # basis extension
            with logger.block('Computing solution snapshot for mu = {} ...'.format(max_err_mu)):
                U = d.solve(max_err_mu)
            with logger.block('Extending basis with solution snapshot ...'):
                try:
                    reductor.extend_basis(U, copy_U=False, **extension_params)
                except ExtensionError:
                    logger.info('Extension failed. Stopping now.')
                    break
            extensions += 1

            logger.info('')

            # break if prescribed basis size reached
            if max_extensions is not None and extensions >= max_extensions:
                logger.info('Maximum number of {} extensions reached.'.format(max_extensions))
                with logger.block('Reducing once more ...'):
                    rd = reductor.reduce()
                break

    tictoc = time.time() - tic
    logger.info('Greedy search took {} seconds'.format(tictoc))
    return {'rd': rd,
            'max_errs': max_errs, 'max_err_mus': max_err_mus, 'extensions': extensions,
            'max_val_errs': max_val_errs, 'max_val_err_mus': max_val_err_mus,
            'refinements': refinements, 'training_set_sizes': training_set_sizes,
            'time': tictoc}
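
The element indicators that drive the training-set refinement combine an age penalty with a relative error part (see the overfitting branch of the main loop above). Isolated as a small NumPy function over plain arrays, with all argument names hypothetical:

import numpy as np

def refinement_indicators(vertex_errors, center_errors, volumes,
                          total_volume, ages, max_err,
                          gamma=0.2, theta=0.):
    # age penalty: large, long-unrefined elements are preferred
    age_part = gamma * volumes / total_volume * ages
    # error part: worst vertex/center error relative to the global maximum
    error_part = np.max([vertex_errors, center_errors], axis=0) / max_err
    indicators = age_part + error_part
    # select the theta fraction with the largest indicators (at least one)
    order = np.argsort(indicators)[::-1]
    return order[:max(int(len(order) * theta), 1)]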
Code Example #6
def ei_greedy(U,
              error_norm=None,
              atol=None,
              rtol=None,
              max_interpolation_dofs=None,
              projection='ei',
              product=None,
              copy=True,
              pool=dummy_pool):
    """Generate data for empirical interpolation by a greedy search (EI-Greedy algorithm).

    Given a |VectorArray| `U`, this method generates a collateral basis and
    interpolation DOFs for empirical interpolation of the vectors contained in `U`.
    The returned objects can also be used to instantiate an |EmpiricalInterpolatedOperator|.

    The interpolation data is generated by a greedy search algorithm, adding in each
    loop the worst approximated vector in `U` to the collateral basis.

    Parameters
    ----------
    U
        A |VectorArray| of vectors to interpolate.
    error_norm
        Norm w.r.t. which to calculate the interpolation error. If `None`, the Euclidean norm
        is used.
    atol
        Stop the greedy search if the largest approximation error is below this threshold.
    rtol
        Stop the greedy search if the largest relative approximation error is below this threshold.
    max_interpolation_dofs
        Stop the greedy search if the number of interpolation DOFs (= dimension of the collateral
        basis) reaches this value.
    projection
        If `ei`, compute the approximation error by comparing the given vector with its
        interpolant. If `orthogonal`, compute the error by comparing with the orthogonal projection
        onto the span of the collateral basis.
    product
        If `projection == 'orthogonal'`, the product which is used to perform the projection.
        If `None`, the Euclidean product is used.
    copy
        If `False`, `U` will be modified during execution of the algorithm.
    pool
        If not `None`, the |WorkerPool| to use for parallelization.

    Returns
    -------
    interpolation_dofs
        |NumPy array| of the DOFs at which the vectors are evaluated.
    collateral_basis
        |VectorArray| containing the generated collateral basis.
    data
        Dict containing the following fields:

            :errors:                Sequence of maximum approximation errors during
                                    greedy search.
            :triangularity_errors:  Sequence of maximum absolute values of interpolation
                                    matrix coefficients in the upper triangle (should
                                    be near zero).
    """

    assert projection in ('orthogonal', 'ei')

    if pool:  # dispatch to parallel implementation
        if projection == 'ei':
            pass
        elif projection == 'orthogonal':
            raise ValueError(
                'orthogonal projection not supported in parallel implementation'
            )
        else:
            assert False
        assert isinstance(U, (VectorArrayInterface, RemoteObjectInterface))
        with RemoteObjectManager() as rom:
            if isinstance(U, VectorArrayInterface):
                U = rom.manage(pool.scatter_array(U))
            return _parallel_ei_greedy(
                U,
                error_norm=error_norm,
                atol=atol,
                rtol=rtol,
                max_interpolation_dofs=max_interpolation_dofs,
                copy=copy,
                pool=pool)

    assert isinstance(U, VectorArrayInterface)

    logger = getLogger('pymor.algorithms.ei.ei_greedy')
    logger.info('Generating Interpolation Data ...')

    interpolation_dofs = np.zeros((0, ), dtype=np.int32)
    collateral_basis = U.empty()
    max_errs = []
    triangularity_errs = []

    if copy:
        U = U.copy()

    if projection == 'orthogonal':
        ERR = U.copy()
        onb_collateral_basis = collateral_basis.empty()
    else:
        ERR = U

    errs = ERR.l2_norm() if error_norm is None else error_norm(ERR)
    max_err_ind = np.argmax(errs)
    initial_max_err = max_err = errs[max_err_ind]

    # main loop
    while True:
        if max_interpolation_dofs is not None and len(
                interpolation_dofs) >= max_interpolation_dofs:
            logger.info(
                'Maximum number of interpolation DOFs reached. Stopping extension loop.'
            )
            logger.info(
                'Final maximum {} error with {} interpolation DOFs: {}'.format(
                    'projection' if projection == 'orthogonal' else 'interpolation',
                    len(interpolation_dofs), max_err))
            break

        logger.info('Maximum {} error with {} interpolation DOFs: {}'.format(
            'projection' if projection == 'orthogonal' else 'interpolation',
            len(interpolation_dofs), max_err))

        if atol is not None and max_err <= atol:
            logger.info(
                'Absolute error tolerance reached! Stopping extension loop.')
            break

        if rtol is not None and max_err / initial_max_err <= rtol:
            logger.info(
                'Relative error tolerance reached! Stopping extension loop.')
            break

        # compute new interpolation dof and collateral basis vector
        new_vec = U.copy(ind=max_err_ind)
        new_dof = new_vec.amax()[0][0]
        if new_dof in interpolation_dofs:
            logger.info(
                'DOF {} selected twice for interpolation! Stopping extension loop.'
                .format(new_dof))
            break
        new_dof_value = new_vec.components([new_dof])[0, 0]
        if new_dof_value == 0.:
            logger.info(
                'DOF {} selected for interpolation has zero maximum error! Stopping extension loop.'
                .format(new_dof))
            break
        new_vec *= 1 / new_dof_value
        interpolation_dofs = np.hstack((interpolation_dofs, new_dof))
        collateral_basis.append(new_vec)
        max_errs.append(max_err)

        # update U and ERR
        new_dof_values = U.components([new_dof])
        U.axpy(-new_dof_values[:, 0], new_vec)
        if projection == 'orthogonal':
            onb_collateral_basis.append(new_vec)
            gram_schmidt(onb_collateral_basis,
                         offset=len(onb_collateral_basis) - 1,
                         copy=False)
            coeffs = ERR.dot(onb_collateral_basis,
                             o_ind=len(onb_collateral_basis) - 1)
            ERR.axpy(-coeffs[:, 0],
                     onb_collateral_basis,
                     x_ind=len(onb_collateral_basis) - 1)
        errs = ERR.l2_norm() if error_norm is None else error_norm(ERR)
        max_err_ind = np.argmax(errs)
        max_err = errs[max_err_ind]

    interpolation_matrix = collateral_basis.components(interpolation_dofs).T
    triangularity_errors = np.abs(interpolation_matrix -
                                  np.tril(interpolation_matrix))
    for d in range(1, len(interpolation_matrix) + 1):
        triangularity_errs.append(np.max(triangularity_errors[:d, :d]))

    if len(triangularity_errs) > 0:
        logger.info(
            'Interpolation matrix is not lower triangular with maximum error of {}'
            .format(triangularity_errs[-1]))

    data = {'errors': max_errs, 'triangularity_errors': triangularity_errs}

    return interpolation_dofs, collateral_basis, data
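
In the `orthogonal` projection branch, each error vector loses its component along the newly orthonormalized collateral basis vector, so `ERR` always holds the residual of the orthogonal projection onto the current span. The same update written in plain NumPy, with error vectors as columns of `ERR` and `q` the new orthonormal vector (names hypothetical):

import numpy as np

def deflate_errors(ERR, q):
    # inner products of q with every error vector (the columns of ERR)
    coeffs = q @ ERR
    # subtract the component along q from each column
    return ERR - np.outer(q, coeffs)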
Code Example #7
File: greedy.py Project: simon-ca/pymor
def greedy(discretization,
           reductor,
           samples,
           initial_basis=None,
           use_estimator=True,
           error_norm=None,
           extension_algorithm=gram_schmidt_basis_extension,
           atol=None,
           rtol=None,
           max_extensions=None,
           pool=None):
    """Greedy basis generation algorithm.

    This algorithm generates a reduced basis by iteratively adding the
    worst approximated solution snapshot for a given training set to the
    reduced basis. The approximation error is computed either by directly
    comparing the reduced solution to the detailed solution or by using
    an error estimator (`use_estimator == True`). The reduction and basis
    extension steps are performed by calling the methods provided by the
    `reductor` and `extension_algorithm` arguments.

    Parameters
    ----------
    discretization
        The |Discretization| to reduce.
    reductor
        Reductor for reducing the given |Discretization|. This has to be a
        function of the form `reductor(discretization, basis, extends=None)`.
        If your reductor takes more arguments, use, e.g., :func:`functools.partial`.
        The method has to return a tuple
        `(reduced_discretization, reconstructor, reduction_data)`.
        In case the last basis extension was `hierarchic` (see
        `extension_algorithm`), the extends argument is set to
        `(last_reduced_discretization, last_reconstructor, last_reduction_data)`
        which can be used by the reductor to speed up the reduction
        process. For an example see
        :func:`~pymor.reductors.coercive.reduce_coercive`.
    samples
        The set of |Parameter| samples on which to perform the greedy search.
    initial_basis
        The initial reduced basis with which the algorithm starts. If `None`,
        an empty basis is used as initial basis.
    use_estimator
        If `True`, use `reduced_discretization.estimate()` to estimate the
        errors on the sample set. Otherwise a detailed simulation is
        performed to calculate the error.
    error_norm
        If `use_estimator == False`, use this function to calculate the
        norm of the error. If `None`, the Euclidean norm is used.
    extension_algorithm
        The extension algorithm to be used to extend the current reduced
        basis with the maximum error snapshot. This has to be a function
        of the form `extension_algorithm(old_basis, new_vector)`, which
        returns a tuple `(new_basis, extension_data)`, where
        `extension_data` is a dict at least containing the key
        `hierarchic`. `hierarchic` should be set to `True` if `new_basis`
        contains `old_basis` as its first vectors.
    atol
        If not `None`, stop the algorithm if the maximum (estimated) error
        on the sample set drops below this value.
    rtol
        If not `None`, stop the algorithm if the maximum (estimated)
        relative error on the sample set drops below this value.
    max_extensions
        If not `None`, stop the algorithm after `max_extensions` extension
        steps.
    pool
        If not `None`, the |WorkerPool| to use for parallelization.

    Returns
    -------
    Dict with the following fields:

        :basis:                  The reduced basis.
        :reduced_discretization: The reduced |Discretization| obtained for the
                                 computed basis.
        :reconstructor:          Reconstructor for `reduced_discretization`.
        :max_errs:               Sequence of maximum errors during the greedy run.
        :max_err_mus:            The parameters corresponding to `max_errs`.
    """

    logger = getLogger('pymor.algorithms.greedy.greedy')
    samples = list(samples)
    sample_count = len(samples)
    logger.info('Started greedy search on {} samples'.format(sample_count))
    if pool is None or pool is dummy_pool:
        pool = dummy_pool
    else:
        logger.info(
            'Using pool of {} workers for parallel greedy search'.format(
                len(pool)))

    with RemoteObjectManager() as rom:
        # Push everything we need during the greedy search to the workers.
        # Distribute the training set evenly among the workers.
        if not use_estimator:
            rom.manage(pool.push(discretization))
            if error_norm:
                rom.manage(pool.push(error_norm))
        samples = rom.manage(pool.scatter_list(samples))

        basis = initial_basis

        tic = time.time()
        extensions = 0
        max_errs = []
        max_err_mus = []
        hierarchic = False

        rd, rc, reduction_data = None, None, None
        while True:
            with logger.block('Reducing ...'):
                rd, rc, reduction_data = reductor(discretization, basis) if not hierarchic \
                    else reductor(discretization, basis, extends=(rd, rc, reduction_data))

            if sample_count == 0:
                logger.info('There is nothing else to do for empty samples.')
                return {
                    'basis': basis,
                    'reduced_discretization': rd,
                    'reconstructor': rc,
                    'max_errs': [],
                    'max_err_mus': [],
                    'extensions': 0,
                    'time': time.time() - tic,
                    'reduction_data': reduction_data
                }

            with logger.block('Estimating errors ...'):
                if use_estimator:
                    errors, mus = list(
                        zip(*pool.apply(_estimate,
                                        rd=rd,
                                        d=None,
                                        rc=None,
                                        samples=samples,
                                        error_norm=None)))
                else:
                    # FIXME: Always communicating rc may become a bottleneck in some use cases.
                    #        Add special treatment for GenericRBReconstructor?
                    errors, mus = list(
                        zip(*pool.apply(_estimate,
                                        rd=rd,
                                        d=discretization,
                                        rc=rc,
                                        samples=samples,
                                        error_norm=error_norm)))
            max_err_ind = np.argmax(errors)
            max_err, max_err_mu = errors[max_err_ind], mus[max_err_ind]

            max_errs.append(max_err)
            max_err_mus.append(max_err_mu)
            logger.info(
                'Maximum error after {} extensions: {} (mu = {})'.format(
                    extensions, max_err, max_err_mu))

            if atol is not None and max_err <= atol:
                logger.info(
                    'Absolute error tolerance ({}) reached! Stopping extension loop.'
                    .format(atol))
                break

            if rtol is not None and max_err / max_errs[0] <= rtol:
                logger.info(
                    'Relative error tolerance ({}) reached! Stopping extension loop.'
                    .format(rtol))
                break

            with logger.block(
                    'Computing solution snapshot for mu = {} ...'.format(
                        max_err_mu)):
                U = discretization.solve(max_err_mu)
            with logger.block('Extending basis with solution snapshot ...'):
                try:
                    basis, extension_data = extension_algorithm(basis, U)
                except ExtensionError:
                    logger.info('Extension failed. Stopping now.')
                    break
            extensions += 1
            if 'hierarchic' not in extension_data:
                logger.warn(
                    'Extension algorithm does not report if extension was hierarchic. Assuming it wasn\'t ...'
                )
                hierarchic = False
            else:
                hierarchic = extension_data['hierarchic']

            logger.info('')

            if max_extensions is not None and extensions >= max_extensions:
                logger.info('Maximum number of {} extensions reached.'.format(
                    max_extensions))
                with logger.block('Reducing once more ...'):
                    rd, rc, reduction_data = reductor(discretization, basis) if not hierarchic \
                        else reductor(discretization, basis, extends=(rd, rc, reduction_data))
                break

        tictoc = time.time() - tic
        logger.info('Greedy search took {} seconds'.format(tictoc))
        return {
            'basis': basis,
            'reduced_discretization': rd,
            'reconstructor': rc,
            'max_errs': max_errs,
            'max_err_mus': max_err_mus,
            'extensions': extensions,
            'time': tictoc,
            'reduction_data': reduction_data
        }
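
The `extension_algorithm` contract described in the docstring can be met by a minimal function. A sketch with NumPy arrays (rows as basis vectors) standing in for |VectorArrays|; unlike `gram_schmidt_basis_extension`, it performs no linear-dependence check and therefore never raises `ExtensionError`:

import numpy as np

def trivial_extension(old_basis, new_vector):
    # append the snapshot as a new row of the basis
    if old_basis is None:
        new_basis = np.atleast_2d(new_vector).copy()
    else:
        new_basis = np.vstack([old_basis, new_vector])
    # hierarchic: old_basis forms the first rows of new_basis
    return new_basis, {'hierarchic': True}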