Example 1
def newton(operator, rhs, initial_guess=None, mu=None, error_norm=None,
           miniter=None, maxiter=None, reduction=None, abs_limit=None,
           return_stages=False, return_residuals=False):
    miniter = defaults.newton_miniter if miniter is None else miniter
    maxiter = defaults.newton_maxiter if maxiter is None else maxiter
    reduction = defaults.newton_reduction if reduction is None else reduction
    abs_limit = defaults.newton_abs_limit if abs_limit is None else abs_limit
    logger = getLogger('pymor.algorithms.newton')

    data = {}

    if initial_guess is None:
        initial_guess = operator.type_source.zeros(operator.dim_source)

    if return_stages:
        data['stages'] = operator.type_source.empty(operator.dim_source)

    if return_residuals:
        data['residuals'] = operator.type_range.empty(operator.dim_range)

    U = initial_guess.copy()
    residual = rhs - operator.apply(U, mu=mu)

    err = first_err = residual.l2_norm()[0] if error_norm is None else error_norm(residual)[0]
    logger.info('      Initial Residual: {:5e}'.format(err))

    iteration = 0
    while iteration < miniter or (iteration < maxiter and err > abs_limit and err/first_err > reduction):
        if iteration > 0 and return_stages:
            data['stages'].append(U)
        if return_residuals:
            data['residuals'].append(residual)
        iteration += 1
        jacobian = operator.jacobian(U, mu=mu)
        try:
            correction = jacobian.apply_inverse(residual)
        except InversionError:
            raise NewtonError('Could not invert jacobian')
        U += correction
        residual = rhs - operator.apply(U, mu=mu)

        old_err = err
        err = residual.l2_norm()[0] if error_norm is None else error_norm(residual)[0]
        logger.info('Iteration {:2}: Residual: {:5e},  Reduction: {:5e}, Total Reduction: {:5e}'
                    .format(iteration, err, err / old_err, err / first_err))

    if err > abs_limit and err/first_err > reduction:
        raise NewtonError('Failed to converge')

    return U, data
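The loop above iterates at least `miniter` and at most `maxiter` times and stops once the residual norm falls below `abs_limit` or has been reduced by the factor `reduction`. A self-contained NumPy sketch of the same stopping logic, independent of pyMOR's Operator interface (the function, Jacobian and tolerances below are made-up illustrations, not pyMOR defaults):

import numpy as np

def newton_sketch(f, jac, x0, miniter=0, maxiter=100, reduction=1e-10, abs_limit=1e-15):
    # illustrative sketch: Newton iteration for f(x) = 0 with the stopping
    # criteria used above (miniter/maxiter, absolute limit, relative reduction)
    x = np.array(x0, dtype=float)
    residual = -f(x)
    err = first_err = np.linalg.norm(residual)
    iteration = 0
    while iteration < miniter or (iteration < maxiter
                                  and err > abs_limit and err / first_err > reduction):
        iteration += 1
        correction = np.linalg.solve(jac(x), residual)  # solve J(x) dx = -f(x)
        x += correction
        residual = -f(x)
        err = np.linalg.norm(residual)
    if err > abs_limit and err / first_err > reduction:
        raise RuntimeError('Failed to converge')
    return x

# toy usage: solve x**2 = 2 componentwise
print(newton_sketch(lambda x: x**2 - 2., lambda x: np.diag(2. * x), np.ones(3)))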
Example 2
def newton(operator, rhs, initial_guess=None, mu=None, error_norm=None,
           miniter=None, maxiter=None, reduction=None, abs_limit=None,
           stagnation_window=None, stagnation_threshold=None,
           return_stages=False, return_residuals=False):
    miniter = defaults.newton_miniter if miniter is None else miniter
    maxiter = defaults.newton_maxiter if maxiter is None else maxiter
    reduction = defaults.newton_reduction if reduction is None else reduction
    abs_limit = defaults.newton_abs_limit if abs_limit is None else abs_limit
    logger = getLogger('pymor.algorithms.newton')

    data = {}

    if initial_guess is None:
        initial_guess = operator.type_source.zeros(operator.dim_source)

    if return_stages:
        data['stages'] = operator.type_source.empty(operator.dim_source)

    if return_residuals:
        data['residuals'] = operator.type_range.empty(operator.dim_range)

    U = initial_guess.copy()
    residual = rhs - operator.apply(U, mu=mu)

    err = residual.l2_norm()[0] if error_norm is None else error_norm(residual)[0]
    logger.info('      Initial Residual: {:5e}'.format(err))

    iteration = 0
    error_sequence = [err]
    while (iteration < miniter
           or (iteration < maxiter
               and err > abs_limit and err/error_sequence[0] > reduction
               and (len(error_sequence) < stagnation_window + 1
                    or err/max(error_sequence[-stagnation_window - 1:]) < stagnation_threshold))):
        if iteration > 0 and return_stages:
            data['stages'].append(U)
        if return_residuals:
            data['residuals'].append(residual)
        iteration += 1
        jacobian = operator.jacobian(U, mu=mu)
        try:
            correction = jacobian.apply_inverse(residual)
        except InversionError:
            raise NewtonError('Could not invert jacobian')
        U += correction
        residual = rhs - operator.apply(U, mu=mu)

        err = residual.l2_norm()[0] if error_norm is None else error_norm(residual)[0]
        logger.info('Iteration {:2}: Residual: {:5e},  Reduction: {:5e}, Total Reduction: {:5e}'
                    .format(iteration, err, err / error_sequence[-1], err / error_sequence[0]))
        error_sequence.append(err)

    if (err <= abs_limit):
        logger.info('Absolute limit of {} reached. Stopping.'.format(abs_limit))
    elif (err/error_sequence[0] <= reduction):
        logger.info('Prescribed total reduction of {} reached. Stopping.'.format(reduction))
    elif (len(error_sequence) >= stagnation_window + 1
          and err/max(error_sequence[-stagnation_window - 1:]) >= stagnation_threshold):
        logger.info('Error is stagnating (threshold: {:5e}, window: {}). Stopping.'.format(stagnation_threshold,
                                                                                           stagnation_window))
    else:
        raise NewtonError('Failed to converge')

    data['error_sequence'] = np.array(error_sequence)

    return U, data
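This variant additionally stops when the error stagnates over a sliding window. Note that, unlike the other parameters, `stagnation_window` and `stagnation_threshold` are not filled in from `defaults` above, so they have to be passed explicitly. The criterion can be checked in isolation; a small sketch on a made-up error sequence:

# illustrative sketch of the stagnation check used in the loop condition above:
# compare the current error to the worst error within the last `stagnation_window`
# iterations; if it has not improved by at least the factor `stagnation_threshold`,
# the iteration is considered stagnating
error_sequence = [1.0, 0.5, 0.12, 0.118, 0.117, 0.116]
stagnation_window = 3
stagnation_threshold = 0.9

err = error_sequence[-1]
stagnating = (len(error_sequence) >= stagnation_window + 1
              and err / max(error_sequence[-stagnation_window - 1:]) >= stagnation_threshold)
print(stagnating)  # True: 0.116 / 0.12 ~ 0.97, i.e. less than 10% improvement over the window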
Example 3
# -*- coding: utf-8 -*-
# This file is part of the pyMOR project (http://www.pymor.org).
# Copyright Holders: Rene Milk, Stephan Rave, Felix Schindler
# License: BSD 2-Clause License (http://opensource.org/licenses/BSD-2-Clause)

from __future__ import absolute_import, division, print_function

from itertools import izip

from pymor.core import getLogger
logger = getLogger(__name__)


def iadd_masked(U, V, U_ind):
    '''Indexed, masked in-place addition.

    This is the same as ::

        U[U_ind] += V

    with two exceptions:
        1. Negative indices are skipped.
        2. If the same index is repeated, all additions are performed,
           not only the last one.
    '''
    logger.warn('Call to unoptimized function iadd_masked')
    assert len(U_ind) == len(V), 'Lengths of U_ind and V must match'
    assert U.shape[1:] == V.shape[1:], 'U.shape[1:] != V.shape[1:]'

    for ind, v in izip(U_ind, V):
        if ind < 0:
            continue
        U[ind] += v
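The same semantics can be obtained with NumPy's unbuffered np.add.at, which also accumulates repeated indices; a small equivalent sketch (all names hypothetical):

import numpy as np

def iadd_masked_np(U, V, U_ind):
    # illustrative sketch: same semantics as iadd_masked, i.e. U[U_ind] += V with
    # repeated indices accumulated and negative indices skipped
    U_ind = np.asarray(U_ind)
    mask = U_ind >= 0
    np.add.at(U, U_ind[mask], V[mask])

U = np.zeros((4, 2))
V = np.ones((5, 2))
iadd_masked_np(U, V, [0, 2, 2, -1, 3])
print(U)  # row 2 received two contributions, the entry for index -1 was skipped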
Example 4
def greedy(discretization, reductor, samples, initial_basis=None, use_estimator=True, error_norm=None,
           extension_algorithm=trivial_basis_extension, target_error=None, max_extensions=None):
    """Greedy basis generation algorithm.

    This algorithm generates a reduced basis by iteratively adding the
    worst approximated solution snapshot for a given training set to the
    reduced basis. The approximation error is computed either by directly
    comparing the reduced solution to the detailed solution or by using
    an error estimator (`use_estimator == True`). The reduction and basis
    extension steps are performed by calling the methods provided by the
    `reductor` and `extension_algorithm` arguments.

    Parameters
    ----------
    discretization
        The |Discretization| to reduce.
    reductor
        Reductor for reducing the given |Discretization|. This has to be a
        function of the form `reductor(discretization, basis, extends=None)`.
        If your reductor takes more arguments, use, e.g., :func:`functools.partial`.
        The method has to return a tuple
        `(reduced_discretization, reconstructor, reduction_data)`.
        In case the last basis extension was `hierarchic` (see
        `extension_algorithm`), the extends argument is set to
        `(last_reduced_discretization, last_reconstructor, last_reduction_data)`
        which can be used by the reductor to speed up the reduction
        process. For an example see
        :func:`~pymor.reductors.linear.reduce_stationary_affine_linear`.
    samples
        The set of |Parameter| samples on which to perform the greedy search.
    initial_basis
        The initial reduced basis with which the algorithm starts. If `None`,
        an empty basis is used as initial_basis.
    use_estimator
        If `True`, use `reduced_discretization.estimate()` to estimate the
        errors on the sample set. Otherwise a detailed simulation is
        performed to calculate the error.
    error_norm
        If `use_estimator == False`, use this function to calculate the
        norm of the error. If `None`, the Euclidean norm is used.
    extension_algorithm
        The extension algorithm to be used to extend the current reduced
        basis with the maximum error snapshot. This has to be a function
        of the form `extension_algorithm(old_basis, new_vector)`, which
        returns a tuple `(new_basis, extension_data)`, where
        `extension_data` is a dict at least containing the key
        `hierarchic`. `hierarchic` is set to `True` if `new_basis`
        contains `old_basis` as its first vectors.
    target_error
        If not `None`, stop the algorithm if the maximum (estimated) error
        on the sample set drops below this value.
    max_extensions
        If not `None`, stop the algorithm after `max_extensions` extension
        steps.

    Returns
    -------
    Dict with the following fields:

        :basis:                  The reduced basis.
        :reduced_discretization: The reduced |Discretization| obtained for the
                                 computed basis.
        :reconstructor:          Reconstructor for `reduced_discretization`.
        :max_err:                Last estimated maximum error on the sample set.
        :max_err_mu:             The parameter that corresponds to `max_err`.
        :max_errs:               Sequence of maximum errors during the greedy run.
        :max_err_mus:            The parameters corresponding to `max_errs`.
    """

    logger = getLogger('pymor.algorithms.greedy.greedy')
    samples = list(samples)
    logger.info('Started greedy search on {} samples'.format(len(samples)))
    basis = initial_basis

    tic = time.time()
    extensions = 0
    max_errs = []
    max_err_mus = []
    hierarchic = False

    rd, rc, reduction_data = None, None, None
    while True:
        logger.info('Reducing ...')
        rd, rc, reduction_data = reductor(discretization, basis) if not hierarchic \
            else reductor(discretization, basis, extends=(rd, rc, reduction_data))

        logger.info('Estimating errors ...')
        if use_estimator:
            errors = [rd.estimate(rd.solve(mu), mu) for mu in samples]
        elif error_norm is not None:
            errors = [error_norm(discretization.solve(mu) - rc.reconstruct(rd.solve(mu))) for mu in samples]
        else:
            errors = [(discretization.solve(mu) - rc.reconstruct(rd.solve(mu))).l2_norm() for mu in samples]

        # most error_norms will return an array of length 1 instead of a number, so we extract the numbers
        # if necessary
        errors = map(lambda x: x[0] if hasattr(x, '__len__') else x, errors)

        max_err, max_err_mu = max(((err, mu) for err, mu in izip(errors, samples)), key=lambda t: t[0])
        max_errs.append(max_err)
        max_err_mus.append(max_err_mu)
        logger.info('Maximum error after {} extensions: {} (mu = {})'.format(extensions, max_err, max_err_mu))

        if target_error is not None and max_err <= target_error:
            logger.info('Reached maximal error on snapshots of {} <= {}'.format(max_err, target_error))
            break

        logger.info('Extending with snapshot for mu = {}'.format(max_err_mu))
        U = discretization.solve(max_err_mu)
        try:
            basis, extension_data = extension_algorithm(basis, U)
        except ExtensionError:
            logger.info('Extension failed. Stopping now.')
            break
        extensions += 1
        if 'hierarchic' not in extension_data:
            logger.warn('Extension algorithm does not report if extension was hierarchic. Assuming it wasn\'t ...')
            hierarchic = False
        else:
            hierarchic = extension_data['hierarchic']

        logger.info('')

        if max_extensions is not None and extensions >= max_extensions:
            logger.info('Maximal number of {} extensions reached.'.format(max_extensions))
            logger.info('Reducing once more ...')
            rd, rc, reduction_data = reductor(discretization, basis) if not hierarchic \
                else reductor(discretization, basis, extends=(rd, rc, reduction_data))
            break

    tictoc = time.time() - tic
    logger.info('Greedy search took {} seconds'.format(tictoc))
    return {'basis': basis, 'reduced_discretization': rd, 'reconstructor': rc, 'max_err': max_err,
            'max_err_mu': max_err_mu, 'max_errs': max_errs, 'max_err_mus': max_err_mus, 'extensions': extensions,
            'time': tictoc, 'reduction_data': reduction_data}
Example 5
          'greedy_use_estimator': True,
          'estimator_compute': 'eta_red',
          'estimator_return': 'eta_red',
          'num_test_samples': 10,
          'estimate_some_errors': True,
          'local_indicators': 'eta_red',
          'marking_strategy': 'doerfler_and_age',
          'marking_max_age': 4,
          'doerfler_marking_theta': 0.66,
          'local_boundary_values': 'dirichlet',
          'online_target_error': 5,
          'online_max_extensions': 20}
DATASET_ID = config['dune_example'] + '_online_enrichment_test'

pymor.core.logger.MAX_HIERACHY_LEVEL = 2
getLogger('pymor.WrappedDiscretization').setLevel('WARN')
getLogger('pymor.algorithms').setLevel('INFO')
getLogger('dune.pymor.discretizations').setLevel('WARN')


if __name__ == '__main__':

    logfile = NamedTemporaryFile(delete=False).name
    pymor.core.logger.FILENAME = logfile
    new_dataset(DATASET_ID, **config)

    detailed_data = prepare(config)
    print('')
    offline_data  = offline_phase(config, detailed_data)
    print('')
    _             = online_phase(config, detailed_data, offline_data)
Example 6
def thermalblock_demo(args):
    args['XBLOCKS'] = int(args['XBLOCKS'])
    args['YBLOCKS'] = int(args['YBLOCKS'])
    args['--grid'] = int(args['--grid'])
    args['SNAPSHOTS'] = int(args['SNAPSHOTS'])
    args['RBSIZE'] = int(args['RBSIZE'])
    args['--test'] = int(args['--test'])
    args['--estimator-norm'] = args['--estimator-norm'].lower()
    assert args['--estimator-norm'] in {'trivial', 'h1'}
    args['--extension-alg'] = args['--extension-alg'].lower()
    assert args['--extension-alg'] in {'trivial', 'gram_schmidt', 'h1_gram_schmidt'}
    args['--reductor'] = args['--reductor'].lower()
    assert args['--reductor'] in {'default', 'numpy_default'}

    import IPython.parallel as p
    from pymor.playground.remote import setup_remote, RemoteStationaryDiscretization

    rc = p.Client()
    rv = rc[0]

    print('Discretize ...')
    rv.execute('''
import numpy as np

import pymor.core as core
core.logger.MAX_HIERACHY_LEVEL = 2
from pymor.analyticalproblems import ThermalBlockProblem
from pymor.discretizers import discretize_elliptic_cg
core.getLogger('pymor.algorithms').setLevel('INFO')
core.getLogger('pymor.discretizations').setLevel('INFO')
core.getLogger('pymor.la').setLevel('INFO')

print('Setup Problem ...')
problem = ThermalBlockProblem(num_blocks=({nx}, {ny}))

print('Discretize ...')
discretization, _ = discretize_elliptic_cg(problem, diameter=np.sqrt(2) / {grid})
'''.format(nx=args['XBLOCKS'], ny=args['YBLOCKS'], grid=args['--grid']))

    discretization_id = setup_remote(rv, 'discretization')
    discretization = RemoteStationaryDiscretization(rv, discretization_id)

    print('The parameter type is {}'.format(discretization.parameter_type))

    print('RB generation ...')

    error_product = discretization.h1_product if args['--estimator-norm'] == 'h1' else None
    reductors = {'default': partial(reduce_stationary_affine_linear, error_product=error_product)}
    reductor = reductors[args['--reductor']]
    extension_algorithms = {'trivial': trivial_basis_extension,
                            'gram_schmidt': gram_schmidt_basis_extension,
                            'h1_gram_schmidt': partial(gram_schmidt_basis_extension, product=discretization.h1_product)}
    extension_algorithm = extension_algorithms[args['--extension-alg']]
    greedy_data = greedy(discretization, reductor, discretization.parameter_space.sample_uniformly(args['SNAPSHOTS']),
                         use_estimator=args['--with-estimator'], error_norm=discretization.h1_norm,
                         initial_basis=discretization.operator.type_source.empty(dim=discretization.operator.dim_source),  # NOQA
                         extension_algorithm=extension_algorithm, max_extensions=args['RBSIZE'])
    rb_discretization, reconstructor = greedy_data['reduced_discretization'], greedy_data['reconstructor']

    print('\nSearching for maximum error on random snapshots ...')

    tic = time.time()
    h1_err_max = -1
    cond_max = -1
    for mu in discretization.parameter_space.sample_randomly(args['--test']):
        print('Solving RB-Scheme for mu = {} ... '.format(mu), end='')
        URB = reconstructor.reconstruct(rb_discretization.solve(mu))
        U = discretization.solve(mu)
        h1_err = discretization.h1_norm(U - URB)[0]
        cond = np.linalg.cond(rb_discretization.operator.assemble(mu)._matrix)
        if h1_err > h1_err_max:
            h1_err_max = h1_err
            Umax = U
            URBmax = URB
            mumax = mu
        if cond > cond_max:
            cond_max = cond
            cond_max_mu = mu
        print('H1-error = {}, condition = {}'.format(h1_err, cond))
    toc = time.time()
    t_est = toc - tic
    real_rb_size = len(greedy_data['basis'])

    print('''
    *** RESULTS ***

    Problem:
       number of blocks:                   {args[XBLOCKS]}x{args[YBLOCKS]}
       h:                                  sqrt(2)/{args[--grid]}

    Greedy basis generation:
       number of snapshots:                {args[SNAPSHOTS]}^({args[XBLOCKS]}x{args[YBLOCKS]})
       used estimator:                     {args[--with-estimator]}
       estimator norm:                     {args[--estimator-norm]}
       extension method:                   {args[--extension-alg]}
       prescribed basis size:              {args[RBSIZE]}
       actual basis size:                  {real_rb_size}
       elapsed time:                       {greedy_data[time]}

    Stochastic error estimation:
       number of samples:                  {args[--test]}
       maximal H1-error:                   {h1_err_max}  (mu = {mumax})
       maximal condition of system matrix: {cond_max}  (mu = {cond_max_mu})
       elapsed time:                       {t_est}
    '''.format(**locals()))

    sys.stdout.flush()
Example 7
    def clear(self):
        import glob
        filename = self._cache_region.backend.filename
        del self._cache_region
        files = glob.glob(filename + '*')
        map(os.unlink, files)
        self._new_region()


cache_regions = {'memory': DogpileMemoryCacheRegion(),
                 'disk': DogpileDiskCacheRegion()}
_caching_disabled = int(os.environ.get('PYMOR_CACHE_DISABLE', 0)) == 1
if _caching_disabled:
    from pymor.core import getLogger
    getLogger('pymor.core.cache').warn('caching globally disabled by environment')


def enable_caching():
    '''Globally enable caching.'''
    global _caching_disabled
    _caching_disabled = int(os.environ.get('PYMOR_CACHE_DISABLE', 0)) == 1


def disable_caching():
    '''Globally disable caching.'''
    global _caching_disabled
    _caching_disabled = True


def clear_caches():
Example 8
def gram_schmidt(A, product=None, tol=1e-14, offset=0, find_duplicates=True,
                 reiterate=True, reiteration_threshold=1e-1, check=True, check_tol=1e-3,
                 copy=False):
    """Orthonormalize a |VectorArray| using the Gram-Schmidt algorithm.

    Parameters
    ----------
    A
        The |VectorArray| which is to be orthonormalized.
    product
        The scalar product w.r.t. which to orthonormalize, given as a linear
        |Operator|. If `None` the Euclidean product is used.
    tol
        Tolerance to determine a linear dependent row.
    offset
        Assume that the first `offset` vectors are already orthogonal and start the
        algorithm at the `offset + 1`-th vector.
    find_duplicates
        If `True`, eliminate duplicate vectors before the main loop.
    reiterate
        If `True`, orthonormalize again if the norm of the orthogonalized vector is
        much smaller than the norm of the original vector.
    reiteration_threshold
        If `reiterate` is `True`, re-orthonormalize if the ratio between the norms of
        the orthogonalized vector and the original vector is smaller than this value.
    check
        If `True`, check if the resulting VectorArray is really orthonormal.
    check_tol
        Tolerance for the check.
    copy
        If `True`, create a copy of `A` instead of working directly on `A`.


    Returns
    -------
    The orthonormalized |VectorArray|.
    """

    logger = getLogger('pymor.la.gram_schmidt.gram_schmidt')

    if copy:
        A = A.copy()

    # find duplicate vectors since in some circumstances these cannot be detected in the main loop
    # (is this really needed, or is the tolerance just poorly chosen in these cases?)
    if find_duplicates:
        i = 0
        while i < len(A):
            duplicates = A.almost_equal(A, ind=i, o_ind=np.arange(max(offset, i + 1), len(A)))
            if np.any(duplicates):
                A.remove(np.where(duplicates)[0])
                logger.info("Removing duplicate vectors")
            i += 1

    # main loop
    remove = []
    norm = None
    for i in xrange(offset, len(A)):
        # first calculate norm
        if product is None:
            oldnorm = A.l2_norm(ind=i)[0]
        else:
            oldnorm = np.sqrt(product.apply2(A, A, V_ind=i, U_ind=i, pairwise=True))[0]

        if float_cmp_all(oldnorm, 0):
            logger.info("Removing null vector {}".format(i))
            remove.append(i)
            continue

        if i == 0:
            A.scal(1/oldnorm, ind=0)

        else:
            first_iteration = True

            # If reiterate is True, reiterate as long as the norm of the vector changes
            # strongly during orthonormalization (due to Andreas Buhr).
            while first_iteration or (reiterate and norm < reiteration_threshold):
                # this loop assumes that oldnorm is the norm of the ith vector when entering

                if first_iteration:
                    first_iteration = False
                else:
                    logger.info('Orthonormalizing vector {} again'.format(i))

                # orthogonalize to all vectors left
                for j in xrange(i):
                    if j in remove:
                        continue
                    if product is None:
                        p = A.dot(A, ind=i, o_ind=j, pairwise=True)[0]
                    else:
                        p = product.apply2(A, A, V_ind=i, U_ind=j, pairwise=True)[0]
                    A.axpy(-p, A, ind=i, x_ind=j)

                # calculate new norm
                if product is None:
                    norm = A.l2_norm(ind=i)[0]
                else:
                    norm = np.sqrt(product.apply2(A, A, V_ind=i, U_ind=i, pairwise=True))[0]

                # remove vector if it got too small:
                if norm / oldnorm < tol:
                    logger.info("Removing linear dependent vector {}".format(i))
                    remove.append(i)
                    break

                A.scal(1 / norm, ind=i)
                oldnorm = 1.

    if remove:
        A.remove(remove)

    if check:
        if not product and not float_cmp_all(A.dot(A, pairwise=False), np.eye(len(A)),
                                             atol=check_tol, rtol=0.):
            err = np.max(np.abs(A.dot(A, pairwise=False) - np.eye(len(A))))
            raise AccuracyError('result not orthogonal (max err={})'.format(err))
        elif product and not float_cmp_all(product.apply2(A, A, pairwise=False), np.eye(len(A)),
                                           atol=check_tol, rtol=0.):
            err = np.max(np.abs(product.apply2(A, A, pairwise=False) - np.eye(len(A))))
            raise AccuracyError('result not orthogonal (max err={})'.format(err))

    return A
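The same stabilized (re-iterated) modified Gram-Schmidt procedure can be written directly on plain NumPy arrays; a compact sketch for the Euclidean inner product (the pyMOR version above additionally supports arbitrary products, offsets and in-place operation on |VectorArrays|):

import numpy as np

def gram_schmidt_sketch(vectors, tol=1e-14, reiteration_threshold=1e-1):
    # illustrative sketch: orthonormalize a list of 1D arrays with modified
    # Gram-Schmidt; a vector is re-orthogonalized whenever its norm drops strongly
    # during orthogonalization and dropped when it becomes numerically zero
    basis = []
    for a in vectors:
        v = np.array(a, dtype=float)
        oldnorm = np.linalg.norm(v)
        if oldnorm == 0.:
            continue
        norm = 0.
        first_iteration = True
        while first_iteration or norm < reiteration_threshold:
            first_iteration = False
            for q in basis:
                v -= np.dot(q, v) * q
            norm = np.linalg.norm(v)
            if norm / oldnorm < tol:        # numerically linearly dependent -> drop
                v = None
                break
            v /= norm
            oldnorm = 1.
        if v is not None:
            basis.append(v)
    return np.array(basis)

Q = gram_schmidt_sketch([[1., 1., 0.], [1., 1., 1e-12], [0., 0., 1.]])
print(np.allclose(Q.dot(Q.T), np.eye(len(Q))))  # True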
Example 9
def logmodule(module_name):
    logger = core.getLogger(module_name)
    exercise_logger(logger)
Example 10
    exercise_logger(logger)


def logclass(cls):
    logger = cls.logger
    exercise_logger(logger)


def test_logger():
    import pymor

    fails = []
    for importer, pack_name, _ in pkgutil.walk_packages(
        pymor.__path__, pymor.__name__ + ".", lambda n: fails.append(n)
    ):
        yield logmodule, pack_name
        try:
            importer.find_module(pack_name).load_module(pack_name)
        except TypeError, e:
            fails.append(pack_name)
    import pprint

    if len(fails):
        core.getLogger(__name__).error("Failed imports: {}".format(pprint.pformat(set(fails))))
    for cls in pymor.core.interfaces.BasicInterface.implementors(True):
        yield logclass, cls


if __name__ == "__main__":
    runmodule(name="pymortests.core.logger")
Example 11
def ei_greedy(evaluations, error_norm=None, target_error=None, max_interpolation_dofs=None,
              projection='orthogonal', product=None):
    '''Generate data for empirical operator interpolation by a greedy search (EI-Greedy algorithm).

    Given evaluations of |Operators|, this method generates a collateral_basis and
    interpolation DOFs for empirical operator interpolation. The returned objects
    can be used to instantiate an |EmpiricalInterpolatedOperator|.

    The interpolation data is generated by a greedy search algorithm, adding in each
    loop the worst approximated operator evaluation to the collateral basis.

    Parameters
    ----------
    evaluations
        An iterable of operator evaluations. Each element must be a |VectorArray|
        of the same type and dimension, but it can hold an arbitrary number of evaluations.
    error_norm
        Norm w.r.t. which to calculate the interpolation error. If `None`, the Euclidean norm
        is used.
    target_error
        Stop the greedy search if the largest approximation error is below this threshold.
    max_interpolation_dofs
        Stop the greedy search if the number of interpolation DOF (= dimension of the collateral
        basis) reaches this value.
    projection
        If `ei`, compute the approximation error by comparing the given evaluation with
        the evaluation of the interpolated operator. If `orthogonal`, compute the error by
        comparing with the orthogonal projection onto the span of the collateral basis.
    product
        If `projection == 'orthogonal'`, the product which is used to perform the projection.
        If `None`, the Euclidean product is used.

    Returns
    -------
    interpolation_dofs
        |NumPy array| of the DOFs at which the operators have to be evaluated.
    collateral_basis
        |VectorArray| containing the generated collateral basis.
    data
        Dict containing the following fields:

            :errors: sequence of maximum approximation errors during greedy search.
    '''

    assert projection in ('orthogonal', 'ei')
    assert isinstance(evaluations, VectorArrayInterface)\
        or all(isinstance(ev, VectorArrayInterface) for ev in evaluations)
    if isinstance(evaluations, VectorArrayInterface):
        evaluations = (evaluations,)

    logger = getLogger('pymor.algorithms.ei.ei_greedy')
    logger.info('Generating Interpolation Data ...')

    interpolation_dofs = np.zeros((0,), dtype=np.int32)
    interpolation_matrix = np.zeros((0, 0))
    collateral_basis = type(next(iter(evaluations))).empty(dim=next(iter(evaluations)).dim)
    max_errs = []
    triangularity_errs = []

    def interpolate(U, ind=None):
        coefficients = solve_triangular(interpolation_matrix, U.components(interpolation_dofs, ind=ind).T,
                                        lower=True, unit_diagonal=True).T
        # coefficients = np.linalg.solve(interpolation_matrix, U.components(interpolation_dofs, ind=ind).T).T
        return collateral_basis.lincomb(coefficients)

    # compute the maximum projection error and error vector for the current interpolation data
    def projection_error():
        max_err = -1.

        # precompute gramian_inverse if needed
        if projection == 'orthogonal' and len(interpolation_dofs) > 0:
            if product is None:
                gramian = collateral_basis.gramian()
            else:
                gramian = product.apply2(collateral_basis, collateral_basis, pairwise=False)
            gramian_cholesky = cho_factor(gramian, overwrite_a=True)

        for AU in evaluations:
            if len(interpolation_dofs) > 0:
                if projection == 'ei':
                    AU_interpolated = interpolate(AU)
                    ERR = AU - AU_interpolated
                else:
                    if product is None:
                        coefficients = cho_solve(gramian_cholesky,
                                                 collateral_basis.dot(AU, pairwise=False)).T
                    else:
                        coefficients = cho_solve(gramian_cholesky,
                                                 product.apply2(collateral_basis, AU, pairwise=False)).T
                    AU_projected = collateral_basis.lincomb(coefficients)
                    ERR = AU - AU_projected
            else:
                ERR = AU
            errs = ERR.l2_norm() if error_norm is None else error_norm(ERR)
            local_max_err_ind = np.argmax(errs)
            local_max_err = errs[local_max_err_ind]
            if local_max_err > max_err:
                max_err = local_max_err
                if len(interpolation_dofs) == 0 or projection == 'ei':
                    new_vec = ERR.copy(ind=local_max_err_ind)
                else:
                    new_vec = AU.copy(ind=local_max_err_ind)
                    new_vec -= interpolate(AU, ind=local_max_err_ind)

        return max_err, new_vec

    # main loop
    while True:
        max_err, new_vec = projection_error()

        logger.info('Maximum interpolation error with {} interpolation DOFs: {}'.format(len(interpolation_dofs),
                                                                                        max_err))
        if target_error is not None and max_err <= target_error:
            logger.info('Target error reached! Stopping extension loop.')
            break

        # compute new interpolation dof and collateral basis vector
        new_dof = new_vec.amax()[0][0]
        if new_dof in interpolation_dofs:
            logger.info('DOF {} selected twice for interpolation! Stopping extension loop.'.format(new_dof))
            break
        new_vec *= 1 / new_vec.components([new_dof])[0, 0]
        interpolation_dofs = np.hstack((interpolation_dofs, new_dof))
        collateral_basis.append(new_vec, remove_from_other=True)
        interpolation_matrix = collateral_basis.components(interpolation_dofs).T
        max_errs.append(max_err)

        triangularity_error = np.max(np.abs(interpolation_matrix - np.tril(interpolation_matrix)))
        triangularity_errs.append(triangularity_error)
        logger.info('Interpolation matrix is not lower triangular with maximum error of {}'
                    .format(triangularity_error))

        if max_interpolation_dofs is not None and len(interpolation_dofs) >= max_interpolation_dofs:
            logger.info('Maximum number of interpolation DOFs reached. Stopping extension loop.')
            max_err, _ = projection_error()
            logger.info('Final maximum interpolation error with {} interpolation DOFs: {}'.format(
                len(interpolation_dofs), max_err))
            break

        logger.info('')

    data = {'errors': max_errs, 'triangularity_errors': triangularity_errs}

    return interpolation_dofs, collateral_basis, data
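The interpolation step itself is small: given the selected DOFs, an evaluation is approximated by the collateral-basis element that matches it exactly at those DOFs. A toy NumPy version of the greedy construction for a single snapshot matrix (columns play the role of operator evaluations; the data and names are made up for illustration):

import numpy as np

def eim_sketch(snapshots, target_error=1e-10, max_dofs=None):
    # illustrative sketch: snapshots is a 2D array with one "operator evaluation" per column
    dofs, basis = [], []
    while True:
        # interpolate every snapshot at the selected DOFs and take the worst residual
        if dofs:
            B = np.array(basis).T                           # collateral basis as columns
            coeffs = np.linalg.solve(B[dofs, :], snapshots[dofs, :])
            residual = snapshots - B.dot(coeffs)
        else:
            residual = snapshots.copy()
        errors = np.linalg.norm(residual, axis=0)
        worst = np.argmax(errors)
        if errors[worst] <= target_error or (max_dofs is not None and len(dofs) >= max_dofs):
            break
        new_vec = residual[:, worst]
        new_dof = int(np.argmax(np.abs(new_vec)))           # next interpolation DOF
        if new_dof in dofs:
            break                                           # would make the system singular
        basis.append(new_vec / new_vec[new_dof])            # normalize to 1 at the new DOF
        dofs.append(new_dof)
    return np.array(dofs), np.array(basis).T

# toy data: evaluations of exp(-mu * x) on a grid
x = np.linspace(0., 1., 100)
snapshots = np.array([np.exp(-mu * x) for mu in np.linspace(0.5, 5., 30)]).T
dofs, cb = eim_sketch(snapshots, target_error=1e-8)
print('{} interpolation DOFs'.format(len(dofs)))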
Example 12
def deim(evaluations, modes=None, error_norm=None, product=None):
    '''Generate data for empirical operator interpolation using DEIM algorithm.

    Given evaluations of |Operators|, this method generates a collateral_basis and
    interpolation DOFs for empirical operator interpolation. The returned objects
    can be used to instantiate an |EmpiricalInterpolatedOperator|.

    The collateral basis is determined by the first POD modes of the operator
    evaluations.

    Parameters
    ----------
    evaluations
        A |VectorArray| of operator evaluations.
    modes
        Dimension of the collateral basis, i.e., the number of POD modes of the operator evaluations.
    error_norm
        Norm w.r.t. which to calculate the interpolation error. If `None`, the Euclidean norm
        is used.
    product
        Product |Operator| used for POD.

    Returns
    -------
    interpolation_dofs
        |NumPy array| of the DOFs at which the operators have to be evaluated.
    collateral_basis
        |VectorArray| containing the generated collateral basis.
    data
        Dict containing the following fields:

            :errors: sequence of maximum approximation errors during greedy search.
    '''

    assert isinstance(evaluations, VectorArrayInterface)

    logger = getLogger('pymor.algorithms.ei.deim')
    logger.info('Generating Interpolation Data ...')

    collateral_basis = pod(evaluations, modes, product=product)[0]

    interpolation_dofs = np.zeros((0,), dtype=np.int32)
    interpolation_matrix = np.zeros((0, 0))
    errs = []

    for i in xrange(len(collateral_basis)):

        if len(interpolation_dofs) > 0:
            coefficients = np.linalg.solve(interpolation_matrix,
                                           collateral_basis.components(interpolation_dofs, ind=i).T).T
            U_interpolated = collateral_basis.lincomb(coefficients, ind=range(len(interpolation_dofs)))
            ERR = collateral_basis.copy(ind=i)
            ERR -= U_interpolated
        else:
            ERR = collateral_basis.copy(ind=i)

        err = ERR.l2_norm() if error_norm is None else error_norm(ERR)

        logger.info('Interpolation error for basis vector {}: {}'.format(i, err))

        # compute new interpolation dof and collateral basis vector
        new_dof = ERR.amax()[0][0]

        if new_dof in interpolation_dofs:
            logger.info('DOF {} selected twice for interpolation! Stopping extension loop.'.format(new_dof))
            break

        interpolation_dofs = np.hstack((interpolation_dofs, new_dof))
        interpolation_matrix = collateral_basis.components(interpolation_dofs, ind=range(len(interpolation_dofs))).T
        errs.append(err)

        logger.info('')

    if len(interpolation_dofs) < len(collateral_basis):
        collateral_basis.remove(ind=range(len(interpolation_dofs), len(collateral_basis)))

    logger.info('Finished.')

    data = {'errors': errs}

    return interpolation_dofs, collateral_basis, data
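For comparison, a compact NumPy sketch of the DEIM index selection itself: the collateral basis is fixed in advance (here a truncated SVD as a stand-in for pyMOR's pod), and each new DOF is the largest entry of the residual of the next basis vector interpolated at the previously chosen DOFs (data below is made up):

import numpy as np

def deim_sketch(evaluations, modes):
    # illustrative sketch: evaluations is a 2D array with one operator evaluation per column;
    # the collateral basis is taken from the leading left singular vectors
    U, _, _ = np.linalg.svd(evaluations, full_matrices=False)
    basis = U[:, :modes]
    dofs = [int(np.argmax(np.abs(basis[:, 0])))]
    for i in range(1, modes):
        # interpolate the i-th basis vector at the already selected DOFs ...
        coeffs = np.linalg.solve(basis[dofs, :i], basis[dofs, i])
        residual = basis[:, i] - basis[:, :i].dot(coeffs)
        # ... and pick the entry where the interpolation error is largest
        dofs.append(int(np.argmax(np.abs(residual))))
    return np.array(dofs), basis

x = np.linspace(0., 1., 100)
snapshots = np.array([np.exp(-mu * x) for mu in np.linspace(0.5, 5., 30)]).T
dofs, cb = deim_sketch(snapshots, modes=5)
print(dofs)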
Example 13
        def __init__(self, grid, U, bounding_box, codim, title, legend, separate_colorbars, backend):
            assert isinstance(U, VectorArrayInterface) and hasattr(U, 'data') \
                or (isinstance(U, tuple) and all(isinstance(u, VectorArrayInterface) and hasattr(u, 'data') for u in U)
                    and all(len(u) == len(U[0]) for u in U))
            U = (U.data,) if hasattr(U, 'data') else tuple(u.data for u in U)
            if isinstance(legend, str):
                legend = (legend,)
            assert legend is None or isinstance(legend, tuple) and len(legend) == len(U)
            if backend == 'gl':
                widget = GlumpyPatchWidget
            else:
                widget = MatplotlibPatchWidget
                if not separate_colorbars and len(U) > 1:
                    l = getLogger('pymor.gui.qt.visualize_patch')
                    l.warn('separate_colorbars=False not supported for matplotlib backend')
                separate_colorbars = True

            class PlotWidget(QWidget):
                def __init__(self):
                    super(PlotWidget, self).__init__()
                    if separate_colorbars:
                        vmins = tuple(np.min(u) for u in U)
                        vmaxs = tuple(np.max(u) for u in U)
                    else:
                        vmins = (min(np.min(u) for u in U),) * len(U)
                        vmaxs = (max(np.max(u) for u in U),) * len(U)
                    layout = QHBoxLayout()
                    plot_layout = QGridLayout()
                    plots = [widget(self, grid, vmin=vmin, vmax=vmax, bounding_box=bounding_box, codim=codim)
                             for vmin, vmax in izip(vmins, vmaxs)]
                    if legend:
                        for i, plot, l in izip(xrange(len(plots)), plots, legend):
                            subplot_layout = QVBoxLayout()
                            caption = QLabel(l)
                            caption.setAlignment(Qt.AlignHCenter)
                            subplot_layout.addWidget(caption)
                            if not separate_colorbars or backend == 'matplotlib':
                                subplot_layout.addWidget(plot)
                            else:
                                hlayout = QHBoxLayout()
                                hlayout.addWidget(plot)
                                hlayout.addWidget(ColorBarWidget(self, vmin=vmins[i], vmax=vmaxs[i]))
                                subplot_layout.addLayout(hlayout)
                            plot_layout.addLayout(subplot_layout, int(i/2), (i % 2), 1, 1)
                    else:
                        for i, plot in enumerate(plots):
                            if not separate_colorbars or backend == 'matplotlib':
                                plot_layout.addWidget(plot, int(i/2), (i % 2), 1, 1)
                            else:
                                hlayout = QHBoxLayout()
                                hlayout.addWidget(plot)
                                hlayout.addWidget(ColorBarWidget(self, vmin=vmins[i], vmax=vmaxs[i]))
                                plot_layout.addLayout(hlayout, int(i/2), (i % 2), 1, 1)
                    layout.addLayout(plot_layout)
                    if not separate_colorbars:
                        layout.addWidget(ColorBarWidget(self, vmin=vmins[0], vmax=vmaxs[0]))
                    self.setLayout(layout)
                    self.plots = plots

                def set(self, U, ind):
                    for u, plot in izip(U, self.plots):
                        plot.set(u[ind])

            super(MainWindow, self).__init__(U, PlotWidget(), title=title, length=len(U[0]))
            self.grid = grid
            self.codim = codim
Example 14
def ei_rb_greedy(discretization, operator_names, samples, error_norm=None, target_error=None,
                 product=None, max_extensions=None, rb_initial_data=None, ei_initial_data=None,
                 use_estimator=True, extension_algorithm=trivial_basis_extension, cache_region='memory'):
    '''PODEI Greedy extension algorithm.

    Parameters
    ----------
    discretization
        The discretization to reduce.
    reductor
        Reductor for reducing the given discretization. This has to be a
        function of the form `reduce(discretization, data)` where data is
        the detailed data required by the reductor. If your reductor takes
        more arguments, use functools.partial.
    samples
        The set of parameter samples on which to perform the greedy search.
        Currently this set is fixed for the whole process.
    rb_initial_data
        This is fed into reductor.reduce() for the initial projection.
        Typically this will be the reduced basis with which the algorithm
        starts.
    use_estimator
        If True, use reduced_discretization.estimate() to estimate the errors
        on the sample set. Otherwise a detailed simulation is used to calculate
        the error.
    error_norm
        If use_estimator == False, use this function to calculate the norm of
        the error. [Default l2_norm]
    extension_algorithm
        The extension algorithm to use to extend the current reduced basis with
        the maximum error snapshot.
    target_error
        If not None, stop the search if the maximum error on the sample set
        drops below this value.
    max_extensions
        If not None, stop algorithm after `max_extensions` extension steps.

    Returns
    -------
    Dict with the following fields:
        'data'
            The reduced basis. (More generally, the data which needs to be
            fed into reduced_discretization.reduce().)
        'reduced_discretization'
            The last reduced discretization which has been computed.
        'reconstructor'
            Reconstructor for `reduced_discretization`.
        'max_err'
            Last estimated maximum error on the sample set.
        'max_err_mu'
            The parameter that corresponds to `max_err`.
        'max_errs'
            Sequence of maximum errors during the greedy run.
        'max_err_mus'
            The parameters corresponding to `max_errs`.
    '''

    def interpolate(U, ind=None):
        coefficients = solve_triangular(interpolation_matrix, U.components(interpolation_dofs, ind=ind).T,
                                        lower=True, unit_diagonal=True).T
        # coefficients = np.linalg.solve(interpolation_matrix, U.components(interpolation_dofs, ind=ind).T).T
        return collateral_basis.lincomb(coefficients)

    tic = time.time()

    logger = getLogger('pymor.algorithms.ei_greedy.ei_greedy')
    samples = tuple(samples)
    logger.info('Started PODEI greedy search on {} samples'.format(len(samples)))
    data = rb_initial_data

    # EI initialization
    operators = [discretization.operators[operator_name] for operator_name in operator_names]
    evaluations = EvaluationProvider(discretization, operators, samples, cache_region=cache_region)

    assert isinstance(evaluations, VectorArrayInterface) or all(isinstance(ev, VectorArrayInterface) for ev in evaluations)
    if isinstance(evaluations, VectorArrayInterface):
        evaluations = (evaluations,)

    if ei_initial_data is None:
        interpolation_dofs = np.zeros((0,), dtype=np.int32)
        interpolation_matrix = np.zeros((0,0))
        collateral_basis = type(next(iter(evaluations))).empty(dim=next(iter(evaluations)).dim)
    else:
        interpolation_dofs = ei_initial_data['dofs']
        collateral_basis = ei_initial_data['basis']
        interpolation_matrix = collateral_basis.components(interpolation_dofs).T

    extensions = 0
    discard_count = 0
    crb_discard = False
    error_already_calculated = False
    Ns = [len(data)]
    Ms = [len(interpolation_dofs)]
    max_errs = []
    max_err_mus = []

    while True:
        logger.info('Reducing ...')
        
        if len(interpolation_dofs) > 0:
            rd, rc, _ = reduce_ei_rb(discretization, operator_names, data={'RB': data, 'dofs': interpolation_dofs, 'CB': collateral_basis})
        else:
            rd, rc = reduce_generic_rb(discretization, data)

        logger.info('Estimating errors ...')
        if use_estimator:
            logger.info('Error Estimator usage not yet implemented')
            break
        elif not error_already_calculated:
            max_err = -1
            for mu in samples:
                errs = error_norm(discretization.solve(mu) - rc.reconstruct(rd.solve(mu)))
                cur_max_err = np.max(errs)
                if cur_max_err > max_err:
                    max_err = cur_max_err
                    max_err_mu = mu
                    max_err_t = np.argmax(errs)      
            
        max_errs.append(max_err)
        max_err_mus.append(max_err_mu)
        logger.info('Maximum error after {} extensions: {} (mu = {}, timestep t_k: k={})'.format(extensions, max_err, max_err_mu, max_err_t))
        
        if target_error is not None and max_err <= target_error:
            logger.info('Target error reached! Stopping extension loop.')
            logger.info('Reached maximal error on snapshots of {} <= {}'.format(max_err, target_error))
            break

        # compute new interpolation dof and collateral basis vector
        ev = evaluations.data(samples.index(max_err_mu))
        if len(interpolation_dofs) > 0:
            ev_interpolated = interpolate(ev)
            ERR = ev - ev_interpolated
        else:
            ERR = ev
        new_vec = ERR.copy(ind=np.argmax(error_norm(ERR)))
     
        if new_vec.amax()[1] > 1.0e-2:      
            new_dof = new_vec.amax()[0][0]
            if new_dof in interpolation_dofs:
                logger.info('DOF {} selected twice for interpolation! Stopping extension loop.'.format(new_dof))
                break
            new_vec *= 1 / new_vec.components([new_dof])[0, 0]
            interpolation_dofs = np.hstack((interpolation_dofs, new_dof))
            collateral_basis.append(new_vec, remove_from_other=True)
            interpolation_matrix = collateral_basis.components(interpolation_dofs).T
            crb_discard = False
        else:
            logger.warn('Maximum DOF is {}, skipping collateral basis extension ...'.format(new_vec.amax()[1]))
            crb_discard = True

        triangularity_error = np.max(np.abs(interpolation_matrix - np.tril(interpolation_matrix)))
        logger.info('Interpolation matrix is not lower triangular with maximum error of {}'
                    .format(triangularity_error))
        
        logger.info('Extending with snapshot for mu = {}'.format(max_err_mu))
        U = discretization.solve(max_err_mu)
        try:
            data, extension_data = extension_algorithm(data, U)
        except ExtensionError:
            logger.info('Extension failed. Stopping now.')
            break
        if 'hierarchic' not in extension_data:
            logger.warn('Extension algorithm does not report if extension was hierarchic. Assuming it wasn\'t ...')
        
        rd, rc, _ = reduce_ei_rb(discretization, operator_names, data={'RB': data, 'dofs': interpolation_dofs, 'CB': collateral_basis})
        if use_estimator:
            logger.info('Error Estimator usage not yet implemented')
            break
        else:
            max_err = -1
            for mu in samples:
                errs = error_norm(discretization.solve(mu) - rc.reconstruct(rd.solve(mu)))
                cur_max_err = np.max(errs)
                if cur_max_err > max_err:
                    max_err = cur_max_err
                    max_err_mu = mu
                    max_err_t = np.argmax(errs)
        if max_errs[-1] <= max_err and not crb_discard:
            logger.info('Error Increases. Discard last RB extension')
            data.remove(ind=[len(data)-1])
            discard_count += 1
            error_already_calculated = False
        else:
            extensions += 1
            Ns.append(len(data))
            Ms.append(len(interpolation_dofs))
            discard_count = 0
            error_already_calculated = True
                    
        #Version 2
#         if max_errs[len(max_errs)-1] <= max_err:
#             if discard_count < 1:
#                 logger.info('Error Increases. Discard last RB extension')
#                 data.remove(ind=[len(data)-1])
#                 discard_count = 1
#                 error_already_calculated = False
#             else:
#                 logger.info('Error still increases. Throwing away CB but keeping RB.')
#                 extensions += 1
#                 collateral_basis.remove([len(interpolation_dofs)-1])
#                 interpolation_dofs = interpolation_dofs[:-1]
#                 interpolation_matrix = interpolation_matrix[:-1,:-1]
#                 Ns.append(len(data))
#                 Ms.append(len(interpolation_dofs))
#                 discard_count = 0
#                 error_already_calculated = False
#         else:
#             extensions += 1
#             Ns.append(len(data))
#             Ms.append(len(interpolation_dofs))
#             discard_count = 0
#             error_already_calculated = True
            
        #Version 1
#         if max_errs[len(max_errs)-1] <= max_err:
#             if discard_count < 1:
#                 logger.info('Error Increases. Discard last RB extension')
#                 data.remove(ind=[len(data)-1])
#                 discard_count = 1
#                 error_already_calculated = False
#             else:
#                 logger.info('Error still increases. Keeping RB.')
#                 extensions += 1
#                 Ns.append(len(data))
#                 Ms.append(len(interpolation_dofs))
#                 discard_count = 0
#                 error_already_calculated = True
#         else:
#             extensions += 1
#             Ns.append(len(data))
#             Ms.append(len(interpolation_dofs))
#             discard_count = 0
#             error_already_calculated = True

        logger.info('N={}, M={}'.format(len(data), len(interpolation_dofs)))
        logger.info('')
        
        if max_extensions is not None and extensions >= max_extensions:
            logger.info('Maximal number of {} extensions reached.'.format(max_extensions))
            break
    
    logger.info('Reducing once more ...')
    rd, rc, ei_discretization = reduce_ei_rb(discretization, operator_names, data={'RB':data, 'dofs': interpolation_dofs, 'CB': collateral_basis})

    ei_data = ({'dofs': interpolation_dofs, 'basis': collateral_basis})
    N_M_correlation = [Ns,Ms]

    tictoc = time.time() - tic
    logger.info('PODEI Greedy search took {} seconds'.format(tictoc))
    return {'ei_discretization': ei_discretization, 'ei_data': ei_data, 
            'data': data, 'reduced_discretization': rd, 'reconstructor': rc, 'max_err': max_err,
            'max_err_mu': max_err_mu, 'max_errs': max_errs, 'max_err_mus': max_err_mus, 'extensions': extensions, 'N_M_correlation': N_M_correlation,
            'time': tictoc}
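The distinguishing step of this PODEI variant is the accept/discard bookkeeping after each joint RB/CRB extension: if the new maximum error did not decrease and the collateral basis was actually extended in this round, the last reduced-basis vector is discarded again. The decision logic in isolation (plain Python, values hypothetical):

def accept_extension(prev_max_err, new_max_err, crb_discard):
    # illustrative sketch of the decision above: keep the RB extension only if the
    # error decreased, or if the collateral basis extension was skipped this round
    return new_max_err < prev_max_err or crb_discard

print(accept_extension(1e-2, 5e-3, False))  # True  -> keep the extension
print(accept_extension(1e-2, 2e-2, False))  # False -> discard the last RB vector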
Example 15
from pymor.algorithms.timestepping import ExplicitEulerTimeStepper
from pymor.discretizations import InstationaryDiscretization
from pymor.grids import OnedGrid
from pymor.gui.qt import Matplotlib1DVisualizer
from pymor.la.pod import pod
from pymor.operators.constructions import VectorFunctional
from pymor.parameters.spaces import CubicParameterSpace
from pymor.reductors.basic import reduce_generic_rb

# import wrapped classes
from wrapper import WrappedDiffusionOperator, WrappedVectorArray, WrappedVector

# configure logging
from pymor.core import getLogger
getLogger('pymor.discretizations').setLevel('INFO')


def discretize(n, nt, blocks):
    h = 1. / blocks
    ops = [WrappedDiffusionOperator.create(n, h * i, h * (i + 1)) for i in xrange(blocks)]
    # operator = WrappedDiffusionOperator.create(n, 0, 1)
    operator = WrappedDiffusionOperator.lincomb(ops, coefficients_name='diffusion_coefficients')

    initial_data = WrappedVectorArray.zeros(operator.dim_source)

    # use data property of WrappedVector to setup rhs
    # note that we cannot use the data property of WrappedVectorArray,
    # since ListVectorArray will always return a copy
    rhs_vec = WrappedVector.zeros(operator.dim_source)
    rhs_data = rhs_vec.data
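The lincomb construction above encodes the affine parameter dependence A(mu) = sum_i mu_i * A_i of the blockwise diffusion operator. The idea in plain NumPy (block matrices and coefficient values are made up for illustration):

import numpy as np

def assemble_lincomb(operators, coefficients):
    # illustrative sketch: assemble A(mu) = sum_i mu_i * A_i from per-block matrices
    return sum(c * A for c, A in zip(coefficients, operators))

blocks = [np.diag(np.arange(1., 11.) * (i + 1)) for i in range(4)]
mu = [0.1, 1.0, 0.5, 2.0]
print(assemble_lincomb(blocks, mu).shape)  # (10, 10)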
Example 16
from pymor.playground.algorithms import greedy_lrbms
from pymor.core import cache
from pymor.core.exceptions import ConfigError
from pymor.discretizations import StationaryDiscretization
from pymor.la import NumpyVectorArray
from pymor.la.basic import induced_norm
from pymor.la.pod import pod
from pymor.la import induced_norm
from pymor.operators import NumpyMatrixOperator
from pymor.operators.basic import NumpyLincombMatrixOperator
from pymor.parameters import CubicParameterSpace
from pymor.reductors import reduce_generic_rb
from pymor.reductors.basic import GenericRBReconstructor, reduce_generic_rb
from pymor.reductors.linear import reduce_stationary_affine_linear

logger = core.getLogger('pymor.main.demo')
logger.setLevel('INFO')
core.getLogger('pymor.WrappedDiscretization').setLevel('WARN')
core.getLogger('pymor.algorithms').setLevel('INFO')
core.getLogger('dune.pymor.discretizations').setLevel('WARN')

def load_dune_module(settings_filename):

    logger.info('initializing dune module...')
    example = dune_module.ThermalblockExample()
    example.initialize([settings_filename])
    _, wrapper = wrap_module(dune_module)
    return example, wrapper


def perform_standard_rb(config, detailed_discretization, training_samples):
Example 17
from OpenGL.GL import *
import OpenGL.GL as gl
from OpenGL.arrays import vbo
from PySide import QtOpenGL

import pymor.core as core
core.logger.MAX_HIERACHY_LEVEL = 2
from pymor.analyticalproblems import ThermalBlockProblem
from pymor.discretizers import discretize_elliptic_cg
from pymor.reductors.linear import reduce_stationary_affine_linear
from pymor.algorithms import greedy, gram_schmidt_basis_extension
from pymor.parameters.base import Parameter
from glumpy.graphics.vertex_buffer import VertexBuffer
from pymor.core.cache import Cachable, NO_CACHE_CONFIG

core.getLogger('pymor.algorithms').setLevel('DEBUG')
core.getLogger('pymor.discretizations').setLevel('DEBUG')

PARAM_STEPS = 10
PARAM_MIN = 0.1
PARAM_MAX = 1


def compile_vertex_shader(source):
    """Compile a vertex shader from source."""
    vertex_shader = gl.glCreateShader(gl.GL_VERTEX_SHADER)
    gl.glShaderSource(vertex_shader, source)
    gl.glCompileShader(vertex_shader)
    # check compilation error
    result = gl.glGetShaderiv(vertex_shader, gl.GL_COMPILE_STATUS)
    if not result:
        raise RuntimeError(gl.glGetShaderInfoLog(vertex_shader))
    return vertex_shader
Example 18
"""

from __future__ import absolute_import, division, print_function

import sys
import time
from functools import partial

import numpy as np
from docopt import docopt

import pymor.core as core
core.logger.MAX_HIERACHY_LEVEL = 2
from pymor.algorithms import greedy, trivial_basis_extension, gram_schmidt_basis_extension
from pymor.reductors.linear import reduce_stationary_affine_linear
core.getLogger('pymor.algorithms').setLevel('INFO')
core.getLogger('pymor.discretizations').setLevel('INFO')
core.getLogger('pymor.la').setLevel('INFO')


def thermalblock_demo(args):
    args['XBLOCKS'] = int(args['XBLOCKS'])
    args['YBLOCKS'] = int(args['YBLOCKS'])
    args['--grid'] = int(args['--grid'])
    args['SNAPSHOTS'] = int(args['SNAPSHOTS'])
    args['RBSIZE'] = int(args['RBSIZE'])
    args['--test'] = int(args['--test'])
    args['--estimator-norm'] = args['--estimator-norm'].lower()
    assert args['--estimator-norm'] in {'trivial', 'h1'}
    args['--extension-alg'] = args['--extension-alg'].lower()
    assert args['--extension-alg'] in {'trivial', 'gram_schmidt', 'h1_gram_schmidt'}
Example 19
    'estimator_compute': 'eta_red',
    'estimator_return': 'eta_red',
    'num_test_samples': 10,
    'estimate_some_errors': True,
    'local_indicators': 'eta_red',
    'marking_strategy': 'doerfler_and_age',
    'marking_max_age': 4,
    'doerfler_marking_theta': 0.66,
    'local_boundary_values': 'dirichlet',
    'online_target_error': 5,
    'online_max_extensions': 20
}
DATASET_ID = config['dune_example'] + '_online_enrichment_test'

pymor.core.logger.MAX_HIERACHY_LEVEL = 2
getLogger('pymor.WrappedDiscretization').setLevel('WARN')
getLogger('pymor.algorithms').setLevel('INFO')
getLogger('dune.pymor.discretizations').setLevel('WARN')

if __name__ == '__main__':

    logfile = NamedTemporaryFile(delete=False).name
    pymor.core.logger.FILENAME = logfile
    new_dataset(DATASET_ID, **config)

    detailed_data = prepare(config)
    print('')
    offline_data = offline_phase(config, detailed_data)
    print('')
    _ = online_phase(config, detailed_data, offline_data)
Example 20
def greedy(discretization, reductor, samples, initial_data=None, use_estimator=True, error_norm=None,
           extension_algorithm=trivial_basis_extension, target_error=None, max_extensions=None):
    '''Greedy extension algorithm.

    Parameters
    ----------
    discretization
        The discretization to reduce.
    reductor
        Reductor for reducing the given discretization. This has to be a
        function of the form `reduce(discretization, data)` where data is
        the detailed data required by the reductor. If your reductor takes
        more arguments, use functools.partial.
    samples
        The set of parameter samples on which to perform the greedy search.
        Currently this set is fixed for the whole process.
    initial_data
        This is fed into reductor.reduce() for the initial projection.
        Typically this will be the reduced basis with which the algorithm
        starts.
    use_estimator
        If True, use reduced_discretization.estimate() to estimate the errors
        on the sample set. Otherwise a detailed simulation is used to calculate
        the error.
    error_norm
        If use_estimator == False, use this function to calculate the norm of
        the error. [Default l2_norm]
    extension_algorithm
        The extension algorithm to use to extend the current reduced basis with
        the maximum error snapshot.
    target_error
        If not None, stop the search if the maximum error on the sample set
        drops below this value.
    max_extensions
        If not None, stop algorithm after `max_extensions` extension steps.

    Returns
    -------
    Dict with the following fields:
        'data'
            The reduced basis. (More generally, the data which needs to be
            fed into reduced_discretization.reduce().)
        'reduced_discretization'
            The last reduced discretization which has been computed.
        'reconstructor'
            Reconstructor for `reduced_discretization`.
        'max_err'
            Last estimated maximum error on the sample set.
        'max_err_mu'
            The parameter that corresponds to `max_err`.
        'max_errs'
            Sequence of maximum errors during the greedy run.
        'max_err_mus'
            The parameters corresponding to `max_errs`.
    '''

    logger = getLogger('pymor.algorithms.greedy.greedy')
    samples = list(samples)
    logger.info('Started greedy search on {} samples'.format(len(samples)))
    data = initial_data

    tic = time.time()
    extensions = 0
    max_errs = []
    max_err_mus = []

    while True:
        logger.info('Reducing ...')
        rd, rc = reductor(discretization, data)

        logger.info('Estimating errors ...')
        if use_estimator:
            errors = [rd.estimate(rd.solve(mu), mu) for mu in samples]
        elif error_norm is not None:
            errors = [error_norm(discretization.solve(mu) - rc.reconstruct(rd.solve(mu))) for mu in samples]
        else:
            errors = [(discretization.solve(mu) - rc.reconstruct(rd.solve(mu))).l2_norm() for mu in samples]

        max_err, max_err_mu = max(((err, mu) for err, mu in izip(errors, samples)), key=lambda t: t[0])
        max_errs.append(max_err)
        max_err_mus.append(max_err_mu)
        logger.info('Maximum error after {} extensions: {} (mu = {})'.format(extensions, max_err, max_err_mu))

        if target_error is not None and max_err <= target_error:
            logger.info('Reached maximal error on snapshots of {} <= {}'.format(max_err, target_error))
            break

        logger.info('Extending with snapshot for mu = {}'.format(max_err_mu))
        U = discretization.solve(max_err_mu)
        try:
            data = extension_algorithm(data, U)
        except ExtensionError:
            logger.info('Extension failed. Stopping now.')
            break
        extensions += 1

        logger.info('')

        if max_extensions is not None and extensions >= max_extensions:
            logger.info('Maximal number of {} extensions reached.'.format(max_extensions))
            logger.info('Reducing once more ...')
            rd, rc = reductor(discretization, data)
            break

    tictoc = time.time() - tic
    logger.info('Greedy search took {} seconds'.format(tictoc))
    return {'data': data, 'reduced_discretization': rd, 'reconstructor': rc, 'max_err': max_err,
            'max_err_mu': max_err_mu, 'max_errs': max_errs, 'max_err_mus': max_err_mus, 'extensions': extensions,
            'time': tictoc}