Example #1
def pod_basis_extension(basis, U, count=1, copy_basis=True, product=None):
    '''Extend basis with the first `count` POD modes of the projection of U onto the
    orthogonal complement of the basis.

    Note that the provided basis is assumed to be orthonormal w.r.t. the provided
    scalar product!

    Parameters
    ----------
    basis
        |VectorArray| containing the basis to extend. The basis is expected to be
        orthonormal w.r.t. `product`.
    U
        |VectorArray| containing the vectors to which the POD is applied.
    count
        Number of POD modes that are to be appended to the basis.
    product
        The scalar product w.r.t. which to orthonormalize; if None, the Euclidean
        product is used.
    copy_basis
        If copy_basis is False, the old basis is extended in-place.

    Returns
    -------
    new_basis
        The extended basis.
    extension_data
        Dict containing the following fields:

            :hierarchic: `True` if `new_basis` contains `basis` as its first vectors.

    Raises
    ------
    ExtensionError
        POD produces no new vectors. This is the case when no vector in U
        is linearly independent from the basis.
    '''
    if basis is None:
        return pod(U, modes=count, product=product), {'hierarchic': True}

    basis_length = len(basis)

    new_basis = basis.copy() if copy_basis else basis

    if product is None:
        U_proj_err = U - basis.lincomb(U.dot(basis, pairwise=False))
    else:
        U_proj_err = U - basis.lincomb(product.apply2(U, basis, pairwise=False))

    new_basis.append(pod(U_proj_err, modes=count, product=product))

    if len(new_basis) <= basis_length:
        raise ExtensionError

    return new_basis, {'hierarchic': True}
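
Stripped of the |VectorArray| interface, the construction above is plain linear algebra: project U onto the orthogonal complement of span(basis) and take the leading left singular vectors of the error. A minimal NumPy sketch, assuming the Euclidean product and one basis vector per matrix column (an illustration, not pyMOR's API):

import numpy as np

def pod_extend_sketch(B, U, count=1, tol=1e-12):
    # B: (n, k) orthonormal basis, one vector per column; U: (n, m) snapshots
    E = U - B @ (B.T @ U)                     # projection error of U onto span(B)
    W, s, _ = np.linalg.svd(E, full_matrices=False)
    W = W[:, :count][:, s[:count] > tol]      # keep only numerically nonzero modes
    if W.shape[1] == 0:
        raise ValueError('POD produced no new vectors')
    return np.hstack([B, W])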
Example #2
def pod_basis_extension(basis, U, count=1, copy_basis=True, product=None):
    '''Extend basis with the first `count` POD modes of the projection error of U.

    Parameters
    ----------
    basis
        The basis to extend. The basis is expected to be orthonormal w.r.t. `product`.
    U
        The vectors to which the POD is applied.
    count
        Number of POD modes that are to be appended to the basis.
    product
        The scalar product w.r.t. which to orthonormalize; if None, the l2-scalar
        product on the coefficient vector is used.
    copy_basis
        If copy_basis is False, the old basis is extended in-place.

    Returns
    -------
    The new basis.

    Raises
    ------
    ExtensionError
        POD produces no new vectors. Usually this is the case when no vector
        in U is linearly independent from the basis. However, this can also
        happen due to rounding errors ...
    '''
    if basis is None:
        return pod(U, modes=count, product=product)

    basis_length = len(basis)

    new_basis = basis.copy() if copy_basis else basis

    if product is None:
        U_proj_err = U - basis.lincomb(U.dot(basis, pairwise=False))
    else:
        U_proj_err = U - basis.lincomb(product.apply2(U, basis, pairwise=False))

    new_basis.append(pod(U_proj_err, modes=count, product=product))

    if len(new_basis) <= basis_length:
        raise ExtensionError

    return new_basis
Example #3
    d = InstationaryDiscretization(T=1e-0, operator=operator, rhs=rhs, initial_data=initial_data,
                                   time_stepper=time_stepper, num_values=20, parameter_space=parameter_space,
                                   visualizer=visualizer, name='C++-Discretization', cache_region=None)
    return d


# discretize
d = discretize(50, 10000, 4)

# generate solution snapshots
snapshots = d.type_solution.empty(d.dim_solution)
for mu in d.parameter_space.sample_uniformly(2):
    snapshots.append(d.solve(mu))

# apply POD
reduced_basis = pod(snapshots, 4)[0]

# reduce the model
rd, rc, _ = reduce_generic_rb(d, reduced_basis)

# stochastic error estimation
mu_max = None
err_max = -1.
for mu in d.parameter_space.sample_randomly(10):
    U_RB = (rc.reconstruct(rd.solve(mu)))
    U = d.solve(mu)
    err = np.max((U_RB-U).l2_norm())
    if err > err_max:
        err_max = err
        mu_max = mu
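
The snippet ends inside the search loop; a natural continuation (an assumption, not part of the original) would report the worst-case parameter found:

# report the result of the stochastic error estimation (not in the original)
print('maximal L2-error: {} for mu = {}'.format(err_max, mu_max))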
Example #4
def thermalblock_demo(args):
    args['XBLOCKS'] = int(args['XBLOCKS'])
    args['YBLOCKS'] = int(args['YBLOCKS'])
    args['--grid'] = int(args['--grid'])
    args['SNAPSHOTS'] = int(args['SNAPSHOTS'])
    args['RBSIZE'] = int(args['RBSIZE'])
    args['--test'] = int(args['--test'])
    args['--pod-norm'] = args['--pod-norm'].lower()
    assert args['--pod-norm'] in {'trivial', 'h1'}

    print('Solving on TriaGrid(({0},{0}))'.format(args['--grid']))

    print('Setup Problem ...')
    problem = ThermalBlockProblem(num_blocks=(args['XBLOCKS'], args['YBLOCKS']))

    print('Discretize ...')
    discretization, _ = discretize_elliptic_cg(problem, diameter=m.sqrt(2) / args['--grid'])

    print('The parameter type is {}'.format(discretization.parameter_type))

    if args['--plot-solutions']:
        print('Showing some solutions')
        Us = tuple()
        legend = tuple()
        for mu in discretization.parameter_space.sample_randomly(2):
            print('Solving for diffusion = \n{} ... '.format(mu['diffusion']))
            sys.stdout.flush()
            Us = Us + (discretization.solve(mu),)
            legend = legend + (str(mu['diffusion']),)
        discretization.visualize(Us, legend=legend, title='Detailed Solutions for different parameters', block=True)

    print('RB generation ...')

    tic = time.time()

    print('Solving on training set ...')
    S_train = list(discretization.parameter_space.sample_uniformly(args['SNAPSHOTS']))
    snapshots = discretization.operator.type_source.empty(discretization.operator.dim_source, reserve=len(S_train))
    for mu in S_train:
        snapshots.append(discretization.solve(mu))

    print('Performing POD ...')
    pod_product = discretization.h1_product if args['--pod-norm'] == 'h1' else None
    rb = pod(snapshots, modes=args['RBSIZE'], product=pod_product)[0]

    print('Reducing ...')
    reductor = reduce_generic_rb
    rb_discretization, reconstructor, _ = reductor(discretization, rb)

    toc = time.time()
    t_offline = toc - tic

    print('\nSearching for maximum error on random snapshots ...')

    tic = time.time()
    h1_err_max = -1
    cond_max = -1
    for mu in discretization.parameter_space.sample_randomly(args['--test']):
        print('Solving RB-Scheme for mu = {} ... '.format(mu), end='')
        URB = reconstructor.reconstruct(rb_discretization.solve(mu))
        U = discretization.solve(mu)
        h1_err = discretization.h1_norm(U - URB)[0]
        cond = np.linalg.cond(rb_discretization.operator.assemble(mu)._matrix)
        if h1_err > h1_err_max:
            h1_err_max = h1_err
            Umax = U
            URBmax = URB
            mumax = mu
        if cond > cond_max:
            cond_max = cond
            cond_max_mu = mu
        print('H1-error = {}, condition = {}'.format(h1_err, cond))
    toc = time.time()
    t_est = toc - tic
    real_rb_size = len(rb)

    print('''
    *** RESULTS ***

    Problem:
       number of blocks:                   {args[XBLOCKS]}x{args[YBLOCKS]}
       h:                                  sqrt(2)/{args[--grid]}

    POD basis generation:
       number of snapshots:                {args[SNAPSHOTS]}^({args[XBLOCKS]}x{args[YBLOCKS]})
       pod norm:                           {args[--pod-norm]}
       prescribed basis size:              {args[RBSIZE]}
       actual basis size:                  {real_rb_size}
       elapsed time:                       {t_offline}

    Stochastic error estimation:
       number of samples:                  {args[--test]}
       maximal H1-error:                   {h1_err_max}  (mu = {mumax})
       maximal condition of system matrix: {cond_max}  (mu = {cond_max_mu})
       elapsed time:                       {t_est}
    '''.format(**locals()))

    sys.stdout.flush()
    if args['--plot-err']:
        discretization.visualize((Umax, URBmax, Umax - URBmax), legend=('Detailed Solution', 'Reduced Solution', 'Error'),
                                 title='Maximum Error Solution', separate_colorbars=True, block=True)
Example #5
                                   visualizer=visualizer,
                                   name='C++-Discretization',
                                   cache_region=None)
    return d


# discretize
d = discretize(50, 10000, 4)

# generate solution snapshots
snapshots = d.solution_space.empty()
for mu in d.parameter_space.sample_uniformly(2):
    snapshots.append(d.solve(mu))

# apply POD
reduced_basis = pod(snapshots, 4)[0]

# reduce the model
rd, rc, _ = reduce_generic_rb(d, reduced_basis)

# stochastic error estimation
mu_max = None
err_max = -1.
for mu in d.parameter_space.sample_randomly(10):
    U_RB = (rc.reconstruct(rd.solve(mu)))
    U = d.solve(mu)
    err = np.max((U_RB - U).l2_norm())
    if err > err_max:
        err_max = err
        mu_max = mu
Example #6
def perform_standard_rb(config, detailed_discretization, training_samples):

    # parse config
    reductor_id = config.get('pymor', 'reductor')
    if reductor_id == 'generic':
        reductor = reduce_generic_rb
    elif reductor_id == 'stationary_affine_linear':
        reductor_error_product = config.get('pymor', 'reductor_error_product')
        assert reductor_error_product == 'None'
        reductor_error_product = None
        reductor = partial(reduce_stationary_affine_linear, error_product=reductor_error_product)
    else:
        raise ConfigError('unknown \'pymor.reductor\' given: \'{}\''.format(reductor_id))

    # first the extension algorithm product, if needed
    extension_algorithm_id = config.get('pymor', 'extension_algorithm')
    if extension_algorithm_id in {'gram_schmidt', 'pod'}:
        extension_algorithm_product_id = config.get('pymor', 'extension_algorithm_product')
        if extension_algorithm_product_id == 'None':
            extension_algorithm_product = None
        else:
            extension_algorithm_product = detailed_discretization.products[extension_algorithm_product_id]
    # then the extension algorithm
    extension_algorithm_id = config.get('pymor', 'extension_algorithm')
    if extension_algorithm_id == 'gram_schmidt':
        extension_algorithm = partial(gram_schmidt_basis_extension, product=extension_algorithm_product)
        extension_algorithm_id += ' ({})'.format(extension_algorithm_product_id)
    elif extension_algorithm_id == 'pod':
        extension_algorithm = partial(pod_basis_extension, product=extension_algorithm_product)
        extension_algorithm_id += ' ({})'.format(extension_algorithm_product_id)
    elif extension_algorithm_id == 'trivial':
        extension_algorithm = trivial_basis_extension
    else:
        raise ConfigError('unknown \'pymor.extension_algorithm\' given: \'{}\''.format(extension_algorithm_id))

    greedy_error_norm_id = config.get('pymor', 'greedy_error_norm')
    assert greedy_error_norm_id in {'l2', 'h1_semi'}
    greedy_error_norm = induced_norm(detailed_discretization.products[greedy_error_norm_id])

    greedy_use_estimator = config.getboolean('pymor', 'use_estimator')
    greedy_max_rb_size = config.getint('pymor', 'max_rb_size')
    greedy_target_error = config.getfloat('pymor', 'target_error')

    # do the actual work
    greedy_data = greedy(detailed_discretization,
                         reductor,
                         training_samples,
                         initial_basis=detailed_discretization.functionals['rhs'].type_source.empty(
                                      dim=detailed_discretization.functionals['rhs'].dim_source),
                         use_estimator=greedy_use_estimator,
                         error_norm=greedy_error_norm,
                         extension_algorithm=extension_algorithm,
                         max_extensions=greedy_max_rb_size,
                         target_error=greedy_target_error)
    reduced_basis = greedy_data['basis']
    rb_size = len(reduced_basis)

    # perform final compression
    final_compression = config.getboolean('pymor', 'final_compression')
    if final_compression:
        t = time.time()
        logger.info('Applying final compression ...')

        # select product
        compression_product_id = config.get('pymor', 'compression_product')
        if compression_product_id == 'None':
            compression_product = None
        else:
            compression_product = detailed_discretization.products[compression_product_id]

        # do the actual work
        reduced_basis = pod(reduced_basis, product=compression_product)

        rd, rc, _ = reductor(detailed_discretization, reduced_basis)
        greedy_data['reduced_discretization'], greedy_data['reconstructor'] = rd, rc

        time_compression = time.time() - t
        compressed_rb_size = len(reduced_basis)

        # report
        report_string = '''
Greedy basis generation:
    used estimator:        {greedy_use_estimator}
    error norm:            {greedy_error_norm_id}
    extension method:      {extension_algorithm_id}
    prescribed basis size: {greedy_max_rb_size}
    prescribed error:      {greedy_target_error}
    actual basis size:     {rb_size}
    greedy time:           {greedy_data[time]}
    compression method:     pod ({compression_product_id})
    compressed basis size:  {compressed_rb_size}
    final compression time: {time_compression}
'''.format(**locals())
    else:
        # report
        report_string = '''
Greedy basis generation:
    used estimator:        {greedy_use_estimator}
    error norm:            {greedy_error_norm_id}
    extension method:      {extension_algorithm_id}
    prescribed basis size: {greedy_max_rb_size}
    prescribed error:      {greedy_target_error}
    actual basis size:     {rb_size}
    elapsed time:          {greedy_data[time]}
'''.format(**locals())

    return report_string, greedy_data
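
perform_standard_rb pulls all of its options from a config object with get/getboolean/getint/getfloat accessors, i.e. the ConfigParser interface from the standard library. A hypothetical configuration covering the keys queried above (section name and keys follow the code, the values are purely illustrative) could be built like this:

from configparser import ConfigParser

config = ConfigParser()
config.read_string('''
[pymor]
reductor                    = generic
extension_algorithm         = gram_schmidt
extension_algorithm_product = None
greedy_error_norm           = h1_semi
use_estimator               = False
max_rb_size                 = 50
target_error                = 1e-6
final_compression           = False
''')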
Example #7
def perform_lrbms(config, multiscale_discretization, training_samples):

    num_subdomains = multiscale_discretization._impl.num_subdomains()

    # parse config
    # first the extension algorithm product, if needed
    extension_algorithm_id = config.get('pymor', 'extension_algorithm')
    if extension_algorithm_id in {'gram_schmidt', 'pod'}:
        extension_algorithm_product_id = config.get('pymor', 'extension_algorithm_product')
        if extension_algorithm_product_id == 'None':
            extension_algorithm_products = [None for ss in np.arange(num_subdomains)]
        else:
            extension_algorithm_products = [multiscale_discretization.local_product(ss, extension_algorithm_product_id)
                                            for ss in np.arange(num_subdomains)]
    # then the extension algorithm
    if extension_algorithm_id == 'gram_schmidt':
        extension_algorithm = [partial(gram_schmidt_basis_extension, product=extension_algorithm_products[ss])
                               for ss in np.arange(num_subdomains)]
        extension_algorithm_id += ' ({})'.format(extension_algorithm_product_id)
    elif extension_algorithm_id == 'pod':
        extension_algorithm = [partial(pod_basis_extension, product=extension_algorithm_products[ss])
                               for ss in np.arange(num_subdomains)]
        extension_algorithm_id += ' ({})'.format(extension_algorithm_product_id)
    elif extension_algorithm_id == 'trivial':
        extension_algorithm = [trivial_basis_extension for ss in np.arange(num_subdomains)]
    else:
        raise ConfigError('unknown \'pymor.extension_algorithm\' given:\'{}\''.format(extension_algorithm_id))

    greedy_error_norm_id = config.get('pymor', 'greedy_error_norm')
    if greedy_error_norm_id == 'None':
        greedy_error_norm = None
    else:
        greedy_error_norm = induced_norm(multiscale_discretization.products[greedy_error_norm_id])

    greedy_use_estimator = config.getboolean('pymor', 'use_estimator')
    assert greedy_use_estimator is False
    greedy_max_rb_size = config.getint('pymor', 'max_rb_size')
    greedy_target_error = config.getfloat('pymor', 'target_error')

    # do the actual work
    greedy_data = greedy_lrbms(multiscale_discretization,
                               reduce_generic_rb,
                               training_samples,
                               initial_basis=[multiscale_discretization.local_rhs(ss).type_source.empty(dim=multiscale_discretization.local_rhs(ss).dim_source)
                                             for ss in np.arange(num_subdomains)],
                               use_estimator=greedy_use_estimator,
                               error_norm=greedy_error_norm,
                               extension_algorithm=extension_algorithm,
                               max_extensions=greedy_max_rb_size,
                               target_error=greedy_target_error)

    reduced_basis = greedy_data['basis']
    rb_size = [len(local_data) for local_data in reduced_basis]

    # perform final compression
    final_compression = config.getboolean('pymor', 'final_compression')
    if final_compression:
        t = time.time()
        logger.info('Applying final compression ...')

        # select local product
        compression_product_id = config.get('pymor', 'compression_product')
        if compression_product_id == 'None':
            compression_products = [None for ss in np.arange(num_subdomains)]
        else:
            compression_products = [multiscale_discretization.local_product(ss, compression_product_id)
                                    for ss in np.arange(num_subdomains)]

        # do the actual work
        reduced_basis = [pod(reduced_basis[ss], product=compression_products[ss]) for ss in np.arange(num_subdomains)]

        rd, rc, _ = reduce_generic_rb(multiscale_discretization, reduced_basis)
        greedy_data['reduced_discretization'], greedy_data['reconstructor'] = rd, rc

        time_compression = time.time() - t
        compressed_rb_size = [len(local_data) for local_data in reduced_basis]

        # report
        report_string = '''
Greedy basis generation:
    used estimator:        {greedy_use_estimator}
    error norm:            {greedy_error_norm_id}
    extension method:      {extension_algorithm_id}
    prescribed basis size: {greedy_max_rb_size}
    prescribed error:      {greedy_target_error}
    actual basis size:     {rb_size}
    greedy time:           {greedy_data[time]}
    compression method:     pod ({compression_product_id})
    compressed basis size:  {compressed_rb_size}
    final compression time: {time_compression}
'''.format(**locals())
    else:
        # report
        report_string = '''
Greedy basis generation:
    used estimator:        {greedy_use_estimator}
    error norm:            {greedy_error_norm_id}
    extension method:      {extension_algorithm_id}
    prescribed basis size: {greedy_max_rb_size}
    prescribed error:      {greedy_target_error}
    actual basis size:     {rb_size}
    elapsed time:          {greedy_data[time]}
'''.format(**locals())

    return report_string, greedy_data
Example #8
def deim(evaluations, modes=None, error_norm=None, product=None):
    '''Generate data for empirical operator interpolation using DEIM algorithm.

    Given evaluations of |Operators|, this method generates a collateral_basis and
    interpolation DOFs for empirical operator interpolation. The returned objects
    can be used to instantiate an |EmpiricalInterpolatedOperator|.

    The collateral basis is determined by the first POD modes of the operator
    evaluations.

    Parameters
    ----------
    evaluations
        A |VectorArray| of operator evaluations.
    modes
        Dimension of the collateral basis, i.e., the number of POD modes of the
        operator evaluations.
    error_norm
        Norm w.r.t. which to calculate the interpolation error. If `None`, the Euclidean norm
        is used.
    product
        Product |Operator| used for POD.

    Returns
    -------
    interpolation_dofs
        |NumPy array| of the DOFs at which the operators have to be evaluated.
    collateral_basis
        |VectorArray| containing the generated collateral basis.
    data
        Dict containing the following fields:

            :errors: sequence of maximum approximation errors during greedy search.
    '''

    assert isinstance(evaluations, VectorArrayInterface)

    logger = getLogger('pymor.algorithms.ei.deim')
    logger.info('Generating Interpolation Data ...')

    collateral_basis = pod(evaluations, modes, product=product)[0]

    interpolation_dofs = np.zeros((0,), dtype=np.int32)
    interpolation_matrix = np.zeros((0, 0))
    errs = []

    for i in range(len(collateral_basis)):

        if len(interpolation_dofs) > 0:
            coefficients = np.linalg.solve(interpolation_matrix,
                                           collateral_basis.components(interpolation_dofs, ind=i).T).T
            U_interpolated = collateral_basis.lincomb(coefficients, ind=range(len(interpolation_dofs)))
            ERR = collateral_basis.copy(ind=i)
            ERR -= U_interpolated
        else:
            ERR = collateral_basis.copy(ind=i)

        err = ERR.l2_norm() if error_norm is None else error_norm(ERR)

        logger.info('Interpolation error for basis vector {}: {}'.format(i, err))

        # compute new interpolation dof and collateral basis vector
        new_dof = ERR.amax()[0][0]

        if new_dof in interpolation_dofs:
            logger.info('DOF {} selected twice for interpolation! Stopping extension loop.'.format(new_dof))
            break

        interpolation_dofs = np.hstack((interpolation_dofs, new_dof))
        interpolation_matrix = collateral_basis.components(interpolation_dofs, ind=range(len(interpolation_dofs))).T
        errs.append(err)

        logger.info('')

    if len(interpolation_dofs) < len(collateral_basis):
        collateral_basis.remove(ind=range(len(interpolation_dofs), len(collateral_basis)))

    logger.info('Finished.')

    data = {'errors': errs}

    return interpolation_dofs, collateral_basis, data
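
Ignoring logging and the early-exit checks, the greedy point selection in deim reduces to a few lines of linear algebra. A sketch for a collateral basis W with one mode per column, using the Euclidean error norm (plain NumPy, not pyMOR's API):

import numpy as np

def deim_dofs_sketch(W):
    # W: (n, m) collateral basis, one mode per column
    dofs = [int(np.argmax(np.abs(W[:, 0])))]         # largest entry of the first mode
    for i in range(1, W.shape[1]):
        # interpolate mode i at the selected DOFs using the previous modes
        c = np.linalg.solve(W[np.ix_(dofs, range(i))], W[dofs, i])
        r = W[:, i] - W[:, :i] @ c                   # interpolation residual
        dofs.append(int(np.argmax(np.abs(r))))       # next DOF: largest residual entry
    return np.array(dofs)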
Example #9
def thermalblock_demo(args):
    args['XBLOCKS'] = int(args['XBLOCKS'])
    args['YBLOCKS'] = int(args['YBLOCKS'])
    args['--grid'] = int(args['--grid'])
    args['SNAPSHOTS'] = int(args['SNAPSHOTS'])
    args['RBSIZE'] = int(args['RBSIZE'])
    args['--test'] = int(args['--test'])
    args['--pod-norm'] = args['--pod-norm'].lower()
    assert args['--pod-norm'] in {'trivial', 'h1'}

    print('Solving on TriaGrid(({0},{0}))'.format(args['--grid']))

    print('Setup Problem ...')
    problem = ThermalBlockProblem(num_blocks=(args['XBLOCKS'],
                                              args['YBLOCKS']))

    print('Discretize ...')
    discretization, _ = discretize_elliptic_cg(problem,
                                               diameter=1. / args['--grid'])

    print('The parameter type is {}'.format(discretization.parameter_type))

    if args['--plot-solutions']:
        print('Showing some solutions')
        Us = tuple()
        legend = tuple()
        for mu in discretization.parameter_space.sample_randomly(2):
            print('Solving for diffusion = \n{} ... '.format(mu['diffusion']))
            sys.stdout.flush()
            Us = Us + (discretization.solve(mu), )
            legend = legend + (str(mu['diffusion']), )
        discretization.visualize(
            Us,
            legend=legend,
            title='Detailed Solutions for different parameters',
            block=True)

    print('RB generation ...')

    tic = time.time()

    print('Solving on training set ...')
    S_train = list(
        discretization.parameter_space.sample_uniformly(args['SNAPSHOTS']))
    snapshots = discretization.operator.source.empty(reserve=len(S_train))
    for mu in S_train:
        snapshots.append(discretization.solve(mu))

    print('Performing POD ...')
    pod_product = discretization.h1_product if args[
        '--pod-norm'] == 'h1' else None
    rb = pod(snapshots, modes=args['RBSIZE'], product=pod_product)[0]

    print('Reducing ...')
    reductor = reduce_generic_rb
    rb_discretization, reconstructor, _ = reductor(discretization, rb)

    toc = time.time()
    t_offline = toc - tic

    print('\nSearching for maximum error on random snapshots ...')

    tic = time.time()
    h1_err_max = -1
    cond_max = -1
    for mu in discretization.parameter_space.sample_randomly(args['--test']):
        print('Solving RB-Scheme for mu = {} ... '.format(mu), end='')
        URB = reconstructor.reconstruct(rb_discretization.solve(mu))
        U = discretization.solve(mu)
        h1_err = discretization.h1_norm(U - URB)[0]
        cond = np.linalg.cond(rb_discretization.operator.assemble(mu)._matrix)
        if h1_err > h1_err_max:
            h1_err_max = h1_err
            Umax = U
            URBmax = URB
            mumax = mu
        if cond > cond_max:
            cond_max = cond
            cond_max_mu = mu
        print('H1-error = {}, condition = {}'.format(h1_err, cond))
    toc = time.time()
    t_est = toc - tic
    real_rb_size = len(rb)

    print('''
    *** RESULTS ***

    Problem:
       number of blocks:                   {args[XBLOCKS]}x{args[YBLOCKS]}
       h:                                  1/{args[--grid]}

    POD basis generation:
       number of snapshots:                {args[SNAPSHOTS]}^({args[XBLOCKS]}x{args[YBLOCKS]})
       pod norm:                           {args[--pod-norm]}
       prescribed basis size:              {args[RBSIZE]}
       actual basis size:                  {real_rb_size}
       elapsed time:                       {t_offline}

    Stochastic error estimation:
       number of samples:                  {args[--test]}
       maximal H1-error:                   {h1_err_max}  (mu = {mumax})
       maximal condition of system matrix: {cond_max}  (mu = {cond_max_mu})
       elapsed time:                       {t_est}
    '''.format(**locals()))

    sys.stdout.flush()
    if args['--plot-err']:
        discretization.visualize(
            (Umax, URBmax, Umax - URBmax),
            legend=('Detailed Solution', 'Reduced Solution', 'Error'),
            title='Maximum Error Solution',
            separate_colorbars=True,
            block=True)
Example #10
def pod_basis_extension(basis,
                        U,
                        count=1,
                        copy_basis=True,
                        product=None,
                        orthonormalize=True):
    """Extend basis with the first `count` POD modes of the projection of `U` onto the
    orthogonal complement of the basis.

    Note that the provided basis is assumed to be orthonormal w.r.t. the provided
    scalar product!

    Parameters
    ----------
    basis
        |VectorArray| containing the basis to extend. The basis is expected to be
        orthonormal w.r.t. `product`.
    U
        |VectorArray| containing the vectors to which the POD is applied.
    count
        Number of POD modes that are to be appended to the basis.
    product
        The scalar product w.r.t. which to orthonormalize; if `None`, the Euclidean
        product is used.
    copy_basis
        If `copy_basis` is `False`, the old basis is extended in-place.
    orthonormalize
        If `True`, re-orthonormalize the new basis vectors obtained by the POD
        in order to improve numerical accuracy.

    Returns
    -------
    new_basis
        The extended basis.
    extension_data
        Dict containing the following fields:

            :hierarchic: `True` if `new_basis` contains `basis` as its first vectors.

    Raises
    ------
    ExtensionError
        POD produces no new vectors. This is the case when no vector in `U`
        is linearly independent from the basis.
    """
    if basis is None:
        return pod(U, modes=count, product=product)[0], {'hierarchic': True}

    basis_length = len(basis)

    new_basis = basis.copy() if copy_basis else basis

    if product is None:
        U_proj_err = U - basis.lincomb(U.dot(basis, pairwise=False))
    else:
        U_proj_err = U - basis.lincomb(product.apply2(U, basis,
                                                      pairwise=False))

    new_basis.append(
        pod(U_proj_err, modes=count, product=product, orthonormalize=False)[0])

    if orthonormalize:
        # offset=basis_length, not len(basis): when copy_basis is False,
        # basis and new_basis are the same object, so len(basis) has already
        # grown past the old basis vectors after the append above
        gram_schmidt(new_basis, offset=basis_length, product=product, copy=False)

    if len(new_basis) <= basis_length:
        raise ExtensionError

    return new_basis, {'hierarchic': True}
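
For reference, the two projection branches that recur in these examples compute the same thing: the coefficients of the P-orthogonal projection of each vector in U onto the P-orthonormal basis. A NumPy sketch with column-vector conventions (an illustration, not pyMOR code):

import numpy as np

def projection_error_sketch(B, U, P=None):
    # B: (n, k) basis, assumed P-orthonormal; U: (n, m) snapshots;
    # P: (n, n) SPD product matrix, or None for the Euclidean product
    coeffs = B.T @ U if P is None else B.T @ (P @ U)  # <u, b_j>_P for every u, b_j
    return U - B @ coeffs                             # projection error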