def assemble_estimator_diffusive_flux_aa(lambda_xi, lambda_xi_prime, grid, ii,
                                         block_space, lambda_hat, kappa,
                                         solution_space):
    local_subdomains, num_local_subdomains, num_global_subdomains = _get_subdomains(
        grid)
    diffusive_flux_aa_product = make_diffusive_flux_aa_product(
        grid,
        ii,
        block_space.local_space(ii),
        lambda_hat,
        lambda_u=lambda_xi,
        lambda_v=lambda_xi_prime,
        kappa=kappa,
        over_integrate=2)
    subdomain_walker = make_subdomain_walker(grid, ii)
    subdomain_walker.append(diffusive_flux_aa_product)
    subdomain_walker.walk()
    # , block_space.local_space(ii).dof_communicator,
    matrix = DuneXTMatrixOperator(diffusive_flux_aa_product.matrix(),
                                  range_id='domain_{}'.format(ii),
                                  source_id='domain_{}'.format(ii))
    df_ops = np.full((num_global_subdomains, ) * 2, None)
    df_ops[ii, ii] = matrix
    return BlockOperator(df_ops,
                         range_spaces=solution_space.subspaces,
                         source_spaces=solution_space.subspaces)
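# A sketch of how the block operator assembled above is typically combined
# (cf. the estimator assembly further below): one instance per pair of affine
# diffusion components, summed in a LincombOperator weighted by products of
# the corresponding coefficients, e.g.
#
#     df_aa = LincombOperator(
#         [assemble_estimator_diffusive_flux_aa(l_xi, l_xi_prime, grid, ii,
#                                               block_space, lambda_hat, kappa,
#                                               solution_space)
#          for l_xi in lambda_funcs for l_xi_prime in lambda_funcs],
#         [ProductParameterFunctional([c1, c2])
#          for c1 in lambda_coeffs for c2 in lambda_coeffs])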
class DiffusiveFluxOperatorAB(EstimatorOperatorBase):

    RT_source = True

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        subdomain_space = self.block_space.local_space(self.subdomain)
        self.subdomain_rt_space = self.global_rt_space.restrict_to_dd_subdomain_part(self.grid, self.subdomain)
        diffusive_flux_ab_product = make_diffusive_flux_ab_product(
                self.grid, self.subdomain,
                range_space=subdomain_space,
                source_space=self.subdomain_rt_space,
                lambda_range=self.lambda_xi,
                lambda_hat=self.lambda_bar,
                kappa=self.kappa,
                over_integrate=2)
        subdomain_walker = make_subdomain_walker(self.grid, self.subdomain)
        subdomain_walker.append(diffusive_flux_ab_product)
        subdomain_walker.walk()
        self.matrix = DuneXTMatrixOperator(diffusive_flux_ab_product.matrix(),
                                           range_id='domain_{}'.format(self.subdomain),
                                           source_id='RT_{}'.format(self.subdomain))

    def apply(self, U, mu=None):
        raise NotImplementedError

    def apply2(self, V, U, mu=None):
        assert V in self.range and U in self.source
        # Restrict the globally reconstructed flux to the RT space on this
        # subdomain before evaluating the bilinear form.
        reconstructed_uh_kk_on_subdomain = self.matrix.source.make_array(
                [self.subdomain_rt_space.restrict(u.impl) for u in U._list])
        return self.matrix.apply2(V, reconstructed_uh_kk_on_subdomain)
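    # The following __init__ presumably belongs to the nonconformity estimator
    # operator (class definition not shown): it maps between the Oswald
    # interpolation block spaces on the neighborhoods of jj and kk and caches
    # one local elliptic product matrix per subdomain.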
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.range = BlockVectorSpace([self.global_space.subspaces[ii]
                                       for ii in self.grid.neighborhood_of(self.jj)],
                                      'OI_{}'.format(self.jj))
        self.source = BlockVectorSpace([self.global_space.subspaces[ii]
                                        for ii in self.grid.neighborhood_of(self.kk)],
                                       'OI_{}'.format(self.kk))
        if self.subdomain not in self._matrices:
            matrix = make_local_elliptic_matrix_operator(self.grid, self.subdomain,
                                                         self.block_space.local_space(self.subdomain),
                                                         self.lambda_bar, self.kappa)
            matrix.assemble()
            matrix = matrix.matrix()
            self._matrices[self.subdomain] = DuneXTMatrixOperator(matrix,
                                                                  range_id='domain_{}'.format(self.subdomain),
                                                                  source_id='domain_{}'.format(self.subdomain))
        self.matrix = self._matrices[self.subdomain]
        self.range_index = self.grid.neighborhood_of(self.jj).index(self.subdomain)
        self.source_index = self.grid.neighborhood_of(self.kk).index(self.subdomain)
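    # Likewise cached per subdomain: the H(div)-seminorm product matrix on the
    # local RT space, assembled in the following __init__ (presumably used for
    # the residual indicator).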
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if self.subdomain not in self._subdomain_rt_spaces:
            self._subdomain_rt_spaces[self.subdomain] = self.global_rt_space.restrict_to_dd_subdomain_part(
                    self.grid, self.subdomain)
        self.subdomain_rt_space = self._subdomain_rt_spaces[self.subdomain]
        if self.subdomain not in self._matrices:
            h_div_semi_product = make_Hdiv_semi_product_matrix_operator_on_subdomain(
                    self.grid, self.subdomain,
                    self.subdomain_rt_space,
                    over_integrate=2)
            subdomain_walker = make_subdomain_walker(self.grid, self.subdomain)
            subdomain_walker.append(h_div_semi_product)
            subdomain_walker.walk()
            self._matrices[self.subdomain] = DuneXTMatrixOperator(h_div_semi_product.matrix(),
                                                                  range_id='RT_{}'.format(self.subdomain),
                                                                  source_id='RT_{}'.format(self.subdomain))
        self.matrix = self._matrices[self.subdomain]
def assemble_estimator_diffusive_flux_bb(grid, ii, subdomain_rt_spaces,
                                         lambda_hat, kappa,
                                         local_rt_projection):
    diffusive_flux_bb_product = make_diffusive_flux_bb_product(
        grid,
        ii,
        subdomain_rt_spaces[ii],
        lambda_hat,
        kappa=kappa,
        over_integrate=2)
    subdomain_walker = make_subdomain_walker(grid, ii)
    subdomain_walker.append(diffusive_flux_bb_product)
    subdomain_walker.walk()
    # subdomain_rt_spaces[ii].dof_communicator,
    matrix = DuneXTMatrixOperator(diffusive_flux_bb_product.matrix(),
                                  range_id='LOCALRT_{}'.format(ii),
                                  source_id='LOCALRT_{}'.format(ii))
    return Concatenation([local_rt_projection.T, matrix, local_rt_projection],
                         name='diffusive_flux_bb_{}'.format(ii))
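# The Concatenation above realizes the pullback P^T M P: the diffusive flux
# bb product matrix M lives on the local RT space and local_rt_projection P
# maps the full flux reconstruction range onto that space. The __init__ below
# is presumably the cached operator-class counterpart of
# assemble_estimator_diffusive_flux_bb.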
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if self.subdomain not in self._subdomain_rt_spaces:
            self._subdomain_rt_spaces[self.subdomain] = self.global_rt_space.restrict_to_dd_subdomain_part(
                    self.grid, self.subdomain)
        self.subdomain_rt_space = self._subdomain_rt_spaces[self.subdomain]
        if self.subdomain not in self._matrices:
            diffusive_flux_bb_product = make_diffusive_flux_bb_product(
                    self.grid, self.subdomain,
                    self.subdomain_rt_space,
                    self.lambda_bar,
                    kappa=self.kappa,
                    over_integrate=2)
            subdomain_walker = make_subdomain_walker(self.grid, self.subdomain)
            subdomain_walker.append(diffusive_flux_bb_product)
            subdomain_walker.walk()
            self._matrices[self.subdomain] = DuneXTMatrixOperator(diffusive_flux_bb_product.matrix(),
                                                                  range_id='RT_{}'.format(self.subdomain),
                                                                  source_id='RT_{}'.format(self.subdomain))
        self.matrix = self._matrices[self.subdomain]
class DiffusiveFluxOperatorAA(EstimatorOperatorBase):

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        diffusive_flux_aa_product = make_diffusive_flux_aa_product(
                self.grid, self.subdomain,
                self.block_space.local_space(self.subdomain),
                self.lambda_bar,
                lambda_u=self.lambda_xi, lambda_v=self.lambda_xi_prime,
                kappa=self.kappa,
                over_integrate=2)
        subdomain_walker = make_subdomain_walker(self.grid, self.subdomain)
        subdomain_walker.append(diffusive_flux_aa_product)
        subdomain_walker.walk()
        self.matrix = DuneXTMatrixOperator(diffusive_flux_aa_product.matrix(),
                                           range_id='domain_{}'.format(self.subdomain),
                                           source_id='domain_{}'.format(self.subdomain))

    def apply(self, U, mu=None):
        raise NotImplementedError

    def apply2(self, V, U, mu=None):
        assert V in self.range and U in self.source
        return self.matrix.apply2(V, U)
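# Usage note (pyMOR semantics): for V in the operator's range and U in its
# source, op.apply2(V, U, mu=mu) returns the (len(V), len(U)) array of
# bilinear form evaluations; here this is simply V^T A U with the assembled
# subdomain matrix A.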
def discretize_lhs(lambda_func, grid, block_space, local_patterns,
                   boundary_patterns, coupling_matrices, kappa,
                   local_all_neumann_boundary_info, boundary_info,
                   coupling_patterns, solver_options):
    logger = getLogger('discretize_lhs')
    logger.debug('...')
    local_subdomains, num_local_subdomains, num_global_subdomains = _get_subdomains(
        grid)
    local_matrices = [None] * num_global_subdomains
    boundary_matrices = {}
    logger.debug('discretize lhs coupling matrices ...')
    for ii in range(num_global_subdomains):
        local_matrices[ii] = Matrix(
            block_space.local_space(ii).size(),
            block_space.local_space(ii).size(), local_patterns[ii])
        if ii in grid.boundary_subdomains():
            boundary_matrices[ii] = Matrix(
                block_space.local_space(ii).size(),
                block_space.local_space(ii).size(), boundary_patterns[ii])

    logger.debug('discretize lhs ipdg ops ...')
    for ii in range(num_global_subdomains):
        ss = block_space.local_space(ii)
        ll = local_matrices[ii]
        ipdg_operator = make_elliptic_swipdg_matrix_operator(
            lambda_func,
            kappa,
            local_all_neumann_boundary_info,
            ll,
            ss,
            over_integrate=2)
        ipdg_operator.assemble(False)

    logger.debug('discretize lhs ops ...')
    local_ipdg_coupling_operator = make_local_elliptic_swipdg_coupling_operator(
        lambda_func, kappa)

    def assemble_coupling_contributions(subdomain, neighboring_subdomain):
        coupling_assembler = block_space.coupling_assembler(
            subdomain, neighboring_subdomain)
        coupling_assembler.append(
            local_ipdg_coupling_operator,
            coupling_matrices['in_in'][(subdomain, neighboring_subdomain)],
            coupling_matrices['out_out'][(subdomain, neighboring_subdomain)],
            coupling_matrices['in_out'][(subdomain, neighboring_subdomain)],
            coupling_matrices['out_in'][(subdomain, neighboring_subdomain)])
        coupling_assembler.assemble()

    for ii in range(num_global_subdomains):
        for jj in grid.neighboring_subdomains(ii):
            if ii < jj:  # Assemble primally (visit each coupling only once).
                assemble_coupling_contributions(ii, jj)

    logger.debug('discretize lhs boundary ...')
    local_ipdg_boundary_operator = make_local_elliptic_swipdg_boundary_operator(
        lambda_func, kappa)
    apply_on_dirichlet_intersections = make_apply_on_dirichlet_intersections(
        boundary_info)

    def assemble_boundary_contributions(subdomain):
        boundary_assembler = block_space.boundary_assembler(subdomain)
        boundary_assembler.append(local_ipdg_boundary_operator,
                                  boundary_matrices[subdomain],
                                  apply_on_dirichlet_intersections)
        boundary_assembler.assemble()

    for ii in grid.boundary_subdomains():
        assemble_boundary_contributions(ii)

    logger.debug('discretize lhs global contributions ...')
    global_pattern = SparsityPatternDefault(block_space.mapper.size)
    for ii in range(num_global_subdomains):
        block_space.mapper.copy_local_to_global(local_patterns[ii], ii,
                                                global_pattern)
        if ii in grid.boundary_subdomains():
            block_space.mapper.copy_local_to_global(boundary_patterns[ii], ii,
                                                    global_pattern)
        for jj in grid.neighboring_subdomains(ii):
            if ii < jj:  # Assemble primally (visit each coupling only once).
                block_space.mapper.copy_local_to_global(
                    coupling_patterns['in_in'][(ii, jj)], ii, ii,
                    global_pattern)
                block_space.mapper.copy_local_to_global(
                    coupling_patterns['out_out'][(ii, jj)], jj, jj,
                    global_pattern)
                block_space.mapper.copy_local_to_global(
                    coupling_patterns['in_out'][(ii, jj)], ii, jj,
                    global_pattern)
                block_space.mapper.copy_local_to_global(
                    coupling_patterns['out_in'][(ii, jj)], jj, ii,
                    global_pattern)

    system_matrix = Matrix(block_space.mapper.size, block_space.mapper.size,
                           global_pattern)
    for ii in range(num_global_subdomains):
        block_space.mapper.copy_local_to_global(local_matrices[ii],
                                                local_patterns[ii], ii,
                                                system_matrix)
        if ii in grid.boundary_subdomains():
            block_space.mapper.copy_local_to_global(boundary_matrices[ii],
                                                    boundary_patterns[ii], ii,
                                                    ii, system_matrix)
        for jj in grid.neighboring_subdomains(ii):
            if ii < jj:  # Assemble primally (visit each coupling only once).
                block_space.mapper.copy_local_to_global(
                    coupling_matrices['in_in'][(ii, jj)],
                    coupling_patterns['in_in'][(ii, jj)], ii, ii,
                    system_matrix)
                block_space.mapper.copy_local_to_global(
                    coupling_matrices['out_out'][(ii, jj)],
                    coupling_patterns['out_out'][(ii, jj)], jj, jj,
                    system_matrix)
                block_space.mapper.copy_local_to_global(
                    coupling_matrices['in_out'][(ii, jj)],
                    coupling_patterns['in_out'][(ii, jj)], ii, jj,
                    system_matrix)
                block_space.mapper.copy_local_to_global(
                    coupling_matrices['out_in'][(ii, jj)],
                    coupling_patterns['out_in'][(ii, jj)], jj, ii,
                    system_matrix)
    logger.debug('discretize lhs global op ...')
    op = DuneXTMatrixOperator(system_matrix,
                              dof_communicator=block_space.dof_communicator,
                              solver_options=solver_options)
    logger.debug('discretize lhs global op done ...')
    mats = np.full((num_global_subdomains, num_global_subdomains), None)
    for ii in range(num_global_subdomains):
        ii_size = block_space.local_space(ii).size()
        for jj in range(ii, num_global_subdomains):
            jj_size = block_space.local_space(jj).size()
            if ii == jj:
                mats[ii, ii] = Matrix(ii_size, ii_size, local_patterns[ii])
            elif (ii, jj) in coupling_matrices['in_out']:
                mats[ii, jj] = Matrix(ii_size, jj_size,
                                      coupling_patterns['in_out'][(ii, jj)])
                mats[jj, ii] = Matrix(jj_size, ii_size,
                                      coupling_patterns['out_in'][(ii, jj)])

    for ii in range(num_global_subdomains):
        for jj in range(ii, num_global_subdomains):
            if ii == jj:
                mats[ii, ii].axpy(1., local_matrices[ii])
                if ii in boundary_matrices:
                    mats[ii, ii].axpy(1., boundary_matrices[ii])
            elif (ii, jj) in coupling_matrices['in_out']:
                mats[ii, ii].axpy(1., coupling_matrices['in_in'][(ii, jj)])
                mats[jj, jj].axpy(1., coupling_matrices['out_out'][(ii, jj)])
                mats[ii, jj].axpy(1., coupling_matrices['in_out'][(ii, jj)])
                mats[jj, ii].axpy(1., coupling_matrices['out_in'][(ii, jj)])

    logger.debug('discretize lhs block op ...')
    ops = np.full((num_global_subdomains, num_global_subdomains), None)
    for (ii, jj), mat in np.ndenumerate(mats):
        ops[ii, jj] = DuneXTMatrixOperator(
            mat,
            name='local_block_{}-{}'.format(ii, jj),
            source_id='domain_{}'.format(jj),
            range_id='domain_{}'.format(ii)) if mat else None

    block_op = BlockOperator(ops,
                             dof_communicator=block_space.dof_communicator,
                             name='BlockOp')
    return op, block_op
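# Note: discretize_lhs returns the same system in two forms, assembled from
# the same local, boundary and coupling matrices: `op` as one monolithic
# DuneXTMatrixOperator on the globally mapped DOFs and `block_op` as a
# BlockOperator of per-subdomain diagonal blocks and coupling off-diagonal
# blocks.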
    def solve_for_local_correction(self,
                                   subdomain,
                                   Us,
                                   mu=None,
                                   inverse_options=None):
        grid, local_boundary_info, affine_lambda, kappa, f, block_space = self.enrichment_data
        neighborhood = self.neighborhoods[subdomain]
        neighborhood_space = block_space.restricted_to_neighborhood(
            neighborhood)
        # Compute current solution restricted to the neighborhood to be usable as Dirichlet values for the correction
        # problem.
        current_solution = [U._list for U in Us]
        # `np.all` on a generator is always truthy, so use the builtin here.
        assert all(len(v) == 1 for v in current_solution)
        current_solution = [v[0].impl for v in current_solution]
        current_solution = neighborhood_space.project_onto_neighborhood(
            current_solution, neighborhood)
        current_solution = make_discrete_function(neighborhood_space,
                                                  current_solution)
        # Solve the local corrector problem.
        #   LHS
        ops = []
        for lambda_ in affine_lambda['functions']:
            ops.append(
                make_elliptic_swipdg_matrix_operator_on_neighborhood(
                    grid,
                    subdomain,
                    local_boundary_info,
                    neighborhood_space,
                    lambda_,
                    kappa,
                    over_integrate=0))
        ops_coeffs = affine_lambda['coefficients'].copy()
        #   RHS
        funcs = []

        # There is no boundary treatment yet. Things will probably break in
        # multiple ways for non-trivial boundary conditions, so the following
        # stays commented out for now:

        # for lambda_ in affine_lambda['functions']:
        #     funcs.append(make_elliptic_swipdg_vector_functional_on_neighborhood(
        #         grid, subdomain, local_boundary_info,
        #         neighborhood_space,
        #         current_solution, lambda_, kappa,
        #         over_integrate=0))
        # funcs_coeffs = affine_lambda['coefficients'].copy()
        funcs.append(
            make_l2_vector_functional_on_neighborhood(grid,
                                                      subdomain,
                                                      neighborhood_space,
                                                      f,
                                                      over_integrate=2))
        # funcs_coeffs.append(1.)
        funcs_coeffs = [1]
        #   assemble in one grid walk
        neighborhood_assembler = make_neighborhood_system_assembler(
            grid, subdomain, neighborhood_space)
        for op in ops:
            neighborhood_assembler.append(op)
        for func in funcs:
            neighborhood_assembler.append(func)
        neighborhood_assembler.assemble()
        # solve
        local_space_id = self.solution_space.subspaces[subdomain].id
        lhs = LincombOperator([
            DuneXTMatrixOperator(
                o.matrix(), source_id=local_space_id, range_id=local_space_id)
            for o in ops
        ], ops_coeffs)
        rhs = LincombOperator([
            VectorFunctional(lhs.range.make_array([v.vector()])) for v in funcs
        ], funcs_coeffs)
        correction = lhs.apply_inverse(rhs.as_source_array(mu),
                                       mu=mu,
                                       inverse_options=inverse_options)
        assert len(correction) == 1
        # restrict to subdomain
        local_sizes = [
            block_space.local_space(nn).size() for nn in neighborhood
        ]
        local_starts = [
            int(np.sum(local_sizes[:nn])) for nn in range(len(local_sizes))
        ]
        local_starts.append(neighborhood_space.mapper.size)
        localized_corrections_as_np = np.array(correction._list[0].impl,
                                               copy=False)
        localized_corrections_as_np = [
            localized_corrections_as_np[local_starts[nn]:local_starts[nn + 1]]
            for nn in range(len(local_sizes))
        ]
        subdomain_index_in_neighborhood = np.where(
            np.array(list(neighborhood)) == subdomain)[0]
        assert len(subdomain_index_in_neighborhood) == 1
        subdomain_index_in_neighborhood = subdomain_index_in_neighborhood[0]
        subdomain_correction = Vector(
            local_sizes[subdomain_index_in_neighborhood], 0.)
        subdomain_correction_as_np = np.array(subdomain_correction, copy=False)
        subdomain_correction_as_np[:] = localized_corrections_as_np[
            subdomain_index_in_neighborhood][:]
        return self.solution_space.subspaces[subdomain].make_array(
            [subdomain_correction])
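    # A minimal usage sketch (subdomain index `sd` and parameter `mu` are
    # placeholders): given the current block solution arrays `Us`, one per
    # affine component, the local correction on `sd` is obtained via
    #
    #     correction = d.solve_for_local_correction(sd, Us, mu=mu)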
def discretize(grid_and_problem_data, polorder=1, solver_options=None):

    logger = getLogger('discretize_elliptic_swipdg.discretize')
    logger.info('discretizing ... ')
    over_integrate = 2

    grid, boundary_info = grid_and_problem_data['grid'], grid_and_problem_data[
        'boundary_info']

    _lambda, kappa, f = (grid_and_problem_data['lambda'],
                         grid_and_problem_data['kappa'],
                         grid_and_problem_data['f'])
    lambda_bar, lambda_hat = grid_and_problem_data[
        'lambda_bar'], grid_and_problem_data['lambda_hat']
    mu_bar, mu_hat, parameter_range = (
        grid_and_problem_data['mu_bar'], grid_and_problem_data['mu_hat'],
        grid_and_problem_data['parameter_range'])
    space = make_dg_space(grid)
    # prepare operators and functionals
    if isinstance(_lambda, dict):
        system_ops = [
            make_elliptic_swipdg_matrix_operator(lambda_func, kappa,
                                                 boundary_info, space,
                                                 over_integrate)
            for lambda_func in _lambda['functions']
        ]
        elliptic_ops = [
            make_elliptic_matrix_operator(lambda_func, kappa, space,
                                          over_integrate)
            for lambda_func in _lambda['functions']
        ]
    else:
        system_ops = [
            make_elliptic_swipdg_matrix_operator(_lambda, kappa, boundary_info,
                                                 space, over_integrate),
        ]
        elliptic_ops = [
            make_elliptic_matrix_operator(_lambda, kappa, space,
                                          over_integrate),
        ]
    if isinstance(f, dict):
        rhs_functionals = [
            make_l2_volume_vector_functional(f_func, space, over_integrate)
            for f_func in f['functions']
        ]
    else:
        rhs_functionals = [
            make_l2_volume_vector_functional(f, space, over_integrate),
        ]
    l2_matrix_with_system_pattern = system_ops[0].matrix().copy()
    l2_operator = make_l2_matrix_operator(l2_matrix_with_system_pattern, space)
    # assemble everything in one grid walk
    system_assembler = make_system_assembler(space)
    for op in system_ops:
        system_assembler.append(op)
    for op in elliptic_ops:
        system_assembler.append(op)
    for func in rhs_functionals:
        system_assembler.append(func)
    system_assembler.append(l2_operator)
    system_assembler.walk()
    # wrap everything
    if isinstance(_lambda, dict):
        op = LincombOperator([
            DuneXTMatrixOperator(o.matrix(),
                                 dof_communicator=space.dof_communicator)
            for o in system_ops
        ], _lambda['coefficients'])
        elliptic_op = LincombOperator(
            [DuneXTMatrixOperator(o.matrix()) for o in elliptic_ops],
            _lambda['coefficients'])
    else:
        op = DuneXTMatrixOperator(system_ops[0].matrix())
        elliptic_op = DuneXTMatrixOperator(elliptic_ops[0].matrix())
    if isinstance(f, dict):
        rhs = LincombOperator([
            VectorFunctional(op.range.make_array([func.vector()]))
            for func in rhs_functionals
        ], f['coefficients'])
    else:
        rhs = VectorFunctional(
            op.range.make_array([rhs_functionals[0].vector()]))
    operators = {
        'l2':
        DuneXTMatrixOperator(l2_matrix_with_system_pattern),
        'elliptic':
        elliptic_op,
        'elliptic_mu_bar':
        DuneXTMatrixOperator(elliptic_op.assemble(mu=mu_bar).matrix)
    }
    d = StationaryDiscretization(op,
                                 rhs,
                                 operators=operators,
                                 visualizer=DuneGDTVisualizer(space))
    d = d.with_(parameter_space=CubicParameterSpace(
        d.parameter_type, parameter_range[0], parameter_range[1]))

    return d, {'space': space}
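# A minimal usage sketch, assuming a grid_and_problem_data dict of the shape
# consumed above:
#
#     d, data = discretize(grid_and_problem_data, polorder=1)
#     U = d.solve(mu)        # mu from d.parameter_space
#     data['space']          # the global DG space, e.g. for visualization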
    def discretize_lhs_for_lambda(lambda_):
        local_matrices = [None]*grid.num_subdomains
        local_vectors = [None]*grid.num_subdomains
        boundary_matrices = {}
        coupling_matrices_in_in = {}
        coupling_matrices_out_out = {}
        coupling_matrices_in_out = {}
        coupling_matrices_out_in = {}
        for ii in range(grid.num_subdomains):
            local_matrices[ii] = Matrix(block_space.local_space(ii).size(),
                                        block_space.local_space(ii).size(),
                                        local_patterns[ii])
            local_vectors[ii] = Vector(block_space.local_space(ii).size())
            if ii in grid.boundary_subdomains():
                boundary_matrices[ii] = Matrix(block_space.local_space(ii).size(),
                                               block_space.local_space(ii).size(),
                                               boundary_patterns[ii])
            for jj in grid.neighboring_subdomains(ii):
                if ii < jj:  # Assemble primally (visit each coupling only once).
                    coupling_matrices_in_in[(ii, jj)] = Matrix(block_space.local_space(ii).size(),
                                                               block_space.local_space(ii).size(),
                                                               coupling_patterns_in_in[(ii, jj)])
                    coupling_matrices_out_out[(ii, jj)] = Matrix(block_space.local_space(jj).size(),
                                                                 block_space.local_space(jj).size(),
                                                                 coupling_patterns_out_out[(ii, jj)])
                    coupling_matrices_in_out[(ii, jj)] = Matrix(block_space.local_space(ii).size(),
                                                                block_space.local_space(jj).size(),
                                                                coupling_patterns_in_out[(ii, jj)])
                    coupling_matrices_out_in[(ii, jj)] = Matrix(block_space.local_space(jj).size(),
                                                                block_space.local_space(ii).size(),
                                                                coupling_patterns_out_in[(ii, jj)])

        def assemble_local_contributions(subdomain):
            ipdg_operator = make_elliptic_swipdg_matrix_operator(lambda_, kappa, local_all_neumann_boundary_info,
                                                                 local_matrices[subdomain],
                                                                 block_space.local_space(subdomain))
            l2_functional = make_l2_volume_vector_functional(f, local_vectors[subdomain],
                                                             block_space.local_space(subdomain))
            local_assembler = make_system_assembler(block_space.local_space(subdomain))
            local_assembler.append(ipdg_operator)
            local_assembler.append(l2_functional)
            local_assembler.assemble()

        for ii in range(grid.num_subdomains):
            assemble_local_contributions(ii)

        local_ipdg_coupling_operator = make_local_elliptic_swipdg_coupling_operator(lambda_, kappa)

        def assemble_coupling_contributions(subdomain, neighboring_subdomain):
            coupling_assembler = block_space.coupling_assembler(subdomain, neighboring_subdomain)
            coupling_assembler.append(local_ipdg_coupling_operator,
                                      coupling_matrices_in_in[(subdomain, neighboring_subdomain)],
                                      coupling_matrices_out_out[(subdomain, neighboring_subdomain)],
                                      coupling_matrices_in_out[(subdomain, neighboring_subdomain)],
                                      coupling_matrices_out_in[(subdomain, neighboring_subdomain)])
            coupling_assembler.assemble()

        for ii in range(grid.num_subdomains):
            for jj in grid.neighboring_subdomains(ii):
                if ii < jj:  # Assemble primally (visit each coupling only once).
                    assemble_coupling_contributions(ii, jj)

        local_ipdg_boundary_operator = make_local_elliptic_swipdg_boundary_operator(lambda_, kappa)
        apply_on_dirichlet_intersections = make_apply_on_dirichlet_intersections(boundary_info)

        def assemble_boundary_contributions(subdomain):
            boundary_assembler = block_space.boundary_assembler(subdomain)
            boundary_assembler.append(local_ipdg_boundary_operator,
                                      boundary_matrices[subdomain],
                                      apply_on_dirichlet_intersections)
            boundary_assembler.assemble()

        for ii in grid.boundary_subdomains():
            assemble_boundary_contributions(ii)

        global_pattern = SparsityPatternDefault(block_space.mapper.size)
        for ii in range(grid.num_subdomains):
            block_space.mapper.copy_local_to_global(local_patterns[ii], ii, global_pattern)
            if ii in grid.boundary_subdomains():
                block_space.mapper.copy_local_to_global(boundary_patterns[ii], ii, global_pattern)
            for jj in grid.neighboring_subdomains(ii):
                if ii < jj:  # Assemble primally (visit each coupling only once).
                    block_space.mapper.copy_local_to_global(coupling_patterns_in_in[(ii, jj)], ii, ii, global_pattern)
                    block_space.mapper.copy_local_to_global(coupling_patterns_out_out[(ii, jj)], jj, jj, global_pattern)
                    block_space.mapper.copy_local_to_global(coupling_patterns_in_out[(ii, jj)], ii, jj, global_pattern)
                    block_space.mapper.copy_local_to_global(coupling_patterns_out_in[(ii, jj)], jj, ii, global_pattern)

        system_matrix = Matrix(block_space.mapper.size, block_space.mapper.size, global_pattern)
        rhs_vector = Vector(block_space.mapper.size, 0.)
        for ii in range(grid.num_subdomains):
            block_space.mapper.copy_local_to_global(local_matrices[ii], local_patterns[ii], ii, system_matrix)
            block_space.mapper.copy_local_to_global(local_vectors[ii], ii, rhs_vector)
            if ii in grid.boundary_subdomains():
                block_space.mapper.copy_local_to_global(boundary_matrices[ii], boundary_patterns[ii], ii, ii, system_matrix)
            for jj in grid.neighboring_subdomains(ii):
                if ii < jj:  # Assemble primally (visit each coupling only once).
                    block_space.mapper.copy_local_to_global(coupling_matrices_in_in[(ii, jj)],
                                                            coupling_patterns_in_in[(ii, jj)],
                                                            ii, ii, system_matrix)
                    block_space.mapper.copy_local_to_global(coupling_matrices_out_out[(ii, jj)],
                                                            coupling_patterns_out_out[(ii, jj)],
                                                            jj, jj, system_matrix)
                    block_space.mapper.copy_local_to_global(coupling_matrices_in_out[(ii, jj)],
                                                            coupling_patterns_in_out[(ii, jj)],
                                                            ii, jj, system_matrix)
                    block_space.mapper.copy_local_to_global(coupling_matrices_out_in[(ii, jj)],
                                                            coupling_patterns_out_in[(ii, jj)],
                                                            jj, ii, system_matrix)

        op = DuneXTMatrixOperator(system_matrix)
        mats = np.full((grid.num_subdomains, grid.num_subdomains), None)
        for ii in range(grid.num_subdomains):
            for jj in range(ii, grid.num_subdomains):
                if ii == jj:
                    mats[ii, ii] = Matrix(block_space.local_space(ii).size(),
                                          block_space.local_space(ii).size(),
                                          local_patterns[ii])
                elif (ii, jj) in coupling_matrices_in_out:
                    mats[ii, jj] = Matrix(block_space.local_space(ii).size(),
                                          block_space.local_space(jj).size(),
                                          coupling_patterns_in_out[(ii, jj)])
                    mats[jj, ii] = Matrix(block_space.local_space(jj).size(),
                                          block_space.local_space(ii).size(),
                                          coupling_patterns_out_in[(ii, jj)])

        for ii in range(grid.num_subdomains):
            for jj in range(ii, grid.num_subdomains):
                if ii == jj:
                    mats[ii, ii].axpy(1., local_matrices[ii])
                    if ii in boundary_matrices:
                        mats[ii, ii].axpy(1., boundary_matrices[ii])
                elif (ii, jj) in coupling_matrices_in_out:
                    mats[ii, ii].axpy(1., coupling_matrices_in_in[(ii, jj)])
                    mats[jj, jj].axpy(1., coupling_matrices_out_out[(ii, jj)])
                    mats[ii, jj].axpy(1., coupling_matrices_in_out[(ii, jj)])
                    mats[jj, ii].axpy(1., coupling_matrices_out_in[(ii, jj)])

        ops = np.full((grid.num_subdomains, grid.num_subdomains), None)
        for (ii, jj), mat in np.ndenumerate(mats):
            ops[ii, jj] = DuneXTMatrixOperator(mat,
                                               source_id='domain_{}'.format(jj),
                                               range_id='domain_{}'.format(ii)) if mat else None
        block_op = BlockOperator(ops)

        rhs = VectorFunctional(op.range.make_array([rhs_vector]))
        rhss = []
        for ii in range(grid.num_subdomains):
            rhss.append(ops[ii, ii].range.make_array([local_vectors[ii]]))
        block_rhs = VectorFunctional(block_op.range.make_array(rhss))

        return op, block_op, rhs, block_rhs
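    # Compared to discretize_lhs above, this variant additionally assembles
    # the local L2 volume functionals for f in the same sweep and therefore
    # also returns the right-hand sides: (op, block_op, rhs, block_rhs).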
def discretize(grid_and_problem_data, solver_options, mpi_comm):
    ################ Setup

    logger = getLogger('discretize_elliptic_block_swipdg.discretize')
    logger.info('discretizing ... ')

    grid, boundary_info = grid_and_problem_data['grid'], grid_and_problem_data[
        'boundary_info']
    local_all_dirichlet_boundary_info = make_subdomain_boundary_info(
        grid, {'type': 'xt.grid.boundaryinfo.alldirichlet'})
    local_subdomains, num_local_subdomains, num_global_subdomains = _get_subdomains(
        grid)
    local_all_neumann_boundary_info = make_subdomain_boundary_info(
        grid, {'type': 'xt.grid.boundaryinfo.allneumann'})

    block_space = make_block_dg_space(grid)
    global_rt_space = make_rt_space(grid)
    subdomain_rt_spaces = [
        global_rt_space.restrict_to_dd_subdomain_view(grid, ii)
        for ii in range(num_global_subdomains)
    ]

    local_patterns = [
        block_space.local_space(ii).compute_pattern('face_and_volume')
        for ii in range(block_space.num_blocks)
    ]
    coupling_patterns = {
        'in_in': {},
        'out_out': {},
        'in_out': {},
        'out_in': {}
    }
    coupling_matrices = {
        'in_in': {},
        'out_out': {},
        'in_out': {},
        'out_in': {}
    }

    for ii in range(num_global_subdomains):
        ii_size = block_space.local_space(ii).size()
        for jj in grid.neighboring_subdomains(ii):
            jj_size = block_space.local_space(jj).size()
            if ii < jj:  # Assemble primally (visit each coupling only once).
                coupling_patterns['in_in'][(ii, jj)] = block_space.local_space(
                    ii).compute_pattern('face_and_volume')
                coupling_patterns['out_out'][(
                    ii, jj)] = block_space.local_space(jj).compute_pattern(
                        'face_and_volume')
                coupling_patterns['in_out'][(
                    ii, jj)] = block_space.compute_coupling_pattern(
                        ii, jj, 'face')
                coupling_patterns['out_in'][(
                    ii, jj)] = block_space.compute_coupling_pattern(
                        jj, ii, 'face')
                coupling_matrices['in_in'][(ii, jj)] = Matrix(
                    ii_size, ii_size, coupling_patterns['in_in'][(ii, jj)])
                coupling_matrices['out_out'][(ii, jj)] = Matrix(
                    jj_size, jj_size, coupling_patterns['out_out'][(ii, jj)])
                coupling_matrices['in_out'][(ii, jj)] = Matrix(
                    ii_size, jj_size, coupling_patterns['in_out'][(ii, jj)])
                coupling_matrices['out_in'][(ii, jj)] = Matrix(
                    jj_size, ii_size, coupling_patterns['out_in'][(ii, jj)])
    boundary_patterns = {}
    for ii in grid.boundary_subdomains():
        boundary_patterns[ii] = block_space.local_space(ii).compute_pattern(
            'face_and_volume')

    ################ Assemble LHS and RHS

    lambda_, kappa = grid_and_problem_data['lambda'], grid_and_problem_data[
        'kappa']
    if isinstance(lambda_, dict):
        lambda_funcs = lambda_['functions']
        lambda_coeffs = lambda_['coefficients']
    else:
        lambda_funcs = [
            lambda_,
        ]
        lambda_coeffs = [
            1,
        ]

    logger.debug('block op ... ')
    ops, block_ops = zip(*(discretize_lhs(
        lf, grid, block_space, local_patterns, boundary_patterns,
        coupling_matrices, kappa, local_all_neumann_boundary_info,
        boundary_info, coupling_patterns, solver_options)
                           for lf in lambda_funcs))
    global_operator = LincombOperator(ops,
                                      lambda_coeffs,
                                      solver_options=solver_options,
                                      name='GlobalOperator')
    logger.debug('block op global done ')
    block_op = LincombOperator(block_ops,
                               lambda_coeffs,
                               name='lhs',
                               solver_options=solver_options)
    logger.debug('block op done ')

    f = grid_and_problem_data['f']
    if isinstance(f, dict):
        f_funcs = f['functions']
        f_coeffs = f['coefficients']
    else:
        f_funcs = [
            f,
        ]
        f_coeffs = [
            1,
        ]
    rhss, block_rhss = zip(*(discretize_rhs(
        ff, grid, block_space, global_operator, block_ops, block_op)
                             for ff in f_funcs))
    global_rhs = LincombOperator(rhss, f_coeffs)
    block_rhs = LincombOperator(block_rhss, f_coeffs)

    solution_space = block_op.source

    ################ Assemble interpolation and reconstruction operators
    logger.info('discretizing interpolation ')

    # Oswald interpolation error operator
    oi_op = BlockDiagonalOperator([
        OswaldInterpolationErrorOperator(ii, block_op.source, grid,
                                         block_space)
        for ii in range(num_global_subdomains)
    ],
                                  name='oswald_interpolation_error')

    # Flux reconstruction operator
    fr_op = LincombOperator([
        BlockDiagonalOperator([
            FluxReconstructionOperator(ii, block_op.source, grid, block_space,
                                       global_rt_space, subdomain_rt_spaces,
                                       lambda_xi, kappa)
            for ii in range(num_global_subdomains)
        ]) for lambda_xi in lambda_funcs
    ],
                            lambda_coeffs,
                            name='flux_reconstruction')

    ################ Assemble inner products and error estimator operators
    logger.info('discretizing inner products ')

    lambda_bar, lambda_hat = grid_and_problem_data[
        'lambda_bar'], grid_and_problem_data['lambda_hat']
    mu_bar, mu_hat = grid_and_problem_data['mu_bar'], grid_and_problem_data[
        'mu_hat']
    operators = {}
    local_projections = []
    local_rt_projections = []
    local_oi_projections = []
    local_div_ops = []
    local_l2_products = []
    data = dict(grid=grid,
                block_space=block_space,
                local_projections=local_projections,
                local_rt_projections=local_rt_projections,
                local_oi_projections=local_oi_projections,
                local_div_ops=local_div_ops,
                local_l2_products=local_l2_products)

    for ii in range(num_global_subdomains):

        neighborhood = grid.neighborhood_of(ii)

        ################ Assemble local inner products

        local_dg_space = block_space.local_space(ii)
        # we want a larger pattern to allow for axpy with other matrices
        tmp_local_matrix = Matrix(
            local_dg_space.size(), local_dg_space.size(),
            local_dg_space.compute_pattern('face_and_volume'))
        local_energy_product_ops = []
        local_energy_product_coeffs = []
        for func, coeff in zip(lambda_funcs, lambda_coeffs):
            local_energy_product_ops.append(
                make_elliptic_matrix_operator(func,
                                              kappa,
                                              tmp_local_matrix.copy(),
                                              local_dg_space,
                                              over_integrate=0))
            local_energy_product_coeffs.append(coeff)
            local_energy_product_ops.append(
                make_penalty_product_matrix_operator(
                    grid,
                    ii,
                    local_all_dirichlet_boundary_info,
                    local_dg_space,
                    func,
                    kappa,
                    over_integrate=0))
            local_energy_product_coeffs.append(coeff)
        local_l2_product = make_l2_matrix_operator(tmp_local_matrix.copy(),
                                                   local_dg_space)
        del tmp_local_matrix
        local_assembler = make_system_assembler(local_dg_space)
        for local_product_op in local_energy_product_ops:
            local_assembler.append(local_product_op)
        local_assembler.append(local_l2_product)
        local_assembler.assemble()
        local_energy_product_name = 'local_energy_dg_product_{}'.format(ii)
        local_energy_product = LincombOperator([
            DuneXTMatrixOperator(op.matrix(),
                                 source_id='domain_{}'.format(ii),
                                 range_id='domain_{}'.format(ii))
            for op in local_energy_product_ops
        ],
                                               local_energy_product_coeffs,
                                               name=local_energy_product_name)
        operators[local_energy_product_name] = \
            local_energy_product.assemble(mu_bar).with_(name=local_energy_product_name)

        local_l2_product = DuneXTMatrixOperator(
            local_l2_product.matrix(),
            source_id='domain_{}'.format(ii),
            range_id='domain_{}'.format(ii))
        local_l2_products.append(local_l2_product)

        # assemble local elliptic product
        matrix = make_local_elliptic_matrix_operator(grid, ii, local_dg_space,
                                                     lambda_bar, kappa)
        matrix.assemble()
        local_elliptic_product = DuneXTMatrixOperator(
            matrix.matrix(),
            range_id='domain_{}'.format(ii),
            source_id='domain_{}'.format(ii))

        ################ Assemble local to global projections

        # assemble projection (solution space) ->  (ii space)
        local_projection = BlockProjectionOperator(block_op.source, ii)
        local_projections.append(local_projection)

        # assemble projection (RT spaces on neighborhoods of subdomains) ->  (local RT space on ii)
        ops = np.full(num_global_subdomains, None)
        for kk in neighborhood:
            component = grid.neighborhood_of(kk).index(ii)
            assert fr_op.range.subspaces[kk].subspaces[
                component].id == 'LOCALRT_{}'.format(ii)
            ops[kk] = BlockProjectionOperator(fr_op.range.subspaces[kk],
                                              component)
        local_rt_projection = BlockRowOperator(
            ops,
            source_spaces=fr_op.range.subspaces,
            name='local_rt_projection_{}'.format(ii))
        local_rt_projections.append(local_rt_projection)

        # assemble projection (OI spaces on neighborhoods of subdomains) ->  (ii space)
        ops = np.full(num_global_subdomains, None)
        for kk in neighborhood:
            component = grid.neighborhood_of(kk).index(ii)
            assert oi_op.range.subspaces[kk].subspaces[
                component].id == 'domain_{}'.format(ii)
            ops[kk] = BlockProjectionOperator(oi_op.range.subspaces[kk],
                                              component)
        local_oi_projection = BlockRowOperator(
            ops,
            source_spaces=oi_op.range.subspaces,
            name='local_oi_projection_{}'.format(ii))
        local_oi_projections.append(local_oi_projection)

        ################ Assemble additional operators for error estimation

        # assemble local divergence operator
        local_rt_space = global_rt_space.restrict_to_dd_subdomain_view(
            grid, ii)
        local_div_op = make_divergence_matrix_operator_on_subdomain(
            grid, ii, local_dg_space, local_rt_space)
        local_div_op.assemble()
        local_div_op = DuneXTMatrixOperator(
            local_div_op.matrix(),
            source_id='LOCALRT_{}'.format(ii),
            range_id='domain_{}'.format(ii),
            name='local_divergence_{}'.format(ii))
        local_div_ops.append(local_div_op)

        ################ Assemble error estimator operators -- Nonconformity

        operators['nc_{}'.format(ii)] = \
            Concatenation([local_oi_projection.T, local_elliptic_product, local_oi_projection],
                          name='nonconformity_{}'.format(ii))

        ################ Assemble error estimator operators -- Residual

        if len(f_funcs) == 1:
            assert f_coeffs[0] == 1
            local_div = Concatenation([local_div_op, local_rt_projection])
            local_rhs = VectorFunctional(
                block_rhs.operators[0]._array._blocks[ii])

            operators['r_fd_{}'.format(ii)] = \
                Concatenation([local_rhs, local_div], name='r1_{}'.format(ii))

            operators['r_dd_{}'.format(ii)] = \
                Concatenation([local_div.T, local_l2_product, local_div], name='r2_{}'.format(ii))

        ################ Assemble error estimator operators -- Diffusive flux

        operators['df_aa_{}'.format(ii)] = LincombOperator(
            [
                assemble_estimator_diffusive_flux_aa(
                    lambda_xi, lambda_xi_prime, grid, ii, block_space,
                    lambda_hat, kappa, solution_space)
                for lambda_xi in lambda_funcs
                for lambda_xi_prime in lambda_funcs
            ], [
                ProductParameterFunctional([c1, c2]) for c1 in lambda_coeffs
                for c2 in lambda_coeffs
            ],
            name='diffusive_flux_aa_{}'.format(ii))

        operators['df_bb_{}'.format(
            ii)] = assemble_estimator_diffusive_flux_bb(
                grid, ii, subdomain_rt_spaces, lambda_hat, kappa,
                local_rt_projection)

        operators['df_ab_{}'.format(ii)] = LincombOperator(
            [
                assemble_estimator_diffusive_flux_ab(
                    lambda_xi, grid, ii, block_space, subdomain_rt_spaces,
                    lambda_hat, kappa, local_rt_projection, local_projection)
                for lambda_xi in lambda_funcs
            ],
            lambda_coeffs,
            name='diffusive_flux_ab_{}'.format(ii))

    ################ Final assembly
    logger.info('final assembly ')

    # instantiate error estimator
    min_diffusion_evs = np.array([
        min_diffusion_eigenvalue(grid, ii, lambda_hat, kappa)
        for ii in range(num_global_subdomains)
    ])
    subdomain_diameters = np.array(
        [subdomain_diameter(grid, ii) for ii in range(num_global_subdomains)])
    if len(f_funcs) == 1:
        assert f_coeffs[0] == 1
        local_eta_rf_squared = np.array([
            apply_l2_product(grid,
                             ii,
                             f_funcs[0],
                             f_funcs[0],
                             over_integrate=2)
            for ii in range(num_global_subdomains)
        ])
    else:
        local_eta_rf_squared = None
    estimator = EllipticEstimator(grid,
                                  min_diffusion_evs,
                                  subdomain_diameters,
                                  local_eta_rf_squared,
                                  lambda_coeffs,
                                  mu_bar,
                                  mu_hat,
                                  fr_op,
                                  oswald_interpolation_error=oi_op,
                                  mpi_comm=mpi_comm)
    l2_product = BlockDiagonalOperator(local_l2_products)

    # instantiate discretization
    neighborhoods = [
        grid.neighborhood_of(ii) for ii in range(num_global_subdomains)
    ]
    local_boundary_info = make_subdomain_boundary_info(
        grid_and_problem_data['grid'],
        {'type': 'xt.grid.boundaryinfo.alldirichlet'})
    d = DuneDiscretization(global_operator=global_operator,
                           global_rhs=global_rhs,
                           neighborhoods=neighborhoods,
                           enrichment_data=(grid, local_boundary_info, lambda_,
                                            kappa, f, block_space),
                           operator=block_op,
                           rhs=block_rhs,
                           visualizer=DuneGDTVisualizer(block_space),
                           operators=operators,
                           products={'l2': l2_product},
                           estimator=estimator,
                           data=data)
    parameter_range = grid_and_problem_data['parameter_range']
    logger.info('final assembly B')
    d = d.with_(parameter_space=CubicParameterSpace(
        d.parameter_type, parameter_range[0], parameter_range[1]))
    logger.info('final assembly C')
    return d, data
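# A minimal usage sketch: the operators registered above under the keys
# 'nc_{ii}', 'r_fd_{ii}', 'r_dd_{ii}', 'df_aa_{ii}', 'df_bb_{ii}' and
# 'df_ab_{ii}' are what the EllipticEstimator evaluates, presumably via
#
#     d, data = discretize(grid_and_problem_data, solver_options, mpi_comm)
#     U = d.solve(mu)
#     eta = d.estimate(U, mu=mu)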
def discretize(grid_and_problem_data):
    logger = getLogger('discretize_elliptic.discretize_block_SWIPDG')
    logger.info('discretizing ... ')

    grid, boundary_info, inner_boundary_id = (grid_and_problem_data['grid'],
                                              grid_and_problem_data['boundary_info'],
                                              grid_and_problem_data['inner_boundary_id'])
    local_all_dirichlet_boundary_info = make_subdomain_boundary_info(grid, {'type': 'xt.grid.boundaryinfo.alldirichlet'})
    local_all_neumann_boundary_info = make_subdomain_boundary_info(grid, {'type': 'xt.grid.boundaryinfo.allneumann'})
    neighborhood_boundary_info = make_subdomain_boundary_info(
        grid,
        {'type': 'xt.grid.boundaryinfo.boundarysegmentindexbased',
         'default': 'dirichlet',
         'neumann': '[{} {}]'.format(inner_boundary_id, inner_boundary_id+1)})

    affine_lambda, kappa, f = (grid_and_problem_data['lambda'],
                               grid_and_problem_data['kappa'],
                               grid_and_problem_data['f'])
    lambda_bar, lambda_hat = grid_and_problem_data['lambda_bar'], grid_and_problem_data['lambda_hat']
    mu_bar, mu_hat, parameter_range = (grid_and_problem_data['mu_bar'],
                                       grid_and_problem_data['mu_hat'],
                                       grid_and_problem_data['parameter_range'])

    block_space = make_block_space(grid)

    local_patterns = [block_space.local_space(ii).compute_pattern('face_and_volume')
                      for ii in range(block_space.num_blocks)]
    coupling_patterns_in_in = {}
    coupling_patterns_out_out = {}
    coupling_patterns_in_out = {}
    coupling_patterns_out_in = {}
    for ii in range(grid.num_subdomains):
        for jj in grid.neighboring_subdomains(ii):
            if ii < jj:  # Assemble primally (visit each coupling only once).
                coupling_patterns_in_in[(ii, jj)] = block_space.local_space(ii).compute_pattern('face_and_volume')
                coupling_patterns_out_out[(ii, jj)] = block_space.local_space(jj).compute_pattern('face_and_volume')
                coupling_patterns_in_out[(ii, jj)] = block_space.compute_coupling_pattern(ii, jj, 'face')
                coupling_patterns_out_in[(ii, jj)] = block_space.compute_coupling_pattern(jj, ii, 'face')
    boundary_patterns = {}
    for ii in grid.boundary_subdomains():
        boundary_patterns[ii] = block_space.local_space(ii).compute_pattern('face_and_volume')

        apply_on_dirichlet_intersections = make_apply_on_dirichlet_intersections(boundary_info)

        def assemble_boundary_contributions(subdomain):
            boundary_assembler = block_space.boundary_assembler(subdomain)
            boundary_assembler.append(local_ipdg_boundary_operator,
                                      boundary_matrices[subdomain],
                                      apply_on_dirichlet_intersections)
            boundary_assembler.assemble()

        for ii in grid.boundary_subdomains():
            assemble_boundary_contributions(ii)

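        # Merge all local, boundary and coupling patterns into a single global
        # sparsity pattern with respect to the concatenated block dof numbering.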
        global_pattern = SparsityPatternDefault(block_space.mapper.size)
        for ii in range(grid.num_subdomains):
            block_space.mapper.copy_local_to_global(local_patterns[ii], ii, global_pattern)
            if ii in grid.boundary_subdomains():
                block_space.mapper.copy_local_to_global(boundary_patterns[ii], ii, global_pattern)
            for jj in grid.neighboring_subdomains(ii):
                if ii < jj:  # Assemble primally (visit each coupling only once).
                    block_space.mapper.copy_local_to_global(coupling_patterns_in_in[(ii, jj)], ii, ii, global_pattern)
                    block_space.mapper.copy_local_to_global(coupling_patterns_out_out[(ii, jj)], jj, jj, global_pattern)
                    block_space.mapper.copy_local_to_global(coupling_patterns_in_out[(ii, jj)], ii, jj, global_pattern)
                    block_space.mapper.copy_local_to_global(coupling_patterns_out_in[(ii, jj)], jj, ii, global_pattern)

        system_matrix = Matrix(block_space.mapper.size, block_space.mapper.size, global_pattern)
        rhs_vector = Vector(block_space.mapper.size, 0.)
        for ii in range(grid.num_subdomains):
            block_space.mapper.copy_local_to_global(local_matrices[ii], local_patterns[ii], ii, system_matrix)
            block_space.mapper.copy_local_to_global(local_vectors[ii], ii, rhs_vector)
            if ii in grid.boundary_subdomains():
                block_space.mapper.copy_local_to_global(boundary_matrices[ii], boundary_patterns[ii], ii, ii, system_matrix)
            for jj in grid.neighboring_subdomains(ii):
                if ii < jj:  # Assemble primally (visit each coupling only once).
                    block_space.mapper.copy_local_to_global(coupling_matrices_in_in[(ii, jj)],
                                                            coupling_patterns_in_in[(ii, jj)],
                                                            ii, ii, system_matrix)
                    block_space.mapper.copy_local_to_global(coupling_matrices_out_out[(ii, jj)],
                                                            coupling_patterns_out_out[(ii, jj)],
                                                            jj, jj, system_matrix)
                    block_space.mapper.copy_local_to_global(coupling_matrices_in_out[(ii, jj)],
                                                            coupling_patterns_in_out[(ii, jj)],
                                                            ii, jj, system_matrix)
                    block_space.mapper.copy_local_to_global(coupling_matrices_out_in[(ii, jj)],
                                                            coupling_patterns_out_in[(ii, jj)],
                                                            jj, ii, system_matrix)

        op = DuneXTMatrixOperator(system_matrix)
        mats = np.full((grid.num_subdomains, grid.num_subdomains), None)
        for ii in range(grid.num_subdomains):
            for jj in range(ii, grid.num_subdomains):
                if ii == jj:
                    mats[ii, ii] = Matrix(block_space.local_space(ii).size(),
                                          block_space.local_space(ii).size(),
                                          local_patterns[ii])
                elif (ii, jj) in coupling_matrices_in_out:
                    mats[ii, jj] = Matrix(block_space.local_space(ii).size(),
                                          block_space.local_space(jj).size(),
                                          coupling_patterns_in_out[(ii, jj)])
                    mats[jj, ii] = Matrix(block_space.local_space(jj).size(),
                                          block_space.local_space(ii).size(),
                                          coupling_patterns_out_in[(ii, jj)])

        for ii in range(grid.num_subdomains):
            for jj in range(ii, grid.num_subdomains):
                if ii == jj:
                    mats[ii, ii].axpy(1., local_matrices[ii])
                    if ii in boundary_matrices:
                        mats[ii, ii].axpy(1., boundary_matrices[ii])
                elif (ii, jj) in coupling_matrices_in_out:
                    mats[ii, ii].axpy(1., coupling_matrices_in_in[(ii, jj)])
                    mats[jj, jj].axpy(1., coupling_matrices_out_out[(ii, jj)])
                    mats[ii, jj].axpy(1., coupling_matrices_in_out[(ii, jj)])
                    mats[jj, ii].axpy(1., coupling_matrices_out_in[(ii, jj)])

        ops = np.full((grid.num_subdomains, grid.num_subdomains), None)
        for (ii, jj), mat in np.ndenumerate(mats):
            ops[ii, jj] = DuneXTMatrixOperator(mat,
                                               source_id='domain_{}'.format(jj),
                                               range_id='domain_{}'.format(ii)) if mat else None
        block_op = BlockOperator(ops)

        rhs = VectorFunctional(op.range.make_array([rhs_vector]))
        rhss = []
        for ii in range(grid.num_subdomains):
            rhss.append(ops[ii, ii].range.make_array([local_vectors[ii]]))
        block_rhs = VectorFunctional(block_op.range.make_array(rhss))

        return op, block_op, rhs, block_rhs

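    # Affine decomposition: the lhs is assembled once per affine component of
    # lambda and recombined parametrically below, i.e. A(mu) = sum_q theta_q(mu) A_q,
    # where the A_q are the assembled operators and the theta_q are lambda's
    # coefficients.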
    ops, block_ops, rhss, block_rhss = zip(*(discretize_lhs_for_lambda(l) for l in affine_lambda['functions']))
    rhs = rhss[0]
    block_rhs = block_rhss[0]

    lambda_coeffs = affine_lambda['coefficients']
    op = LincombOperator(ops, lambda_coeffs)
    block_op = LincombOperator(block_ops, lambda_coeffs, name='lhs')

    operators = {'global_op': op, 'global_rhs': rhs}
    global_rt_space = make_rt_space(grid)

    def assemble_oswald_interpolation_error():
        oi_ops = [OswaldInterpolationErrorOperator(ii, ii, ii, block_op.source, grid, block_space,
                                                   global_rt_space, neighborhood_boundary_info,
                                                   None, None, None, None)
                  for ii in range(grid.num_subdomains)]
        return BlockDiagonalOperator(oi_ops, name='oswald_interpolation_error')

    oi_op = assemble_oswald_interpolation_error()

    def assemble_flux_reconstruction(lambda_xi):
        fr_ops = [FluxReconstructionOperator(ii, ii, ii, block_op.source, grid, block_space,
                                             global_rt_space, neighborhood_boundary_info,
                                             lambda_hat, lambda_xi, lambda_xi, kappa)
                  for ii in range(grid.num_subdomains)]
        return BlockDiagonalOperator(fr_ops)

    fr_op = LincombOperator([assemble_flux_reconstruction(lambda_xi) for lambda_xi in affine_lambda['functions']],
                            lambda_coeffs, name='flux_reconstruction')

    spaces = block_op.source.subspaces
    rt_spaces = fr_op.range.subspaces

    # assemble local products
    for ii in range(grid.num_subdomains):
        local_space = block_space.local_space(ii)
        # use the larger 'face_and_volume' pattern for the elliptic part, so it
        # can be combined (axpy) with the face-based penalty part
        tmp_local_matrix = Matrix(local_space.size(),
                                  local_space.size(),
                                  local_space.compute_pattern('face_and_volume'))
        local_product_ops = []
        local_product_coeffs = []
        for func, coeff in zip(affine_lambda['functions'], affine_lambda['coefficients']):
            local_product_ops.append(make_elliptic_matrix_operator(
                func, kappa, tmp_local_matrix.copy(), local_space, over_integrate=0))
            local_product_coeffs.append(coeff)
            local_product_ops.append(make_penalty_product_matrix_operator(
                grid, ii, local_all_dirichlet_boundary_info,
                local_space,
                func, kappa, over_integrate=0))
            local_product_coeffs.append(coeff)
        del tmp_local_matrix
        local_assembler = make_system_assembler(local_space)
        for local_product_op in local_product_ops:
            local_assembler.append(local_product_op)
        local_assembler.assemble()
        local_product_name = 'local_energy_dg_product_{}'.format(ii)
        local_product = LincombOperator([DuneXTMatrixOperator(op.matrix(),
                                                              source_id='domain_{}'.format(ii),
                                                              range_id='domain_{}'.format(ii))
                                         for op in local_product_ops],
                                        local_product_coeffs,
                                        name=local_product_name)
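        # assemble(mu_bar) evaluates the affine combination at the fixed
        # parameter mu_bar, so the stored product is a single non-parametric
        # matrix operator.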
        operators[local_product_name] = local_product.assemble(mu_bar).with_(name=local_product_name)

    # assemble error estimator
    for ii in range(grid.num_subdomains):

        neighborhood = grid.neighborhood_of(ii)

        def assemble_estimator_noconformity():
            nc_ops = np.full((grid.num_subdomains,) * 2, None)
            for jj in neighborhood:
                for kk in neighborhood:
                    nc_ops[jj, kk] = NonconformityOperator(ii, jj, kk, block_op.source, grid, block_space,
                                                           global_rt_space, neighborhood_boundary_info,
                                                           lambda_bar, None, None, kappa)
            return BlockOperator(nc_ops, range_spaces=oi_op.range.subspaces, source_spaces=oi_op.range.subspaces,
                                 name='nonconformity_{}'.format(ii))

        def assemble_estimator_diffusive_flux_aa(lambda_xi, lambda_xi_prime):
            df_ops = np.full((grid.num_subdomains,) * 2, None)
            df_ops[ii, ii] = DiffusiveFluxOperatorAA(ii, ii, ii, block_op.source, grid, block_space,
                                                     global_rt_space, neighborhood_boundary_info,
                                                     lambda_hat, lambda_xi, lambda_xi_prime, kappa)

            return BlockOperator(df_ops, range_spaces=spaces, source_spaces=spaces)

        def assemble_estimator_diffusive_flux_bb():
            df_ops = np.full((grid.num_subdomains,) * 2, None)
            for jj in neighborhood:
                for kk in neighborhood:
                    df_ops[jj, kk] = DiffusiveFluxOperatorBB(ii, jj, kk, block_op.source, grid, block_space,
                                                             global_rt_space, neighborhood_boundary_info,
                                                             lambda_hat, None, None, kappa)

            return BlockOperator(df_ops, range_spaces=rt_spaces, source_spaces=rt_spaces,
                                 name='diffusive_flux_bb_{}'.format(ii))

        def assemble_estimator_diffusive_flux_ab(lambda_xi):
            df_ops = np.full((grid.num_subdomains,) * 2, None)
            for kk in neighborhood:
                df_ops[ii, kk] = DiffusiveFluxOperatorAB(ii, ii, kk, block_op.source, grid, block_space,
                                                         global_rt_space, neighborhood_boundary_info,
                                                         lambda_hat, lambda_xi, None, kappa)

            return BlockOperator(df_ops, range_spaces=spaces, source_spaces=rt_spaces)

        def assemble_estimator_residual():
            r2_ops = np.full((grid.num_subdomains,) * 2, None)
            for jj in neighborhood:
                for kk in neighborhood:
                    r2_ops[jj, kk] = ResidualPartOperator(ii, jj, kk, block_op.source, grid, block_space,
                                                          global_rt_space, neighborhood_boundary_info,
                                                          lambda_hat, None, None, kappa)

            return BlockOperator(r2_ops, range_spaces=rt_spaces, source_spaces=rt_spaces, name='residual_{}'.format(ii))

        def assemble_estimator_residual_functional():
            r1_ops = np.full((1, grid.num_subdomains,), None)
            for jj in neighborhood:
                r1_ops[0, jj] = ResidualPartFunctional(f, ii, jj, jj, block_op.source, grid, block_space,
                                                       global_rt_space, neighborhood_boundary_info,
                                                       lambda_hat, None, None, kappa)
            return BlockOperator(r1_ops, source_spaces=rt_spaces, name='residual_functional_{}'.format(ii))

        operators['nc_{}'.format(ii)] = assemble_estimator_noconformity()

        operators['r1_{}'.format(ii)] = assemble_estimator_residual_functional()
        operators['r2_{}'.format(ii)] = assemble_estimator_residual()

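        # The diffusive flux aa term is bilinear in lambda, hence the double sum
        # over all pairs of affine components, with ProductParameterFunctional
        # multiplying the two corresponding coefficients.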
        operators['df_aa_{}'.format(ii)] = LincombOperator(
            [assemble_estimator_diffusive_flux_aa(lambda_xi, lambda_xi_prime)
             for lambda_xi in affine_lambda['functions']
             for lambda_xi_prime in affine_lambda['functions']],
            [ProductParameterFunctional([c1, c2])
             for c1 in lambda_coeffs
             for c2 in lambda_coeffs],
            name='diffusive_flux_aa_{}'.format(ii))
        operators['df_bb_{}'.format(ii)] = assemble_estimator_diffusive_flux_bb()
        operators['df_ab_{}'.format(ii)] = LincombOperator(
            [assemble_estimator_diffusive_flux_ab(lambda_xi) for lambda_xi in affine_lambda['functions']],
            lambda_coeffs,
            name='diffusive_flux_ab_{}'.format(ii)
        )

    min_diffusion_evs = np.array([min_diffusion_eigenvalue(grid, ii, lambda_hat, kappa)
                                  for ii in range(grid.num_subdomains)])
    subdomain_diameters = np.array([subdomain_diameter(grid, ii) for ii in range(grid.num_subdomains)])
    local_eta_rf_squared = np.array([apply_l2_product(grid, ii, f, f, over_integrate=2)
                                     for ii in range(grid.num_subdomains)])
    estimator = Estimator(min_diffusion_evs, subdomain_diameters, local_eta_rf_squared, lambda_coeffs, mu_bar, mu_hat,
                          fr_op, oi_op)

    neighborhoods = [grid.neighborhood_of(ii) for ii in range(grid.num_subdomains)]
    local_boundary_info = local_all_dirichlet_boundary_info  # same all-Dirichlet boundary info as above
    d = DuneDiscretization(block_op, block_rhs,
                           neighborhoods,
                           (grid, local_boundary_info, affine_lambda, kappa, f, block_space),
                           visualizer=DuneGDTVisualizer(block_space),
                           operators=operators, estimator=estimator)
    d = d.with_(parameter_space=CubicParameterSpace(d.parameter_type, parameter_range[0], parameter_range[1]))

    return d, block_space
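# A minimal usage sketch (assuming grid_and_problem_data is a dict providing
# all keys accessed above, e.g. 'grid', 'boundary_info', 'inner_boundary_id',
# 'lambda', 'kappa', 'f', 'lambda_bar', 'lambda_hat', 'mu_bar', 'mu_hat' and
# 'parameter_range'):
#
#     d, block_space = discretize(grid_and_problem_data)
#     mu = d.parameter_space.sample_randomly(1)[0]
#     U = d.solve(mu)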
def make_matrix_operator(mat, ID):
    return DuneXTMatrixOperator(mat, source_id=ID, range_id=ID)
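# Usage sketch (assuming mat is an assembled dune-xt matrix belonging to
# subdomain 0):
#
#     op = make_matrix_operator(mat, 'domain_0')
#
# The shared id makes the source and range of the wrapped operator compatible
# with the corresponding 'domain_0' blocks of the block operators above.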
def discretize(grid_and_problem_data, T, nt):
    d, d_data = discretize_ell(grid_and_problem_data)
    assert isinstance(d.parameter_space, CubicParameterSpace)
    parameter_range = grid_and_problem_data['parameter_range']
    block_space = d_data['block_space']
    # assemble global L2 product
    # copy the global operator's matrix to obtain a matching sparsity pattern,
    # then zero it out
    l2_mat = d.global_operator.operators[0].matrix.copy()
    l2_mat.scal(0.)
    for ii in range(block_space.num_blocks):
        local_l2_product = d.l2_product._blocks[ii, ii]
        block_space.mapper.copy_local_to_global(
            local_l2_product.matrix, local_l2_product.matrix.pattern(), ii,
            l2_mat)
    mass = d.l2_product
    operators = {
        k: v
        for k, v in d.operators.items() if k not in d.special_operators
    }
    global_mass = DuneXTMatrixOperator(l2_mat)

    local_div_ops = d_data['local_div_ops']
    local_l2_products = d_data['local_l2_products']
    local_projections = d_data['local_projections']
    local_rt_projections = d_data['local_rt_projections']

    for ii in range(d_data['grid'].num_subdomains):

        local_div = Concatenation(
            [local_div_ops[ii], local_rt_projections[ii]])

        operators['r_ud_{}'.format(ii)] = \
            Concatenation([local_projections[ii].T, local_l2_products[ii], local_div], name='r_ud_{}'.format(ii))

        operators['r_l2_{}'.format(ii)] = \
            Concatenation([local_projections[ii].T, local_l2_products[ii], local_projections[ii]],
                          name='r_l2_{}'.format(ii))
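        # Note: Concatenation applies its operators from last to first (pyMOR's
        # composition convention), so r_ud first restricts to the local RT space
        # and applies the divergence, then the local L2 product, then maps back
        # via the transposed projection.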

    e = d.estimator
    estimator = ParabolicEstimator(e.min_diffusion_evs, e.subdomain_diameters,
                                   e.local_eta_rf_squared, e.lambda_coeffs,
                                   e.mu_bar, e.mu_hat, e.flux_reconstruction,
                                   e.oswald_interpolation_error)

    d = InstationaryDuneDiscretization(
        d.global_operator,
        d.global_rhs,
        global_mass,
        T,
        d.operator.source.zeros(1),
        d.operator,
        d.rhs,
        mass=mass,
        time_stepper=ImplicitEulerTimeStepper(nt=nt,
                                              solver_options='operator'),
        products=d.products,
        operators=operators,
        estimator=estimator,
        visualizer=DuneGDTVisualizer(block_space))
    d = d.with_(parameter_space=CubicParameterSpace(
        d.parameter_type, parameter_range[0], parameter_range[1]))

    return d, d_data
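# A minimal usage sketch for the instationary case (assuming the same
# grid_and_problem_data as above, a final time T and nt implicit Euler steps):
#
#     d, d_data = discretize(grid_and_problem_data, T=1., nt=10)
#     mu = d.parameter_space.sample_randomly(1)[0]
#     U = d.solve(mu)  # trajectory of the time-stepping scheme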