def action_BlockOperatorBase(self, op):
    if op.blocked_range:
        if self.range_basis is not None:
            range_bases = self.range_basis._blocks
        else:
            range_bases = [None] * len(op.range.subspaces)
    else:
        range_bases = [self.range_basis]
    if op.blocked_source:
        if self.source_basis is not None:
            source_bases = self.source_basis._blocks
        else:
            source_bases = [None] * len(op.source.subspaces)
    else:
        source_bases = [self.source_basis]

    projected_ops = np.array([[project(op.blocks[i, j], rb, sb)
                               for j, sb in enumerate(source_bases)]
                              for i, rb in enumerate(range_bases)])
    if self.range_basis is None and op.blocked_range:
        return BlockColumnOperator(np.sum(projected_ops, axis=1))
    elif self.source_basis is None and op.blocked_source:
        return BlockRowOperator(np.sum(projected_ops, axis=0))
    else:
        return np.sum(projected_ops)
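
# ---------------------------------------------------------------------------
# Hedged usage sketch for the rule above: projecting a 2x2 BlockOperator onto
# bases of its blocked source/range spaces via
# pymor.algorithms.projection.project, which dispatches to
# action_BlockOperatorBase for BlockOperatorBase subclasses. Assumes a pyMOR
# version matching the APIs used in this file; the random "bases" are for
# shape checking only, not orthonormalized reduced bases.
# ---------------------------------------------------------------------------
# import numpy as np
#
# from pymor.algorithms.projection import project
# from pymor.operators.block import BlockOperator
# from pymor.operators.numpy import NumpyMatrixOperator
#
# blocks = [[NumpyMatrixOperator(np.random.rand(4, 3)) for _ in range(2)]
#           for _ in range(2)]
# block_op = BlockOperator(blocks)
# range_basis = block_op.range.make_array(
#     [s.random(2) for s in block_op.range.subspaces])
# source_basis = block_op.source.make_array(
#     [s.random(2) for s in block_op.source.subspaces])
# # Both bases given: the rule sums the blockwise projections V_i^T A_ij U_j.
# reduced_op = project(block_op, range_basis, source_basis)
# assert reduced_op.range.dim == 2 and reduced_op.source.dim == 2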
def discretize(grid_and_problem_data, solver_options, mpi_comm):
    ################ Setup

    logger = getLogger('discretize_elliptic_block_swipdg.discretize')
    logger.info('discretizing ... ')

    grid, boundary_info = grid_and_problem_data['grid'], grid_and_problem_data['boundary_info']
    local_all_dirichlet_boundary_info = make_subdomain_boundary_info(
        grid, {'type': 'xt.grid.boundaryinfo.alldirichlet'})
    local_subdomains, num_local_subdomains, num_global_subdomains = _get_subdomains(grid)
    local_all_neumann_boundary_info = make_subdomain_boundary_info(
        grid, {'type': 'xt.grid.boundaryinfo.allneumann'})

    block_space = make_block_dg_space(grid)
    global_rt_space = make_rt_space(grid)
    subdomain_rt_spaces = [global_rt_space.restrict_to_dd_subdomain_view(grid, ii)
                           for ii in range(num_global_subdomains)]

    local_patterns = [block_space.local_space(ii).compute_pattern('face_and_volume')
                      for ii in range(block_space.num_blocks)]
    coupling_patterns = {'in_in': {}, 'out_out': {}, 'in_out': {}, 'out_in': {}}
    coupling_matrices = {'in_in': {}, 'out_out': {}, 'in_out': {}, 'out_in': {}}

    for ii in range(num_global_subdomains):
        ii_size = block_space.local_space(ii).size()
        for jj in grid.neighboring_subdomains(ii):
            jj_size = block_space.local_space(jj).size()
            if ii < jj:  # Assemble primally (visit each coupling only once).
                coupling_patterns['in_in'][(ii, jj)] = \
                    block_space.local_space(ii).compute_pattern('face_and_volume')
                coupling_patterns['out_out'][(ii, jj)] = \
                    block_space.local_space(jj).compute_pattern('face_and_volume')
                coupling_patterns['in_out'][(ii, jj)] = \
                    block_space.compute_coupling_pattern(ii, jj, 'face')
                coupling_patterns['out_in'][(ii, jj)] = \
                    block_space.compute_coupling_pattern(jj, ii, 'face')
                coupling_matrices['in_in'][(ii, jj)] = \
                    Matrix(ii_size, ii_size, coupling_patterns['in_in'][(ii, jj)])
                coupling_matrices['out_out'][(ii, jj)] = \
                    Matrix(jj_size, jj_size, coupling_patterns['out_out'][(ii, jj)])
                coupling_matrices['in_out'][(ii, jj)] = \
                    Matrix(ii_size, jj_size, coupling_patterns['in_out'][(ii, jj)])
                coupling_matrices['out_in'][(ii, jj)] = \
                    Matrix(jj_size, ii_size, coupling_patterns['out_in'][(ii, jj)])

    boundary_patterns = {}
    for ii in grid.boundary_subdomains():
        boundary_patterns[ii] = block_space.local_space(ii).compute_pattern('face_and_volume')

    ################ Assemble LHS and RHS

    lambda_, kappa = grid_and_problem_data['lambda'], grid_and_problem_data['kappa']
    if isinstance(lambda_, dict):
        lambda_funcs = lambda_['functions']
        lambda_coeffs = lambda_['coefficients']
    else:
        lambda_funcs = [lambda_, ]
        lambda_coeffs = [1, ]

    logger.debug('block op ... ')
    ops, block_ops = zip(*(discretize_lhs(lf, grid, block_space, local_patterns,
                                          boundary_patterns, coupling_matrices, kappa,
                                          local_all_neumann_boundary_info, boundary_info,
                                          coupling_patterns, solver_options)
                           for lf in lambda_funcs))
    global_operator = LincombOperator(ops, lambda_coeffs, solver_options=solver_options,
                                      name='GlobalOperator')
    logger.debug('block op global done ')
    block_op = LincombOperator(block_ops, lambda_coeffs, name='lhs',
                               solver_options=solver_options)
    logger.debug('block op done ')

    f = grid_and_problem_data['f']
    if isinstance(f, dict):
        f_funcs = f['functions']
        f_coeffs = f['coefficients']
    else:
        f_funcs = [f, ]
        f_coeffs = [1, ]
    rhss, block_rhss = zip(*(discretize_rhs(ff, grid, block_space, global_operator,
                                            block_ops, block_op)
                             for ff in f_funcs))
    global_rhs = LincombOperator(rhss, f_coeffs)
    block_rhs = LincombOperator(block_rhss, f_coeffs)

    solution_space = block_op.source

    ################ Assemble interpolation and reconstruction operators
    logger.info('discretizing interpolation ')

    # Oswald interpolation error operator
    oi_op = BlockDiagonalOperator(
        [OswaldInterpolationErrorOperator(ii, block_op.source, grid, block_space)
         for ii in range(num_global_subdomains)],
        name='oswald_interpolation_error')

    # Flux reconstruction operator
    fr_op = LincombOperator(
        [BlockDiagonalOperator(
            [FluxReconstructionOperator(ii, block_op.source, grid, block_space,
                                        global_rt_space, subdomain_rt_spaces, lambda_xi, kappa)
             for ii in range(num_global_subdomains)])
         for lambda_xi in lambda_funcs],
        lambda_coeffs,
        name='flux_reconstruction')

    ################ Assemble inner products and error estimator operators
    logger.info('discretizing inner products ')

    lambda_bar, lambda_hat = grid_and_problem_data['lambda_bar'], grid_and_problem_data['lambda_hat']
    mu_bar, mu_hat = grid_and_problem_data['mu_bar'], grid_and_problem_data['mu_hat']
    operators = {}
    local_projections = []
    local_rt_projections = []
    local_oi_projections = []
    local_div_ops = []
    local_l2_products = []
    data = dict(grid=grid,
                block_space=block_space,
                local_projections=local_projections,
                local_rt_projections=local_rt_projections,
                local_oi_projections=local_oi_projections,
                local_div_ops=local_div_ops,
                local_l2_products=local_l2_products)

    for ii in range(num_global_subdomains):

        neighborhood = grid.neighborhood_of(ii)

        ################ Assemble local inner products

        local_dg_space = block_space.local_space(ii)
        # we want a larger pattern to allow for axpy with other matrices
        tmp_local_matrix = Matrix(local_dg_space.size(), local_dg_space.size(),
                                  local_dg_space.compute_pattern('face_and_volume'))
        local_energy_product_ops = []
        local_energy_product_coeffs = []
        for func, coeff in zip(lambda_funcs, lambda_coeffs):
            local_energy_product_ops.append(
                make_elliptic_matrix_operator(func, kappa, tmp_local_matrix.copy(),
                                              local_dg_space, over_integrate=0))
            local_energy_product_coeffs.append(coeff)
            local_energy_product_ops.append(
                make_penalty_product_matrix_operator(grid, ii, local_all_dirichlet_boundary_info,
                                                     local_dg_space, func, kappa, over_integrate=0))
            local_energy_product_coeffs.append(coeff)
        local_l2_product = make_l2_matrix_operator(tmp_local_matrix.copy(), local_dg_space)
        del tmp_local_matrix
        local_assembler = make_system_assembler(local_dg_space)
        for local_product_op in local_energy_product_ops:
            local_assembler.append(local_product_op)
        local_assembler.append(local_l2_product)
        local_assembler.assemble()
        local_energy_product_name = 'local_energy_dg_product_{}'.format(ii)
        local_energy_product = LincombOperator(
            [DuneXTMatrixOperator(op.matrix(),
                                  source_id='domain_{}'.format(ii),
                                  range_id='domain_{}'.format(ii))
             for op in local_energy_product_ops],
            local_energy_product_coeffs,
            name=local_energy_product_name)
        operators[local_energy_product_name] = \
            local_energy_product.assemble(mu_bar).with_(name=local_energy_product_name)

        local_l2_product = DuneXTMatrixOperator(local_l2_product.matrix(),
                                                source_id='domain_{}'.format(ii),
                                                range_id='domain_{}'.format(ii))
        local_l2_products.append(local_l2_product)

        # assemble local elliptic product
        matrix = make_local_elliptic_matrix_operator(grid, ii, local_dg_space, lambda_bar, kappa)
        matrix.assemble()
        local_elliptic_product = DuneXTMatrixOperator(matrix.matrix(),
                                                      range_id='domain_{}'.format(ii),
                                                      source_id='domain_{}'.format(ii))

        ################ Assemble local to global projections

        # assemble projection (solution space) -> (ii space)
        local_projection = BlockProjectionOperator(block_op.source, ii)
        local_projections.append(local_projection)

        # assemble projection (RT spaces on neighborhoods of subdomains) -> (local RT space on ii)
        ops = np.full(num_global_subdomains, None)
        for kk in neighborhood:
            component = grid.neighborhood_of(kk).index(ii)
            assert fr_op.range.subspaces[kk].subspaces[component].id == 'LOCALRT_{}'.format(ii)
            ops[kk] = BlockProjectionOperator(fr_op.range.subspaces[kk], component)
        local_rt_projection = BlockRowOperator(ops, source_spaces=fr_op.range.subspaces,
                                               name='local_rt_projection_{}'.format(ii))
        local_rt_projections.append(local_rt_projection)

        # assemble projection (OI spaces on neighborhoods of subdomains) -> (ii space)
        ops = np.full(num_global_subdomains, None)
        for kk in neighborhood:
            component = grid.neighborhood_of(kk).index(ii)
            assert oi_op.range.subspaces[kk].subspaces[component].id == 'domain_{}'.format(ii)
            ops[kk] = BlockProjectionOperator(oi_op.range.subspaces[kk], component)
        local_oi_projection = BlockRowOperator(ops, source_spaces=oi_op.range.subspaces,
                                               name='local_oi_projection_{}'.format(ii))
        local_oi_projections.append(local_oi_projection)

        ################ Assemble additional operators for error estimation

        # assemble local divergence operator
        local_rt_space = global_rt_space.restrict_to_dd_subdomain_view(grid, ii)
        local_div_op = make_divergence_matrix_operator_on_subdomain(grid, ii, local_dg_space,
                                                                    local_rt_space)
        local_div_op.assemble()
        local_div_op = DuneXTMatrixOperator(local_div_op.matrix(),
                                            source_id='LOCALRT_{}'.format(ii),
                                            range_id='domain_{}'.format(ii),
                                            name='local_divergence_{}'.format(ii))
        local_div_ops.append(local_div_op)

        ################ Assemble error estimator operators -- Nonconformity

        operators['nc_{}'.format(ii)] = \
            Concatenation([local_oi_projection.T, local_elliptic_product, local_oi_projection],
                          name='nonconformity_{}'.format(ii))

        ################ Assemble error estimator operators -- Residual

        if len(f_funcs) == 1:
            assert f_coeffs[0] == 1
            local_div = Concatenation([local_div_op, local_rt_projection])
            local_rhs = VectorFunctional(block_rhs.operators[0]._array._blocks[ii])
            operators['r_fd_{}'.format(ii)] = \
                Concatenation([local_rhs, local_div], name='r1_{}'.format(ii))
            operators['r_dd_{}'.format(ii)] = \
                Concatenation([local_div.T, local_l2_product, local_div],
                              name='r2_{}'.format(ii))

        ################ Assemble error estimator operators -- Diffusive flux

        operators['df_aa_{}'.format(ii)] = LincombOperator(
            [assemble_estimator_diffusive_flux_aa(lambda_xi, lambda_xi_prime, grid, ii,
                                                  block_space, lambda_hat, kappa, solution_space)
             for lambda_xi in lambda_funcs
             for lambda_xi_prime in lambda_funcs],
            [ProductParameterFunctional([c1, c2])
             for c1 in lambda_coeffs
             for c2 in lambda_coeffs],
            name='diffusive_flux_aa_{}'.format(ii))
        operators['df_bb_{}'.format(ii)] = \
            assemble_estimator_diffusive_flux_bb(grid, ii, subdomain_rt_spaces, lambda_hat,
                                                 kappa, local_rt_projection)
        operators['df_ab_{}'.format(ii)] = LincombOperator(
            [assemble_estimator_diffusive_flux_ab(lambda_xi, grid, ii, block_space,
                                                  subdomain_rt_spaces, lambda_hat, kappa,
                                                  local_rt_projection, local_projection)
             for lambda_xi in lambda_funcs],
            lambda_coeffs,
            name='diffusive_flux_ab_{}'.format(ii))

    ################ Final assembly
    logger.info('final assembly ')

    # instantiate error estimator
    min_diffusion_evs = np.array([min_diffusion_eigenvalue(grid, ii, lambda_hat, kappa)
                                  for ii in range(num_global_subdomains)])
    subdomain_diameters = np.array([subdomain_diameter(grid, ii)
                                    for ii in range(num_global_subdomains)])
    if len(f_funcs) == 1:
        assert f_coeffs[0] == 1
        local_eta_rf_squared = np.array(
            [apply_l2_product(grid, ii, f_funcs[0], f_funcs[0], over_integrate=2)
             for ii in range(num_global_subdomains)])
    else:
        local_eta_rf_squared = None
    estimator = EllipticEstimator(grid, min_diffusion_evs, subdomain_diameters,
                                  local_eta_rf_squared, lambda_coeffs, mu_bar, mu_hat, fr_op,
                                  oswald_interpolation_error=oi_op, mpi_comm=mpi_comm)
    l2_product = BlockDiagonalOperator(local_l2_products)

    # instantiate discretization
    neighborhoods = [grid.neighborhood_of(ii) for ii in range(num_global_subdomains)]
    local_boundary_info = make_subdomain_boundary_info(
        grid_and_problem_data['grid'], {'type': 'xt.grid.boundaryinfo.alldirichlet'})
    d = DuneDiscretization(global_operator=global_operator,
                           global_rhs=global_rhs,
                           neighborhoods=neighborhoods,
                           enrichment_data=(grid, local_boundary_info, lambda_, kappa, f,
                                            block_space),
                           operator=block_op,
                           rhs=block_rhs,
                           visualizer=DuneGDTVisualizer(block_space),
                           operators=operators,
                           products={'l2': l2_product},
                           estimator=estimator,
                           data=data)
    parameter_range = grid_and_problem_data['parameter_range']
    logger.info('final assembly B')
    d = d.with_(parameter_space=CubicParameterSpace(d.parameter_type, parameter_range[0],
                                                    parameter_range[1]))
    logger.info('final assembly C')

    return d, data
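
# ---------------------------------------------------------------------------
# Hedged sketch of the input contract of discretize(), inferred from the keys
# it reads above. All values are hypothetical placeholders; in the surrounding
# project they come from the dune-xt/dune-gdt grid and problem setup.
# ---------------------------------------------------------------------------
# example_grid_and_problem_data = {
#     'grid': None,            # DD subdomain grid (as consumed by make_subdomain_boundary_info)
#     'boundary_info': None,   # global boundary info configuration
#     'lambda': {'functions': [], 'coefficients': []},  # or a single function (coefficient 1 assumed)
#     'kappa': None,           # diffusion tensor
#     'f': None,               # right-hand side; same dict-or-function convention as 'lambda'
#     'lambda_bar': None,      # fixed diffusion for the elliptic products
#     'lambda_hat': None,      # fixed diffusion for the estimator
#     'mu_bar': None,          # parameter value matching lambda_bar
#     'mu_hat': None,          # parameter value matching lambda_hat
#     'parameter_range': (0.1, 1.),  # bounds for the CubicParameterSpace
# }
# d, data = discretize(example_grid_and_problem_data, solver_options=None, mpi_comm=None)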
def misc_operator_with_arrays_and_products_factory(n):
    if n == 0:
        from pymor.operators.constructions import ComponentProjection
        _, _, U, V, sp, rp = numpy_matrix_operator_with_arrays_and_products_factory(100, 10, 4, 3, n)
        op = ComponentProjection(np.random.randint(0, 100, 10), U.space)
        return op, _, U, V, sp, rp
    elif n == 1:
        from pymor.operators.constructions import ComponentProjection
        _, _, U, V, sp, rp = numpy_matrix_operator_with_arrays_and_products_factory(100, 0, 4, 3, n)
        op = ComponentProjection([], U.space)
        return op, _, U, V, sp, rp
    elif n == 2:
        from pymor.operators.constructions import ComponentProjection
        _, _, U, V, sp, rp = numpy_matrix_operator_with_arrays_and_products_factory(100, 3, 4, 3, n)
        op = ComponentProjection([3, 3, 3], U.space)
        return op, _, U, V, sp, rp
    elif n == 3:
        from pymor.operators.constructions import AdjointOperator
        op, _, U, V, sp, rp = numpy_matrix_operator_with_arrays_and_products_factory(100, 20, 4, 3, n)
        op = AdjointOperator(op, with_apply_inverse=True)
        return op, _, V, U, rp, sp
    elif n == 4:
        from pymor.operators.constructions import AdjointOperator
        op, _, U, V, sp, rp = numpy_matrix_operator_with_arrays_and_products_factory(100, 20, 4, 3, n)
        op = AdjointOperator(op, with_apply_inverse=False)
        return op, _, V, U, rp, sp
    elif 5 <= n <= 7:
        from pymor.operators.constructions import SelectionOperator
        from pymor.parameters.functionals import ProjectionParameterFunctional
        op0, _, U, V, sp, rp = numpy_matrix_operator_with_arrays_and_products_factory(30, 30, 4, 3, n)
        op1 = NumpyMatrixOperator(np.random.random((30, 30)))
        op2 = NumpyMatrixOperator(np.random.random((30, 30)))
        op = SelectionOperator([op0, op1, op2], ProjectionParameterFunctional('x'), [0.3, 0.6])
        return op, op.parameters.parse((n - 5) / 2), V, U, rp, sp
    elif n == 8:
        from pymor.operators.block import BlockDiagonalOperator
        op0, _, U0, V0, sp0, rp0 = numpy_matrix_operator_with_arrays_and_products_factory(10, 10, 4, 3, n)
        op1, _, U1, V1, sp1, rp1 = numpy_matrix_operator_with_arrays_and_products_factory(20, 20, 4, 3, n + 1)
        op2, _, U2, V2, sp2, rp2 = numpy_matrix_operator_with_arrays_and_products_factory(30, 30, 4, 3, n + 2)
        op = BlockDiagonalOperator([op0, op1, op2])
        sp = BlockDiagonalOperator([sp0, sp1, sp2])
        rp = BlockDiagonalOperator([rp0, rp1, rp2])
        U = op.source.make_array([U0, U1, U2])
        V = op.range.make_array([V0, V1, V2])
        return op, _, U, V, sp, rp
    elif n == 9:
        from pymor.operators.block import BlockDiagonalOperator, BlockOperator
        from pymor.parameters.functionals import ProjectionParameterFunctional
        op0a, _, U0, V0, sp0, rp0 = numpy_matrix_operator_with_arrays_and_products_factory(10, 10, 4, 3, n)
        op0b, _, _, _, _, _ = numpy_matrix_operator_with_arrays_and_products_factory(10, 10, 4, 3, n)
        op0c, _, _, _, _, _ = numpy_matrix_operator_with_arrays_and_products_factory(10, 10, 4, 3, n)
        op1, _, U1, V1, sp1, rp1 = numpy_matrix_operator_with_arrays_and_products_factory(20, 20, 4, 3, n + 1)
        op2a, _, _, _, _, _ = numpy_matrix_operator_with_arrays_and_products_factory(20, 10, 4, 3, n + 2)
        op2b, _, _, _, _, _ = numpy_matrix_operator_with_arrays_and_products_factory(20, 10, 4, 3, n + 2)
        op0 = (op0a * ProjectionParameterFunctional('p', 3, 0)
               + op0b * ProjectionParameterFunctional('p', 3, 1)
               + op0c * ProjectionParameterFunctional('p', 3, 1))
        op2 = (op2a * ProjectionParameterFunctional('p', 3, 0)
               + op2b * ProjectionParameterFunctional('q', 1))
        op = BlockOperator([[op0, op2],
                            [None, op1]])
        mu = op.parameters.parse({'p': [1, 2, 3], 'q': 4})
        sp = BlockDiagonalOperator([sp0, sp1])
        rp = BlockDiagonalOperator([rp0, rp1])
        U = op.source.make_array([U0, U1])
        V = op.range.make_array([V0, V1])
        return op, mu, U, V, sp, rp
    elif n == 10:
        from pymor.operators.block import BlockDiagonalOperator, BlockColumnOperator
        from pymor.parameters.functionals import ProjectionParameterFunctional
        op0, _, U0, V0, sp0, rp0 = numpy_matrix_operator_with_arrays_and_products_factory(10, 10, 4, 3, n)
        op1, _, U1, V1, sp1, rp1 = numpy_matrix_operator_with_arrays_and_products_factory(20, 20, 4, 3, n + 1)
        op2a, _, _, _, _, _ = numpy_matrix_operator_with_arrays_and_products_factory(20, 10, 4, 3, n + 2)
        op2b, _, _, _, _, _ = numpy_matrix_operator_with_arrays_and_products_factory(20, 10, 4, 3, n + 2)
        op2 = (op2a * ProjectionParameterFunctional('p', 3, 0)
               + op2b * ProjectionParameterFunctional('q', 1))
        op = BlockColumnOperator([op2, op1])
        mu = op.parameters.parse({'p': [1, 2, 3], 'q': 4})
        sp = sp1
        rp = BlockDiagonalOperator([rp0, rp1])
        U = U1
        V = op.range.make_array([V0, V1])
        return op, mu, U, V, sp, rp
    elif n == 11:
        from pymor.operators.block import BlockDiagonalOperator, BlockRowOperator
        from pymor.parameters.functionals import ProjectionParameterFunctional
        op0, _, U0, V0, sp0, rp0 = numpy_matrix_operator_with_arrays_and_products_factory(10, 10, 4, 3, n)
        op1, _, U1, V1, sp1, rp1 = numpy_matrix_operator_with_arrays_and_products_factory(20, 20, 4, 3, n + 1)
        op2a, _, _, _, _, _ = numpy_matrix_operator_with_arrays_and_products_factory(20, 10, 4, 3, n + 2)
        op2b, _, _, _, _, _ = numpy_matrix_operator_with_arrays_and_products_factory(20, 10, 4, 3, n + 2)
        op2 = (op2a * ProjectionParameterFunctional('p', 3, 0)
               + op2b * ProjectionParameterFunctional('q', 1))
        op = BlockRowOperator([op0, op2])
        mu = op.parameters.parse({'p': [1, 2, 3], 'q': 4})
        sp = BlockDiagonalOperator([sp0, sp1])
        rp = rp0
        U = op.source.make_array([U0, U1])
        V = V0
        return op, mu, U, V, sp, rp
    else:
        assert False
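
# ---------------------------------------------------------------------------
# Minimal sketch of how the factory's return convention (op, mu, U, V, sp, rp)
# can be exercised, e.g. in a smoke test. check_factory_case is hypothetical
# and not part of the pymortests harness; it relies only on VectorArray.space
# and Operator.apply/apply2, which exist in pyMOR.
# ---------------------------------------------------------------------------
def check_factory_case(n):
    op, mu, U, V, sp, rp = misc_operator_with_arrays_and_products_factory(n)
    # U and V are test arrays in the operator's source and range spaces
    assert U.space == op.source and V.space == op.range
    AU = op.apply(U, mu=mu)        # AU lives in op.range, like V
    G = rp.apply2(AU, V)           # matrix of range-product inner products
    assert G.shape == (len(AU), len(V))


if __name__ == '__main__':
    for n in range(12):
        check_factory_case(n)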