def discretize(num_refinements, polorder=1):
    grid = GridProvider([0, 0], [5, 1], [100, 20])  # The OS2015 test is [0, 5]x[0, 1], 100x20 elements, ...
    grid.refine(num_refinements)
    dg_space = DiscontinuousLagrangeSpace(grid, polorder)
    # affinely decomposed lhs: one SWIPDG matrix per component of the diffusion factor
    lhs_op = LincombOperator(
        [make_matrix_operator(assemble_SWIPDG_matrix(dg_space, diff, diffusion_tensor), 'PRESSURE')
         for diff in diffusion_factor['functions']],
        diffusion_factor['coefficients'])
    rhs_func = VectorFunctional(lhs_op.range.make_array((assemble_L2_vector(dg_space, f),)))
    dg_product = make_matrix_operator(
        assemble_DG_product_matrix(dg_space, diffusion_factor_bar, diffusion_tensor), 'PRESSURE')
    fom = StationaryDiscretization(lhs_op, rhs_func,
                                   products={'energy_penalty': dg_product},
                                   visualizer=DuneGDTVisualizer(dg_space))
    fom = fom.with_(parameter_space=CubicParameterSpace(fom.parameter_type, 0.1, 1.))
    fom.enable_caching('disk')
    return grid, dg_space, dg_product, fom
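# Usage sketch (hypothetical, not part of the original module): build the OS2015
# FOM, draw a random parameter from its cubic parameter space and measure the
# solution in the assembled energy product. The refinement count is arbitrary.
def _demo_os2015():
    grid, dg_space, dg_product, fom = discretize(num_refinements=2)
    mu = fom.parameter_space.sample_randomly(1)[0]
    U = fom.solve(mu)
    print('energy norm:', fom.products['energy_penalty'].apply2(U, U)[0, 0] ** 0.5)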
def discretize(num_refinements):
    grid = GridProvider([-1, -1], [1, 1], [4, 4])  # The ESV2007 test is [-1, 1]^2, 4x4 elements, ...
    grid.refine(num_refinements)
    dg_space = DiscontinuousLagrangeSpace(grid, 1)
    lhs_op = LincombOperator(
        [make_matrix_operator(assemble_SWIPDG_matrix(dg_space, diff), 'PRESSURE')
         for diff in diffusion['functions']],
        diffusion['coefficients'])
    rhs_func = VectorFunctional(lhs_op.range.make_array((assemble_L2_vector(dg_space, f),)))
    dg_product = make_matrix_operator(assemble_DG_product_matrix(dg_space), 'PRESSURE')
    fom = StationaryDiscretization(lhs_op, rhs_func,
                                   products={'energy_penalty': dg_product},
                                   visualizer=DuneGDTVisualizer(dg_space))
    fom = fom.with_(parameter_space=CubicParameterSpace(fom.parameter_type, 0.1, 1.))
    fom.enable_caching('disk')
    return grid, dg_space, dg_product, fom
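# Usage sketch (hypothetical): same pattern as above for the ESV2007 setup; here
# the solution is written out via the DuneGDTVisualizer attached to the FOM.
def _demo_esv2007():
    grid, dg_space, dg_product, fom = discretize(num_refinements=1)
    U = fom.solve(fom.parameter_space.sample_randomly(1)[0])
    fom.visualize(U)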
def discretize(grid_and_problem_data, T, nt, polorder):
    d, data = discretize_stationary(grid_and_problem_data, polorder)
    assert isinstance(d.parameter_space, CubicParameterSpace)
    parameter_range = grid_and_problem_data['parameter_range']
    d = InstationaryDiscretization(
        T, d.operator.source.zeros(1), d.operator, d.rhs,
        mass=d.operators['l2'],
        time_stepper=ImplicitEulerTimeStepper(nt=nt, solver_options='operator'),
        products=d.products,
        # pass on all non-special operators (skip aliases of operator and rhs)
        operators={kk: vv for kk, vv in d.operators.items()
                   if kk not in ('operator', 'rhs') and vv not in (d.operator, d.rhs)},
        visualizer=DuneGDTVisualizer(data['space']))
    d = d.with_(parameter_space=CubicParameterSpace(d.parameter_type,
                                                    parameter_range[0], parameter_range[1]))
    return d, data
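# Usage sketch (hypothetical): the instationary wrapper above reuses the stationary
# operators and steps them with implicit Euler; solving yields one solution vector
# per time step. T, nt and the scalar parameter value are illustrative choices.
def _demo_parabolic(grid_and_problem_data):
    d, data = discretize(grid_and_problem_data, T=1., nt=10, polorder=1)
    mu = d.parse_parameter(0.5)
    U = d.solve(mu)  # trajectory with nt + 1 vectors, including the initial value
    d.visualize(U)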
def discretize(grid_and_problem_data, solver_options, mpi_comm):

    ################ Setup

    logger = getLogger('discretize_elliptic_block_swipdg.discretize')
    logger.info('discretizing ...')

    grid, boundary_info = grid_and_problem_data['grid'], grid_and_problem_data['boundary_info']
    local_all_dirichlet_boundary_info = make_subdomain_boundary_info(
        grid, {'type': 'xt.grid.boundaryinfo.alldirichlet'})
    local_subdomains, num_local_subdomains, num_global_subdomains = _get_subdomains(grid)
    local_all_neumann_boundary_info = make_subdomain_boundary_info(
        grid, {'type': 'xt.grid.boundaryinfo.allneumann'})

    block_space = make_block_dg_space(grid)
    global_rt_space = make_rt_space(grid)
    subdomain_rt_spaces = [global_rt_space.restrict_to_dd_subdomain_view(grid, ii)
                           for ii in range(num_global_subdomains)]

    local_patterns = [block_space.local_space(ii).compute_pattern('face_and_volume')
                      for ii in range(block_space.num_blocks)]
    coupling_patterns = {'in_in': {}, 'out_out': {}, 'in_out': {}, 'out_in': {}}
    coupling_matrices = {'in_in': {}, 'out_out': {}, 'in_out': {}, 'out_in': {}}
    for ii in range(num_global_subdomains):
        ii_size = block_space.local_space(ii).size()
        for jj in grid.neighboring_subdomains(ii):
            jj_size = block_space.local_space(jj).size()
            if ii < jj:  # Assemble primally (visit each coupling only once).
                coupling_patterns['in_in'][(ii, jj)] = \
                    block_space.local_space(ii).compute_pattern('face_and_volume')
                coupling_patterns['out_out'][(ii, jj)] = \
                    block_space.local_space(jj).compute_pattern('face_and_volume')
                coupling_patterns['in_out'][(ii, jj)] = \
                    block_space.compute_coupling_pattern(ii, jj, 'face')
                coupling_patterns['out_in'][(ii, jj)] = \
                    block_space.compute_coupling_pattern(jj, ii, 'face')
                coupling_matrices['in_in'][(ii, jj)] = \
                    Matrix(ii_size, ii_size, coupling_patterns['in_in'][(ii, jj)])
                coupling_matrices['out_out'][(ii, jj)] = \
                    Matrix(jj_size, jj_size, coupling_patterns['out_out'][(ii, jj)])
                coupling_matrices['in_out'][(ii, jj)] = \
                    Matrix(ii_size, jj_size, coupling_patterns['in_out'][(ii, jj)])
                coupling_matrices['out_in'][(ii, jj)] = \
                    Matrix(jj_size, ii_size, coupling_patterns['out_in'][(ii, jj)])
    boundary_patterns = {}
    for ii in grid.boundary_subdomains():
        boundary_patterns[ii] = block_space.local_space(ii).compute_pattern('face_and_volume')

    ################ Assemble LHS and RHS

    lambda_, kappa = grid_and_problem_data['lambda'], grid_and_problem_data['kappa']
    if isinstance(lambda_, dict):
        lambda_funcs = lambda_['functions']
        lambda_coeffs = lambda_['coefficients']
    else:
        lambda_funcs = [lambda_]
        lambda_coeffs = [1]

    logger.debug('block op ...')
    ops, block_ops = zip(*(
        discretize_lhs(lf, grid, block_space, local_patterns, boundary_patterns,
                       coupling_matrices, kappa, local_all_neumann_boundary_info,
                       boundary_info, coupling_patterns, solver_options)
        for lf in lambda_funcs))
    global_operator = LincombOperator(ops, lambda_coeffs,
                                      solver_options=solver_options, name='GlobalOperator')
    logger.debug('block op global done')
    block_op = LincombOperator(block_ops, lambda_coeffs, name='lhs',
                               solver_options=solver_options)
    logger.debug('block op done')

    f = grid_and_problem_data['f']
    if isinstance(f, dict):
        f_funcs = f['functions']
        f_coeffs = f['coefficients']
    else:
        f_funcs = [f]
        f_coeffs = [1]
    rhss, block_rhss = zip(*(
        discretize_rhs(ff, grid, block_space, global_operator, block_ops, block_op)
        for ff in f_funcs))
    global_rhs = LincombOperator(rhss, f_coeffs)
    block_rhs = LincombOperator(block_rhss, f_coeffs)

    solution_space = block_op.source

    ################ Assemble interpolation and reconstruction operators

    logger.info('discretizing interpolation')

    # Oswald interpolation error operator
    oi_op = BlockDiagonalOperator(
        [OswaldInterpolationErrorOperator(ii, block_op.source, grid, block_space)
         for ii in range(num_global_subdomains)],
        name='oswald_interpolation_error')

    # Flux reconstruction operator
    fr_op = LincombOperator(
        [BlockDiagonalOperator(
            [FluxReconstructionOperator(ii, block_op.source, grid, block_space,
                                        global_rt_space, subdomain_rt_spaces, lambda_xi, kappa)
             for ii in range(num_global_subdomains)])
         for lambda_xi in lambda_funcs],
        lambda_coeffs,
        name='flux_reconstruction')

    ################ Assemble inner products and error estimator operators

    logger.info('discretizing inner products')

    lambda_bar, lambda_hat = grid_and_problem_data['lambda_bar'], grid_and_problem_data['lambda_hat']
    mu_bar, mu_hat = grid_and_problem_data['mu_bar'], grid_and_problem_data['mu_hat']
    operators = {}
    local_projections = []
    local_rt_projections = []
    local_oi_projections = []
    local_div_ops = []
    local_l2_products = []
    data = dict(grid=grid,
                block_space=block_space,
                local_projections=local_projections,
                local_rt_projections=local_rt_projections,
                local_oi_projections=local_oi_projections,
                local_div_ops=local_div_ops,
                local_l2_products=local_l2_products)

    for ii in range(num_global_subdomains):
        neighborhood = grid.neighborhood_of(ii)

        ################ Assemble local inner products

        local_dg_space = block_space.local_space(ii)
        # we want a larger pattern to allow for axpy with other matrices
        tmp_local_matrix = Matrix(local_dg_space.size(), local_dg_space.size(),
                                  local_dg_space.compute_pattern('face_and_volume'))
        local_energy_product_ops = []
        local_energy_product_coeffs = []
        for func, coeff in zip(lambda_funcs, lambda_coeffs):
            local_energy_product_ops.append(make_elliptic_matrix_operator(
                func, kappa, tmp_local_matrix.copy(), local_dg_space, over_integrate=0))
            local_energy_product_coeffs.append(coeff)
            local_energy_product_ops.append(make_penalty_product_matrix_operator(
                grid, ii, local_all_dirichlet_boundary_info, local_dg_space,
                func, kappa, over_integrate=0))
            local_energy_product_coeffs.append(coeff)
        local_l2_product = make_l2_matrix_operator(tmp_local_matrix.copy(), local_dg_space)
        del tmp_local_matrix
        local_assembler = make_system_assembler(local_dg_space)
        for local_product_op in local_energy_product_ops:
            local_assembler.append(local_product_op)
        local_assembler.append(local_l2_product)
        local_assembler.assemble()
        local_energy_product_name = 'local_energy_dg_product_{}'.format(ii)
        local_energy_product = LincombOperator(
            [DuneXTMatrixOperator(op.matrix(),
                                  source_id='domain_{}'.format(ii),
                                  range_id='domain_{}'.format(ii))
             for op in local_energy_product_ops],
            local_energy_product_coeffs,
            name=local_energy_product_name)
        operators[local_energy_product_name] = \
            local_energy_product.assemble(mu_bar).with_(name=local_energy_product_name)
        local_l2_product = DuneXTMatrixOperator(local_l2_product.matrix(),
                                                source_id='domain_{}'.format(ii),
                                                range_id='domain_{}'.format(ii))
        local_l2_products.append(local_l2_product)

        # assemble local elliptic product
        matrix = make_local_elliptic_matrix_operator(grid, ii, local_dg_space, lambda_bar, kappa)
        matrix.assemble()
        local_elliptic_product = DuneXTMatrixOperator(matrix.matrix(),
                                                      range_id='domain_{}'.format(ii),
                                                      source_id='domain_{}'.format(ii))

        ################ Assemble local to global projections

        # assemble projection (solution space) -> (ii space)
        local_projection = BlockProjectionOperator(block_op.source, ii)
        local_projections.append(local_projection)

        # assemble projection (RT spaces on neighborhoods of subdomains) -> (local RT space on ii)
        ops = np.full(num_global_subdomains, None)
        for kk in neighborhood:
            component = grid.neighborhood_of(kk).index(ii)
            assert fr_op.range.subspaces[kk].subspaces[component].id == 'LOCALRT_{}'.format(ii)
            ops[kk] = BlockProjectionOperator(fr_op.range.subspaces[kk], component)
        local_rt_projection = BlockRowOperator(ops, source_spaces=fr_op.range.subspaces,
                                               name='local_rt_projection_{}'.format(ii))
        local_rt_projections.append(local_rt_projection)

        # assemble projection (OI spaces on neighborhoods of subdomains) -> (ii space)
        ops = np.full(num_global_subdomains, None)
        for kk in neighborhood:
            component = grid.neighborhood_of(kk).index(ii)
            assert oi_op.range.subspaces[kk].subspaces[component].id == 'domain_{}'.format(ii)
            ops[kk] = BlockProjectionOperator(oi_op.range.subspaces[kk], component)
        local_oi_projection = BlockRowOperator(ops, source_spaces=oi_op.range.subspaces,
                                               name='local_oi_projection_{}'.format(ii))
        local_oi_projections.append(local_oi_projection)

        ################ Assemble additional operators for error estimation

        # assemble local divergence operator
        local_rt_space = global_rt_space.restrict_to_dd_subdomain_view(grid, ii)
        local_div_op = make_divergence_matrix_operator_on_subdomain(
            grid, ii, local_dg_space, local_rt_space)
        local_div_op.assemble()
        local_div_op = DuneXTMatrixOperator(local_div_op.matrix(),
                                            source_id='LOCALRT_{}'.format(ii),
                                            range_id='domain_{}'.format(ii),
                                            name='local_divergence_{}'.format(ii))
        local_div_ops.append(local_div_op)

        ################ Assemble error estimator operators -- Nonconformity

        operators['nc_{}'.format(ii)] = \
            Concatenation([local_oi_projection.T, local_elliptic_product, local_oi_projection],
                          name='nonconformity_{}'.format(ii))

        ################ Assemble error estimator operators -- Residual

        if len(f_funcs) == 1:
            assert f_coeffs[0] == 1
            local_div = Concatenation([local_div_op, local_rt_projection])
            local_rhs = VectorFunctional(block_rhs.operators[0]._array._blocks[ii])
            operators['r_fd_{}'.format(ii)] = \
                Concatenation([local_rhs, local_div], name='r1_{}'.format(ii))
            operators['r_dd_{}'.format(ii)] = \
                Concatenation([local_div.T, local_l2_product, local_div],
                              name='r2_{}'.format(ii))

        ################ Assemble error estimator operators -- Diffusive flux

        operators['df_aa_{}'.format(ii)] = LincombOperator(
            [assemble_estimator_diffusive_flux_aa(lambda_xi, lambda_xi_prime, grid, ii,
                                                  block_space, lambda_hat, kappa, solution_space)
             for lambda_xi in lambda_funcs
             for lambda_xi_prime in lambda_funcs],
            [ProductParameterFunctional([c1, c2])
             for c1 in lambda_coeffs
             for c2 in lambda_coeffs],
            name='diffusive_flux_aa_{}'.format(ii))
        operators['df_bb_{}'.format(ii)] = assemble_estimator_diffusive_flux_bb(
            grid, ii, subdomain_rt_spaces, lambda_hat, kappa, local_rt_projection)
        operators['df_ab_{}'.format(ii)] = LincombOperator(
            [assemble_estimator_diffusive_flux_ab(lambda_xi, grid, ii, block_space,
                                                  subdomain_rt_spaces, lambda_hat, kappa,
                                                  local_rt_projection, local_projection)
             for lambda_xi in lambda_funcs],
            lambda_coeffs,
            name='diffusive_flux_ab_{}'.format(ii))

    ################ Final assembly

    logger.info('final assembly')

    # instantiate error estimator
    min_diffusion_evs = np.array([min_diffusion_eigenvalue(grid, ii, lambda_hat, kappa)
                                  for ii in range(num_global_subdomains)])
    subdomain_diameters = np.array([subdomain_diameter(grid, ii)
                                    for ii in range(num_global_subdomains)])
    if len(f_funcs) == 1:
        assert f_coeffs[0] == 1
        local_eta_rf_squared = np.array([
            apply_l2_product(grid, ii, f_funcs[0], f_funcs[0], over_integrate=2)
            for ii in range(num_global_subdomains)])
    else:
        local_eta_rf_squared = None
    estimator = EllipticEstimator(grid, min_diffusion_evs, subdomain_diameters,
                                  local_eta_rf_squared, lambda_coeffs, mu_bar, mu_hat, fr_op,
                                  oswald_interpolation_error=oi_op, mpi_comm=mpi_comm)
    l2_product = BlockDiagonalOperator(local_l2_products)

    # instantiate discretization
    neighborhoods = [grid.neighborhood_of(ii) for ii in range(num_global_subdomains)]
    local_boundary_info = make_subdomain_boundary_info(
        grid_and_problem_data['grid'], {'type': 'xt.grid.boundaryinfo.alldirichlet'})
    d = DuneDiscretization(global_operator=global_operator,
                           global_rhs=global_rhs,
                           neighborhoods=neighborhoods,
                           enrichment_data=(grid, local_boundary_info, lambda_, kappa, f,
                                            block_space),
                           operator=block_op,
                           rhs=block_rhs,
                           visualizer=DuneGDTVisualizer(block_space),
                           operators=operators,
                           products={'l2': l2_product},
                           estimator=estimator,
                           data=data)
    parameter_range = grid_and_problem_data['parameter_range']
    logger.info('final assembly B')
    d = d.with_(parameter_space=CubicParameterSpace(d.parameter_type,
                                                    parameter_range[0], parameter_range[1]))
    logger.info('final assembly C')
    return d, data
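# Usage sketch (hypothetical): solve the blocked FOM and evaluate the localized
# error estimator assembled above; mpi_comm=None stands in for a serial run and
# is an assumption about the expected communicator argument.
def _demo_block_swipdg(grid_and_problem_data):
    d, data = discretize(grid_and_problem_data, solver_options=None, mpi_comm=None)
    mu = d.parse_parameter(0.5)
    U = d.solve(mu)
    print('estimated error:', d.estimate(U, mu=mu))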
def discretize(grid_and_problem_data, polorder=1, solver_options=None):

    logger = getLogger('discretize_elliptic_swipdg.discretize')
    logger.info('discretizing ...')
    over_integrate = 2

    grid, boundary_info = grid_and_problem_data['grid'], grid_and_problem_data['boundary_info']

    _lambda, kappa, f = (grid_and_problem_data['lambda'],
                         grid_and_problem_data['kappa'],
                         grid_and_problem_data['f'])
    lambda_bar, lambda_hat = grid_and_problem_data['lambda_bar'], grid_and_problem_data['lambda_hat']
    mu_bar, mu_hat, parameter_range = (grid_and_problem_data['mu_bar'],
                                       grid_and_problem_data['mu_hat'],
                                       grid_and_problem_data['parameter_range'])

    space = make_dg_space(grid)

    # prepare operators and functionals
    if isinstance(_lambda, dict):
        system_ops = [make_elliptic_swipdg_matrix_operator(lambda_func, kappa, boundary_info,
                                                           space, over_integrate)
                      for lambda_func in _lambda['functions']]
        elliptic_ops = [make_elliptic_matrix_operator(lambda_func, kappa, space, over_integrate)
                        for lambda_func in _lambda['functions']]
    else:
        system_ops = [make_elliptic_swipdg_matrix_operator(_lambda, kappa, boundary_info,
                                                           space, over_integrate)]
        elliptic_ops = [make_elliptic_matrix_operator(_lambda, kappa, space, over_integrate)]
    if isinstance(f, dict):
        rhs_functionals = [make_l2_volume_vector_functional(f_func, space, over_integrate)
                           for f_func in f['functions']]
    else:
        rhs_functionals = [make_l2_volume_vector_functional(f, space, over_integrate)]
    l2_matrix_with_system_pattern = system_ops[0].matrix().copy()
    l2_operator = make_l2_matrix_operator(l2_matrix_with_system_pattern, space)

    # assemble everything in one grid walk
    system_assembler = make_system_assembler(space)
    for op in system_ops:
        system_assembler.append(op)
    for op in elliptic_ops:
        system_assembler.append(op)
    for func in rhs_functionals:
        system_assembler.append(func)
    system_assembler.append(l2_operator)
    system_assembler.walk()

    # wrap everything
    if isinstance(_lambda, dict):
        op = LincombOperator([DuneXTMatrixOperator(o.matrix(),
                                                   dof_communicator=space.dof_communicator)
                              for o in system_ops],
                             _lambda['coefficients'])
        elliptic_op = LincombOperator([DuneXTMatrixOperator(o.matrix()) for o in elliptic_ops],
                                      _lambda['coefficients'])
    else:
        op = DuneXTMatrixOperator(system_ops[0].matrix())
        elliptic_op = DuneXTMatrixOperator(elliptic_ops[0].matrix())
    if isinstance(f, dict):
        rhs = LincombOperator([VectorFunctional(op.range.make_array([func.vector()]))
                               for func in rhs_functionals],
                              f['coefficients'])
    else:
        rhs = VectorFunctional(op.range.make_array([rhs_functionals[0].vector()]))
    operators = {'l2': DuneXTMatrixOperator(l2_matrix_with_system_pattern),
                 'elliptic': elliptic_op,
                 'elliptic_mu_bar': DuneXTMatrixOperator(elliptic_op.assemble(mu=mu_bar).matrix)}
    d = StationaryDiscretization(op, rhs, operators=operators,
                                 visualizer=DuneGDTVisualizer(space))
    d = d.with_(parameter_space=CubicParameterSpace(d.parameter_type,
                                                    parameter_range[0], parameter_range[1]))

    return d, {'space': space}
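# Usage sketch (hypothetical): besides solving, the returned discretization exposes
# the operators assembled in the single grid walk above; here the l2 norm and the
# energy norm fixed at mu_bar are compared for one snapshot.
def _demo_swipdg(grid_and_problem_data):
    d, data = discretize(grid_and_problem_data, polorder=1)
    mu = d.parse_parameter(0.5)
    U = d.solve(mu)
    l2_norm = d.operators['l2'].apply2(U, U)[0, 0] ** 0.5
    energy_norm = d.operators['elliptic_mu_bar'].apply2(U, U)[0, 0] ** 0.5
    print('l2: {}, energy (mu_bar): {}'.format(l2_norm, energy_norm))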
def discretize(grid_and_problem_data):
    logger = getLogger('discretize_elliptic.discretize_block_SWIPDG')
    logger.info('discretizing ...')

    grid, boundary_info, inner_boundary_id = (grid_and_problem_data['grid'],
                                              grid_and_problem_data['boundary_info'],
                                              grid_and_problem_data['inner_boundary_id'])
    local_all_dirichlet_boundary_info = make_subdomain_boundary_info(
        grid, {'type': 'xt.grid.boundaryinfo.alldirichlet'})
    local_all_neumann_boundary_info = make_subdomain_boundary_info(
        grid, {'type': 'xt.grid.boundaryinfo.allneumann'})
    neighborhood_boundary_info = make_subdomain_boundary_info(
        grid,
        {'type': 'xt.grid.boundaryinfo.boundarysegmentindexbased',
         'default': 'dirichlet',
         'neumann': '[{} {}]'.format(inner_boundary_id, inner_boundary_id + 1)})

    affine_lambda, kappa, f = (grid_and_problem_data['lambda'],
                               grid_and_problem_data['kappa'],
                               grid_and_problem_data['f'])
    lambda_bar, lambda_hat = grid_and_problem_data['lambda_bar'], grid_and_problem_data['lambda_hat']
    mu_bar, mu_hat, parameter_range = (grid_and_problem_data['mu_bar'],
                                       grid_and_problem_data['mu_hat'],
                                       grid_and_problem_data['parameter_range'])

    block_space = make_block_space(grid)

    local_patterns = [block_space.local_space(ii).compute_pattern('face_and_volume')
                      for ii in range(block_space.num_blocks)]
    coupling_patterns_in_in = {}
    coupling_patterns_out_out = {}
    coupling_patterns_in_out = {}
    coupling_patterns_out_in = {}
    for ii in range(grid.num_subdomains):
        for jj in grid.neighboring_subdomains(ii):
            if ii < jj:  # Assemble primally (visit each coupling only once).
                coupling_patterns_in_in[(ii, jj)] = \
                    block_space.local_space(ii).compute_pattern('face_and_volume')
                coupling_patterns_out_out[(ii, jj)] = \
                    block_space.local_space(jj).compute_pattern('face_and_volume')
                coupling_patterns_in_out[(ii, jj)] = \
                    block_space.compute_coupling_pattern(ii, jj, 'face')
                coupling_patterns_out_in[(ii, jj)] = \
                    block_space.compute_coupling_pattern(jj, ii, 'face')
    boundary_patterns = {}
    for ii in grid.boundary_subdomains():
        boundary_patterns[ii] = block_space.local_space(ii).compute_pattern('face_and_volume')

    def discretize_lhs_for_lambda(lambda_):
        local_matrices = [None] * grid.num_subdomains
        local_vectors = [None] * grid.num_subdomains
        boundary_matrices = {}
        coupling_matrices_in_in = {}
        coupling_matrices_out_out = {}
        coupling_matrices_in_out = {}
        coupling_matrices_out_in = {}
        for ii in range(grid.num_subdomains):
            local_matrices[ii] = Matrix(block_space.local_space(ii).size(),
                                        block_space.local_space(ii).size(),
                                        local_patterns[ii])
            local_vectors[ii] = Vector(block_space.local_space(ii).size())
            if ii in grid.boundary_subdomains():
                boundary_matrices[ii] = Matrix(block_space.local_space(ii).size(),
                                               block_space.local_space(ii).size(),
                                               boundary_patterns[ii])
            for jj in grid.neighboring_subdomains(ii):
                if ii < jj:  # Assemble primally (visit each coupling only once).
                    coupling_matrices_in_in[(ii, jj)] = \
                        Matrix(block_space.local_space(ii).size(),
                               block_space.local_space(ii).size(),
                               coupling_patterns_in_in[(ii, jj)])
                    coupling_matrices_out_out[(ii, jj)] = \
                        Matrix(block_space.local_space(jj).size(),
                               block_space.local_space(jj).size(),
                               coupling_patterns_out_out[(ii, jj)])
                    coupling_matrices_in_out[(ii, jj)] = \
                        Matrix(block_space.local_space(ii).size(),
                               block_space.local_space(jj).size(),
                               coupling_patterns_in_out[(ii, jj)])
                    coupling_matrices_out_in[(ii, jj)] = \
                        Matrix(block_space.local_space(jj).size(),
                               block_space.local_space(ii).size(),
                               coupling_patterns_out_in[(ii, jj)])

        def assemble_local_contributions(subdomain):
            ipdg_operator = make_elliptic_swipdg_matrix_operator(
                lambda_, kappa, local_all_neumann_boundary_info,
                local_matrices[subdomain], block_space.local_space(subdomain))
            l2_functional = make_l2_volume_vector_functional(
                f, local_vectors[subdomain], block_space.local_space(subdomain))
            local_assembler = make_system_assembler(block_space.local_space(subdomain))
            local_assembler.append(ipdg_operator)
            local_assembler.append(l2_functional)
            local_assembler.assemble()

        for ii in range(grid.num_subdomains):
            assemble_local_contributions(ii)

        local_ipdg_coupling_operator = make_local_elliptic_swipdg_coupling_operator(lambda_, kappa)

        def assemble_coupling_contributions(subdomain, neighboring_subdomain):
            coupling_assembler = block_space.coupling_assembler(subdomain, neighboring_subdomain)
            coupling_assembler.append(local_ipdg_coupling_operator,
                                      coupling_matrices_in_in[(subdomain, neighboring_subdomain)],
                                      coupling_matrices_out_out[(subdomain, neighboring_subdomain)],
                                      coupling_matrices_in_out[(subdomain, neighboring_subdomain)],
                                      coupling_matrices_out_in[(subdomain, neighboring_subdomain)])
            coupling_assembler.assemble()

        for ii in range(grid.num_subdomains):
            for jj in grid.neighboring_subdomains(ii):
                if ii < jj:  # Assemble primally (visit each coupling only once).
                    assemble_coupling_contributions(ii, jj)

        local_ipdg_boundary_operator = make_local_elliptic_swipdg_boundary_operator(lambda_, kappa)
        apply_on_dirichlet_intersections = make_apply_on_dirichlet_intersections(boundary_info)

        def assemble_boundary_contributions(subdomain):
            boundary_assembler = block_space.boundary_assembler(subdomain)
            boundary_assembler.append(local_ipdg_boundary_operator,
                                      boundary_matrices[subdomain],
                                      apply_on_dirichlet_intersections)
            boundary_assembler.assemble()

        for ii in grid.boundary_subdomains():
            assemble_boundary_contributions(ii)

        global_pattern = SparsityPatternDefault(block_space.mapper.size)
        for ii in range(grid.num_subdomains):
            block_space.mapper.copy_local_to_global(local_patterns[ii], ii, global_pattern)
            if ii in grid.boundary_subdomains():
                block_space.mapper.copy_local_to_global(boundary_patterns[ii], ii, global_pattern)
            for jj in grid.neighboring_subdomains(ii):
                if ii < jj:  # Assemble primally (visit each coupling only once).
                    block_space.mapper.copy_local_to_global(coupling_patterns_in_in[(ii, jj)],
                                                            ii, ii, global_pattern)
                    block_space.mapper.copy_local_to_global(coupling_patterns_out_out[(ii, jj)],
                                                            jj, jj, global_pattern)
                    block_space.mapper.copy_local_to_global(coupling_patterns_in_out[(ii, jj)],
                                                            ii, jj, global_pattern)
                    block_space.mapper.copy_local_to_global(coupling_patterns_out_in[(ii, jj)],
                                                            jj, ii, global_pattern)

        system_matrix = Matrix(block_space.mapper.size, block_space.mapper.size, global_pattern)
        rhs_vector = Vector(block_space.mapper.size, 0.)
        for ii in range(grid.num_subdomains):
            block_space.mapper.copy_local_to_global(local_matrices[ii], local_patterns[ii],
                                                    ii, system_matrix)
            block_space.mapper.copy_local_to_global(local_vectors[ii], ii, rhs_vector)
            if ii in grid.boundary_subdomains():
                block_space.mapper.copy_local_to_global(boundary_matrices[ii],
                                                        boundary_patterns[ii],
                                                        ii, ii, system_matrix)
            for jj in grid.neighboring_subdomains(ii):
                if ii < jj:  # Assemble primally (visit each coupling only once).
                    block_space.mapper.copy_local_to_global(coupling_matrices_in_in[(ii, jj)],
                                                            coupling_patterns_in_in[(ii, jj)],
                                                            ii, ii, system_matrix)
                    block_space.mapper.copy_local_to_global(coupling_matrices_out_out[(ii, jj)],
                                                            coupling_patterns_out_out[(ii, jj)],
                                                            jj, jj, system_matrix)
                    block_space.mapper.copy_local_to_global(coupling_matrices_in_out[(ii, jj)],
                                                            coupling_patterns_in_out[(ii, jj)],
                                                            ii, jj, system_matrix)
                    block_space.mapper.copy_local_to_global(coupling_matrices_out_in[(ii, jj)],
                                                            coupling_patterns_out_in[(ii, jj)],
                                                            jj, ii, system_matrix)

        op = DuneXTMatrixOperator(system_matrix)
        mats = np.full((grid.num_subdomains, grid.num_subdomains), None)
        for ii in range(grid.num_subdomains):
            for jj in range(ii, grid.num_subdomains):
                if ii == jj:
                    mats[ii, ii] = Matrix(block_space.local_space(ii).size(),
                                          block_space.local_space(ii).size(),
                                          local_patterns[ii])
                elif (ii, jj) in coupling_matrices_in_out:
                    mats[ii, jj] = Matrix(block_space.local_space(ii).size(),
                                          block_space.local_space(jj).size(),
                                          coupling_patterns_in_out[(ii, jj)])
                    mats[jj, ii] = Matrix(block_space.local_space(jj).size(),
                                          block_space.local_space(ii).size(),
                                          coupling_patterns_out_in[(ii, jj)])
        for ii in range(grid.num_subdomains):
            for jj in range(ii, grid.num_subdomains):
                if ii == jj:
                    mats[ii, ii].axpy(1., local_matrices[ii])
                    if ii in boundary_matrices:
                        mats[ii, ii].axpy(1., boundary_matrices[ii])
                elif (ii, jj) in coupling_matrices_in_out:
                    mats[ii, ii].axpy(1., coupling_matrices_in_in[(ii, jj)])
                    mats[jj, jj].axpy(1., coupling_matrices_out_out[(ii, jj)])
                    mats[ii, jj].axpy(1., coupling_matrices_in_out[(ii, jj)])
                    mats[jj, ii].axpy(1., coupling_matrices_out_in[(ii, jj)])
        ops = np.full((grid.num_subdomains, grid.num_subdomains), None)
        for (ii, jj), mat in np.ndenumerate(mats):
            ops[ii, jj] = DuneXTMatrixOperator(mat,
                                               source_id='domain_{}'.format(jj),
                                               range_id='domain_{}'.format(ii)) if mat else None
        block_op = BlockOperator(ops)
        rhs = VectorFunctional(op.range.make_array([rhs_vector]))
        rhss = []
        for ii in range(grid.num_subdomains):
            rhss.append(ops[ii, ii].range.make_array([local_vectors[ii]]))
        block_rhs = VectorFunctional(block_op.range.make_array(rhss))
        return op, block_op, rhs, block_rhs

    ops, block_ops, rhss, block_rhss = zip(*(discretize_lhs_for_lambda(l)
                                             for l in affine_lambda['functions']))
    rhs = rhss[0]
    block_rhs = block_rhss[0]
    lambda_coeffs = affine_lambda['coefficients']
    op = LincombOperator(ops, lambda_coeffs)
    block_op = LincombOperator(block_ops, lambda_coeffs, name='lhs')
    operators = {'global_op': op, 'global_rhs': rhs}
    global_rt_space = make_rt_space(grid)

    def assemble_oswald_interpolation_error():
        oi_ops = [OswaldInterpolationErrorOperator(ii, ii, ii, block_op.source, grid, block_space,
                                                   global_rt_space, neighborhood_boundary_info,
                                                   None, None, None, None)
                  for ii in range(grid.num_subdomains)]
        return BlockDiagonalOperator(oi_ops, name='oswald_interpolation_error')

    oi_op = assemble_oswald_interpolation_error()

    def assemble_flux_reconstruction(lambda_xi):
        fr_ops = [FluxReconstructionOperator(ii, ii, ii, block_op.source, grid, block_space,
                                             global_rt_space, neighborhood_boundary_info,
                                             lambda_hat, lambda_xi, lambda_xi, kappa)
                  for ii in range(grid.num_subdomains)]
        return BlockDiagonalOperator(fr_ops)

    fr_op = LincombOperator([assemble_flux_reconstruction(lambda_xi)
                             for lambda_xi in affine_lambda['functions']],
                            lambda_coeffs, name='flux_reconstruction')

    spaces = block_op.source.subspaces
    rt_spaces = fr_op.range.subspaces

    # assemble local products
    for ii in range(grid.num_subdomains):
        local_space = block_space.local_space(ii)
        # we want a larger pattern for the elliptic part, to allow for axpy with the penalty part
        tmp_local_matrix = Matrix(local_space.size(), local_space.size(),
                                  local_space.compute_pattern('face_and_volume'))
        local_product_ops = []
        local_product_coeffs = []
        for func, coeff in zip(affine_lambda['functions'], affine_lambda['coefficients']):
            local_product_ops.append(make_elliptic_matrix_operator(
                func, kappa, tmp_local_matrix.copy(), local_space, over_integrate=0))
            local_product_coeffs.append(coeff)
            local_product_ops.append(make_penalty_product_matrix_operator(
                grid, ii, local_all_dirichlet_boundary_info, local_space,
                func, kappa, over_integrate=0))
            local_product_coeffs.append(coeff)
        del tmp_local_matrix
        local_assembler = make_system_assembler(local_space)
        for local_product_op in local_product_ops:
            local_assembler.append(local_product_op)
        local_assembler.assemble()
        local_product_name = 'local_energy_dg_product_{}'.format(ii)
        local_product = LincombOperator([DuneXTMatrixOperator(op.matrix(),
                                                              source_id='domain_{}'.format(ii),
                                                              range_id='domain_{}'.format(ii))
                                         for op in local_product_ops],
                                        local_product_coeffs,
                                        name=local_product_name)
        operators[local_product_name] = \
            local_product.assemble(mu_bar).with_(name=local_product_name)

    # assemble error estimator
    for ii in range(grid.num_subdomains):

        neighborhood = grid.neighborhood_of(ii)

        def assemble_estimator_noconformity():
            nc_ops = np.full((grid.num_subdomains,) * 2, None)
            for jj in neighborhood:
                for kk in neighborhood:
                    nc_ops[jj, kk] = NonconformityOperator(ii, jj, kk, block_op.source, grid,
                                                           block_space, global_rt_space,
                                                           neighborhood_boundary_info,
                                                           lambda_bar, None, None, kappa)
            return BlockOperator(nc_ops, range_spaces=oi_op.range.subspaces,
                                 source_spaces=oi_op.range.subspaces,
                                 name='nonconformity_{}'.format(ii))

        def assemble_estimator_diffusive_flux_aa(lambda_xi, lambda_xi_prime):
            df_ops = np.full((grid.num_subdomains,) * 2, None)
            df_ops[ii, ii] = DiffusiveFluxOperatorAA(ii, ii, ii, block_op.source, grid,
                                                     block_space, global_rt_space,
                                                     neighborhood_boundary_info,
                                                     lambda_hat, lambda_xi, lambda_xi_prime,
                                                     kappa)
            return BlockOperator(df_ops, range_spaces=spaces, source_spaces=spaces)

        def assemble_estimator_diffusive_flux_bb():
            df_ops = np.full((grid.num_subdomains,) * 2, None)
            for jj in neighborhood:
                for kk in neighborhood:
                    df_ops[jj, kk] = DiffusiveFluxOperatorBB(ii, jj, kk, block_op.source, grid,
                                                             block_space, global_rt_space,
                                                             neighborhood_boundary_info,
                                                             lambda_hat, None, None, kappa)
            return BlockOperator(df_ops, range_spaces=rt_spaces, source_spaces=rt_spaces,
                                 name='diffusive_flux_bb_{}'.format(ii))

        def assemble_estimator_diffusive_flux_ab(lambda_xi):
            df_ops = np.full((grid.num_subdomains,) * 2, None)
            for kk in neighborhood:
                df_ops[ii, kk] = DiffusiveFluxOperatorAB(ii, ii, kk, block_op.source, grid,
                                                         block_space, global_rt_space,
                                                         neighborhood_boundary_info,
                                                         lambda_hat, lambda_xi, None, kappa)
            return BlockOperator(df_ops, range_spaces=spaces, source_spaces=rt_spaces)

        def assemble_estimator_residual():
            r2_ops = np.full((grid.num_subdomains,) * 2, None)
            for jj in neighborhood:
                for kk in neighborhood:
                    r2_ops[jj, kk] = ResidualPartOperator(ii, jj, kk, block_op.source, grid,
                                                          block_space, global_rt_space,
                                                          neighborhood_boundary_info,
                                                          lambda_hat, None, None, kappa)
            return BlockOperator(r2_ops, range_spaces=rt_spaces, source_spaces=rt_spaces,
                                 name='residual_{}'.format(ii))

        def assemble_estimator_residual_functional():
            r1_ops = np.full((1, grid.num_subdomains,), None)
            for jj in neighborhood:
                r1_ops[0, jj] = ResidualPartFunctional(f, ii, jj, jj, block_op.source, grid,
                                                       block_space, global_rt_space,
                                                       neighborhood_boundary_info,
                                                       lambda_hat, None, None, kappa)
            return BlockOperator(r1_ops, source_spaces=rt_spaces,
                                 name='residual_functional_{}'.format(ii))

        operators['nc_{}'.format(ii)] = assemble_estimator_noconformity()
        operators['r1_{}'.format(ii)] = assemble_estimator_residual_functional()
        operators['r2_{}'.format(ii)] = assemble_estimator_residual()
        operators['df_aa_{}'.format(ii)] = LincombOperator(
            [assemble_estimator_diffusive_flux_aa(lambda_xi, lambda_xi_prime)
             for lambda_xi in affine_lambda['functions']
             for lambda_xi_prime in affine_lambda['functions']],
            [ProductParameterFunctional([c1, c2])
             for c1 in lambda_coeffs
             for c2 in lambda_coeffs],
            name='diffusive_flux_aa_{}'.format(ii))
        operators['df_bb_{}'.format(ii)] = assemble_estimator_diffusive_flux_bb()
        operators['df_ab_{}'.format(ii)] = LincombOperator(
            [assemble_estimator_diffusive_flux_ab(lambda_xi)
             for lambda_xi in affine_lambda['functions']],
            lambda_coeffs,
            name='diffusive_flux_ab_{}'.format(ii))

    min_diffusion_evs = np.array([min_diffusion_eigenvalue(grid, ii, lambda_hat, kappa)
                                  for ii in range(grid.num_subdomains)])
    subdomain_diameters = np.array([subdomain_diameter(grid, ii)
                                    for ii in range(grid.num_subdomains)])
    local_eta_rf_squared = np.array([apply_l2_product(grid, ii, f, f, over_integrate=2)
                                     for ii in range(grid.num_subdomains)])
    estimator = Estimator(min_diffusion_evs, subdomain_diameters, local_eta_rf_squared,
                          lambda_coeffs, mu_bar, mu_hat, fr_op, oi_op)

    neighborhoods = [grid.neighborhood_of(ii) for ii in range(grid.num_subdomains)]
    local_boundary_info = make_subdomain_boundary_info(
        grid_and_problem_data['grid'], {'type': 'xt.grid.boundaryinfo.alldirichlet'})
    d = DuneDiscretization(block_op, block_rhs, neighborhoods,
                           (grid, local_boundary_info, affine_lambda, kappa, f, block_space),
                           visualizer=DuneGDTVisualizer(block_space),
                           operators=operators, estimator=estimator)
    d = d.with_(parameter_space=CubicParameterSpace(d.parameter_type,
                                                    parameter_range[0], parameter_range[1]))

    return d, block_space
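# Usage sketch (hypothetical): the estimator parts assembled above are addressable
# by name and subdomain index, e.g. 'nc_3' (nonconformity), 'r1_3'/'r2_3'
# (residual) and 'df_aa_3'/'df_ab_3'/'df_bb_3' (diffusive flux) for subdomain 3.
def _demo_estimator_parts(grid_and_problem_data):
    d, block_space = discretize(grid_and_problem_data)
    keys = [k for k in d.operators if k.startswith(('nc_', 'r1_', 'r2_', 'df_'))]
    print('estimator operators:', sorted(keys))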
def discretize(grid_and_problem_data, T, nt):
    d, d_data = discretize_ell(grid_and_problem_data)
    assert isinstance(d.parameter_space, CubicParameterSpace)
    parameter_range = grid_and_problem_data['parameter_range']
    block_space = d_data['block_space']

    # assemble global L2 product
    l2_mat = d.global_operator.operators[0].matrix.copy()  # to ensure matching pattern
    l2_mat.scal(0.)
    for ii in range(block_space.num_blocks):
        local_l2_product = d.l2_product._blocks[ii, ii]
        block_space.mapper.copy_local_to_global(local_l2_product.matrix,
                                                local_l2_product.matrix.pattern(),
                                                ii, l2_mat)
    mass = d.l2_product
    operators = {k: v for k, v in d.operators.items() if k not in d.special_operators}
    global_mass = DuneXTMatrixOperator(l2_mat)

    local_div_ops, local_l2_products, local_projections, local_rt_projections = \
        (d_data['local_div_ops'], d_data['local_l2_products'],
         d_data['local_projections'], d_data['local_rt_projections'])
    for ii in range(d_data['grid'].num_subdomains):
        local_div = Concatenation([local_div_ops[ii], local_rt_projections[ii]])
        operators['r_ud_{}'.format(ii)] = \
            Concatenation([local_projections[ii].T, local_l2_products[ii], local_div],
                          name='r_ud_{}'.format(ii))
        operators['r_l2_{}'.format(ii)] = \
            Concatenation([local_projections[ii].T, local_l2_products[ii],
                           local_projections[ii]],
                          name='r_l2_{}'.format(ii))

    e = d.estimator
    estimator = ParabolicEstimator(e.min_diffusion_evs, e.subdomain_diameters,
                                   e.local_eta_rf_squared, e.lambda_coeffs, e.mu_bar, e.mu_hat,
                                   e.flux_reconstruction, e.oswald_interpolation_error)

    d = InstationaryDuneDiscretization(d.global_operator, d.global_rhs, global_mass,
                                       T, d.operator.source.zeros(1), d.operator, d.rhs,
                                       mass=mass,
                                       time_stepper=ImplicitEulerTimeStepper(
                                           nt=nt, solver_options='operator'),
                                       products=d.products,
                                       operators=operators,
                                       estimator=estimator,
                                       visualizer=DuneGDTVisualizer(block_space))
    d = d.with_(parameter_space=CubicParameterSpace(d.parameter_type,
                                                    parameter_range[0], parameter_range[1]))

    return d, d_data
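# Usage sketch (hypothetical): the parabolic wrapper keeps the elliptic estimator
# data but swaps in the ParabolicEstimator, so a trajectory can be solved and
# estimated with the same calls as in the stationary case.
def _demo_parabolic_block(grid_and_problem_data):
    d, d_data = discretize(grid_and_problem_data, T=1., nt=10)
    mu = d.parse_parameter(0.5)
    U = d.solve(mu)
    print('estimated error:', d.estimate(U, mu=mu))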