def discretize_rhs(f_func, grid, block_space, global_operator, block_ops, block_op):
    logger = getLogger('discretizing_rhs')
    logger.debug('...')
    local_subdomains, num_local_subdomains, num_global_subdomains = _get_subdomains(grid)
    local_vectors = [None] * num_global_subdomains
    rhs_vector = Vector(block_space.mapper.size, 0.)
    for ii in range(num_global_subdomains):
        local_vectors[ii] = Vector(block_space.local_space(ii).size())
        l2_functional = make_l2_volume_vector_functional(f_func, local_vectors[ii],
                                                         block_space.local_space(ii),
                                                         over_integrate=2)
        l2_functional.assemble()
        block_space.mapper.copy_local_to_global(local_vectors[ii], ii, rhs_vector)
    rhs = VectorFunctional(global_operator.range.make_array([rhs_vector]))
    rhss = []
    for ii in range(num_global_subdomains):
        rhss.append(block_ops[0]._blocks[ii, ii].range.make_array([local_vectors[ii]]))
    block_rhs = VectorFunctional(block_op.range.make_array(rhss))
    return rhs, block_rhs


def thermalblock_vectorfunc_factory(product, xblocks, yblocks, diameter, seed):
    from pymor.operators.constructions import VectorFunctional
    _, _, U, V, sp, rp = thermalblock_factory(xblocks, yblocks, diameter, seed)
    op = VectorFunctional(U.copy(ind=0), product=sp if product else None)
    U = V
    V = NumpyVectorArray(np.random.random((7, 1)), copy=False)
    sp = rp
    rp = NumpyMatrixOperator(np.eye(1) * 2)
    return op, None, U, V, sp, rp


def action_NumpyMatrixOperator(self, op):
    vector = op.source.dim == 1
    functional = op.range.dim == 1
    if vector and functional:
        raise NotImplementedError
    if vector:
        space = NumpyListVectorSpace(op.range.dim, op.range.id)
        return VectorOperator(space.from_numpy(op.matrix.reshape((1, -1))), op.name)
    elif functional:
        space = NumpyListVectorSpace(op.source.dim, op.source.id)
        return VectorFunctional(space.from_numpy(op.matrix.ravel()), op.name)
    else:
        return op.with_(new_type=NumpyListVectorArrayMatrixOperator)


def discretize(n, nt, blocks):
    h = 1. / blocks
    ops = [WrappedDiffusionOperator.create(n, h * i, h * (i + 1)) for i in range(blocks)]
    pfs = [ProjectionParameterFunctional('diffusion_coefficients', (blocks,), (i,))
           for i in range(blocks)]
    operator = LincombOperator(ops, pfs)

    initial_data = operator.source.zeros()

    # use data property of WrappedVector to setup rhs
    # note that we cannot use the data property of ListVectorArray,
    # since ListVectorArray will always return a copy
    rhs_vec = operator.range.zeros()
    rhs_data = rhs_vec._list[0].data
    rhs_data[:] = np.ones(len(rhs_data))
    rhs_data[0] = 0
    rhs_data[len(rhs_data) - 1] = 0
    rhs = VectorFunctional(rhs_vec)

    # hack together a visualizer ...
    grid = OnedGrid(domain=(0, 1), num_intervals=n)
    visualizer = Matplotlib1DVisualizer(grid)

    time_stepper = ExplicitEulerTimeStepper(nt)
    parameter_space = CubicParameterSpace(operator.parameter_type, 0.1, 1)

    d = InstationaryDiscretization(T=1e-0, operator=operator, rhs=rhs,
                                   initial_data=initial_data,
                                   time_stepper=time_stepper,
                                   num_values=20,
                                   parameter_space=parameter_space,
                                   visualizer=visualizer,
                                   name='C++-Discretization',
                                   cache_region=None)
    return d


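# Minimal usage sketch (hypothetical values; assumes the pyMOR-style
# InstationaryDiscretization API used above, i.e. ``d.solve(mu)`` and
# ``d.visualize(U)``):
#
#     d = discretize(n=50, nt=100, blocks=3)
#     mu = d.parameter_space.sample_randomly(1)[0]
#     U = d.solve(mu)
#     d.visualize(U)

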
def discretize(grid_and_problem_data, solver_options, mpi_comm):

    ################ Setup

    logger = getLogger('discretize_elliptic_block_swipdg.discretize')
    logger.info('discretizing ... ')

    grid, boundary_info = grid_and_problem_data['grid'], grid_and_problem_data['boundary_info']
    local_all_dirichlet_boundary_info = make_subdomain_boundary_info(
        grid, {'type': 'xt.grid.boundaryinfo.alldirichlet'})
    local_subdomains, num_local_subdomains, num_global_subdomains = _get_subdomains(grid)
    local_all_neumann_boundary_info = make_subdomain_boundary_info(
        grid, {'type': 'xt.grid.boundaryinfo.allneumann'})

    block_space = make_block_dg_space(grid)
    global_rt_space = make_rt_space(grid)
    subdomain_rt_spaces = [global_rt_space.restrict_to_dd_subdomain_view(grid, ii)
                           for ii in range(num_global_subdomains)]

    local_patterns = [block_space.local_space(ii).compute_pattern('face_and_volume')
                      for ii in range(block_space.num_blocks)]
    coupling_patterns = {'in_in': {}, 'out_out': {}, 'in_out': {}, 'out_in': {}}
    coupling_matrices = {'in_in': {}, 'out_out': {}, 'in_out': {}, 'out_in': {}}

    for ii in range(num_global_subdomains):
        ii_size = block_space.local_space(ii).size()
        for jj in grid.neighboring_subdomains(ii):
            jj_size = block_space.local_space(jj).size()
            if ii < jj:  # Assemble primally (visit each coupling only once).
                coupling_patterns['in_in'][(ii, jj)] = \
                    block_space.local_space(ii).compute_pattern('face_and_volume')
                coupling_patterns['out_out'][(ii, jj)] = \
                    block_space.local_space(jj).compute_pattern('face_and_volume')
                coupling_patterns['in_out'][(ii, jj)] = \
                    block_space.compute_coupling_pattern(ii, jj, 'face')
                coupling_patterns['out_in'][(ii, jj)] = \
                    block_space.compute_coupling_pattern(jj, ii, 'face')
                coupling_matrices['in_in'][(ii, jj)] = \
                    Matrix(ii_size, ii_size, coupling_patterns['in_in'][(ii, jj)])
                coupling_matrices['out_out'][(ii, jj)] = \
                    Matrix(jj_size, jj_size, coupling_patterns['out_out'][(ii, jj)])
                coupling_matrices['in_out'][(ii, jj)] = \
                    Matrix(ii_size, jj_size, coupling_patterns['in_out'][(ii, jj)])
                coupling_matrices['out_in'][(ii, jj)] = \
                    Matrix(jj_size, ii_size, coupling_patterns['out_in'][(ii, jj)])

    boundary_patterns = {}
    for ii in grid.boundary_subdomains():
        boundary_patterns[ii] = block_space.local_space(ii).compute_pattern('face_and_volume')

    ################ Assemble LHS and RHS

    lambda_, kappa = grid_and_problem_data['lambda'], grid_and_problem_data['kappa']
    if isinstance(lambda_, dict):
        lambda_funcs = lambda_['functions']
        lambda_coeffs = lambda_['coefficients']
    else:
        lambda_funcs = [lambda_, ]
        lambda_coeffs = [1, ]

    logger.debug('block op ... ')
    ops, block_ops = zip(*(discretize_lhs(lf, grid, block_space, local_patterns,
                                          boundary_patterns, coupling_matrices, kappa,
                                          local_all_neumann_boundary_info, boundary_info,
                                          coupling_patterns, solver_options)
                           for lf in lambda_funcs))
    global_operator = LincombOperator(ops, lambda_coeffs, solver_options=solver_options,
                                      name='GlobalOperator')
    logger.debug('block op global done ')
    block_op = LincombOperator(block_ops, lambda_coeffs, name='lhs',
                               solver_options=solver_options)
    logger.debug('block op done ')

    f = grid_and_problem_data['f']
    if isinstance(f, dict):
        f_funcs = f['functions']
        f_coeffs = f['coefficients']
    else:
        f_funcs = [f, ]
        f_coeffs = [1, ]
    rhss, block_rhss = zip(*(discretize_rhs(ff, grid, block_space, global_operator,
                                            block_ops, block_op)
                             for ff in f_funcs))
    global_rhs = LincombOperator(rhss, f_coeffs)
    block_rhs = LincombOperator(block_rhss, f_coeffs)

    solution_space = block_op.source

    ################ Assemble interpolation and reconstruction operators

    logger.info('discretizing interpolation ')

    # Oswald interpolation error operator
    oi_op = BlockDiagonalOperator(
        [OswaldInterpolationErrorOperator(ii, block_op.source, grid, block_space)
         for ii in range(num_global_subdomains)],
        name='oswald_interpolation_error')

    # Flux reconstruction operator
    fr_op = LincombOperator(
        [BlockDiagonalOperator(
            [FluxReconstructionOperator(ii, block_op.source, grid, block_space,
                                        global_rt_space, subdomain_rt_spaces,
                                        lambda_xi, kappa)
             for ii in range(num_global_subdomains)])
         for lambda_xi in lambda_funcs],
        lambda_coeffs,
        name='flux_reconstruction')

    ################ Assemble inner products and error estimator operators

    logger.info('discretizing inner products ')

    lambda_bar, lambda_hat = grid_and_problem_data['lambda_bar'], grid_and_problem_data['lambda_hat']
    mu_bar, mu_hat = grid_and_problem_data['mu_bar'], grid_and_problem_data['mu_hat']
    operators = {}
    local_projections = []
    local_rt_projections = []
    local_oi_projections = []
    local_div_ops = []
    local_l2_products = []
    data = dict(grid=grid,
                block_space=block_space,
                local_projections=local_projections,
                local_rt_projections=local_rt_projections,
                local_oi_projections=local_oi_projections,
                local_div_ops=local_div_ops,
                local_l2_products=local_l2_products)

    for ii in range(num_global_subdomains):

        neighborhood = grid.neighborhood_of(ii)

        ################ Assemble local inner products

        local_dg_space = block_space.local_space(ii)
        # we want a larger pattern to allow for axpy with other matrices
        tmp_local_matrix = Matrix(local_dg_space.size(), local_dg_space.size(),
                                  local_dg_space.compute_pattern('face_and_volume'))
        local_energy_product_ops = []
        local_energy_product_coeffs = []
        for func, coeff in zip(lambda_funcs, lambda_coeffs):
            local_energy_product_ops.append(
                make_elliptic_matrix_operator(func, kappa, tmp_local_matrix.copy(),
                                              local_dg_space, over_integrate=0))
            local_energy_product_coeffs.append(coeff)
            local_energy_product_ops.append(
                make_penalty_product_matrix_operator(grid, ii,
                                                     local_all_dirichlet_boundary_info,
                                                     local_dg_space, func, kappa,
                                                     over_integrate=0))
            local_energy_product_coeffs.append(coeff)
        local_l2_product = make_l2_matrix_operator(tmp_local_matrix.copy(), local_dg_space)
        del tmp_local_matrix
        local_assembler = make_system_assembler(local_dg_space)
        for local_product_op in local_energy_product_ops:
            local_assembler.append(local_product_op)
        local_assembler.append(local_l2_product)
        local_assembler.assemble()
        local_energy_product_name = 'local_energy_dg_product_{}'.format(ii)
        local_energy_product = LincombOperator(
            [DuneXTMatrixOperator(op.matrix(),
                                  source_id='domain_{}'.format(ii),
                                  range_id='domain_{}'.format(ii))
             for op in local_energy_product_ops],
            local_energy_product_coeffs,
            name=local_energy_product_name)
        operators[local_energy_product_name] = \
            local_energy_product.assemble(mu_bar).with_(name=local_energy_product_name)

        local_l2_product = DuneXTMatrixOperator(local_l2_product.matrix(),
                                                source_id='domain_{}'.format(ii),
                                                range_id='domain_{}'.format(ii))
        local_l2_products.append(local_l2_product)

        # assemble local elliptic product
        matrix = make_local_elliptic_matrix_operator(grid, ii, local_dg_space, lambda_bar, kappa)
        matrix.assemble()
        local_elliptic_product = DuneXTMatrixOperator(matrix.matrix(),
                                                      range_id='domain_{}'.format(ii),
                                                      source_id='domain_{}'.format(ii))

        ################ Assemble local to global projections

        # assemble projection (solution space) -> (ii space)
        local_projection = BlockProjectionOperator(block_op.source, ii)
        local_projections.append(local_projection)

        # assemble projection (RT spaces on neighborhoods of subdomains) -> (local RT space on ii)
        ops = np.full(num_global_subdomains, None)
        for kk in neighborhood:
            component = grid.neighborhood_of(kk).index(ii)
            assert fr_op.range.subspaces[kk].subspaces[component].id == 'LOCALRT_{}'.format(ii)
            ops[kk] = BlockProjectionOperator(fr_op.range.subspaces[kk], component)
        local_rt_projection = BlockRowOperator(ops, source_spaces=fr_op.range.subspaces,
                                               name='local_rt_projection_{}'.format(ii))
        local_rt_projections.append(local_rt_projection)

        # assemble projection (OI spaces on neighborhoods of subdomains) -> (ii space)
        ops = np.full(num_global_subdomains, None)
        for kk in neighborhood:
            component = grid.neighborhood_of(kk).index(ii)
            assert oi_op.range.subspaces[kk].subspaces[component].id == 'domain_{}'.format(ii)
            ops[kk] = BlockProjectionOperator(oi_op.range.subspaces[kk], component)
        local_oi_projection = BlockRowOperator(ops, source_spaces=oi_op.range.subspaces,
                                               name='local_oi_projection_{}'.format(ii))
        local_oi_projections.append(local_oi_projection)

        ################ Assemble additional operators for error estimation

        # assemble local divergence operator
        local_rt_space = global_rt_space.restrict_to_dd_subdomain_view(grid, ii)
        local_div_op = make_divergence_matrix_operator_on_subdomain(grid, ii, local_dg_space,
                                                                    local_rt_space)
        local_div_op.assemble()
        local_div_op = DuneXTMatrixOperator(local_div_op.matrix(),
                                            source_id='LOCALRT_{}'.format(ii),
                                            range_id='domain_{}'.format(ii),
                                            name='local_divergence_{}'.format(ii))
        local_div_ops.append(local_div_op)

        ################ Assemble error estimator operators -- Nonconformity

        operators['nc_{}'.format(ii)] = \
            Concatenation([local_oi_projection.T, local_elliptic_product, local_oi_projection],
                          name='nonconformity_{}'.format(ii))

        ################ Assemble error estimator operators -- Residual

        if len(f_funcs) == 1:
            assert f_coeffs[0] == 1
            local_div = Concatenation([local_div_op, local_rt_projection])
            local_rhs = VectorFunctional(block_rhs.operators[0]._array._blocks[ii])
            operators['r_fd_{}'.format(ii)] = \
                Concatenation([local_rhs, local_div], name='r1_{}'.format(ii))
            operators['r_dd_{}'.format(ii)] = \
                Concatenation([local_div.T, local_l2_product, local_div],
                              name='r2_{}'.format(ii))

        ################ Assemble error estimator operators -- Diffusive flux

        operators['df_aa_{}'.format(ii)] = LincombOperator(
            [assemble_estimator_diffusive_flux_aa(lambda_xi, lambda_xi_prime, grid, ii,
                                                  block_space, lambda_hat, kappa,
                                                  solution_space)
             for lambda_xi in lambda_funcs
             for lambda_xi_prime in lambda_funcs],
            [ProductParameterFunctional([c1, c2])
             for c1 in lambda_coeffs
             for c2 in lambda_coeffs],
            name='diffusive_flux_aa_{}'.format(ii))
        operators['df_bb_{}'.format(ii)] = \
            assemble_estimator_diffusive_flux_bb(grid, ii, subdomain_rt_spaces, lambda_hat,
                                                 kappa, local_rt_projection)
        operators['df_ab_{}'.format(ii)] = LincombOperator(
            [assemble_estimator_diffusive_flux_ab(lambda_xi, grid, ii, block_space,
                                                  subdomain_rt_spaces, lambda_hat, kappa,
                                                  local_rt_projection, local_projection)
             for lambda_xi in lambda_funcs],
            lambda_coeffs,
            name='diffusive_flux_ab_{}'.format(ii))

    ################ Final assembly

    logger.info('final assembly ')

    # instantiate error estimator
    min_diffusion_evs = np.array([min_diffusion_eigenvalue(grid, ii, lambda_hat, kappa)
                                  for ii in range(num_global_subdomains)])
    subdomain_diameters = np.array([subdomain_diameter(grid, ii)
                                    for ii in range(num_global_subdomains)])
    if len(f_funcs) == 1:
        assert f_coeffs[0] == 1
        local_eta_rf_squared = np.array([apply_l2_product(grid, ii, f_funcs[0], f_funcs[0],
                                                          over_integrate=2)
                                         for ii in range(num_global_subdomains)])
    else:
        local_eta_rf_squared = None
    estimator = EllipticEstimator(grid, min_diffusion_evs, subdomain_diameters,
                                  local_eta_rf_squared, lambda_coeffs, mu_bar, mu_hat, fr_op,
                                  oswald_interpolation_error=oi_op, mpi_comm=mpi_comm)
    l2_product = BlockDiagonalOperator(local_l2_products)

    # instantiate discretization
    neighborhoods = [grid.neighborhood_of(ii) for ii in range(num_global_subdomains)]
    local_boundary_info = make_subdomain_boundary_info(
        grid_and_problem_data['grid'], {'type': 'xt.grid.boundaryinfo.alldirichlet'})
    d = DuneDiscretization(global_operator=global_operator,
                           global_rhs=global_rhs,
                           neighborhoods=neighborhoods,
                           enrichment_data=(grid, local_boundary_info, lambda_, kappa, f,
                                            block_space),
                           operator=block_op,
                           rhs=block_rhs,
                           visualizer=DuneGDTVisualizer(block_space),
                           operators=operators,
                           products={'l2': l2_product},
                           estimator=estimator,
                           data=data)
    parameter_range = grid_and_problem_data['parameter_range']
    logger.info('final assembly B')
    d = d.with_(parameter_space=CubicParameterSpace(d.parameter_type, parameter_range[0],
                                                    parameter_range[1]))
    logger.info('final assembly C')
    return d, data


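# For reference, the ``grid_and_problem_data`` dict consumed above is expected to provide
# at least the keys accessed in ``discretize``: 'grid', 'boundary_info', 'lambda', 'kappa',
# 'f', 'lambda_bar', 'lambda_hat', 'mu_bar', 'mu_hat' and 'parameter_range', where 'lambda'
# and 'f' may each be either a single function or a dict with 'functions' and 'coefficients'
# describing an affine decomposition.

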
def solve_for_local_correction(self, subdomain, Us, mu=None, inverse_options=None):
    grid, local_boundary_info, affine_lambda, kappa, f, block_space = self.enrichment_data
    neighborhood = self.neighborhoods[subdomain]
    neighborhood_space = block_space.restricted_to_neighborhood(neighborhood)
    # Compute current solution restricted to the neighborhood to be usable as Dirichlet
    # values for the correction problem.
    current_solution = [U._list for U in Us]
    assert all(len(v) == 1 for v in current_solution)
    current_solution = [v[0].impl for v in current_solution]
    current_solution = neighborhood_space.project_onto_neighborhood(current_solution,
                                                                    neighborhood)
    current_solution = make_discrete_function(neighborhood_space, current_solution)
    # Solve the local corrector problem.
    # LHS
    ops = []
    for lambda_ in affine_lambda['functions']:
        ops.append(make_elliptic_swipdg_matrix_operator_on_neighborhood(
            grid, subdomain, local_boundary_info, neighborhood_space, lambda_, kappa,
            over_integrate=0))
    ops_coeffs = affine_lambda['coefficients'].copy()
    # RHS
    funcs = []
    # We don't have any boundary treatment right now. Things will probably
    # break in multiple ways in case of non-trivial boundary conditions,
    # so we can comment this out for now ..
    # for lambda_ in affine_lambda['functions']:
    #     funcs.append(make_elliptic_swipdg_vector_functional_on_neighborhood(
    #         grid, subdomain, local_boundary_info,
    #         neighborhood_space,
    #         current_solution, lambda_, kappa,
    #         over_integrate=0))
    # funcs_coeffs = affine_lambda['coefficients'].copy()
    funcs.append(make_l2_vector_functional_on_neighborhood(grid, subdomain,
                                                           neighborhood_space, f,
                                                           over_integrate=2))
    # funcs_coeffs.append(1.)
    funcs_coeffs = [1]
    # assemble in one grid walk
    neighborhood_assembler = make_neighborhood_system_assembler(grid, subdomain,
                                                                neighborhood_space)
    for op in ops:
        neighborhood_assembler.append(op)
    for func in funcs:
        neighborhood_assembler.append(func)
    neighborhood_assembler.assemble()
    # solve
    local_space_id = self.solution_space.subspaces[subdomain].id
    lhs = LincombOperator([DuneXTMatrixOperator(o.matrix(), source_id=local_space_id,
                                                range_id=local_space_id)
                           for o in ops], ops_coeffs)
    rhs = LincombOperator([VectorFunctional(lhs.range.make_array([v.vector()]))
                           for v in funcs], funcs_coeffs)
    correction = lhs.apply_inverse(rhs.as_source_array(mu), mu=mu,
                                   inverse_options=inverse_options)
    assert len(correction) == 1
    # restrict to subdomain
    local_sizes = [block_space.local_space(nn).size() for nn in neighborhood]
    local_starts = [int(np.sum(local_sizes[:nn])) for nn in range(len(local_sizes))]
    local_starts.append(neighborhood_space.mapper.size)
    localized_corrections_as_np = np.array(correction._list[0].impl, copy=False)
    localized_corrections_as_np = [localized_corrections_as_np[local_starts[nn]:local_starts[nn + 1]]
                                   for nn in range(len(local_sizes))]
    subdomain_index_in_neighborhood = np.where(np.array(list(neighborhood)) == subdomain)[0]
    assert len(subdomain_index_in_neighborhood) == 1
    subdomain_index_in_neighborhood = subdomain_index_in_neighborhood[0]
    subdomain_correction = Vector(local_sizes[subdomain_index_in_neighborhood], 0.)
    subdomain_correction_as_np = np.array(subdomain_correction, copy=False)
    subdomain_correction_as_np[:] = \
        localized_corrections_as_np[subdomain_index_in_neighborhood][:]
    return self.solution_space.subspaces[subdomain].make_array([subdomain_correction])


def discretize(grid_and_problem_data, polorder=1, solver_options=None):

    logger = getLogger('discretize_elliptic_swipdg.discretize')
    logger.info('discretizing ... ')
    over_integrate = 2

    grid, boundary_info = grid_and_problem_data['grid'], grid_and_problem_data['boundary_info']

    _lambda, kappa, f = (grid_and_problem_data['lambda'],
                         grid_and_problem_data['kappa'],
                         grid_and_problem_data['f'])
    lambda_bar, lambda_hat = (grid_and_problem_data['lambda_bar'],
                              grid_and_problem_data['lambda_hat'])
    mu_bar, mu_hat, parameter_range = (grid_and_problem_data['mu_bar'],
                                       grid_and_problem_data['mu_hat'],
                                       grid_and_problem_data['parameter_range'])
    space = make_dg_space(grid)

    # prepare operators and functionals
    if isinstance(_lambda, dict):
        system_ops = [make_elliptic_swipdg_matrix_operator(lambda_func, kappa, boundary_info,
                                                           space, over_integrate)
                      for lambda_func in _lambda['functions']]
        elliptic_ops = [make_elliptic_matrix_operator(lambda_func, kappa, space, over_integrate)
                        for lambda_func in _lambda['functions']]
    else:
        system_ops = [make_elliptic_swipdg_matrix_operator(_lambda, kappa, boundary_info,
                                                           space, over_integrate), ]
        elliptic_ops = [make_elliptic_matrix_operator(_lambda, kappa, space, over_integrate), ]
    if isinstance(f, dict):
        rhs_functionals = [make_l2_volume_vector_functional(f_func, space, over_integrate)
                           for f_func in f['functions']]
    else:
        rhs_functionals = [make_l2_volume_vector_functional(f, space, over_integrate), ]
    l2_matrix_with_system_pattern = system_ops[0].matrix().copy()
    l2_operator = make_l2_matrix_operator(l2_matrix_with_system_pattern, space)

    # assemble everything in one grid walk
    system_assembler = make_system_assembler(space)
    for op in system_ops:
        system_assembler.append(op)
    for op in elliptic_ops:
        system_assembler.append(op)
    for func in rhs_functionals:
        system_assembler.append(func)
    system_assembler.append(l2_operator)
    system_assembler.walk()

    # wrap everything
    if isinstance(_lambda, dict):
        op = LincombOperator([DuneXTMatrixOperator(o.matrix(),
                                                   dof_communicator=space.dof_communicator)
                              for o in system_ops],
                             _lambda['coefficients'])
        elliptic_op = LincombOperator([DuneXTMatrixOperator(o.matrix()) for o in elliptic_ops],
                                      _lambda['coefficients'])
    else:
        op = DuneXTMatrixOperator(system_ops[0].matrix())
        elliptic_op = DuneXTMatrixOperator(elliptic_ops[0].matrix())
    if isinstance(f, dict):
        rhs = LincombOperator([VectorFunctional(op.range.make_array([func.vector()]))
                               for func in rhs_functionals],
                              f['coefficients'])
    else:
        rhs = VectorFunctional(op.range.make_array([rhs_functionals[0].vector()]))
    operators = {'l2': DuneXTMatrixOperator(l2_matrix_with_system_pattern),
                 'elliptic': elliptic_op,
                 'elliptic_mu_bar': DuneXTMatrixOperator(elliptic_op.assemble(mu=mu_bar).matrix)}
    d = StationaryDiscretization(op, rhs, operators=operators,
                                 visualizer=DuneGDTVisualizer(space))
    d = d.with_(parameter_space=CubicParameterSpace(d.parameter_type, parameter_range[0],
                                                    parameter_range[1]))

    return d, {'space': space}


def _discretize_fenics(xblocks, yblocks, grid_num_intervals, element_order):

    # assemble system matrices - FEniCS code
    ########################################

    import dolfin as df

    mesh = df.UnitSquareMesh(grid_num_intervals, grid_num_intervals, 'crossed')
    V = df.FunctionSpace(mesh, 'Lagrange', element_order)
    u = df.TrialFunction(V)
    v = df.TestFunction(V)

    diffusion = df.Expression('(lower0 <= x[0]) * (open0 ? (x[0] < upper0) : (x[0] <= upper0)) *' +
                              '(lower1 <= x[1]) * (open1 ? (x[1] < upper1) : (x[1] <= upper1))',
                              lower0=0., upper0=0., open0=0,
                              lower1=0., upper1=0., open1=0,
                              element=df.FunctionSpace(mesh, 'DG', 0).ufl_element())

    def assemble_matrix(x, y, nx, ny):
        diffusion.user_parameters['lower0'] = x / nx
        diffusion.user_parameters['lower1'] = y / ny
        diffusion.user_parameters['upper0'] = (x + 1) / nx
        diffusion.user_parameters['upper1'] = (y + 1) / ny
        diffusion.user_parameters['open0'] = (x + 1 == nx)
        diffusion.user_parameters['open1'] = (y + 1 == ny)
        return df.assemble(df.inner(diffusion * df.nabla_grad(u), df.nabla_grad(v)) * df.dx)

    mats = [assemble_matrix(x, y, xblocks, yblocks)
            for x in range(xblocks) for y in range(yblocks)]
    mat0 = mats[0].copy()
    mat0.zero()
    h1_mat = df.assemble(df.inner(df.nabla_grad(u), df.nabla_grad(v)) * df.dx)
    l2_mat = df.assemble(u * v * df.dx)

    f = df.Constant(1.) * v * df.dx
    F = df.assemble(f)

    bc = df.DirichletBC(V, 0., df.DomainBoundary())
    for m in mats:
        bc.zero(m)
    bc.apply(mat0)
    bc.apply(h1_mat)
    bc.apply(F)

    # wrap everything as a pyMOR discretization
    ###########################################

    # FEniCS wrappers
    from pymor.gui.fenics import FenicsVisualizer
    from pymor.operators.fenics import FenicsMatrixOperator
    from pymor.vectorarrays.fenics import FenicsVector

    # generic pyMOR classes
    from pymor.discretizations.basic import StationaryDiscretization
    from pymor.operators.constructions import LincombOperator, VectorFunctional
    from pymor.parameters.functionals import ProjectionParameterFunctional
    from pymor.parameters.spaces import CubicParameterSpace
    from pymor.vectorarrays.list import ListVectorArray

    # define parameter functionals (same as in pymor.analyticalproblems.thermalblock)
    def parameter_functional_factory(x, y):
        return ProjectionParameterFunctional(component_name='diffusion',
                                             component_shape=(yblocks, xblocks),
                                             coordinates=(yblocks - y - 1, x),
                                             name='diffusion_{}_{}'.format(x, y))

    parameter_functionals = tuple(parameter_functional_factory(x, y)
                                  for x in range(xblocks) for y in range(yblocks))

    # wrap operators
    ops = [FenicsMatrixOperator(mat0, V, V)] + [FenicsMatrixOperator(m, V, V) for m in mats]
    op = LincombOperator(ops, (1.,) + parameter_functionals)
    rhs = VectorFunctional(ListVectorArray([FenicsVector(F, V)]))
    h1_product = FenicsMatrixOperator(h1_mat, V, V, name='h1_0_semi')
    l2_product = FenicsMatrixOperator(l2_mat, V, V, name='l2')

    # build discretization
    visualizer = FenicsVisualizer(V)
    parameter_space = CubicParameterSpace(op.parameter_type, 0.1, 1.)
    d = StationaryDiscretization(op, rhs,
                                 products={'h1_0_semi': h1_product,
                                           'l2': l2_product},
                                 parameter_space=parameter_space,
                                 visualizer=visualizer)

    return d
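

# Minimal usage sketch (hypothetical values; assumes the pyMOR StationaryDiscretization
# API built above, i.e. ``d.solve(mu)`` and ``d.visualize(U)``):
#
#     d = _discretize_fenics(xblocks=2, yblocks=2, grid_num_intervals=100, element_order=1)
#     mu = d.parameter_space.sample_randomly(1)[0]
#     U = d.solve(mu)
#     d.visualize(U)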