def discretize_rhs(f_func, grid, block_space, global_operator, block_ops, block_op):
    logger = getLogger('discretizing_rhs')
    logger.debug('...')
    local_subdomains, num_local_subdomains, num_global_subdomains = _get_subdomains(grid)
    local_vectors = [None] * num_global_subdomains
    rhs_vector = Vector(block_space.mapper.size, 0.)
    for ii in range(num_global_subdomains):
        # Assemble the local L2 volume functional for f_func on each subdomain
        # and scatter it into the global RHS vector.
        local_vectors[ii] = Vector(block_space.local_space(ii).size())
        l2_functional = make_l2_volume_vector_functional(f_func, local_vectors[ii],
                                                         block_space.local_space(ii),
                                                         over_integrate=2)
        l2_functional.assemble()
        block_space.mapper.copy_local_to_global(local_vectors[ii], ii, rhs_vector)
    # The monolithic functional on the range of the global operator ...
    rhs = VectorFunctional(global_operator.range.make_array([rhs_vector]))
    # ... and the same functional in blocked form.
    rhss = []
    for ii in range(num_global_subdomains):
        rhss.append(block_ops[0]._blocks[ii, ii].range.make_array([local_vectors[ii]]))
    block_rhs = VectorFunctional(block_op.range.make_array(rhss))
    return rhs, block_rhs
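# Hypothetical usage sketch (call-site wiring assumed, mirroring the outputs of
# discretize_lhs_for_lambda below):
#
#     rhs, block_rhs = discretize_rhs(f, grid, block_space, op, [block_op], block_op)
#
# `rhs` acts on the monolithic space of `global_operator`, while `block_rhs`
# represents the same L2 volume functional on the block space of `block_op`.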
def apply(self, U, mu=None):
    assert U in self.source
    result = self.range.empty(reserve=len(U))
    local_subdomains, num_local_subdomains, num_global_subdomains = _get_subdomains(self.grid)
    for u_i in range(len(U)):
        # Extend the local DoF vector by zero to a discrete function with
        # global support.
        subdomain_uhs_with_global_support = make_discrete_function(
            self.block_space,
            self.block_space.project_onto_neighborhood(
                [U._list[u_i].impl if nn == self.subdomain
                 else Vector(self.block_space.local_space(nn).size(), 0.)
                 for nn in range(num_global_subdomains)],
                [nn for nn in range(num_global_subdomains)]))
        # Reconstruct the diffusive flux in the neighborhood of the subdomain ...
        reconstructed_uh_kk_with_global_support = make_discrete_function(self.global_rt_space)
        apply_diffusive_flux_reconstruction_in_neighborhood(
            self.grid, self.subdomain, self.lambda_xi, self.kappa,
            subdomain_uhs_with_global_support, reconstructed_uh_kk_with_global_support)
        # ... and restrict it to one block per neighboring subdomain.
        blocks = [s.make_array([self.subdomain_rt_spaces[ii].restrict(
                      reconstructed_uh_kk_with_global_support.vector_copy())])  # NOQA
                  for s, ii in zip(self.range.subspaces,
                                   self.grid.neighborhood_of(self.subdomain))]
        result.append(self.range.make_array(blocks))
    return result
def apply(self, U, mu=None):
    assert U in self.source
    results = self.range.empty(reserve=len(U))
    for u_i in range(len(U)):
        result = self.range.zeros()
        result._blocks[self.neighborhood.index(self.subdomain)].axpy(1, U[u_i])
        for i_ii, ii in enumerate(self.neighborhood):
            ii_neighborhood = self.grid.neighborhood_of(ii)
            ii_neighborhood_space = self.block_space.restricted_to_neighborhood(ii_neighborhood)
            # Extend the local DoF vector by zero to a function supported on
            # the neighborhood of ii.
            subdomain_uh_with_neighborhood_support = make_discrete_function(
                ii_neighborhood_space,
                ii_neighborhood_space.project_onto_neighborhood(
                    [U._list[u_i].impl if nn == self.subdomain
                     else Vector(self.block_space.local_space(nn).size(), 0.)
                     for nn in ii_neighborhood],
                    ii_neighborhood))
            interpolated_u_vector = ii_neighborhood_space.project_onto_neighborhood(
                [Vector(self.block_space.local_space(nn).size(), 0.) for nn in ii_neighborhood],
                ii_neighborhood)
            interpolated_u = make_discrete_function(ii_neighborhood_space, interpolated_u_vector)
            apply_oswald_interpolation_operator(
                self.grid, ii,
                make_subdomain_boundary_info(self.grid,
                                             {'type': 'xt.grid.boundaryinfo.alldirichlet'}),
                subdomain_uh_with_neighborhood_support, interpolated_u)
            # Subtract the part of the interpolant that belongs to subdomain ii.
            local_sizes = np.array([ii_neighborhood_space.local_space(nn).size()
                                    for nn in ii_neighborhood])
            offsets = np.hstack(([0], np.cumsum(local_sizes)))
            ind = ii_neighborhood.index(ii)
            result._blocks[i_ii]._list[0].data[:] -= \
                np.frombuffer(interpolated_u_vector)[offsets[ind]:offsets[ind + 1]]
        results.append(result)
    return results
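# A worked instance of the offset bookkeeping above, with local space sizes
# [4, 5, 3] assumed purely for illustration:
#
#     local_sizes = np.array([4, 5, 3])
#     offsets = np.hstack(([0], np.cumsum(local_sizes)))   # -> [0, 4, 9, 12]
#
# so the DoFs of the k-th subdomain in the neighborhood occupy the slice
# offsets[k]:offsets[k + 1] of the concatenated neighborhood vector.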
def localize_to_subdomain_with_global_support(self, U, ss):
    assert len(U) == 1
    return make_discrete_function(
        self.block_space,
        self.block_space.project_onto_neighborhood(
            [U._list[0].impl if nn == ss
             else Vector(self.block_space.local_space(nn).size(), 0.)
             for nn in range(self.grid.num_subdomains)],
            [nn for nn in range(self.grid.num_subdomains)]))
def shape_functions(self, subdomain, order=0):
    assert 0 <= order <= 1
    local_space = self.solution_space.subspaces[subdomain]
    # Order 0: the constant function.
    U = local_space.make_array([Vector(local_space.dim, 1.)])
    if order == 1:
        from dune.gdt import make_discrete_function, project
        dune_local_space = self.visualizer.space.local_space(subdomain)
        tmp_discrete_function = make_discrete_function(dune_local_space)
        # Order 1: additionally project x, y and x*y onto the local space.
        for expression in ('x[0]', 'x[1]', 'x[0]*x[1]'):
            # Note: `grid` has to be available from the enclosing scope here.
            func = make_expression_function_1x1(grid, 'x', expression, order=2)
            project(func, tmp_discrete_function)
            U.append(local_space.make_array([tmp_discrete_function.vector_copy()]))
    return U
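# For order=1 this yields four shape functions per subdomain: the constant one
# plus the projections of x[0], x[1] and x[0]*x[1]. A minimal sanity check
# (hypothetical usage, `d` assumed to be the discretization object):
#
#     U = d.shape_functions(0, order=1)
#     assert len(U) == 4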
def solve_for_local_correction(self, subdomain, Us, mu=None, inverse_options=None):
    grid, local_boundary_info, affine_lambda, kappa, f, block_space = self.enrichment_data
    neighborhood = self.neighborhoods[subdomain]
    neighborhood_space = block_space.restricted_to_neighborhood(neighborhood)
    # Compute the current solution restricted to the neighborhood to be usable
    # as Dirichlet values for the correction problem.
    current_solution = [U._list for U in Us]
    # Use builtin all(): np.all() on a generator is always truthy.
    assert all(len(v) == 1 for v in current_solution)
    current_solution = [v[0].impl for v in current_solution]
    current_solution = neighborhood_space.project_onto_neighborhood(current_solution, neighborhood)
    current_solution = make_discrete_function(neighborhood_space, current_solution)
    # Solve the local corrector problem.
    #   LHS
    ops = []
    for lambda_ in affine_lambda['functions']:
        ops.append(make_elliptic_swipdg_matrix_operator_on_neighborhood(
            grid, subdomain, local_boundary_info, neighborhood_space,
            lambda_, kappa, over_integrate=0))
    ops_coeffs = affine_lambda['coefficients'].copy()
    #   RHS
    funcs = []
    # We don't have any boundary treatment right now. Things will probably
    # break in multiple ways for non-trivial boundary conditions, so this is
    # commented out for now:
    # for lambda_ in affine_lambda['functions']:
    #     funcs.append(make_elliptic_swipdg_vector_functional_on_neighborhood(
    #         grid, subdomain, local_boundary_info, neighborhood_space,
    #         current_solution, lambda_, kappa, over_integrate=0))
    # funcs_coeffs = affine_lambda['coefficients'].copy()
    funcs.append(make_l2_vector_functional_on_neighborhood(
        grid, subdomain, neighborhood_space, f, over_integrate=2))
    # funcs_coeffs.append(1.)
    funcs_coeffs = [1]
    #   assemble in one grid walk
    neighborhood_assembler = make_neighborhood_system_assembler(grid, subdomain, neighborhood_space)
    for op in ops:
        neighborhood_assembler.append(op)
    for func in funcs:
        neighborhood_assembler.append(func)
    neighborhood_assembler.assemble()
    #   solve
    local_space_id = self.solution_space.subspaces[subdomain].id
    lhs = LincombOperator(
        [DuneXTMatrixOperator(o.matrix(), source_id=local_space_id, range_id=local_space_id)
         for o in ops],
        ops_coeffs)
    rhs = LincombOperator(
        [VectorFunctional(lhs.range.make_array([v.vector()])) for v in funcs],
        funcs_coeffs)
    correction = lhs.apply_inverse(rhs.as_source_array(mu), mu=mu,
                                   inverse_options=inverse_options)
    assert len(correction) == 1
    # Restrict the correction to the subdomain.
    local_sizes = [block_space.local_space(nn).size() for nn in neighborhood]
    local_starts = [int(np.sum(local_sizes[:nn])) for nn in range(len(local_sizes))]
    local_starts.append(neighborhood_space.mapper.size)
    localized_corrections_as_np = np.array(correction._list[0].impl, copy=False)
    localized_corrections_as_np = [
        localized_corrections_as_np[local_starts[nn]:local_starts[nn + 1]]
        for nn in range(len(local_sizes))]
    subdomain_index_in_neighborhood = np.where(np.array(list(neighborhood)) == subdomain)[0]
    assert len(subdomain_index_in_neighborhood) == 1
    subdomain_index_in_neighborhood = subdomain_index_in_neighborhood[0]
    subdomain_correction = Vector(local_sizes[subdomain_index_in_neighborhood], 0.)
    subdomain_correction_as_np = np.array(subdomain_correction, copy=False)
    subdomain_correction_as_np[:] = \
        localized_corrections_as_np[subdomain_index_in_neighborhood][:]
    return self.solution_space.subspaces[subdomain].make_array([subdomain_correction])
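# The corrector LHS above is affinely decomposed, lhs = sum_q theta_q(mu) A_q;
# pyMOR's LincombOperator assembles this linear combination for the given mu
# inside apply_inverse before solving. Hypothetical call (the `reductor` name
# is assumed):
#
#     correction = reductor.solve_for_local_correction(ss, Us, mu=mu)
#
# which returns a one-element array in self.solution_space.subspaces[ss].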
def discretize_lhs_for_lambda(lambda_):
    # Create local, boundary and coupling matrices matching the precomputed
    # sparsity patterns.
    local_matrices = [None] * grid.num_subdomains
    local_vectors = [None] * grid.num_subdomains
    boundary_matrices = {}
    coupling_matrices_in_in = {}
    coupling_matrices_out_out = {}
    coupling_matrices_in_out = {}
    coupling_matrices_out_in = {}
    for ii in range(grid.num_subdomains):
        local_matrices[ii] = Matrix(block_space.local_space(ii).size(),
                                    block_space.local_space(ii).size(),
                                    local_patterns[ii])
        local_vectors[ii] = Vector(block_space.local_space(ii).size())
        if ii in grid.boundary_subdomains():
            boundary_matrices[ii] = Matrix(block_space.local_space(ii).size(),
                                           block_space.local_space(ii).size(),
                                           boundary_patterns[ii])
        for jj in grid.neighboring_subdomains(ii):
            if ii < jj:  # Assemble primally (visit each coupling only once).
                coupling_matrices_in_in[(ii, jj)] = Matrix(block_space.local_space(ii).size(),
                                                           block_space.local_space(ii).size(),
                                                           coupling_patterns_in_in[(ii, jj)])
                coupling_matrices_out_out[(ii, jj)] = Matrix(block_space.local_space(jj).size(),
                                                             block_space.local_space(jj).size(),
                                                             coupling_patterns_out_out[(ii, jj)])
                coupling_matrices_in_out[(ii, jj)] = Matrix(block_space.local_space(ii).size(),
                                                            block_space.local_space(jj).size(),
                                                            coupling_patterns_in_out[(ii, jj)])
                coupling_matrices_out_in[(ii, jj)] = Matrix(block_space.local_space(jj).size(),
                                                            block_space.local_space(ii).size(),
                                                            coupling_patterns_out_in[(ii, jj)])

    def assemble_local_contributions(subdomain):
        ipdg_operator = make_elliptic_swipdg_matrix_operator(
            lambda_, kappa, local_all_neumann_boundary_info,
            local_matrices[subdomain], block_space.local_space(subdomain))
        l2_functional = make_l2_volume_vector_functional(
            f, local_vectors[subdomain], block_space.local_space(subdomain))
        local_assembler = make_system_assembler(block_space.local_space(subdomain))
        local_assembler.append(ipdg_operator)
        local_assembler.append(l2_functional)
        local_assembler.assemble()

    for ii in range(grid.num_subdomains):
        assemble_local_contributions(ii)

    local_ipdg_coupling_operator = make_local_elliptic_swipdg_coupling_operator(lambda_, kappa)

    def assemble_coupling_contributions(subdomain, neighboring_subdomain):
        coupling_assembler = block_space.coupling_assembler(subdomain, neighboring_subdomain)
        coupling_assembler.append(local_ipdg_coupling_operator,
                                  coupling_matrices_in_in[(subdomain, neighboring_subdomain)],
                                  coupling_matrices_out_out[(subdomain, neighboring_subdomain)],
                                  coupling_matrices_in_out[(subdomain, neighboring_subdomain)],
                                  coupling_matrices_out_in[(subdomain, neighboring_subdomain)])
        coupling_assembler.assemble()

    for ii in range(grid.num_subdomains):
        for jj in grid.neighboring_subdomains(ii):
            if ii < jj:  # Assemble primally (visit each coupling only once).
                assemble_coupling_contributions(ii, jj)

    local_ipdg_boundary_operator = make_local_elliptic_swipdg_boundary_operator(lambda_, kappa)
    apply_on_dirichlet_intersections = make_apply_on_dirichlet_intersections(boundary_info)

    def assemble_boundary_contributions(subdomain):
        boundary_assembler = block_space.boundary_assembler(subdomain)
        boundary_assembler.append(local_ipdg_boundary_operator,
                                  boundary_matrices[subdomain],
                                  apply_on_dirichlet_intersections)
        boundary_assembler.assemble()

    for ii in grid.boundary_subdomains():
        assemble_boundary_contributions(ii)

    # Scatter the local patterns into the global sparsity pattern ...
    global_pattern = SparsityPatternDefault(block_space.mapper.size)
    for ii in range(grid.num_subdomains):
        block_space.mapper.copy_local_to_global(local_patterns[ii], ii, global_pattern)
        if ii in grid.boundary_subdomains():
            block_space.mapper.copy_local_to_global(boundary_patterns[ii], ii, global_pattern)
        for jj in grid.neighboring_subdomains(ii):
            if ii < jj:  # Assemble primally (visit each coupling only once).
                block_space.mapper.copy_local_to_global(coupling_patterns_in_in[(ii, jj)],
                                                        ii, ii, global_pattern)
                block_space.mapper.copy_local_to_global(coupling_patterns_out_out[(ii, jj)],
                                                        jj, jj, global_pattern)
                block_space.mapper.copy_local_to_global(coupling_patterns_in_out[(ii, jj)],
                                                        ii, jj, global_pattern)
                block_space.mapper.copy_local_to_global(coupling_patterns_out_in[(ii, jj)],
                                                        jj, ii, global_pattern)

    # ... and the assembled local contributions into the global system.
    system_matrix = Matrix(block_space.mapper.size, block_space.mapper.size, global_pattern)
    rhs_vector = Vector(block_space.mapper.size, 0.)
    for ii in range(grid.num_subdomains):
        block_space.mapper.copy_local_to_global(local_matrices[ii], local_patterns[ii],
                                                ii, system_matrix)
        block_space.mapper.copy_local_to_global(local_vectors[ii], ii, rhs_vector)
        if ii in grid.boundary_subdomains():
            block_space.mapper.copy_local_to_global(boundary_matrices[ii], boundary_patterns[ii],
                                                    ii, ii, system_matrix)
        for jj in grid.neighboring_subdomains(ii):
            if ii < jj:  # Assemble primally (visit each coupling only once).
                block_space.mapper.copy_local_to_global(coupling_matrices_in_in[(ii, jj)],
                                                        coupling_patterns_in_in[(ii, jj)],
                                                        ii, ii, system_matrix)
                block_space.mapper.copy_local_to_global(coupling_matrices_out_out[(ii, jj)],
                                                        coupling_patterns_out_out[(ii, jj)],
                                                        jj, jj, system_matrix)
                block_space.mapper.copy_local_to_global(coupling_matrices_in_out[(ii, jj)],
                                                        coupling_patterns_in_out[(ii, jj)],
                                                        ii, jj, system_matrix)
                block_space.mapper.copy_local_to_global(coupling_matrices_out_in[(ii, jj)],
                                                        coupling_patterns_out_in[(ii, jj)],
                                                        jj, ii, system_matrix)

    op = DuneXTMatrixOperator(system_matrix)

    # Additionally build the block-structured variant of the same system.
    mats = np.full((grid.num_subdomains, grid.num_subdomains), None)
    for ii in range(grid.num_subdomains):
        for jj in range(ii, grid.num_subdomains):
            if ii == jj:
                mats[ii, ii] = Matrix(block_space.local_space(ii).size(),
                                      block_space.local_space(ii).size(),
                                      local_patterns[ii])
            elif (ii, jj) in coupling_matrices_in_out:
                mats[ii, jj] = Matrix(block_space.local_space(ii).size(),
                                      block_space.local_space(jj).size(),
                                      coupling_patterns_in_out[(ii, jj)])
                mats[jj, ii] = Matrix(block_space.local_space(jj).size(),
                                      block_space.local_space(ii).size(),
                                      coupling_patterns_out_in[(ii, jj)])
    for ii in range(grid.num_subdomains):
        for jj in range(ii, grid.num_subdomains):
            if ii == jj:
                mats[ii, ii].axpy(1., local_matrices[ii])
                if ii in boundary_matrices:
                    mats[ii, ii].axpy(1., boundary_matrices[ii])
            elif (ii, jj) in coupling_matrices_in_out:
                mats[ii, ii].axpy(1., coupling_matrices_in_in[(ii, jj)])
                mats[jj, jj].axpy(1., coupling_matrices_out_out[(ii, jj)])
                mats[ii, jj].axpy(1., coupling_matrices_in_out[(ii, jj)])
                mats[jj, ii].axpy(1., coupling_matrices_out_in[(ii, jj)])
    ops = np.full((grid.num_subdomains, grid.num_subdomains), None)
    for (ii, jj), mat in np.ndenumerate(mats):
        ops[ii, jj] = DuneXTMatrixOperator(mat,
                                           source_id='domain_{}'.format(jj),
                                           range_id='domain_{}'.format(ii)) if mat else None
    block_op = BlockOperator(ops)

    rhs = VectorFunctional(op.range.make_array([rhs_vector]))
    rhss = []
    for ii in range(grid.num_subdomains):
        rhss.append(ops[ii, ii].range.make_array([local_vectors[ii]]))
    block_rhs = VectorFunctional(block_op.range.make_array(rhss))
    return op, block_op, rhs, block_rhs
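# Hypothetical assembly of the affinely decomposed system from the
# per-component discretizations above (call-site names assumed; LincombOperator
# is used as in solve_for_local_correction):
#
#     results = [discretize_lhs_for_lambda(lambda_)
#                for lambda_ in affine_lambda['functions']]
#     ops, block_ops, rhss_, block_rhss = zip(*results)
#     op = LincombOperator(ops, affine_lambda['coefficients'])
#     block_op = LincombOperator(block_ops, affine_lambda['coefficients'])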
# (Excerpt: the following two loops presumably run inside an outer loop over
# band offsets jj, which is not part of this excerpt.)
for ii in range(jj, N):
    pattern.insert(ii, ii - jj)
for ii in range(N - jj):
    pattern.insert(ii, ii + jj)
pattern.sort()
mat = Matrix(N, N, pattern)
for ii in range(N):
    mat.unit_row(ii)
print('done (took {}s)'.format(time.time() - t))

print('preparing {} input vectors ... '.format(M), end='')
sys.stdout.flush()
t = time.time()
Us = [Vector(N, ii) for ii in range(M)]
# Vs = [Vector(N, 0.) for ii in range(M)]
print('done (took {}s)'.format(time.time() - t))

print('doing mv with {} threads ... '.format(W), end='')
sys.stdout.flush()

def do_work(ii):
    V = Us[ii].copy()
    mat.mv(Us[ii], V)
    return V

t = time.time()
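# The excerpt ends before the actual dispatch. A minimal sketch of how do_work
# might be fanned out over W threads (assumed; the original dispatch code is
# not part of this excerpt):
from concurrent.futures import ThreadPoolExecutor

with ThreadPoolExecutor(max_workers=W) as executor:
    Vs = list(executor.map(do_work, range(M)))
print('done (took {}s)'.format(time.time() - t))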