def jacobian(self, U, mu=None):
    mu = self.parse_parameter(mu)
    options = self.solver_options.get('jacobian') if self.solver_options else None
    if len(self.interpolation_dofs) == 0:
        if isinstance(self.source, NumpyVectorSpace) and isinstance(self.range, NumpyVectorSpace):
            return NumpyMatrixOperator(np.zeros((self.range.dim, self.source.dim)),
                                       solver_options=options,
                                       source_id=self.source.id, range_id=self.range.id,
                                       name=self.name + '_jacobian')
        else:
            return ZeroOperator(self.range, self.source, name=self.name + '_jacobian')
    elif hasattr(self, 'operator'):
        return EmpiricalInterpolatedOperator(self.operator.jacobian(U, mu=mu), self.interpolation_dofs,
                                             self.collateral_basis, self.triangular,
                                             solver_options=options, name=self.name + '_jacobian')
    else:
        restricted_source = self.restricted_operator.source
        U_dofs = restricted_source.make_array(U.dofs(self.source_dofs))
        JU = self.restricted_operator.jacobian(U_dofs, mu=mu) \
                                     .apply(restricted_source.make_array(np.eye(len(self.source_dofs))))
        try:
            if self.triangular:
                interpolation_coefficients = solve_triangular(self.interpolation_matrix, JU.to_numpy().T,
                                                              lower=True, unit_diagonal=True).T
            else:
                interpolation_coefficients = solve(self.interpolation_matrix, JU.to_numpy().T).T
        except ValueError:  # this exception occurs when JU contains NaNs ...
            interpolation_coefficients = np.empty((len(JU), len(self.collateral_basis))) + np.nan
        J = self.collateral_basis.lincomb(interpolation_coefficients)
        if isinstance(J.space, NumpyVectorSpace):
            J = NumpyMatrixOperator(J.to_numpy().T, range_id=self.range.id)
        else:
            J = VectorArrayOperator(J)
        return Concatenation([J, ComponentProjection(self.source_dofs, self.source)],
                             solver_options=options, name=self.name + '_jacobian')
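# Standalone sketch (not part of the original source): the coefficient solve used in the
# jacobian above, in plain NumPy/SciPy. With a unit lower triangular interpolation matrix,
# the collateral-basis coefficients of evaluations at the interpolation DOFs are obtained
# by forward substitution; all names and sizes here are illustrative.
import numpy as np
from scipy.linalg import solve_triangular

np.random.seed(0)
M = np.tril(np.random.rand(4, 4), -1) + np.eye(4)   # unit lower triangular interpolation matrix
F = np.random.rand(2, 4)                            # two rows of evaluations at the interpolation DOFs
coeffs = solve_triangular(M, F.T, lower=True, unit_diagonal=True).T
assert np.allclose(coeffs @ M.T, F)                 # the lincomb reproduces the DOF values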
def apply_inverse_adjoint(self, U, ind=None, mu=None, source_product=None, range_product=None,
                          least_squares=False):
    if source_product or range_product:
        return super(NumpyMatrixOperator, self).apply_inverse_adjoint(U, ind=ind, mu=mu,
                                                                      source_product=source_product,
                                                                      range_product=range_product,
                                                                      least_squares=least_squares)
    else:
        options = {'inverse': self.solver_options.get('inverse_adjoint') if self.solver_options else None}
        adjoint_op = NumpyMatrixOperator(self._matrix.T, solver_options=options)
        return adjoint_op.apply_inverse(U, ind=ind, mu=mu, least_squares=least_squares)
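# Standalone sketch (not part of the original source): the identity exploited above, namely
# that applying the inverse of the adjoint amounts to solving with the transposed matrix.
# Plain NumPy, real-valued case only; values are illustrative.
import numpy as np

A = np.array([[3., 1.], [0., 2.]])
u = np.array([1., 4.])
x = np.linalg.solve(A.T, u)        # apply_inverse of the adjoint operator
assert np.allclose(A.T @ x, u)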
def test_complex():
    np.random.seed(0)
    I = np.eye(5)
    A = np.random.randn(5, 5)
    B = np.random.randn(5, 5)
    C = np.random.randn(3, 5)

    Iop = NumpyMatrixOperator(I)
    Aop = NumpyMatrixOperator(A)
    Bop = NumpyMatrixOperator(B)
    Cva = NumpyVectorSpace.from_numpy(C)

    # lincombs
    assert not np.iscomplexobj((Iop * 1 + Bop * 1).assemble().matrix)
    assert not np.iscomplexobj((Aop * 1 + Bop * 1).assemble().matrix)
    assert np.iscomplexobj((Aop * (1+0j) + Bop * (1+0j)).assemble().matrix)
    assert np.iscomplexobj((Aop * 1j + Bop * 1).assemble().matrix)
    assert np.iscomplexobj((Bop * 1 + Aop * 1j).assemble().matrix)

    # apply_inverse
    assert not np.iscomplexobj(Aop.apply_inverse(Cva).to_numpy())
    assert np.iscomplexobj((Aop * 1j).apply_inverse(Cva).to_numpy())
    assert np.iscomplexobj((Aop * 1 + Bop * 1j).assemble().apply_inverse(Cva).to_numpy())
    assert np.iscomplexobj(Aop.apply_inverse(Cva * 1j).to_numpy())

    # append
    for rsrv in (0, 10):
        for o_ind in (slice(None), [0]):
            va = NumpyVectorSpace(5).empty(reserve=rsrv)
            va.append(Cva)
            D = np.random.randn(1, 5) + 1j * np.random.randn(1, 5)
            Dva = NumpyVectorSpace.from_numpy(D)
            assert not np.iscomplexobj(va.to_numpy())
            assert np.iscomplexobj(Dva.to_numpy())
            va.append(Dva[o_ind])
            assert np.iscomplexobj(va.to_numpy())

    # scal
    assert not np.iscomplexobj(Cva.to_numpy())
    assert np.iscomplexobj((Cva * 1j).to_numpy())
    assert np.iscomplexobj((Cva * (1 + 0j)).to_numpy())

    # axpy
    assert not np.iscomplexobj(Cva.to_numpy())
    Cva[0].axpy(1, Dva)
    assert np.iscomplexobj(Cva.to_numpy())

    Cva = NumpyVectorSpace.from_numpy(C)
    assert not np.iscomplexobj(Cva.to_numpy())
    Cva[0].axpy(1j, Dva)
    assert np.iscomplexobj(Cva.to_numpy())
def apply_adjoint(self, V, mu=None):
    assert V in self.range
    if self.functional:
        U = super().apply_adjoint(V, mu=mu)
        return self.source.from_numpy(U.to_numpy())
    adj_op = NumpyMatrixOperator(self.matrix).H
    U = [adj_op.apply(adj_op.source.make_array(v._array)).to_numpy().ravel()
         for v in V._list]
    if self.vector:
        return self.source.make_array(np.array(U)) if len(U) > 0 else self.source.empty()
    else:
        return self.source.from_numpy(U)
def apply_inverse(self, V, mu=None, least_squares=False):
    assert V in self.range
    assert not self.functional and not self.vector
    if V.dim == 0:
        if self.source.dim == 0 and least_squares:
            return self.source.make_array([np.zeros(0) for _ in range(len(V))])
        else:
            raise InversionError
    op = NumpyMatrixOperator(self.matrix, solver_options=self.solver_options)
    return self.source.make_array([op.apply_inverse(NumpyVectorSpace.make_array(v._array),
                                                    least_squares=least_squares).to_numpy().ravel()
                                   for v in V._list])
def action_NumpyMatrixOperator(self, op, dim_range=None, dim_source=None):
    # copy instead of just slicing the matrix to ensure contiguous memory
    return NumpyMatrixOperator(op._matrix[:dim_range, :dim_source].copy(),
                               source_id=op.source.id, range_id=op.range.id,
                               solver_options=op.solver_options, name=op.name)
def thermalblock_vector_factory(xblocks, yblocks, diameter, seed):
    from pymor.operators.constructions import VectorOperator
    _, _, U, V, sp, rp = thermalblock_factory(xblocks, yblocks, diameter, seed)
    op = VectorOperator(U[0])
    U = op.source.make_array(np.random.random((7, 1)))
    sp = NumpyMatrixOperator(np.eye(1) * 2)
    return op, None, U, V, sp, rp
def numpy_matrix_operator_with_arrays_factory(dim_source, dim_range, count_source, count_range, seed,
                                              source_id=None, range_id=None):
    np.random.seed(seed)
    op = NumpyMatrixOperator(np.random.random((dim_range, dim_source)),
                             source_id=source_id, range_id=range_id)
    s = op.source.make_array(np.random.random((count_source, dim_source)))
    r = op.range.make_array(np.random.random((count_range, dim_range)))
    return op, None, s, r
def test_samdp(n, m, k, wanted, with_E, which):
    if not with_E:
        A = conv_diff_1d_fd(n, 1, 1)
        E = sps.eye(n)
        Eop = None
    else:
        A, E = conv_diff_1d_fem(n, 1, 1)
        Eop = NumpyMatrixOperator(E)

    np.random.seed(0)
    B = np.random.randn(n, m)
    C = np.random.randn(k, n)

    Aop = NumpyMatrixOperator(A)
    Bva = Aop.source.from_numpy(B.T)
    Cva = Aop.source.from_numpy(C)

    dom_poles, dom_res, dom_rev, dom_lev = samdp(Aop, Eop, Bva, Cva, wanted, which=which)
    dom_absres = spla.norm(dom_res, ord=2, axis=(1, 2))

    poles, lev, rev = spla.eig(A.toarray(), E.toarray(), left=True)
    absres = np.empty(len(poles))
    for i in range(len(poles)):
        lev[:, i] = lev[:, i] * (1 / lev[:, i].conj().dot(E @ rev[:, i]))
        absres[i] = spla.norm(np.outer(C @ rev[:, i], lev[:, i] @ B), ord=2)

    if which == 'NR':
        val = absres / np.abs(np.real(poles))
        dom_val = dom_absres / np.abs(np.real(dom_poles))
    elif which == 'NS':
        val = absres / np.abs(poles)
        dom_val = dom_absres / np.abs(dom_poles)
    elif which == 'NM':
        val = absres
        dom_val = dom_absres

    # check if computed poles are approximately more dominant than others on average
    assert np.average(val) * 0.9 < np.average(dom_val)
def jacobian(self, U, mu=None):
    assert U in self.source and len(U) == 1
    UU = self.op.source.zeros()
    UU._list[0].real_part.impl[:] = np.ascontiguousarray(U.to_numpy()[0])
    JJ = self.op.jacobian(UU, mu=mu)
    return NumpyMatrixOperator(JJ.matrix.array()[self.restricted_range_dofs, :])
def unblock_op(op, sparse=False):
    assert op._blocks[0][0] is not None
    if isinstance(op._blocks[0][0], LincombOperator):
        coefficients = op._blocks[0][0].coefficients
        operators = [None for kk in np.arange(len(op._blocks[0][0].operators))]
        for kk in np.arange(len(op._blocks[0][0].operators)):
            ops = [[op._blocks[ii][jj].operators[kk] if op._blocks[ii][jj] is not None else None
                    for jj in np.arange(op.num_source_blocks)]
                   for ii in np.arange(op.num_range_blocks)]
            operators[kk] = unblock_op(BlockOperator(ops))
        return LincombOperator(operators=operators, coefficients=coefficients)
    else:
        assert all(all([isinstance(block, NumpyMatrixOperator) if block is not None else True
                        for block in row])
                   for row in op._blocks)
        if op.source.dim == 0 and op.range.dim == 0:
            return NumpyMatrixOperator(np.zeros((0, 0)))
        elif op.source.dim == 1:
            # blocks are column vectors of shape (range_dim_i, 1), so stack them vertically
            mat = np.concatenate([op._blocks[ii][0]._matrix
                                  for ii in np.arange(op.num_range_blocks)],
                                 axis=0)
        elif op.range.dim == 1:
            # blocks are row vectors of shape (1, source_dim_j), so stack them horizontally
            mat = np.concatenate([op._blocks[0][jj]._matrix
                                  for jj in np.arange(op.num_source_blocks)],
                                 axis=1)
        else:
            mat = bmat([[coo_matrix(op._blocks[ii][jj]._matrix)
                         if op._blocks[ii][jj] is not None
                         else coo_matrix((op._range_dims[ii], op._source_dims[jj]))
                         for jj in np.arange(op.num_source_blocks)]
                        for ii in np.arange(op.num_range_blocks)])
            mat = mat.toarray()
        return NumpyMatrixOperator(mat)
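# Standalone sketch (not part of the original source): the general branch above assembles
# the unblocked matrix with scipy.sparse.bmat, padding missing blocks with appropriately
# sized empty coo_matrix blocks. A minimal version of that idea with illustrative data:
import numpy as np
from scipy.sparse import bmat, coo_matrix

blocks = [[coo_matrix(np.eye(2)), None],                 # None stands for a zero block
          [None, coo_matrix(np.ones((3, 3)))]]
full = bmat(blocks).toarray()                            # bmat infers the None blocks' shapes
assert full.shape == (5, 5)
assert np.allclose(full[:2, :2], np.eye(2)) and np.allclose(full[2:, 2:], 1.)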
def _generate_reduced_localized_operators(self):
    for key in self.reduction_todo_list:
        self.reduced_localized_operators[key] = NumpyMatrixOperator(
            self.localized_operators[key].apply2(
                self.range_bases[self.range_spaces.index(key[2])],
                self.source_bases[self.source_spaces.index(key[1])]))
    self.reduction_todo_list = set()
def numpy_matrix_operator_with_arrays_factory(dim_source, dim_range, count_source, count_range, seed):
    np.random.seed(seed)
    op = NumpyMatrixOperator(np.random.random((dim_range, dim_source)))
    s = NumpyVectorArray(np.random.random((count_source, dim_source)), copy=False)
    r = NumpyVectorArray(np.random.random((count_range, dim_range)), copy=False)
    return op, None, s, r
def thermalblock_vectorfunc_factory(product, xblocks, yblocks, diameter, seed):
    from pymor.operators.constructions import VectorFunctional
    _, _, U, V, sp, rp = thermalblock_factory(xblocks, yblocks, diameter, seed)
    op = VectorFunctional(U.copy(ind=0), product=sp if product else None)
    U = V
    V = NumpyVectorArray(np.random.random((7, 1)), copy=False)
    sp = rp
    rp = NumpyMatrixOperator(np.eye(1) * 2)
    return op, None, U, V, sp, rp
def jacobian(self, U, mu=None):
    if self.reaction_function_derivative is None:
        raise NotImplementedError
    U = U.to_numpy()
    A = dia_matrix((self.reaction_function_derivative.evaluate(U.reshape(U.shape + (1,)), mu=mu), [0]),
                   shape=(self.grid.size(0),) * 2)
    return NumpyMatrixOperator(A, source_id=self.source.id, range_id=self.range.id)
def test_ricc_lrcf(n, m, p, with_E, with_R, with_S, trans, solver):
    _check_availability(solver)

    if not with_E:
        A = conv_diff_1d_fd(n, 1, 1)
        E = None
    else:
        A, E = conv_diff_1d_fem(n, 1, 1)

    np.random.seed(0)
    B = np.random.randn(n, m)
    C = np.random.randn(p, n)
    D = np.random.randn(p, m)
    if not trans:
        R0 = np.random.randn(p, p)
        R = D.dot(D.T) + R0.dot(R0.T) if with_R else None
        S = B.dot(D.T) if with_S else None
    else:
        R0 = np.random.randn(m, m)
        R = D.T.dot(D) + R0.dot(R0.T) if with_R else None
        S = C.T.dot(D) if with_S else None

    Aop = NumpyMatrixOperator(A)
    Eop = NumpyMatrixOperator(E) if with_E else None
    Bva = Aop.source.from_numpy(B.T)
    Cva = Aop.source.from_numpy(C)
    Sva = Aop.source.from_numpy(S.T) if with_S else None

    try:
        Zva = solve_ricc_lrcf(Aop, Eop, Bva, Cva, R, Sva, trans=trans, options=solver)
    except NotImplementedError:
        return

    assert len(Zva) <= n
    Z = Zva.to_numpy().T
    assert relative_residual(A, E, B, C, R, S, Z, trans) < 1e-8
def apply_inverse(self, V, mu=None, least_squares=False):
    assert V in self.range
    assert not self.functional and not self.vector
    if V.dim == 0:
        if self.source.dim == 0 and least_squares:
            return self.source.make_array([np.zeros(0) for _ in range(len(V))])
        else:
            raise InversionError
    op = NumpyMatrixOperator(self.matrix, solver_options=self.solver_options)
    return self.source.make_array([op.apply_inverse(NumpyVectorSpace.make_array(v._array),
                                                    least_squares=least_squares).data.ravel()
                                   for v in V._list])
def restricted_to_subbasis(self, dim, d):
    cr = 1 if not d.rhs.parametric else len(d.rhs.operators)
    co = 1 if not d.operator.parametric else len(d.operator.operators)
    old_dim = d.operator.source.dim

    indices = np.concatenate((np.arange(cr),
                              ((np.arange(co) * old_dim)[..., np.newaxis] + np.arange(dim)).ravel() + cr))
    matrix = self.estimator_matrix.matrix[indices, :][:, indices]

    return SimpleCoerciveRBEstimator(NumpyMatrixOperator(matrix), self.coercivity_estimator)
def test_project_array_with_product():
    np.random.seed(123)
    U = NumpyVectorSpace.from_numpy(np.random.random((1, 10)))
    basis = NumpyVectorSpace.from_numpy(np.random.random((3, 10)))
    product = np.random.random((10, 10))
    product = NumpyMatrixOperator(product.T.dot(product))
    U_p = project_array(U, basis, product=product, orthonormal=False)
    onb = gram_schmidt(basis, product=product)
    U_p2 = project_array(U, onb, product=product, orthonormal=True)
    assert np.all(relative_error(U_p, U_p2, product) < 1e-10)
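# Standalone sketch (not part of the original source): what projecting onto a
# non-orthonormal basis amounts to, written with plain NumPy. With the basis vectors as
# rows of B and an SPD product matrix P, the best approximation of u in span(B) w.r.t.
# the P-inner product has coefficients solving the Gramian system (B P B^T) c = B P u.
# All names here are illustrative.
import numpy as np

np.random.seed(123)
B = np.random.random((3, 10))           # basis vectors as rows
P = np.random.random((10, 10))
P = P.T @ P                             # SPD product matrix
u = np.random.random(10)

c = np.linalg.solve(B @ P @ B.T, B @ P @ u)
u_proj = c @ B
# the residual is P-orthogonal to the basis
assert np.allclose(B @ P @ (u - u_proj), 0)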
def action_ZeroOperator(self, op):
    range_basis, source_basis = self.range_basis, self.source_basis
    if source_basis is not None and range_basis is not None:
        from pymor.operators.numpy import NumpyMatrixOperator
        return NumpyMatrixOperator(np.zeros((len(range_basis), len(source_basis))), name=op.name)
    else:
        new_source = NumpyVectorSpace(len(source_basis)) if source_basis is not None else op.source
        new_range = NumpyVectorSpace(len(range_basis)) if range_basis is not None else op.range
        return ZeroOperator(new_range, new_source, name=op.name)
def numpy_matrix_operator_with_arrays_and_products_factory(dim_source, dim_range, count_source, count_range,
                                                           seed, source_id=None, range_id=None):
    from scipy.linalg import eigh
    op, _, U, V = numpy_matrix_operator_with_arrays_factory(dim_source, dim_range, count_source, count_range,
                                                            seed, source_id=source_id, range_id=range_id)
    if dim_source > 0:
        while True:
            sp = np.random.random((dim_source, dim_source))
            sp = sp.T.dot(sp)
            evals = eigh(sp, eigvals_only=True)
            if np.min(evals) > 1e-6:
                break
        sp = NumpyMatrixOperator(sp, source_id=source_id, range_id=source_id)
    else:
        sp = NumpyMatrixOperator(np.zeros((0, 0)), source_id=source_id, range_id=source_id)
    if dim_range > 0:
        while True:
            rp = np.random.random((dim_range, dim_range))
            rp = rp.T.dot(rp)
            evals = eigh(rp, eigvals_only=True)
            if np.min(evals) > 1e-6:
                break
        rp = NumpyMatrixOperator(rp, source_id=range_id, range_id=range_id)
    else:
        rp = NumpyMatrixOperator(np.zeros((0, 0)), source_id=range_id, range_id=range_id)
    return op, None, U, V, sp, rp
def test_identity_numpy_lincomb():
    n = 2
    space = NumpyVectorSpace(n)
    identity = IdentityOperator(space)
    numpy_operator = NumpyMatrixOperator(np.ones((n, n)))
    for alpha in [-1, 0, 1]:
        for beta in [-1, 0, 1]:
            idop = alpha * identity + beta * numpy_operator
            mat1 = alpha * np.eye(n) + beta * np.ones((n, n))
            mat2 = to_matrix(idop.assemble(), format='dense')
            assert np.array_equal(mat1, mat2)
def construct_operators_and_vectorarrays(m, n, r, k, seed=0):
    space_m = NumpyVectorSpace(m)
    space_n = NumpyVectorSpace(n)
    rng = np.random.RandomState(seed)
    A = NumpyMatrixOperator(rng.randn(m, n))
    L = space_m.random(r, distribution='normal', random_state=rng)
    C = rng.randn(r, r)
    R = space_n.random(r, distribution='normal', random_state=rng)
    U = space_n.random(k, distribution='normal', random_state=rng)
    V = space_m.random(k, distribution='normal', random_state=rng)
    return A, L, C, R, U, V
class GenericOperator(OperatorBase):

    source = range = NumpyVectorSpace(10)
    op = NumpyMatrixOperator(np.eye(10) * np.arange(1, 11))
    linear = True

    def apply(self, U, ind=None, mu=None):
        return self.op.apply(U, ind=ind, mu=mu)

    def apply_adjoint(self, U, ind=None, mu=None):
        return self.op.apply_adjoint(U, ind=ind, mu=mu)
def apply_inverse_adjoint(self, U, mu=None, initial_guess=None, least_squares=False):
    return NumpyMatrixOperator.apply_inverse_adjoint(self, U, mu=mu, initial_guess=initial_guess,
                                                     least_squares=least_squares)
def test_blk_diag_apply_inverse_adjoint():
    np.random.seed(0)

    A = np.random.randn(2, 2)
    B = np.random.randn(3, 3)
    C = spla.block_diag(A, B)

    Aop = NumpyMatrixOperator(A)
    Bop = NumpyMatrixOperator(B)
    Cop = BlockDiagonalOperator((Aop, Bop))

    v1 = np.random.randn(2)
    v2 = np.random.randn(3)
    v = np.hstack((v1, v2))
    v1va = NumpyVectorSpace.from_numpy(v1)
    v2va = NumpyVectorSpace.from_numpy(v2)
    vva = BlockVectorSpace.make_array((v1va, v2va))

    wva = Cop.apply_inverse_adjoint(vva)
    w = np.hstack((wva.block(0).to_numpy(), wva.block(1).to_numpy()))
    assert np.allclose(spla.solve(C.T, v), w)
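# Standalone sketch (not part of the original source): the property checked above, in plain
# NumPy/SciPy terms. For a block-diagonal matrix, solving with the adjoint can be done block
# by block; the stacked blockwise solutions equal the monolithic solution.
import numpy as np
import scipy.linalg as spla

np.random.seed(0)
A = np.random.randn(2, 2)
B = np.random.randn(3, 3)
C = spla.block_diag(A, B)
v1, v2 = np.random.randn(2), np.random.randn(3)

w_blockwise = np.hstack((np.linalg.solve(A.T, v1), np.linalg.solve(B.T, v2)))
w_monolithic = np.linalg.solve(C.T, np.hstack((v1, v2)))
assert np.allclose(w_blockwise, w_monolithic)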
def jacobian(self, U, mu=None):
    assert len(U) == 1
    mu = self.parse_parameter(mu)
    options = self.solver_options.get('jacobian') if self.solver_options else None

    if self.interpolation_matrix.shape[0] == 0:
        return NumpyMatrixOperator(np.zeros((self.range.dim, self.source.dim)), solver_options=options,
                                   source_id=self.source.id, range_id=self.range.id,
                                   name=self.name + '_jacobian')

    U_dofs = self.source_basis_dofs.lincomb(U.to_numpy()[0])
    J = self.restricted_operator.jacobian(U_dofs, mu=mu).apply(self.source_basis_dofs)
    try:
        if self.triangular:
            interpolation_coefficients = solve_triangular(self.interpolation_matrix, J.to_numpy().T,
                                                          lower=True, unit_diagonal=True).T
        else:
            interpolation_coefficients = np.linalg.solve(self.interpolation_matrix, J.to_numpy().T).T
    except ValueError:  # this exception occurs when J contains NaNs ...
        interpolation_coefficients = (np.empty((len(self.source_basis_dofs),
                                                len(self.projected_collateral_basis)))
                                      + np.nan)
    M = self.projected_collateral_basis.lincomb(interpolation_coefficients)
    if isinstance(M.space, NumpyVectorSpace):
        return NumpyMatrixOperator(M.to_numpy().T, source_id=self.source.id, range_id=self.range.id,
                                   solver_options=options)
    else:
        assert not options
        return VectorArrayOperator(M)
def _reduce(self):
    d = self.d

    self.logger.info('Computing oswald interpolations ...')
    oi = d.estimator.oswald_interpolation_error

    oi_red = []
    for i, OI_i_space in enumerate(oi.range.subspaces):
        oi_i = oi._blocks[i, i]
        basis = self.bases[oi_i.source.id]
        self.bases[OI_i_space.id] = oi_i.apply(basis)
        oi_red.append(NumpyMatrixOperator(np.eye(len(basis)),
                                          source_id=oi_i.source.id, range_id=oi_i.range.id))
    oi_red = unblock(BlockDiagonalOperator(oi_red))

    self.logger.info('Computing flux reconstructions ...')
    fr = d.estimator.flux_reconstruction

    for i, RT_i_space in enumerate(fr.range.subspaces):
        self.bases[RT_i_space.id] = RT_i_space.empty()

    red_aff_components = []
    for i_aff, aff_component in enumerate(fr.operators):
        red_aff_component = []
        for i, RT_i_space in enumerate(aff_component.range.subspaces):
            fr_i = aff_component._blocks[i, i]
            basis = self.bases[fr_i.source.id]
            self.bases[RT_i_space.id].append(fr_i.apply(basis))
            M = np.zeros((len(basis) * len(fr.operators), len(basis)))
            M[i_aff * len(basis): (i_aff+1) * len(basis), :] = np.eye(len(basis))
            red_aff_component.append(NumpyMatrixOperator(M, source_id=fr_i.source.id,
                                                         range_id=fr_i.range.id))
        red_aff_components.append(BlockDiagonalOperator(red_aff_component))
    fr_red = LincombOperator(red_aff_components, fr.coefficients)
    fr_red = unblock(fr_red)

    red_estimator = d.estimator.with_(flux_reconstruction=fr_red, oswald_interpolation_error=oi_red)

    rd = super()._reduce()
    rd = rd.with_(estimator=red_estimator)

    return rd
def test_sylv_schur_V_E(n, r, m):
    np.random.seed(0)

    A, E = diff_conv_1d_fem(n, 1, 1)
    B = np.random.randn(n, m)

    Ar = np.random.randn(r, r) - r * np.eye(r)
    Er = np.random.randn(r, r)
    Er = (Er + Er.T) / 2
    Er += r * np.eye(r)
    Br = np.random.randn(r, m)

    Aop = NumpyMatrixOperator(A)
    Eop = NumpyMatrixOperator(E)
    Bop = NumpyMatrixOperator(B)
    Arop = NumpyMatrixOperator(Ar)
    Erop = NumpyMatrixOperator(Er)
    Brop = NumpyMatrixOperator(Br)

    Vva = solve_sylv_schur(Aop, Arop, E=Eop, Er=Erop, B=Bop, Br=Brop)

    V = Vva.to_numpy().T

    AVErT = A.dot(V.dot(Er.T))
    EVArT = E.dot(V.dot(Ar.T))
    BBrT = B.dot(Br.T)
    assert fro_norm(AVErT + EVArT + BBrT) / fro_norm(BBrT) < 1e-10
def test_sylv_schur_W_E(n, r, p):
    np.random.seed(0)

    A, E = diff_conv_1d_fem(n, 1, 1)
    C = np.random.randn(p, n)

    Ar = np.random.randn(r, r) - r * np.eye(r)
    Er = np.random.randn(r, r)
    Er = (Er + Er.T) / 2
    Er += r * np.eye(r)
    Cr = np.random.randn(p, r)

    Aop = NumpyMatrixOperator(A)
    Eop = NumpyMatrixOperator(E)
    Cop = NumpyMatrixOperator(C)
    Arop = NumpyMatrixOperator(Ar)
    Erop = NumpyMatrixOperator(Er)
    Crop = NumpyMatrixOperator(Cr)

    Wva = solve_sylv_schur(Aop, Arop, E=Eop, Er=Erop, C=Cop, Cr=Crop)

    W = Wva.to_numpy().T

    ATWEr = A.T.dot(W.dot(Er))
    ETWAr = E.T.dot(W.dot(Ar))
    CTCr = C.T.dot(Cr)
    assert fro_norm(ATWEr + ETWAr + CTCr) / fro_norm(CTCr) < 1e-10
def action_apply_basis(self, op):
    range_basis, source_basis, product = self.range_basis, self.source_basis, self.product
    if source_basis is None:
        if range_basis is None:
            return op
        else:
            try:
                V = op.apply_adjoint(product.apply(range_basis) if product else range_basis)
            except NotImplementedError:
                raise RuleNotMatchingError('apply_adjoint not implemented')
            if isinstance(op.source, NumpyVectorSpace):
                from pymor.operators.numpy import NumpyMatrixOperator
                return NumpyMatrixOperator(V.to_numpy(), source_id=op.source.id, name=op.name)
            else:
                from pymor.operators.constructions import VectorArrayOperator
                return VectorArrayOperator(V, adjoint=True, name=op.name)
    else:
        if range_basis is None:
            V = op.apply(source_basis)
            if isinstance(op.range, NumpyVectorSpace):
                from pymor.operators.numpy import NumpyMatrixOperator
                return NumpyMatrixOperator(V.to_numpy().T, range_id=op.range.id, name=op.name)
            else:
                from pymor.operators.constructions import VectorArrayOperator
                return VectorArrayOperator(V, adjoint=False, name=op.name)
        elif product is None:
            from pymor.operators.numpy import NumpyMatrixOperator
            return NumpyMatrixOperator(op.apply2(range_basis, source_basis), name=op.name)
        else:
            from pymor.operators.numpy import NumpyMatrixOperator
            V = op.apply(source_basis)
            return NumpyMatrixOperator(product.apply2(range_basis, V), name=op.name)
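# Standalone sketch (not part of the original source): the dense analogue of the projection
# performed in the last two branches above. With trial basis V (columns), test basis W
# (columns) and an optional product matrix P, the projected operator is W^T A V, resp.
# W^T P A V. All names and sizes here are illustrative.
import numpy as np

np.random.seed(0)
A = np.random.randn(10, 10)
V = np.random.randn(10, 3)      # source_basis
W = np.random.randn(10, 4)      # range_basis
P = np.eye(10)                  # product (identity for simplicity)

A_proj = W.T @ P @ A @ V        # the reduced 4x3 matrix that gets wrapped as a matrix operator
assert A_proj.shape == (4, 3)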
def restricted_to_subbasis(self, dim, discretization):
    d = discretization
    cr = 1 if not d.rhs.parametric else len(d.rhs.operators)
    co = 1 if not d.operator.parametric else len(d.operator.operators)
    old_dim = d.operator.source.dim

    indices = np.concatenate((np.arange(cr),
                              ((np.arange(co) * old_dim)[..., np.newaxis] + np.arange(dim)).ravel() + cr))
    matrix = self.estimator_matrix._matrix[indices, :][:, indices]

    return StationaryAffineLinearReducedEstimator(NumpyMatrixOperator(matrix), self.coercivity_estimator)
def create_bases3(gq, lq, basis_size, q, transfer='robin', silent=True):
    # non-adaptive basis construction with power iteration
    if not silent:
        print("creating bases")
    bases = {}
    for space in gq["spaces"]:
        ldict = lq[space]
        # initialize the basis with the shift solution:
        if transfer == 'dirichlet':
            lsol = ldict["local_solution_dirichlet"]
        else:
            lsol = ldict["local_solution_robin"]
        product = ldict["range_product"]
        if transfer == 'dirichlet':
            transop = NumpyMatrixOperator(ldict["transfer_matrix_dirichlet"])
        else:
            transop = NumpyMatrixOperator(ldict["transfer_matrix_robin"])
        basis = rrf(transop, ldict["source_product"], product, q, basis_size, True)
        basis.append(lsol)
        gram_schmidt(basis, product, copy=False)
        bases[space] = basis
    return bases
def test_lrcf(n, m, with_E, trans, lyap_solver):
    _check_availability(lyap_solver)

    if not with_E:
        A = conv_diff_1d_fd(n, 1, 1)
        E = None
    else:
        A, E = conv_diff_1d_fem(n, 1, 1)

    np.random.seed(0)
    B = np.random.randn(n, m)
    if trans:
        B = B.T

    Aop = NumpyMatrixOperator(A)
    Eop = NumpyMatrixOperator(E) if with_E else None
    Bva = Aop.source.from_numpy(B.T if not trans else B)

    Zva = solve_lyap_lrcf(Aop, Eop, Bva, trans=trans, options=lyap_solver)
    assert len(Zva) <= n

    Z = Zva.to_numpy().T
    assert relative_residual(A, E, B, Z @ Z.T, trans=trans) < 1e-10
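# Standalone sketch (not part of the original source): what the low-rank factor Z above
# approximates, checked on a small dense example with SciPy. In the untransposed case, the
# Gramian X = Z Z^T solves the Lyapunov equation A X + X A^T + B B^T = 0; values here are
# illustrative.
import numpy as np
import scipy.linalg as spla

np.random.seed(0)
n, m = 5, 2
A = -np.eye(n) + 0.1 * np.random.randn(n, n)     # a (very likely) stable matrix
B = np.random.randn(n, m)

X = spla.solve_continuous_lyapunov(A, -B @ B.T)
residual = A @ X + X @ A.T + B @ B.T
assert np.linalg.norm(residual) / np.linalg.norm(B @ B.T) < 1e-10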
def test_complex():
    np.random.seed(0)
    I = np.eye(5)
    A = np.random.randn(5, 5)
    B = np.random.randn(5, 5)
    C = np.random.randn(3, 5)

    Iop = NumpyMatrixOperator(I)
    Aop = NumpyMatrixOperator(A)
    Bop = NumpyMatrixOperator(B)
    Cva = NumpyVectorArray(C)

    # assemble_lincomb
    assert not np.iscomplexobj(Aop.assemble_lincomb((Iop, Bop), (1, 1))._matrix)
    assert not np.iscomplexobj(Aop.assemble_lincomb((Aop, Bop), (1, 1))._matrix)
    assert not np.iscomplexobj(Aop.assemble_lincomb((Aop, Bop), (1 + 0j, 1 + 0j))._matrix)
    assert np.iscomplexobj(Aop.assemble_lincomb((Aop, Bop), (1j, 1))._matrix)
    assert np.iscomplexobj(Aop.assemble_lincomb((Bop, Aop), (1, 1j))._matrix)

    # apply_inverse
    assert not np.iscomplexobj(Aop.apply_inverse(Cva).data)
    assert np.iscomplexobj((Aop * 1j).apply_inverse(Cva).data)
    assert np.iscomplexobj(Aop.assemble_lincomb((Aop, Bop), (1, 1j)).apply_inverse(Cva).data)
    assert np.iscomplexobj(Aop.apply_inverse(Cva * 1j).data)

    # append
    for rsrv in (0, 10):
        for o_ind in (None, [0]):
            va = NumpyVectorArray.make_array(subtype=5, reserve=rsrv)
            va.append(Cva)
            D = np.random.randn(1, 5) + 1j * np.random.randn(1, 5)
            Dva = NumpyVectorArray(D)
            assert not np.iscomplexobj(va.data)
            assert np.iscomplexobj(Dva.data)
            va.append(Dva, o_ind)
            assert np.iscomplexobj(va.data)

    # scal
    assert not np.iscomplexobj(Cva.data)
    assert np.iscomplexobj((Cva * 1j).data)
    assert np.iscomplexobj((Cva * (1 + 0j)).data)

    # axpy
    assert not np.iscomplexobj(Cva.data)
    Cva.axpy(1, Dva, 0)
    assert np.iscomplexobj(Cva.data)

    Cva = NumpyVectorArray(C)
    assert not np.iscomplexobj(Cva.data)
    Cva.axpy(1j, Dva, 0)
    assert np.iscomplexobj(Cva.data)
def discretize_stationary_from_disk(parameter_file):
    """Load a linear affinely decomposed |StationaryDiscretization| from file.

    The discretization is defined via an `.ini`-style file as follows ::

        [system-matrices]
        L_1.mat: l_1(μ_1,...,μ_n)
        L_2.mat: l_2(μ_1,...,μ_n)
        ...
        [rhs-vectors]
        F_1.mat: f_1(μ_1,...,μ_n)
        F_2.mat: f_2(μ_1,...,μ_n)
        ...
        [parameter]
        μ_1: a_1,b_1
        ...
        μ_n: a_n,b_n
        [products]
        Prod1: P_1.mat
        Prod2: P_2.mat
        ...

    Here, `L_1.mat`, `L_2.mat`, ..., `F_1.mat`, `F_2.mat`, ... are files containing matrices
    `L_1`, `L_2`, ... and vectors `F_1`, `F_2`, ... which correspond to the affine components
    of the operator and right-hand side functional. The respective coefficient functionals are
    given via the string expressions `l_1(...)`, `l_2(...)`, ..., `f_1(...)`, `f_2(...)`, ...
    in the (scalar-valued) |Parameter| components `μ_1`, ..., `μ_n`. The allowed lower and
    upper bounds `a_i, b_i` for the component `μ_i` are specified in the `[parameter]` section.

    The resulting operator and right-hand side are then of the form ::

        L(μ) = l_1(μ)*L_1 + l_2(μ)*L_2 + ...
        F(μ) = f_1(μ)*F_1 + f_2(μ)*F_2 + ...

    In the `[products]` section, an optional list of inner products `Prod1`, `Prod2`, ...
    with corresponding matrices `P_1.mat`, `P_2.mat` can be specified.

    Example::

        [system-matrices]
        matrix1.mat: 1.
        matrix2.mat: 1. - theta**2
        [rhs-vectors]
        rhs.mat: 1.
        [parameter]
        theta: 0, 0.5
        [products]
        h1: h1.mat
        l2: mass.mat

    Parameters
    ----------
    parameter_file
        Path to the parameter file.

    Returns
    -------
    discretization
        The |StationaryDiscretization| that has been generated.
    """
    assert ".ini" == parameter_file[-4:], "Given file is not an .ini file"
    base_path = os.path.dirname(parameter_file)

    # Get input from parameter file
    config = configparser.ConfigParser()
    config.optionxform = str
    config.read(parameter_file)

    # Assert that all needed entries given
    assert 'system-matrices' in config.sections()
    assert 'rhs-vectors' in config.sections()
    assert 'parameter' in config.sections()

    system_mat = config.items('system-matrices')
    rhs_vec = config.items('rhs-vectors')
    parameter = config.items('parameter')

    # Dict of parameters types and ranges
    parameter_type = {}
    parameter_range = {}

    # get parameters
    for i in range(len(parameter)):
        parameter_name = parameter[i][0]
        parameter_list = tuple(float(j) for j in parameter[i][1].replace(" ", "").split(','))
        parameter_range[parameter_name] = parameter_list
        # Assume scalar parameter dependence
        parameter_type[parameter_name] = 0

    # Create parameter space
    parameter_space = CubicParameterSpace(parameter_type=parameter_type, ranges=parameter_range)

    # Assemble operators
    system_operators, system_functionals = [], []

    # get parameter functionals and system matrices
    for i in range(len(system_mat)):
        path = os.path.join(base_path, system_mat[i][0])
        expr = system_mat[i][1]
        parameter_functional = ExpressionParameterFunctional(expr, parameter_type=parameter_type)
        system_operators.append(NumpyMatrixOperator.from_file(path))
        system_functionals.append(parameter_functional)

    system_lincombOperator = LincombOperator(system_operators, coefficients=system_functionals)

    # get rhs vectors
    rhs_operators, rhs_functionals = [], []

    for i in range(len(rhs_vec)):
        path = os.path.join(base_path, rhs_vec[i][0])
        expr = rhs_vec[i][1]
        parameter_functional = ExpressionParameterFunctional(expr, parameter_type=parameter_type)
        op = NumpyMatrixOperator.from_file(path)
        assert isinstance(op._matrix, np.ndarray)
        op = op.with_(matrix=op._matrix.reshape((1, -1)))
        rhs_operators.append(op)
        rhs_functionals.append(parameter_functional)

    rhs_lincombOperator = LincombOperator(rhs_operators, coefficients=rhs_functionals)

    # get products if given
    if 'products' in config.sections():
        product = config.items('products')
        products = {}
        for i in range(len(product)):
            product_name = product[i][0]
            product_path = os.path.join(base_path, product[i][1])
            products[product_name] = NumpyMatrixOperator.from_file(product_path)
    else:
        products = None

    # Create and return stationary discretization
    return StationaryDiscretization(operator=system_lincombOperator, rhs=rhs_lincombOperator,
                                    parameter_space=parameter_space, products=products)
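# Standalone sketch (not part of the original source): the affine structure the function
# above assembles, evaluated for one parameter value with plain NumPy. Given components
# L_1, L_2 and coefficients l_1(mu), l_2(mu), the assembled system matrix is
# l_1(mu)*L_1 + l_2(mu)*L_2, matching the docstring example; all values are illustrative.
import numpy as np

L1 = np.array([[2., 0.], [0., 2.]])
L2 = np.array([[0., 1.], [1., 0.]])
coefficients = (lambda mu: 1., lambda mu: 1. - mu['theta']**2)   # l_1, l_2

mu = {'theta': 0.5}
L_mu = sum(c(mu) * L for c, L in zip(coefficients, (L1, L2)))
assert np.allclose(L_mu, np.array([[2., 0.75], [0.75, 2.]]))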
def discretize_instationary_from_disk(parameter_file, T=None, steps=None, u0=None, time_stepper=None):
    """Load a linear affinely decomposed |InstationaryDiscretization| from file.

    Similarly to :func:`discretize_stationary_from_disk`, the discretization is specified
    via an `.ini`-file of the following form ::

        [system-matrices]
        L_1.mat: l_1(μ_1,...,μ_n)
        L_2.mat: l_2(μ_1,...,μ_n)
        ...
        [rhs-vectors]
        F_1.mat: f_1(μ_1,...,μ_n)
        F_2.mat: f_2(μ_1,...,μ_n)
        ...
        [mass-matrix]
        D.mat
        [initial-solution]
        u0: u0.mat
        [parameter]
        μ_1: a_1,b_1
        ...
        μ_n: a_n,b_n
        [products]
        Prod1: P_1.mat
        Prod2: P_2.mat
        ...
        [time]
        T: final time
        steps: number of time steps

    Parameters
    ----------
    parameter_file
        Path to the '.ini' parameter file.
    T
        End-time of desired solution. If `None`, the value specified in the parameter file
        is used.
    steps
        Number of time steps to do. If `None`, the value specified in the parameter file
        is used.
    u0
        Initial solution. If `None`, the initial solution is obtained from the parameter file.
    time_stepper
        The desired :class:`time stepper <pymor.algorithms.timestepping.TimeStepperInterface>`
        to use. If `None`, implicit Euler time stepping is used.

    Returns
    -------
    discretization
        The |InstationaryDiscretization| that has been generated.
    """
    assert ".ini" == parameter_file[-4:], "Given file is not an .ini file"
    base_path = os.path.dirname(parameter_file)

    # Get input from parameter file
    config = configparser.ConfigParser()
    config.optionxform = str
    config.read(parameter_file)

    # Assert that all needed entries given
    assert 'system-matrices' in config.sections()
    assert 'mass-matrix' in config.sections()
    assert 'rhs-vectors' in config.sections()
    assert 'parameter' in config.sections()

    system_mat = config.items('system-matrices')
    mass_mat = config.items('mass-matrix')
    rhs_vec = config.items('rhs-vectors')
    parameter = config.items('parameter')

    # Dict of parameters types and ranges
    parameter_type = {}
    parameter_range = {}

    # get parameters
    for i in range(len(parameter)):
        parameter_name = parameter[i][0]
        parameter_list = tuple(float(j) for j in parameter[i][1].replace(" ", "").split(','))
        parameter_range[parameter_name] = parameter_list
        # Assume scalar parameter dependence
        parameter_type[parameter_name] = 0

    # Create parameter space
    parameter_space = CubicParameterSpace(parameter_type=parameter_type, ranges=parameter_range)

    # Assemble operators
    system_operators, system_functionals = [], []

    # get parameter functionals and system matrices
    for i in range(len(system_mat)):
        path = os.path.join(base_path, system_mat[i][0])
        expr = system_mat[i][1]
        parameter_functional = ExpressionParameterFunctional(expr, parameter_type=parameter_type)
        system_operators.append(NumpyMatrixOperator.from_file(path))
        system_functionals.append(parameter_functional)

    system_lincombOperator = LincombOperator(system_operators, coefficients=system_functionals)

    # get rhs vectors
    rhs_operators, rhs_functionals = [], []

    for i in range(len(rhs_vec)):
        path = os.path.join(base_path, rhs_vec[i][0])
        expr = rhs_vec[i][1]
        parameter_functional = ExpressionParameterFunctional(expr, parameter_type=parameter_type)
        op = NumpyMatrixOperator.from_file(path)
        assert isinstance(op._matrix, np.ndarray)
        op = op.with_(matrix=op._matrix.reshape((1, -1)))
        rhs_operators.append(op)
        rhs_functionals.append(parameter_functional)

    rhs_lincombOperator = LincombOperator(rhs_operators, coefficients=rhs_functionals)

    # get mass matrix
    path = os.path.join(base_path, mass_mat[0][1])
    mass_operator = NumpyMatrixOperator.from_file(path)

    # Obtain initial solution if not given
    if u0 is None:
        u_0 = config.items('initial-solution')
        path = os.path.join(base_path, u_0[0][1])
        op = NumpyMatrixOperator.from_file(path)
        assert isinstance(op._matrix, np.ndarray)
        u0 = op.with_(matrix=op._matrix.reshape((-1, 1)))

    # get products if given
    if 'products' in config.sections():
        product = config.items('products')
        products = {}
        for i in range(len(product)):
            product_name = product[i][0]
            product_path = os.path.join(base_path, product[i][1])
            products[product_name] = NumpyMatrixOperator.from_file(product_path)
    else:
        products = None

    # Further specifications
    if 'time' in config.sections():
        if T is None:
            assert 'T' in config.options('time')
            T = float(config.get('time', 'T'))
        if steps is None:
            assert 'steps' in config.options('time')
            steps = int(config.get('time', 'steps'))

    # Use implicit Euler time stepper if no time-stepper given
    if time_stepper is None:
        time_stepper = ImplicitEulerTimeStepper(steps)
    else:
        time_stepper = time_stepper(steps)

    # Create and return instationary discretization
    return InstationaryDiscretization(operator=system_lincombOperator, rhs=rhs_lincombOperator,
                                      parameter_space=parameter_space, initial_data=u0, T=T,
                                      time_stepper=time_stepper, mass=mass_operator,
                                      products=products)
def discretize_instationary_from_disk(parameter_file, T=None, steps=None, u0=None, time_stepper=None):
    """Generates an instationary discretization based on data loaded from files.

    The path and further specifications of these objects are given in an '.ini' parameter
    file (see example below). Suitable for discrete problems given by::

        M(u(t), w) + L(u(t), w, t) = F(t, w)
                              u(0) = u_0

    for t in [0,T], where L is a linear time-dependent |Operator|, F is a time-dependent
    linear |Functional|, u_0 the initial data and w the parameter. The mass |Operator| M is
    assumed to be linear, time-independent and |Parameter|-independent.

    Parameters
    ----------
    parameter_file
        String containing the path to the '.ini' parameter file.
    T
        End-time of the desired solution; if `None`, it is obtained from the parameter file.
    steps
        Number of time steps to do; if `None`, it is obtained from the parameter file.
    u0
        Initial solution; if `None`, it is obtained from the parameter file.
    time_stepper
        The desired time stepper to use; if `None`, an implicit Euler scheme is used.

    Returns
    -------
    discretization
        The |Discretization| that has been generated.

    Example
    -------
    The following parameter file is suitable for a discrete parabolic problem with

    L(u(w), w) = (f_1(w)*K1 + f_2(w)*K2 + ...)*u, F(w) = g_1(w)*L1 + g_2(w)*L2 + ...,
    M = D and u_0(w) = u0 with parameter w_i in [a_i, b_i], where f_i(w) and g_i(w) are
    strings of valid python expressions.

    Optional products can be provided to introduce a dict of inner products on the discrete
    space. Time specifications like T and steps can also be provided, but are optional when
    already given by the call of this method. The content of the file is then given as::

        [system-matrices]
        # path_to_object: parameter_functional_associated_with_object
        K1.mat: f_1(w_1,...,w_n)
        K2.mat: f_2(w_1,...,w_n)
        ...
        [rhs-vectors]
        L1.mat: g_1(w_1,...,w_n)
        L2.mat: g_2(w_1,...,w_n)
        ...
        [mass-matrix]
        D.mat
        [initial-solution]
        u0: u0.mat
        [parameter]
        # Name: lower_bound,upper_bound
        w_1: a_1,b_1
        ...
        w_n: a_n,b_n
        [products]
        # Name: path_to_object
        Prod1: S.mat
        Prod2: T.mat
        ...
        [time]
        # fixed_Name: value
        T: 10.0
        steps: 100
    """
    assert ".ini" == parameter_file[-4:], "Given file is not an .ini file"
    base_path = os.path.dirname(parameter_file)

    # Get input from parameter file
    config = configparser.ConfigParser()
    config.optionxform = str
    config.read(parameter_file)

    # Assert that all needed entries given
    assert 'system-matrices' in config.sections()
    assert 'mass-matrix' in config.sections()
    assert 'rhs-vectors' in config.sections()
    assert 'parameter' in config.sections()

    system_mat = config.items('system-matrices')
    mass_mat = config.items('mass-matrix')
    rhs_vec = config.items('rhs-vectors')
    parameter = config.items('parameter')

    # Dict of parameters types and ranges
    parameter_type = {}
    parameter_range = {}

    # get parameters
    for i in range(len(parameter)):
        parameter_name = parameter[i][0]
        parameter_list = tuple(float(j) for j in parameter[i][1].replace(" ", "").split(','))
        parameter_range[parameter_name] = parameter_list
        # Assume scalar parameter dependence
        parameter_type[parameter_name] = 0

    # Create parameter space
    parameter_space = CubicParameterSpace(parameter_type=parameter_type, ranges=parameter_range)

    # Assemble operators
    system_operators, system_functionals = [], []

    # get parameter functionals and system matrices
    for i in range(len(system_mat)):
        path = os.path.join(base_path, system_mat[i][0])
        expr = system_mat[i][1]
        parameter_functional = ExpressionParameterFunctional(expr, parameter_type=parameter_type)
        system_operators.append(NumpyMatrixOperator.from_file(path))
        system_functionals.append(parameter_functional)

    system_lincombOperator = LincombOperator(system_operators, coefficients=system_functionals)

    # get rhs vectors
    rhs_operators, rhs_functionals = [], []

    for i in range(len(rhs_vec)):
        path = os.path.join(base_path, rhs_vec[i][0])
        expr = rhs_vec[i][1]
        parameter_functional = ExpressionParameterFunctional(expr, parameter_type=parameter_type)
        op = NumpyMatrixOperator.from_file(path)
        assert isinstance(op._matrix, np.ndarray)
        op = op.with_(matrix=op._matrix.reshape((1, -1)))
        rhs_operators.append(op)
        rhs_functionals.append(parameter_functional)

    rhs_lincombOperator = LincombOperator(rhs_operators, coefficients=rhs_functionals)

    # get mass matrix
    path = os.path.join(base_path, mass_mat[0][1])
    mass_operator = NumpyMatrixOperator.from_file(path)

    # Obtain initial solution if not given
    if u0 is None:
        u_0 = config.items('initial-solution')
        path = os.path.join(base_path, u_0[0][1])
        op = NumpyMatrixOperator.from_file(path)
        assert isinstance(op._matrix, np.ndarray)
        u0 = op.with_(matrix=op._matrix.reshape((-1, 1)))

    # get products if given
    if 'products' in config.sections():
        product = config.items('products')
        products = {}
        for i in range(len(product)):
            product_name = product[i][0]
            product_path = os.path.join(base_path, product[i][1])
            products[product_name] = NumpyMatrixOperator.from_file(product_path)
    else:
        products = None

    # Further specifications
    if 'time' in config.sections():
        if T is None:
            assert 'T' in config.options('time')
            T = float(config.get('time', 'T'))
        if steps is None:
            assert 'steps' in config.options('time')
            steps = int(config.get('time', 'steps'))

    # Use implicit Euler time stepper if no time-stepper given
    if time_stepper is None:
        time_stepper = ImplicitEulerTimeStepper(steps)
    else:
        time_stepper = time_stepper(steps)

    # Create and return instationary discretization
    return InstationaryDiscretization(operator=system_lincombOperator, rhs=rhs_lincombOperator,
                                      parameter_space=parameter_space, initial_data=u0, T=T,
                                      time_stepper=time_stepper, mass=mass_operator,
                                      products=products)
def discretize_stationary_from_disk(parameter_file):
    """Generates a stationary discretization based only on data loaded from files.

    The path and further specifications of these objects are given in an '.ini' parameter
    file (see example below). Suitable for discrete problems given by::

        L(u, w) = F(w)

    with an operator L and a linear functional F with a parameter w, given as system matrices
    and rhs vectors in an affine decomposition on the hard disk.

    Parameters
    ----------
    parameter_file
        String containing the path to the '.ini' parameter file.

    Returns
    -------
    discretization
        The |Discretization| that has been generated.

    Example
    -------
    The following parameter file is suitable for a discrete elliptic problem with

    L(u, w) = (f_1(w)*K1 + f_2(w)*K2 + ...)*u and F(w) = g_1(w)*L1 + g_2(w)*L2 + ...

    with parameter w_i in [a_i, b_i], where f_i(w) and g_i(w) are strings of valid python
    expressions. Optional products can be provided to introduce a dict of inner products on
    the discrete space. The content of the file is then given as::

        [system-matrices]
        # path_to_object: parameter_functional_associated_with_object
        K1.mat: f_1(w_1,...,w_n)
        K2.mat: f_2(w_1,...,w_n)
        ...
        [rhs-vectors]
        L1.mat: g_1(w_1,...,w_n)
        L2.mat: g_2(w_1,...,w_n)
        ...
        [parameter]
        # Name: lower_bound,upper_bound
        w_1: a_1,b_1
        ...
        w_n: a_n,b_n
        [products]
        # Name: path_to_object
        Prod1: S.mat
        Prod2: T.mat
        ...
    """
    assert ".ini" == parameter_file[-4:], "Given file is not an .ini file"
    base_path = os.path.dirname(parameter_file)

    # Get input from parameter file
    config = configparser.ConfigParser()
    config.optionxform = str
    config.read(parameter_file)

    # Assert that all needed entries given
    assert 'system-matrices' in config.sections()
    assert 'rhs-vectors' in config.sections()
    assert 'parameter' in config.sections()

    system_mat = config.items('system-matrices')
    rhs_vec = config.items('rhs-vectors')
    parameter = config.items('parameter')

    # Dict of parameters types and ranges
    parameter_type = {}
    parameter_range = {}

    # get parameters
    for i in range(len(parameter)):
        parameter_name = parameter[i][0]
        parameter_list = tuple(float(j) for j in parameter[i][1].replace(" ", "").split(','))
        parameter_range[parameter_name] = parameter_list
        # Assume scalar parameter dependence
        parameter_type[parameter_name] = 0

    # Create parameter space
    parameter_space = CubicParameterSpace(parameter_type=parameter_type, ranges=parameter_range)

    # Assemble operators
    system_operators, system_functionals = [], []

    # get parameter functionals and system matrices
    for i in range(len(system_mat)):
        path = os.path.join(base_path, system_mat[i][0])
        expr = system_mat[i][1]
        parameter_functional = ExpressionParameterFunctional(expr, parameter_type=parameter_type)
        system_operators.append(NumpyMatrixOperator.from_file(path))
        system_functionals.append(parameter_functional)

    system_lincombOperator = LincombOperator(system_operators, coefficients=system_functionals)

    # get rhs vectors
    rhs_operators, rhs_functionals = [], []

    for i in range(len(rhs_vec)):
        path = os.path.join(base_path, rhs_vec[i][0])
        expr = rhs_vec[i][1]
        parameter_functional = ExpressionParameterFunctional(expr, parameter_type=parameter_type)
        op = NumpyMatrixOperator.from_file(path)
        assert isinstance(op._matrix, np.ndarray)
        op = op.with_(matrix=op._matrix.reshape((1, -1)))
        rhs_operators.append(op)
        rhs_functionals.append(parameter_functional)

    rhs_lincombOperator = LincombOperator(rhs_operators, coefficients=rhs_functionals)

    # get products if given
    if 'products' in config.sections():
        product = config.items('products')
        products = {}
        for i in range(len(product)):
            product_name = product[i][0]
            product_path = os.path.join(base_path, product[i][1])
            products[product_name] = NumpyMatrixOperator.from_file(product_path)
    else:
        products = None

    # Create and return stationary discretization
    return StationaryDiscretization(operator=system_lincombOperator, rhs=rhs_lincombOperator,
                                    parameter_space=parameter_space, products=products)
def test_numpy_dense_solvers():
    op = NumpyMatrixOperator(np.eye(10) * np.arange(1, 11))
    rhs = op.range.make_array(np.ones(10))
    solution = op.apply_inverse(rhs)
    assert ((op.apply(solution) - rhs).l2_norm() / rhs.l2_norm())[0] < 1e-8
def test_numpy_dense_solvers(numpy_dense_solver):
    op = NumpyMatrixOperator(np.eye(10) * np.arange(1, 11), solver_options=numpy_dense_solver)
    rhs = NumpyVectorArray(np.ones(10))
    solution = op.apply_inverse(rhs)
    assert ((op.apply(solution) - rhs).l2_norm() / rhs.l2_norm())[0] < 1e-8
def test_numpy_sparse_solvers(numpy_sparse_solver):
    op = NumpyMatrixOperator(diags([np.arange(1., 11.)], [0]), solver_options=numpy_sparse_solver)
    rhs = NumpyVectorArray(np.ones(10))
    solution = op.apply_inverse(rhs)
    assert ((op.apply(solution) - rhs).l2_norm() / rhs.l2_norm())[0] < 1e-8