def visualize(self, U, codim=2, **kwargs):
    """Visualize scalar data associated to the grid as a patch plot.

    Parameters
    ----------
    U
        |NumPy array| of the data to visualize. If `U.dim == 2 and len(U) > 1`, the
        data is visualized as a time series of plots. Alternatively, a tuple of
        |NumPy arrays| can be provided, in which case a subplot is created for
        each entry of the tuple. The lengths of all arrays have to agree.
    codim
        The codimension of the entities the data in `U` is attached to (either 0 or 2).
    kwargs
        See :func:`~pymor.gui.qt.visualize_patch`
    """
    from pymor.gui.qt import visualize_patch
    from pymor.vectorarrays.interfaces import VectorArrayInterface
    from pymor.vectorarrays.numpy import NumpyVectorArray
    if isinstance(U, (np.ndarray, VectorArrayInterface)):
        U = (U,)
    assert all(isinstance(u, (np.ndarray, VectorArrayInterface)) for u in U)
    U = tuple(NumpyVectorArray(u) if isinstance(u, np.ndarray) else
              u if isinstance(u, NumpyVectorArray) else
              NumpyVectorArray(u.data)
              for u in U)
    bounding_box = kwargs.pop('bounding_box', self.domain)
    visualize_patch(self, U, codim=codim, bounding_box=bounding_box, **kwargs)

def initial_projection(U, mu):
    I = p.initial_data.evaluate(grid.quadrature_points(0, order=2), mu).squeeze()
    I = np.sum(I * grid.reference_element.quadrature(order=2)[1], axis=1) \
        * (1. / grid.reference_element.volume)
    I = NumpyVectorArray(I, copy=False)
    return I.lincomb(U).data

def projected_to_subbasis(self, dim_range=None, dim_source=None, dim_collateral=None, name=None):
    assert dim_source is None or dim_source <= self.source.dim
    assert dim_range is None or dim_range <= self.range.dim
    assert dim_collateral is None or dim_collateral <= self.restricted_operator.range.dim
    if not isinstance(self.projected_collateral_basis, NumpyVectorArray):
        raise NotImplementedError
    name = name or '{}_projected_to_subbasis'.format(self.name)

    interpolation_matrix = self.interpolation_matrix[:dim_collateral, :dim_collateral]

    if dim_collateral is not None:
        restricted_operator, source_dofs = self.restricted_operator.restricted(np.arange(dim_collateral))
    else:
        restricted_operator = self.restricted_operator

    old_pcb = self.projected_collateral_basis
    projected_collateral_basis = NumpyVectorArray(old_pcb.data[:dim_collateral, :dim_range], copy=False)

    old_sbd = self.source_basis_dofs
    source_basis_dofs = NumpyVectorArray(old_sbd.data[:dim_source], copy=False) if dim_collateral is None \
        else NumpyVectorArray(old_sbd.data[:dim_source, source_dofs], copy=False)

    return ProjectedEmpiciralInterpolatedOperator(restricted_operator, interpolation_matrix,
                                                  source_basis_dofs, projected_collateral_basis,
                                                  self.triangular,
                                                  solver_options=self.solver_options, name=name)

def jacobian(self, U, mu=None):
    mu = self.parse_parameter(mu)
    options = self.solver_options.get('jacobian') if self.solver_options else None

    if len(self.interpolation_dofs) == 0:
        if self.source.type == self.range.type == NumpyVectorArray:
            return NumpyMatrixOperator(np.zeros((self.range.dim, self.source.dim)),
                                       solver_options=options, name=self.name + '_jacobian')
        else:
            return ZeroOperator(self.source, self.range, name=self.name + '_jacobian')
    elif hasattr(self, 'operator'):
        return EmpiricalInterpolatedOperator(self.operator.jacobian(U, mu=mu), self.interpolation_dofs,
                                             self.collateral_basis, self.triangular,
                                             solver_options=options, name=self.name + '_jacobian')
    else:
        U_components = NumpyVectorArray(U.components(self.source_dofs), copy=False)
        JU = self.restricted_operator.jacobian(U_components, mu=mu) \
                                     .apply(NumpyVectorArray(np.eye(len(self.source_dofs)), copy=False))
        try:
            if self.triangular:
                interpolation_coefficients = solve_triangular(self.interpolation_matrix, JU.data.T,
                                                              lower=True, unit_diagonal=True).T
            else:
                interpolation_coefficients = np.linalg.solve(self.interpolation_matrix, JU.data.T).T
        except ValueError:  # this exception occurs when JU contains NaNs ...
            interpolation_coefficients = np.empty((len(JU), len(self.collateral_basis))) + np.nan
        J = self.collateral_basis.lincomb(interpolation_coefficients)
        if isinstance(J, NumpyVectorArray):
            J = NumpyMatrixOperator(J.data.T)
        else:
            J = VectorArrayOperator(J)
        return Concatenation(J, ComponentProjection(self.source_dofs, self.source),
                             solver_options=options, name=self.name + '_jacobian')

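# A minimal, self-contained sketch (plain NumPy/SciPy; the names below are illustrative and not
# part of pyMOR's API) of the coefficient solve used in the jacobian above: with a unit lower
# triangular interpolation matrix, the interpolation coefficients follow by forward substitution.
import numpy as np
from scipy.linalg import solve_triangular

interpolation_matrix = np.array([[1.0, 0.0, 0.0],
                                 [0.5, 1.0, 0.0],
                                 [0.2, 0.3, 1.0]])   # collateral basis evaluated at the interpolation DOFs
AU = np.array([[1.0, 2.0, 3.0],                      # operator evaluations at the DOFs, one row per vector
               [0.5, 1.5, 2.5]])

# solve interpolation_matrix @ coeffs.T = AU.T by forward substitution
coeffs = solve_triangular(interpolation_matrix, AU.T, lower=True, unit_diagonal=True).T
assert np.allclose(interpolation_matrix @ coeffs.T, AU.T)
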
def apply_adjoint(self, U, ind=None, mu=None, source_product=None, range_product=None):
    assert U in self.range
    assert source_product is None or source_product.source == source_product.range == self.source
    assert range_product is None or range_product.source == range_product.range == self.range
    if not self.transposed:
        if range_product:
            ATPrU = NumpyVectorArray(range_product.apply2(self._array, U, U_ind=ind).T, copy=False)
        else:
            ATPrU = NumpyVectorArray(self._array.dot(U, o_ind=ind).T, copy=False)
        if source_product:
            return source_product.apply_inverse(ATPrU)
        else:
            return ATPrU
    else:
        if range_product:
            PrU = range_product.apply(U, ind=ind)
        else:
            PrU = U.copy(ind)
        ATPrU = self._array.lincomb(PrU.data)
        if source_product:
            return source_product.apply_inverse(ATPrU)
        else:
            return ATPrU

def apply(self, U, ind=None, mu=None):
    mu = self.parse_parameter(mu)
    if self.source_basis is None:
        if self.range_basis is None:
            return self.operator.apply(U, ind=ind, mu=mu)
        elif self.product is None:
            return NumpyVectorArray(self.operator.apply2(self.range_basis, U, U_ind=ind, mu=mu).T)
        else:
            V = self.operator.apply(U, ind=ind, mu=mu)
            return NumpyVectorArray(self.product.apply2(V, self.range_basis))
    else:
        U_array = U._array[:U._len] if ind is None else U._array[ind]
        UU = self.source_basis.lincomb(U_array)
        if self.range_basis is None:
            return self.operator.apply(UU, mu=mu)
        elif self.product is None:
            return NumpyVectorArray(self.operator.apply2(self.range_basis, UU, mu=mu).T)
        else:
            V = self.operator.apply(UU, mu=mu)
            return NumpyVectorArray(self.product.apply2(V, self.range_basis))

def test_gram_schmidt():
    for i in (1, 32):
        b = NumpyVectorArray(np.identity(i, dtype=float))
        a = gram_schmidt(b)
        assert np.all(b.almost_equal(a))
    c = NumpyVectorArray([[1.0, 0], [0.0, 0]])
    a = gram_schmidt(c)
    assert (a.data == np.array([[1.0, 0]])).all()

def test_gram_schmidt():
    for i in (1, 32):
        b = NumpyVectorArray(np.identity(i, dtype=float))
        a = gram_schmidt(b)
        assert np.all(almost_equal(b, a))
    c = NumpyVectorArray([[1.0, 0], [0., 0]])
    a = gram_schmidt(c)
    assert (a.data == np.array([[1.0, 0]])).all()

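# For reference, a minimal modified Gram-Schmidt on the rows of a NumPy array (a sketch only, not
# pyMOR's gram_schmidt, which additionally supports inner-product operators, offsets and
# reiteration). It shows the two properties the tests above check: an orthonormal input is left
# unchanged, and linearly dependent (or zero) rows are dropped.
import numpy as np

def gram_schmidt_rows(A, tol=1e-14):
    basis = []
    for row in A:
        v = row.astype(float).copy()
        for b in basis:
            v -= (b @ v) * b          # remove components along already accepted vectors
        norm = np.linalg.norm(v)
        if norm > tol:                # dependent or zero rows are dropped
            basis.append(v / norm)
    return np.array(basis)

assert np.allclose(gram_schmidt_rows(np.eye(4)), np.eye(4))
assert gram_schmidt_rows(np.array([[1., 0.], [0., 0.]])).shape == (1, 2)
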
def test_projected(operator_with_arrays):
    op, mu, U, V = operator_with_arrays
    op_UV = op.projected(V, U)
    np.random.seed(4711 + U.dim + len(V))
    coeffs = np.random.random(len(U))
    X = op_UV.apply(NumpyVectorArray(coeffs, copy=False), mu=mu)
    Y = NumpyVectorArray(V.dot(op.apply(U.lincomb(coeffs), mu=mu)).T, copy=False)
    assert np.all(almost_equal(X, Y))

def test_projected_with_product(operator_with_arrays_and_products):
    op, mu, U, V, sp, rp = operator_with_arrays_and_products
    op_UV = op.projected(V, U, product=rp)
    np.random.seed(4711 + U.dim + len(V))
    coeffs = np.random.random(len(U))
    X = op_UV.apply(NumpyVectorArray(coeffs, copy=False), mu=mu)
    Y = NumpyVectorArray(rp.apply2(op.apply(U.lincomb(coeffs), mu=mu), V), copy=False)
    assert np.all(almost_equal(X, Y))

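# The property checked by the two tests above, spelled out in plain NumPy (illustrative only):
# projecting a linear operator A onto a source basis V and a range basis W and applying the
# reduced operator to coefficients gives the same result as applying A to the corresponding
# linear combination and taking (product-weighted) inner products with W.
import numpy as np

np.random.seed(0)
A = np.random.randn(6, 6)    # full operator
V = np.random.randn(6, 3)    # source basis (columns)
W = np.random.randn(6, 3)    # range basis (columns)
P = np.eye(6) * 2.           # an optional range product

A_red = W.T @ P @ A @ V      # projected operator
coeffs = np.random.randn(3)

lhs = A_red @ coeffs
rhs = W.T @ P @ (A @ (V @ coeffs))
assert np.allclose(lhs, rhs)
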
def test_ext(extension_alg):
    size = 5
    ident = np.identity(size)
    current = ident[0]
    for i in range(1, size):
        c = NumpyVectorArray(current)
        n, _ = extension_alg(c, NumpyVectorArray(ident[i]))
        assert np.allclose(n.data, ident[0:i + 1])
        current = ident[0:i + 1]

def apply(self, U, ind=None, mu=None):
    assert U in self.source
    assert U.check_ind(ind)
    U_array = U._array[:U._len] if ind is None else U._array[ind]
    if self.parametric:
        mu = self.parse_parameter(mu)
        return NumpyVectorArray(self._mapping(U_array, mu=mu), copy=False)
    else:
        return NumpyVectorArray(self._mapping(U_array), copy=False)

def test_scal():
    v = np.array([[1, 2, 3], [4, 5, 6]], dtype=float)
    v = NumpyVectorArray(v)
    v.scal(1j)
    k = 0
    for i in range(2):
        for j in range(3):
            k += 1
            assert v.data[i, j] == k * 1j

def test_axpy():
    x = NumpyVectorArray(np.array([1.]))
    y = NumpyVectorArray(np.array([1.]))
    y.axpy(1 + 1j, x)
    assert y.data[0, 0] == 2 + 1j

    x = NumpyVectorArray(np.array([1 + 1j]))
    y = NumpyVectorArray(np.array([1.]))
    y.axpy(-1, x)
    assert y.data[0, 0] == -1j

def as_vector(self, mu=None):
    if not self.linear:
        raise TypeError('This nonlinear operator does not represent a vector or linear functional.')
    elif self.source.dim == 1 and self.source.type is NumpyVectorArray:
        return self.apply(NumpyVectorArray(1), mu=mu)
    elif self.range.dim == 1 and self.range.type is NumpyVectorArray:
        return self.apply_adjoint(NumpyVectorArray(1), mu=mu)
    else:
        raise TypeError('This operator does not represent a vector or linear functional.')

def thermalblock_vectorarray_factory(transposed, xblocks, yblocks, diameter, seed):
    from pymor.operators.constructions import VectorArrayOperator
    _, _, U, V, sp, rp = thermalblock_factory(xblocks, yblocks, diameter, seed)
    op = VectorArrayOperator(U, transposed)
    if transposed:
        U = V
        V = NumpyVectorArray(np.random.random((7, op.range.dim)), copy=False)
        sp = rp
        rp = NumpyMatrixOperator(np.eye(op.range.dim) * 2)
    else:
        U = NumpyVectorArray(np.random.random((7, op.source.dim)), copy=False)
        sp = NumpyMatrixOperator(np.eye(op.source.dim) * 2)
    return op, None, U, V, sp, rp

def test_restricted(operator_with_arrays):
    op, mu, U, _ = operator_with_arrays
    if op.range.dim == 0:
        return
    np.random.seed(4711 + U.dim)
    for num in [0, 1, 3, 7]:
        components = np.random.randint(0, op.range.dim, num)
        try:
            rop, source_dofs = op.restricted(components)
        except NotImplementedError:
            return
        op_U = NumpyVectorArray(op.apply(U, mu=mu).components(components))
        rop_U = rop.apply(NumpyVectorArray(U.components(source_dofs)), mu=mu)
        assert np.all(almost_equal(op_U, rop_U))

def as_vector(self, mu=None):
    matrix = self._matrix
    if matrix.shape[0] != 1 and matrix.shape[1] != 1:
        raise TypeError('This operator does not represent a vector or linear functional.')
    return NumpyVectorArray(matrix.ravel(), copy=True)

def test_to_matrix():
    np.random.seed(0)
    A = np.random.randn(2, 2)
    B = np.random.randn(3, 3)
    C = np.random.randn(3, 3)

    X = np.bmat([[np.eye(2) + A, np.zeros((2, 3))],
                 [np.zeros((3, 2)), B.dot(C.T)]])

    C = sps.csc_matrix(C)

    Aop = NumpyMatrixOperator(A)
    Bop = NumpyMatrixOperator(B)
    Cop = NumpyMatrixOperator(C)

    Xop = BlockDiagonalOperator([LincombOperator([IdentityOperator(NumpyVectorSpace(2)), Aop], [1, 1]),
                                 Concatenation(Bop, AdjointOperator(Cop))])

    assert np.allclose(X, to_matrix(Xop))
    assert np.allclose(X, to_matrix(Xop, format='csr').toarray())

    np.random.seed(0)
    V = np.random.randn(10, 2)
    Vva = NumpyVectorArray(V.T)
    Vop = VectorArrayOperator(Vva)
    assert np.allclose(V, to_matrix(Vop))
    Vop = VectorArrayOperator(Vva, transposed=True)
    assert np.allclose(V, to_matrix(Vop).T)

def thermalblock_vector_factory(xblocks, yblocks, diameter, seed):
    from pymor.operators.constructions import VectorOperator
    _, _, U, V, sp, rp = thermalblock_factory(xblocks, yblocks, diameter, seed)
    op = VectorOperator(U.copy(ind=0))
    U = NumpyVectorArray(np.random.random((7, 1)), copy=False)
    sp = NumpyMatrixOperator(np.eye(1) * 2)
    return op, None, U, V, sp, rp

def apply(self, U, ind=None, mu=None):
    assert isinstance(U, NumpyVectorArray)
    assert U in self.source
    mu = self.parse_parameter(mu)

    if not hasattr(self, '_grid_data'):
        self._fetch_grid_data()

    ind = range(len(U)) if ind is None else ind
    U = U.data
    R = np.zeros((len(ind), self.source.dim))

    bi = self.boundary_info
    gd = self._grid_data
    SUPE = gd['SUPE']
    VOLS0 = gd['VOLS0']
    VOLS1 = gd['VOLS1']
    BOUNDARIES = gd['BOUNDARIES']
    CENTERS = gd['CENTERS']
    DIRICHLET_BOUNDARIES = gd['DIRICHLET_BOUNDARIES']
    NEUMANN_BOUNDARIES = gd['NEUMANN_BOUNDARIES']
    UNIT_OUTER_NORMALS = gd['UNIT_OUTER_NORMALS']

    if bi.has_dirichlet:
        if hasattr(self, '_dirichlet_values'):
            dirichlet_values = self._dirichlet_values
        elif self.dirichlet_data is not None:
            dirichlet_values = self.dirichlet_data(CENTERS[DIRICHLET_BOUNDARIES], mu=mu)
        else:
            dirichlet_values = np.zeros_like(DIRICHLET_BOUNDARIES)
        F_dirichlet = self.numerical_flux.evaluate_stage1(dirichlet_values, mu)

    for i, j in enumerate(ind):
        Ui = U[j]
        Ri = R[i]

        F = self.numerical_flux.evaluate_stage1(Ui, mu)
        F_edge = [f[SUPE] for f in F]

        for f in F_edge:
            f[BOUNDARIES, 1] = f[BOUNDARIES, 0]
        if bi.has_dirichlet:
            for f, f_d in zip(F_edge, F_dirichlet):
                f[DIRICHLET_BOUNDARIES, 1] = f_d

        NUM_FLUX = self.numerical_flux.evaluate_stage2(F_edge, UNIT_OUTER_NORMALS, VOLS1, mu)

        if bi.has_neumann:
            NUM_FLUX[NEUMANN_BOUNDARIES] = 0

        iadd_masked(Ri, NUM_FLUX, SUPE[:, 0])
        isub_masked(Ri, NUM_FLUX, SUPE[:, 1])

    R /= VOLS0

    return NumpyVectorArray(R)

def apply(self, U, ind=None, mu=None):
    assert U in self.source
    if not self.transposed:
        if ind is not None:
            U = U.copy(ind)
        return self._array.lincomb(U.data)
    else:
        return NumpyVectorArray(U.dot(self._array, ind=ind), copy=False)

def test_induced():
    grid = TriaGrid(num_intervals=(10, 10))
    boundary_info = AllDirichletBoundaryInfo(grid)
    product = L2ProductP1(grid, boundary_info)
    zero = NumpyVectorArray(np.zeros(grid.size(2)))
    norm = induced_norm(product)
    value = norm(zero)
    np.testing.assert_almost_equal(value, 0.0)

def thermalblock_vectorfunc_factory(product, xblocks, yblocks, diameter, seed):
    from pymor.operators.constructions import VectorFunctional
    _, _, U, V, sp, rp = thermalblock_factory(xblocks, yblocks, diameter, seed)
    op = VectorFunctional(U.copy(ind=0), product=sp if product else None)
    U = V
    V = NumpyVectorArray(np.random.random((7, 1)), copy=False)
    sp = rp
    rp = NumpyMatrixOperator(np.eye(1) * 2)
    return op, None, U, V, sp, rp

def restricted(self, dofs):
    assert all(0 <= c < self.range.dim for c in dofs)
    if not self.transposed:
        restricted_value = NumpyVectorArray(self._array.components(dofs))
        return VectorArrayOperator(restricted_value, False), np.arange(self.source.dim, dtype=np.int32)
    else:
        raise NotImplementedError

def reconstruct(self, U):
    """Reconstruct high-dimensional vector from reduced vector `U`."""
    assert isinstance(U, NumpyVectorArray)
    UU = np.zeros((len(U), self.dim))
    UU[:, :self.dim_subbasis] = U.data
    UU = NumpyVectorArray(UU, copy=False)
    if self.old_recontructor:
        return self.old_recontructor.reconstruct(UU)
    else:
        return UU

def apply(self, U, ind=None, mu=None):
    mu = self.parse_parameter(mu)
    if len(self.interpolation_dofs) == 0:
        count = len(ind) if ind is not None else len(U)
        return self.range.zeros(count=count)

    if hasattr(self, 'restricted_operator'):
        U_components = NumpyVectorArray(U.components(self.source_dofs, ind=ind), copy=False)
        AU = self.restricted_operator.apply(U_components, mu=mu)
    else:
        AU = NumpyVectorArray(self.operator.apply(U, mu=mu).components(self.interpolation_dofs), copy=False)
    try:
        if self.triangular:
            interpolation_coefficients = solve_triangular(self.interpolation_matrix, AU.data.T,
                                                          lower=True, unit_diagonal=True).T
        else:
            interpolation_coefficients = np.linalg.solve(self.interpolation_matrix, AU.data.T).T
    except ValueError:  # this exception occurs when AU contains NaNs ...
        interpolation_coefficients = np.empty((len(AU), len(self.collateral_basis))) + np.nan
    return self.collateral_basis.lincomb(interpolation_coefficients)

def test_blk_diag_apply_inverse():
    np.random.seed(0)

    A = np.random.randn(2, 2)
    B = np.random.randn(3, 3)
    C = spla.block_diag(A, B)

    Aop = NumpyMatrixOperator(A)
    Bop = NumpyMatrixOperator(B)
    Cop = BlockDiagonalOperator((Aop, Bop))

    v1 = np.random.randn(2)
    v2 = np.random.randn(3)
    v = np.hstack((v1, v2))
    v1va = NumpyVectorArray(v1)
    v2va = NumpyVectorArray(v2)
    vva = BlockVectorArray((v1va, v2va))

    wva = Cop.apply_inverse(vva)
    w = np.hstack((wva.block(0).data, wva.block(1).data))
    assert np.allclose(spla.solve(C, v), w)

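# What the block-diagonal apply_inverse tested above exploits, in plain NumPy/SciPy (illustrative
# sketch only): a block diagonal system decouples, so each block can be solved independently and
# the results concatenated.
import numpy as np
import scipy.linalg as spla

np.random.seed(0)
A = np.random.randn(2, 2)
B = np.random.randn(3, 3)
v1, v2 = np.random.randn(2), np.random.randn(3)

w_blockwise = np.hstack((spla.solve(A, v1), spla.solve(B, v2)))
w_monolithic = spla.solve(spla.block_diag(A, B), np.hstack((v1, v2)))
assert np.allclose(w_blockwise, w_monolithic)
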
def save(self):
    if not HAVE_PYVTK:
        msg = QMessageBox(QMessageBox.Critical, 'Error', 'VTK output disabled. Please install pyvtk.')
        msg.exec_()
        return
    filename = QFileDialog.getSaveFileName(self, 'Save as vtk file')[0]
    base_name = filename.split('.vtu')[0].split('.vtk')[0].split('.pvd')[0]
    if base_name:
        if len(self.U) == 1:
            write_vtk(self.grid, NumpyVectorArray(self.U[0], copy=False), base_name, codim=self.codim)
        else:
            for i, u in enumerate(self.U):
                write_vtk(self.grid, NumpyVectorArray(u, copy=False),
                          '{}-{}'.format(base_name, i), codim=self.codim)

def test_vtkio(rect_or_tria_grid):
    grid = rect_or_tria_grid
    steps = 4
    for dim in range(1, 2):
        for codim, data in enumerate((NumpyVectorArray(np.zeros((steps, grid.size(c))))
                                      for c in range(grid.dim + 1))):
            with NamedTemporaryFile('wb') as out:
                if codim == 1:
                    with pytest.raises(NotImplementedError):
                        write_vtk(grid, data, out.name, codim=codim)
                else:
                    write_vtk(grid, data, out.name, codim=codim)

def test_real_imag():
    A = np.array([[1 + 2j, 3 + 4j],
                  [5 + 6j, 7 + 8j],
                  [9 + 10j, 11 + 12j]])
    Ava = NumpyVectorArray(A)

    Bva = Ava.real
    Cva = Ava.imag

    k = 0
    for i in range(3):
        for j in range(2):
            k += 1
            assert Bva.data[i, j] == k
            k += 1
            assert Cva.data[i, j] == k

def test_complex():
    np.random.seed(0)
    I = np.eye(5)
    A = np.random.randn(5, 5)
    B = np.random.randn(5, 5)
    C = np.random.randn(3, 5)

    Iop = NumpyMatrixOperator(I)
    Aop = NumpyMatrixOperator(A)
    Bop = NumpyMatrixOperator(B)
    Cva = NumpyVectorArray(C)

    # assemble_lincomb
    assert not np.iscomplexobj(Aop.assemble_lincomb((Iop, Bop), (1, 1))._matrix)
    assert not np.iscomplexobj(Aop.assemble_lincomb((Aop, Bop), (1, 1))._matrix)
    assert not np.iscomplexobj(Aop.assemble_lincomb((Aop, Bop), (1 + 0j, 1 + 0j))._matrix)
    assert np.iscomplexobj(Aop.assemble_lincomb((Aop, Bop), (1j, 1))._matrix)
    assert np.iscomplexobj(Aop.assemble_lincomb((Bop, Aop), (1, 1j))._matrix)

    # apply_inverse
    assert not np.iscomplexobj(Aop.apply_inverse(Cva).data)
    assert np.iscomplexobj((Aop * 1j).apply_inverse(Cva).data)
    assert np.iscomplexobj(Aop.assemble_lincomb((Aop, Bop), (1, 1j)).apply_inverse(Cva).data)
    assert np.iscomplexobj(Aop.apply_inverse(Cva * 1j).data)

    # append
    for rsrv in (0, 10):
        for o_ind in (None, [0]):
            va = NumpyVectorArray.make_array(subtype=5, reserve=rsrv)
            va.append(Cva)
            D = np.random.randn(1, 5) + 1j * np.random.randn(1, 5)
            Dva = NumpyVectorArray(D)
            assert not np.iscomplexobj(va.data)
            assert np.iscomplexobj(Dva.data)
            va.append(Dva, o_ind)
            assert np.iscomplexobj(va.data)

    # scal
    assert not np.iscomplexobj(Cva.data)
    assert np.iscomplexobj((Cva * 1j).data)
    assert np.iscomplexobj((Cva * (1 + 0j)).data)

    # axpy
    assert not np.iscomplexobj(Cva.data)
    Cva.axpy(1, Dva, 0)
    assert np.iscomplexobj(Cva.data)

    Cva = NumpyVectorArray(C)
    assert not np.iscomplexobj(Cva.data)
    Cva.axpy(1j, Dva, 0)
    assert np.iscomplexobj(Cva.data)

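# Background for the dtype checks above, in plain NumPy (a sketch, independent of pyMOR): in-place
# ufuncs never promote a real array to complex, so complex scal/axpy/append have to reallocate the
# underlying array, which is exactly the behavior test_complex exercises.
import numpy as np

a = np.ones(3)
b = a * 1j                   # out-of-place multiplication promotes to complex128
assert np.iscomplexobj(b)

try:
    a *= 1j                  # in-place multiplication refuses to cast complex back to float64
    raised = False
except TypeError:
    raised = True
assert raised
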
def test_pairwise_dot():
    x = NumpyVectorArray(np.array([1 + 1j]))
    y = NumpyVectorArray(np.array([1 - 1j]))
    z = x.pairwise_dot(y)
    assert z == 2j

def test_dot():
    x = NumpyVectorArray(np.array([1 + 1j]))
    y = NumpyVectorArray(np.array([1 - 1j]))
    z = x.dot(y)
    assert z[0, 0] == 2j

def test_numpy_sparse_solvers(numpy_sparse_solver):
    op = NumpyMatrixOperator(diags([np.arange(1., 11.)], [0]), solver_options=numpy_sparse_solver)
    rhs = NumpyVectorArray(np.ones(10))
    solution = op.apply_inverse(rhs)
    assert ((op.apply(solution) - rhs).l2_norm() / rhs.l2_norm())[0] < 1e-8

def test_numpy_dense_solvers(numpy_dense_solver):
    op = NumpyMatrixOperator(np.eye(10) * np.arange(1, 11), solver_options=numpy_dense_solver)
    rhs = NumpyVectorArray(np.ones(10))
    solution = op.apply_inverse(rhs)
    assert ((op.apply(solution) - rhs).l2_norm() / rhs.l2_norm())[0] < 1e-8

def test_generic_solvers(generic_solver):
    op = GenericOperator(generic_solver)
    rhs = NumpyVectorArray(np.ones(10))
    solution = op.apply_inverse(rhs)
    assert ((op.apply(solution) - rhs).l2_norm() / rhs.l2_norm())[0] < 1e-8

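# The acceptance criterion shared by the three solver tests above, spelled out with a plain SciPy
# solve (illustrative sketch, independent of pyMOR's solver_options machinery): the relative
# residual of the computed solution must fall below the tolerance.
import numpy as np
from scipy.sparse import diags
from scipy.sparse.linalg import spsolve

A = diags([np.arange(1., 11.)], [0]).tocsc()
rhs = np.ones(10)
solution = spsolve(A, rhs)
rel_residual = np.linalg.norm(A @ solution - rhs) / np.linalg.norm(rhs)
assert rel_residual < 1e-8
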