def _assemble(self) -> Tuple[fenics.PETScMatrix, fenics.PETScMatrix]:
        """Construct matrices for generalized eigenvalue problem.

        Assemble the left- and right-hand sides of the eigenvalue problem
        into the corresponding matrices.

        Returns
        -------
        A : fenics.PETScMatrix
            Matrix corresponding to the form $a(u, v)$.
        B : fenics.PETScMatrix
            Matrix corresponding to the form $b(u, v)$.

        """
        V = self.vector_space

        u = fenics.TrialFunction(V)
        v = fenics.TestFunction(V)

        a, b = self._construct_eigenproblem(u, v)

        A = fenics.PETScMatrix()
        B = fenics.PETScMatrix()

        fenics.assemble(a * fenics.dx, tensor=A)
        fenics.assemble(b * fenics.dx, tensor=B)

        return A, B
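
# Hedged, self-contained sketch (not part of the class above): once A and B are
# assembled as PETScMatrix objects, the generalized eigenvalue problem
# A x = lambda B x can be handed to the legacy FEniCS SLEPc wrapper. The
# Laplace/mass forms below are chosen only for illustration.
import fenics

mesh = fenics.UnitSquareMesh(16, 16)
V = fenics.FunctionSpace(mesh, 'CG', 1)
u, v = fenics.TrialFunction(V), fenics.TestFunction(V)

A, B = fenics.PETScMatrix(), fenics.PETScMatrix()
fenics.assemble(fenics.dot(fenics.grad(u), fenics.grad(v)) * fenics.dx, tensor=A)
fenics.assemble(u * v * fenics.dx, tensor=B)

solver = fenics.SLEPcEigenSolver(A, B)
solver.solve(5)                          # compute (up to) five eigenpairs
r, c, rx, cx = solver.get_eigenpair(0)   # real/imaginary eigenvalue parts and eigenvector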
Example #2
    def __init__(self,
                 lagrangian,
                 bcs_list,
                 states,
                 adjoints,
                 boundaries,
                 config,
                 ksp_options,
                 adjoint_ksp_options,
                 shape_scalar_product=None,
                 deformation_space=None):
        """Initializes the ShapeFormHandler object.

		Parameters
		----------
		lagrangian : cashocs._forms.Lagrangian
			The Lagrangian corresponding to the shape optimization problem
		bcs_list : list[list[dolfin.fem.dirichletbc.DirichletBC]]
			list of boundary conditions for the state variables
		states : list[dolfin.function.function.Function]
			list of state variables
		adjoints : list[dolfin.function.function.Function]
			list of adjoint variables
		boundaries : dolfin.cpp.mesh.MeshFunctionSizet
			a MeshFunction for the boundary markers
		config : configparser.ConfigParser
			the config parser object storing the problem's config
		ksp_options : list[list[list[str]]]
			The list of command line options for the KSP for the
			state systems.
		adjoint_ksp_options : list[list[list[str]]]
			The list of command line options for the KSP for the
			adjoint systems.
		shape_scalar_product : ufl.form.Form, optional
			The weak form of the scalar product used to determine the
			shape gradient (default is ``None``).
		deformation_space : dolfin.function.functionspace.FunctionSpace, optional
			The function space for the deformation vector fields; if ``None``,
			a piecewise linear CG vector function space on the mesh is used
			(default is ``None``).
		"""

        FormHandler.__init__(self, lagrangian, bcs_list, states, adjoints,
                             config, ksp_options, adjoint_ksp_options)

        self.boundaries = boundaries
        self.shape_scalar_product = shape_scalar_product

        self.degree_estimation = self.config.getboolean('ShapeGradient',
                                                        'degree_estimation',
                                                        fallback=False)
        self.use_pull_back = self.config.getboolean('ShapeGradient',
                                                    'use_pull_back',
                                                    fallback=True)

        if deformation_space is None:
            self.deformation_space = fenics.VectorFunctionSpace(
                self.mesh, 'CG', 1)
        else:
            self.deformation_space = deformation_space

        self.test_vector_field = fenics.TestFunction(self.deformation_space)

        self.regularization = Regularization(self)

        # Calculate the necessary UFL forms
        self.inhomogeneous_mu = False
        self.__compute_shape_derivative()
        self.__compute_shape_gradient_forms()
        self.__setup_mu_computation()

        if self.degree_estimation:
            self.estimated_degree = np.maximum(
                estimate_total_polynomial_degree(self.riesz_scalar_product),
                estimate_total_polynomial_degree(self.shape_derivative))
            self.assembler = fenics.SystemAssembler(self.riesz_scalar_product,
                                                    self.shape_derivative,
                                                    self.bcs_shape,
                                                    form_compiler_parameters={
                                                        'quadrature_degree':
                                                        self.estimated_degree
                                                    })
        else:
            try:
                self.assembler = fenics.SystemAssembler(
                    self.riesz_scalar_product, self.shape_derivative,
                    self.bcs_shape)
            except (AssertionError, ValueError):
                self.estimated_degree = np.maximum(
                    estimate_total_polynomial_degree(
                        self.riesz_scalar_product),
                    estimate_total_polynomial_degree(self.shape_derivative))
                self.assembler = fenics.SystemAssembler(
                    self.riesz_scalar_product,
                    self.shape_derivative,
                    self.bcs_shape,
                    form_compiler_parameters={
                        'quadrature_degree': self.estimated_degree
                    })

        self.assembler.keep_diagonal = True
        self.fe_scalar_product_matrix = fenics.PETScMatrix()
        self.fe_shape_derivative_vector = fenics.PETScVector()

        self.update_scalar_product()

        # test for symmetry
        if not self.scalar_product_matrix.isSymmetric():
            if not self.scalar_product_matrix.isSymmetric(1e-15):
                if not (self.scalar_product_matrix -
                        self.scalar_product_matrix.copy().transpose()
                        ).norm() / self.scalar_product_matrix.norm() < 1e-15:
                    raise InputError(
                        'cashocs._forms.ShapeFormHandler',
                        'shape_scalar_product',
                        'Supplied scalar product form is not symmetric.')

        if self.opt_algo == 'newton' \
          or (self.opt_algo == 'pdas' and self.inner_pdas == 'newton'):
            raise NotImplementedError(
                'Second order methods are not implemented for shape optimization yet'
            )
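
# Hedged, standalone sketch (independent of the class above) of the symmetry test
# used for the shape scalar product: assemble an H^1-type scalar product, extract
# the petsc4py matrix, and mirror the exact, tolerance-based, and relative-norm checks.
import fenics

mesh = fenics.UnitSquareMesh(8, 8)
V = fenics.VectorFunctionSpace(mesh, 'CG', 1)
u, v = fenics.TrialFunction(V), fenics.TestFunction(V)
form = (fenics.inner(fenics.grad(u), fenics.grad(v)) + fenics.inner(u, v)) * fenics.dx

M = fenics.PETScMatrix()
fenics.assemble(form, tensor=M)
mat = fenics.as_backend_type(M).mat()    # underlying petsc4py Mat

if not mat.isSymmetric() and not mat.isSymmetric(1e-15):
    rel_asymmetry = (mat - mat.copy().transpose()).norm() / mat.norm()
    assert rel_asymmetry < 1e-15, 'scalar product form is not symmetric'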
Example #3
def damped_newton_solve(F, u, bcs, rtol=1e-10, atol=1e-10, max_iter=50, convergence_type='combined', norm_type='l2',
						damped=True, verbose=True, ksp=None, ksp_options=None):
	r"""A damped Newton method for solving nonlinear equations.

	The damped Newton method is based on the natural monotonicity test from
	`Deuflhard, Newton methods for nonlinear problems <https://doi.org/10.1007/978-3-642-23899-4>`_.
	It also allows fine tuning via a direct interface, and absolute, relative,
	and combined stopping criteria. Can also be used to specify the solver for
	the inner (linear) subproblems via petsc ksps.

	The method terminates after ``max_iter`` iterations, or if a termination criterion is
	satisfied. These criteria are given by

	- a relative one in case ``convergence_type = 'rel'``, i.e.,

	.. math:: \lvert\lvert F_{k} \rvert\rvert \leq \texttt{rtol} \lvert\lvert F_0 \rvert\rvert.

	- an absolute one in case ``convergence_type = 'abs'``, i.e.,

	.. math:: \lvert\lvert F_{k} \rvert\rvert \leq \texttt{atol}.

	- a combination of both in case ``convergence_type = 'combined'``, i.e.,

	.. math:: \lvert\lvert F_{k} \rvert\rvert \leq \texttt{atol} + \texttt{rtol} \lvert\lvert F_0 \rvert\rvert.

	The norm chosen for the termination criterion is specified via ``norm_type``.

	Parameters
	----------
	F : ufl.form.Form
		The variational form of the nonlinear problem to be solved by Newton's method.
	u : dolfin.function.function.Function
		The sought solution / initial guess. The initial guess is not assumed to
		satisfy the Dirichlet boundary conditions; they are applied automatically.
		The method overwrites / updates this Function.
	bcs : list[dolfin.fem.dirichletbc.DirichletBC]
		A list of DirichletBCs for the nonlinear variational problem.
	rtol : float, optional
		Relative tolerance of the solver if convergence_type is either ``'combined'`` or ``'rel'``
		(default is ``rtol = 1e-10``).
	atol : float, optional
		Absolute tolerance of the solver if convergence_type is either ``'combined'`` or ``'abs'``
		(default is ``atol = 1e-10``).
	max_iter : int, optional
		Maximum number of iterations carried out by the method
		(default is ``max_iter = 50``).
	convergence_type : {'combined', 'rel', 'abs'}, optional
		Determines the type of stopping criterion that is used (default is
		``'combined'``).
	norm_type : {'l2', 'linf'}, optional
		Determines which norm is used in the stopping criterion (default is
		``'l2'``).
	damped : bool, optional
		If ``True``, then a damping strategy is used. If ``False``, the classical
		Newton-Raphson iteration (without damping) is used (default is ``True``).
	verbose : bool, optional
		If ``True``, prints status of the iteration to the console (default
		is ``True``).
	ksp : petsc4py.PETSc.KSP, optional
		The PETSc KSP object used to solve the inner (linear) problems. If this
		is ``None``, the direct solver MUMPS is used (default is ``None``).
	ksp_options : list[list[str]], optional
		The list of options for the linear solver (default is ``None``).


	Returns
	-------
	dolfin.function.function.Function
		The solution of the nonlinear variational problem, if converged.
		This overwrites the input function ``u``.


	Examples
	--------
	Consider the problem

	.. math::
		\begin{alignedat}{2}
		- \Delta u + u^3 &= 1 \quad &&\text{ in } \Omega=(0,1)^2 \\
		u &= 0 \quad &&\text{ on } \Gamma.
		\end{alignedat}

	This is solved with the code ::

		from fenics import *
		import cashocs

		mesh, _, boundaries, dx, _, _ = cashocs.regular_mesh(25)
		V = FunctionSpace(mesh, 'CG', 1)

		u = Function(V)
		v = TestFunction(V)
		F = inner(grad(u), grad(v))*dx + pow(u,3)*v*dx - Constant(1)*v*dx
		bcs = cashocs.create_bcs_list(V, Constant(0.0), boundaries, [1,2,3,4])
		cashocs.damped_newton_solve(F, u, bcs)
	"""

	if convergence_type not in ['rel', 'abs', 'combined']:
		raise InputError('cashocs.nonlinear_solvers.damped_newton_solve', 'convergence_type', 'Input convergence_type has to be one of \'rel\', \'abs\', or \'combined\'.')

	if norm_type not in ['l2', 'linf']:
		raise InputError('cashocs.nonlinear_solvers.damped_newton_solve', 'norm_type', 'Input norm_type has to be one of \'l2\' or \'linf\'.')

	# create the PETSc ksp
	if ksp is None:
		if ksp_options is None:
			ksp_options = [
				['ksp_type', 'preonly'],
				['pc_type', 'lu'],
				['pc_factor_mat_solver_type', 'mumps'],
				['mat_mumps_icntl_24', 1]
			]

		ksp = PETSc.KSP().create()
		_setup_petsc_options([ksp], [ksp_options])
		ksp.setFromOptions()

	# Calculate the Jacobian.
	dF = fenics.derivative(F, u)

	# Setup increment and function for monotonicity test
	V = u.function_space()
	du = fenics.Function(V)
	ddu = fenics.Function(V)
	u_save = fenics.Function(V)

	iterations = 0

	[bc.apply(u.vector()) for bc in bcs]
	# copy the boundary conditions and homogenize them for the increment
	bcs_hom = [fenics.DirichletBC(bc) for bc in bcs]
	[bc.homogenize() for bc in bcs_hom]

	assembler = fenics.SystemAssembler(dF, -F, bcs_hom)
	assembler.keep_diagonal = True
	A_fenics = fenics.PETScMatrix()
	residual = fenics.PETScVector()

	# Compute the initial residual
	assembler.assemble(A_fenics, residual)
	A_fenics.ident_zeros()
	A = fenics.as_backend_type(A_fenics).mat()
	b = fenics.as_backend_type(residual).vec()

	res_0 = residual.norm(norm_type)
	if res_0 == 0.0:
		if verbose:
			print('Residual vanishes, input is already a solution.')
		return u

	res = res_0
	if verbose:
		print('Newton Iteration ' + format(iterations, '2d') + ' - residual (abs):  '
			  + format(res, '.3e') + ' (tol = ' + format(atol, '.3e') + ')    residual (rel): '
			  + format(res/res_0, '.3e') + ' (tol = ' + format(rtol, '.3e') + ')')

	if convergence_type == 'abs':
		tol = atol
	elif convergence_type == 'rel':
		tol = rtol*res_0
	else:
		tol = rtol*res_0 + atol

	# While loop until termination
	while res > tol and iterations < max_iter:
		iterations += 1
		lmbd = 1.0
		breakdown = False
		u_save.vector()[:] = u.vector()[:]

		# Solve the inner problem
		_solve_linear_problem(ksp, A, b, du.vector().vec(), ksp_options)
		du.vector().apply('')

		# perform backtracking in case damping is used
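		# The acceptance test below is Deuflhard's natural monotonicity test: the
		# simplified Newton correction ddu (same Jacobian, residual evaluated at the
		# trial point u + lmbd*du) must be no larger than du, otherwise lmbd is halved.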
		if damped:
			while True:
				u.vector()[:] += lmbd*du.vector()[:]
				assembler.assemble(residual)
				b = fenics.as_backend_type(residual).vec()
				_solve_linear_problem(ksp=ksp, b=b, x=ddu.vector().vec(), ksp_options=ksp_options)
				ddu.vector().apply('')

				if ddu.vector().norm(norm_type)/du.vector().norm(norm_type) <= 1:
					break
				else:
					u.vector()[:] = u_save.vector()[:]
					lmbd /= 2

				if lmbd < 1e-6:
					breakdown = True
					break

		else:
			u.vector()[:] += du.vector()[:]

		if breakdown:
			raise NotConvergedError('Newton solver (state system)', 'Stepsize for increment too low.')

		if iterations == max_iter:
			raise NotConvergedError('Newton solver (state system)', 'Maximum number of iterations was exceeded.')

		# compute the new residual
		assembler.assemble(A_fenics, residual)
		A_fenics.ident_zeros()
		A = fenics.as_backend_type(A_fenics).mat()
		b = fenics.as_backend_type(residual).vec()

		[bc.apply(residual) for bc in bcs_hom]

		res = residual.norm(norm_type)
		if verbose:
			print('Newton Iteration ' + format(iterations, '2d') + ' - residual (abs):  '
				  + format(res, '.3e') + ' (tol = ' + format(atol, '.3e') + ')    residual (rel): '
				  + format(res/res_0, '.3e') + ' (tol = ' + format(rtol, '.3e') + ')')

		if res < tol:
			if verbose:
				print('\nNewton Solver converged after ' + str(iterations) + ' iterations.\n')
			break

	return u
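

# Hedged usage sketch (not from the source): the same problem as in the docstring
# example above, but with the inner linear systems solved iteratively by passing
# PETSc command line options through ksp_options (CG with a Jacobi preconditioner).
from fenics import *
import cashocs

mesh, _, boundaries, dx, _, _ = cashocs.regular_mesh(25)
V = FunctionSpace(mesh, 'CG', 1)

u = Function(V)
v = TestFunction(V)
F = inner(grad(u), grad(v))*dx + pow(u, 3)*v*dx - Constant(1)*v*dx
bcs = cashocs.create_bcs_list(V, Constant(0.0), boundaries, [1, 2, 3, 4])

ksp_options = [
	['ksp_type', 'cg'],
	['pc_type', 'jacobi'],
	['ksp_rtol', 1e-12],
]
u = cashocs.damped_newton_solve(F, u, bcs, ksp_options=ksp_options)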
Example #4
File: utils.py  Project: bdevl/PGMCPC

# Imports assumed by this snippet (not shown in the original excerpt):
import numpy as np
import dolfin as df
from petsc4py import PETSc
from scipy.sparse import csr_matrix


def AssembleBasisFunctionMatrix(V, points, *, Global_V=None, ReturnType='scipy'):
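    """Assemble the matrix of basis-function evaluations at the given points.

    The resulting (len(points) * dim) x ncols matrix holds, in each row block,
    the values of all basis functions of ``V`` at one of the requested points,
    so multiplying it with a coefficient vector yields point evaluations of the
    corresponding finite element function.
    """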

    if points.ndim != 2:
        raise ValueError('points must be a 2D array of shape (npoints, gdim)')

    if points.shape[1] > 3:
        raise ValueError('points may have at most 3 spatial coordinates')

    dim = V.num_sub_spaces()
    if dim == 0:
        dim = 1

    mesh = V.mesh()
    element = V.element()
    nbasis = element.space_dimension()
    nrows = len(points)

    if Global_V is None:
        ncols = V.dim()
    else:
        ncols = Global_V.dim()

    comm_self = df.MPI.comm_self

    if dim > 1:
        mat = PETSc.Mat().createAIJ(size=(nrows * dim, ncols), comm=comm_self)
        V_subspaces = V.split()
    else:
        mat = PETSc.Mat().createAIJ(size=(nrows, ncols), comm=comm_self)

    mat.setUp()
    mat.assemblyBegin()

    dmap = V.dofmap()
    tree = mesh.bounding_box_tree()

    for row, pt in enumerate(points):

        cell_id = tree.compute_first_entity_collision(df.Point(*pt))

        if cell_id >= mesh.num_cells():
            raise Exception('No collision with mesh for requested point')

        cell = df.Cell(mesh, cell_id)
        vertex_coordinates = cell.get_vertex_coordinates()
        vertex_coordinates = np.array(vertex_coordinates, dtype=np.float64)
        cell_orientation = cell.orientation()
        basis_values = element.evaluate_basis_all(pt, vertex_coordinates, cell_orientation)
        assert basis_values.size == dim*nbasis

        if dim > 1:

            col_indices = dmap.cell_dofs(cell_id)

            for ii, vsub in enumerate(V_subspaces):
                row_indices = [dim * row + ii]
                submap = np.arange(ii, dim * nbasis, dim)
                mat.setValues(row_indices, col_indices, basis_values[submap], PETSc.InsertMode.INSERT_VALUES)
        else:

            row_indices = [row]
            col_indices = dmap.cell_dofs(cell_id)
            mat.setValues(row_indices, col_indices, basis_values, PETSc.InsertMode.INSERT_VALUES)

    mat.assemblyEnd()

    if ReturnType == 'scipy':
        r = mat.getValuesCSR()
        I = csr_matrix((r[2], r[1], r[0]), shape=(nrows * dim, ncols))
    elif ReturnType.lower() == 'csr_data':
        r = mat.getValuesCSR()
        return r[2], r[1], r[0]
    elif ReturnType == 'fenics':
        I = df.PETScMatrix(mat)
    else:
        raise ValueError('return type not recognized')

    return I
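
# Hedged usage sketch (assumptions: serial run, default 'scipy' return type): the
# assembled matrix maps a dof/coefficient vector to point evaluations, so
# I @ u.vector().get_local() should reproduce u at the requested points.
mesh = df.UnitSquareMesh(8, 8)
V = df.FunctionSpace(mesh, 'CG', 1)
u = df.interpolate(df.Expression('x[0] + 2*x[1]', degree=1), V)

pts = np.array([[0.25, 0.25], [0.5, 0.75]])
I = AssembleBasisFunctionMatrix(V, pts)
print(I @ u.vector().get_local())        # approx. [0.75, 2.0]
print([u(df.Point(*p)) for p in pts])    # direct evaluation for comparison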


def convert_scipy_csr_matrix_to_fenics_csr_matrix(A_scipy):
    A_petsc = PETSc.Mat().createAIJ(size=A_scipy.shape,
                                    csr=(A_scipy.indptr, A_scipy.indices,
                                         A_scipy.data))
    A_fenics = fenics.PETScMatrix(A_petsc)
    return A_fenics
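

# Hedged usage sketch: round-trip a small SciPy CSR matrix through the converter
# and compare against the dense original.
import scipy.sparse as sp

A_scipy = sp.csr_matrix(np.array([[2.0, -1.0], [-1.0, 2.0]]))
A_fenics = convert_scipy_csr_matrix_to_fenics_csr_matrix(A_scipy)
print(A_fenics.array())   # dense view; should match A_scipy.toarray()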
Example #6
File: ksdgvar.py  Project: leonavery/KSDG
 def setup_problem(self, t, debug=False):
     self.set_time(t)
     #
     # assemble the matrix, if necessary (once for all time points)
     #
     if not hasattr(self, 'A'):
         logVARIABLE('making matrix A')
         self.drho_integral = sum([
             tdrho * wrho * self.dx
             for tdrho, wrho in zip(self.tdrhos, self.wrhos)
         ])
         self.dU_integral = sum(
             [tdU * wU * self.dx for tdU, wU in zip(self.tdUs, self.wUs)])
         logVARIABLE('assembling A')
         self.A = fe.PETScMatrix()
         logVARIABLE('self.A', self.A)
         fe.assemble(self.drho_integral + self.dU_integral, tensor=self.A)
         logVARIABLE('A assembled. Applying BCs')
         pA = fe.as_backend_type(self.A).mat()
         Adiag = pA.getDiagonal()
         logVARIABLE('Adiag.array', Adiag.array)
         # self.A = fe.assemble(self.drho_integral + self.dU_integral +
         #                      self.dP_integral)
         for bc in self.bcs:
             bc.apply(self.A)
         Adiag = pA.getDiagonal()
         logVARIABLE('Adiag.array', Adiag.array)
         self.dsol = Function(self.VS)
         dsolsplit = self.dsol.split()
         self.drhos, self.dUs = (dsolsplit[:2**self.dim],
                                 dsolsplit[2**self.dim:])
     #
     # assemble RHS (for each time point, but compile only once)
     #
     #
     # These are the values of rho and U themselves (not their
     # symmetrized versions) on all subdomains of the original
     # domain.
     #
     if not hasattr(self, 'rhosds'):
         self.rhosds = matmul(self.eomat, self.irhos)
     # self.Usds is a list of nligands lists. Sublist i is of
     # length 2**dim and lists the value of ligand i on each of the
     # 2**dim subdomains.
     #
     if not hasattr(self, 'Usds'):
         self.Usds = [
             matmul(self.eomat,
                    self.iUs[i * 2**self.dim:(i + 1) * 2**self.dim])
             for i in range(self.nligands)
         ]
     if not hasattr(self, 'rho_terms'):
         logVARIABLE('making rho_terms')
         self.sigma = self.iparams['sigma']
         self.s2 = self.sigma * self.sigma / 2
         self.rhomin = self.iparams['rhomin']
         self.rhopen = self.iparams['rhopen']
         self.grhopen = self.iparams['grhopen']
         #
         # Compute fluxes on subdomains.
         # Vsds is a list of length 2**dim, the value of V on each
         # subdomain.
         #
         self.Vsds = []
         for Usd, rhosd in zip(zip(*self.Usds), self.rhosds):
             self.Vsds.append(self.V(Usd, ufl.max_value(rhosd,
                                                        self.rhomin)))
         self.vsds = [
             -ufl.grad(Vsd) -
             (self.s2 * ufl.grad(rhosd) / ufl.max_value(rhosd, self.rhomin))
             for Vsd, rhosd in zip(self.Vsds, self.rhosds)
         ]
         self.fluxsds = [
             vsd * rhosd for vsd, rhosd in zip(self.vsds, self.rhosds)
         ]
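         # Upwind facet fluxes: vnsds is the outgoing normal velocity max(v.n, 0) on
         # each subdomain, and facet_fluxsds takes rho from the upwind side of each facet.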
         self.vnsds = [
             ufl.max_value(ufl.dot(vsd, self.n), 0) for vsd in self.vsds
         ]
         self.facet_fluxsds = [
             (vnsd('+') * ufl.max_value(rhosd('+'), 0.0) -
              vnsd('-') * ufl.max_value(rhosd('-'), 0.0))
             for vnsd, rhosd in zip(self.vnsds, self.rhosds)
         ]
         #
         # Now combine the subdomain fluxes to get the fluxes for
         # the symmetrized functions
         #
         self.fluxs = matmul((2.0**-self.dim) * self.eomat, self.fluxsds)
         self.facet_fluxs = matmul((2.0**-self.dim) * self.eomat,
                                   self.facet_fluxsds)
         self.rho_flux_jump = sum([
             -facet_flux * ufl.jump(wrho) * self.dS
             for facet_flux, wrho in zip(self.facet_fluxs, self.wrhos)
         ])
         self.rho_grad_move = sum([
             ufl.dot(flux, ufl.grad(wrho)) * self.dx
             for flux, wrho in zip(self.fluxs, self.wrhos)
         ])
         self.rho_penalty = sum([
             -(self.degree**2 / self.havg) *
             ufl.dot(ufl.jump(rho, self.n),
                     ufl.jump(self.rhopen * wrho, self.n)) * self.dS
             for rho, wrho in zip(self.irhos, self.wrhos)
         ])
         self.grho_penalty = sum([
             self.degree**2 *
             (ufl.jump(ufl.grad(rho), self.n) *
              ufl.jump(ufl.grad(-self.grhopen * wrho), self.n)) * self.dS
             for rho, wrho in zip(self.irhos, self.wrhos)
         ])
         self.rho_terms = (self.rho_flux_jump + self.rho_grad_move +
                           self.rho_penalty + self.grho_penalty)
         logVARIABLE('rho_terms made')
     if not hasattr(self, 'U_terms'):
         logVARIABLE('making U_terms')
         self.Umin = self.iparams['Umin']
         self.Upen = self.iparams['Upen']
         self.gUpen = self.iparams['gUpen']
         self.U_decay = 0.0
         self.U_secretion = 0.0
         self.jump_gUw = 0.0
         self.U_diffusion = 0.0
         self.U_penalty = 0.0
         self.gU_penalty = 0.0
         for j, lig in enumerate(self.iligands.ligands()):
             sl = slice(j * 2**self.dim, (j + 1) * 2**self.dim)
             self.U_decay += sum([
                 -lig.gamma * iUi * wUi * self.dx
                 for iUi, wUi in zip(self.iUs[sl], self.wUs[sl])
             ])
             self.U_secretion += sum([
                 lig.s * rho * wU * self.dx
                 for rho, wU in zip(self.irhos, self.wUs[sl])
             ])
             self.jump_gUw += sum([
                 ufl.jump(lig.D * wU * ufl.grad(U), self.n) * self.dS
                 for wU, U in zip(self.wUs[sl], self.iUs[sl])
             ])
             self.U_diffusion += sum([
                 -lig.D * ufl.dot(ufl.grad(U), ufl.grad(wU)) * self.dx
                 for U, wU in zip(self.iUs[sl], self.wUs[sl])
             ])
             self.U_penalty += sum([
                 (-self.degree**2 / self.havg) *
                 ufl.dot(ufl.jump(U, self.n),
                         ufl.jump(self.Upen * wU, self.n)) * self.dS
                 for U, wU in zip(self.iUs[sl], self.wUs[sl])
             ])
             self.gU_penalty += sum([
                 -self.degree**2 * ufl.jump(ufl.grad(U), self.n) *
                 ufl.jump(ufl.grad(self.gUpen * wU), self.n) * self.dS
                 for U, wU in zip(self.iUs[sl], self.wUs[sl])
             ])
         self.U_terms = (
             # decay and secretion
             self.U_decay + self.U_secretion +
             # diffusion
             self.jump_gUw + self.U_diffusion +
             # penalties (to enforce continuity)
             self.U_penalty + self.gU_penalty)
         logVARIABLE('U_terms made')
     if not hasattr(self, 'all_terms'):
         logVARIABLE('making all_terms')
         self.all_terms = self.rho_terms + self.U_terms
     if not hasattr(self, 'J_terms'):
         logVARIABLE('making J_terms')
         self.J_terms = fe.derivative(self.all_terms, self.sol)